diff --git a/sys/contrib/dev/iwlwifi/cfg/22000.c b/sys/contrib/dev/iwlwifi/cfg/22000.c index 192ecc480f90..d594694206b3 100644 --- a/sys/contrib/dev/iwlwifi/cfg/22000.c +++ b/sys/contrib/dev/iwlwifi/cfg/22000.c @@ -1,1009 +1,428 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include #include #include "iwl-config.h" #include "iwl-prph.h" #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 73 +#define IWL_22000_UCODE_API_MAX 77 /* Lowest firmware API version supported */ -#define IWL_22000_UCODE_API_MIN 39 +#define IWL_22000_UCODE_API_MIN 50 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */ #define IWL_22000_DCCM_LEN 0x10000 /* LMAC1 */ #define IWL_22000_DCCM2_OFFSET 0x880000 #define IWL_22000_DCCM2_LEN 0x8000 #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 -#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" -#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" -#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-" -#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" -#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-" -#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" -#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-" -#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" -#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" -#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" -#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" -#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" -#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" -#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" -#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-" -#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-" -#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-" -#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-" -#define IWL_SNJ_A_JF_B_FW_PRE "iwlwifi-SoSnj-a0-jf-b0-" -#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0-" -#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-" -#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-" -#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" -#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-" -#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-" -#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-" -#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-" -#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" -#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" -#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-" -#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-" -#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-" -#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-" -#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-" -#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-" -#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-" -#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-" - +#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0" +#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0" +#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0" +#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0" +#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0" +#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0" +#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" 
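As an illustrative aside to the hunk above: the recurring change is that each *_FW_PRE string loses its trailing '-', and the matching *_MODULE_FIRMWARE() macro now adds the "-" itself before the stringified API number. A minimal stand-alone sketch of how the preprocessor assembles the resulting firmware file name (the __stringify() helper is re-created here for illustration; in the driver it comes from linux/stringify.h):

#include <stdio.h>

/* Local stand-in for the kernel's __stringify() helper. */
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

/* New scheme: the prefix carries no trailing dash, the macro inserts it. */
#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
    IWL_QU_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
    /* Adjacent string literals are concatenated at compile time, so this
     * prints "iwlwifi-Qu-b0-hr-b0-77.ucode" (77 = IWL_22000_UCODE_API_MAX). */
    puts(IWL_QU_B_HR_B_MODULE_FIRMWARE(77));
    return 0;
}

The same string is what the MODULE_FIRMWARE() declarations near the end of the file record in the module info section, so modinfo keeps listing the exact firmware files the driver may request.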
-#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \ - IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QUZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode" + IWL_QUZ_A_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \ - IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ - IWL_CC_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 256, 
.shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; -static const struct iwl_base_params iwl_ax210_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, - .num_of_queues = 512, - .max_tfd_queue_size = 65536, - .shadow_ram_support = true, - .led_compensation = 57, - .wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = true, - .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl_22000_ht_params = { +const struct iwl_ht_params iwl_22000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | BIT(NL80211_BAND_6GHZ), }; #define IWL_DEVICE_22000_COMMON \ - .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ .dccm2_len = IWL_22000_DCCM2_LEN, \ .smem_offset = IWL_22000_SMEM_OFFSET, \ .smem_len = IWL_22000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .apmg_not_supported = true, \ .trans.mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = 0x380, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ - .trans.use_tfh = true, \ .trans.rf_id = true, \ .trans.gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 60 * 1024, \ .mon_smem_regs = { \ .write_ptr = { \ .addr = LDBG_M2S_BUF_WPTR, \ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ }, \ .cycle_cnt = { \ .addr = LDBG_M2S_BUF_WRAP_CNT, \ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ }, \ } #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ + .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .trans.device_family = IWL_DEVICE_FAMILY_22000, \ .trans.base_params = &iwl_22000_base_params, \ .gp2_reg_addr = 0xa02c68, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = MON_BUFF_WRPTR_VER2, \ .mask = 0xffffffff, \ }, \ .cycle_cnt = { \ .addr = MON_BUFF_CYCLE_CNT_VER2, \ .mask = 0xffffffff, \ }, \ } -#define IWL_DEVICE_AX210 \ - IWL_DEVICE_22000_COMMON, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ - .trans.base_params = &iwl_ax210_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - } - -#define IWL_DEVICE_BZ_COMMON \ - .ucode_api_max = IWL_22000_UCODE_API_MAX, \ - .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_22000_DCCM_OFFSET, \ - .dccm_len = IWL_22000_DCCM_LEN, \ - .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ - .dccm2_len = IWL_22000_DCCM2_LEN, \ - .smem_offset = IWL_22000_SMEM_OFFSET, \ - .smem_len = IWL_22000_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - 
.mac_addr_from_csr = 0x30, \ - .ht_params = &iwl_22000_ht_params, \ - .nvm_ver = IWL_22000_NVM_VERSION, \ - .trans.use_tfh = true, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x400000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - } - -#define IWL_DEVICE_BZ \ - IWL_DEVICE_BZ_COMMON, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ - .trans.base_params = &iwl_ax210_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - }, \ - .mon_dbgi_regs = { \ - .write_ptr = { \ - .addr = DBGI_SRAM_FIFO_POINTERS, \ - .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ - }, \ - } - -const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, -}; - const struct iwl_cfg_trans_params iwl_qu_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 500, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, }; const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 1820, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_1820US, }; const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .integrated = true, .xtal_latency = 12000, .low_latency_xtal = true, .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const struct iwl_cfg_trans_params iwl_snj_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, -}; - -const struct iwl_cfg_trans_params iwl_so_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - /* TODO: the following values need to be checked */ - .xtal_latency = 500, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, -}; - -const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - .low_latency_xtal = true, - .xtal_latency = 12000, - .ltr_delay = 
IWL_CFG_TRANS_LTR_DELAY_2500US, -}; - -const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - .low_latency_xtal = true, - .xtal_latency = 12000, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, - .imr_enabled = true, -}; - /* * If the device doesn't support HE, no need to have that many buffers. * 22000 devices can split multiple frames into a single RB, so fewer are * needed; AX210 cannot (but use smaller RBs by default) - these sizes * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with * additional overhead to account for processing time. */ #define IWL_NUM_RBDS_NON_HE 512 #define IWL_NUM_RBDS_22000_HE 2048 -#define IWL_NUM_RBDS_AX210_HE 4096 /* * All JF radio modules are part of the 9000 series, but the MAC part * looks more like 22000. That's why this device is here, but called * 9560 nevertheless. */ const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg = { .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg = { .fw_name_pre = IWL_QU_C_JF_B_FW_PRE, IWL_DEVICE_22500, .num_rbds = IWL_NUM_RBDS_NON_HE, }; const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = { .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_NON_HE, }; -const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg = { - .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. 
- */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .bisr_workaround = 1, }; -const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .integrated = true, - .umac_prph_offset = 0x300000 -}; - -const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_BZ, - .base_params = &iwl_ax210_base_params, - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .integrated = true, - .umac_prph_offset = 0x300000, - .xtal_latency = 12000, - .low_latency_xtal = true, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, -}; - const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz"; -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; -const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; -const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; -const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; -const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; const char iwl_ax200_killer_1650x_name[] = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)"; const char iwl_ax201_killer_1650s_name[] = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; -const char iwl_ax210_killer_1675w_name[] = - "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; -const char iwl_ax210_killer_1675x_name[] = - "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; -const char iwl_ax211_killer_1675s_name[] = - "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax211_killer_1675i_name[] = - "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax411_killer_1690s_name[] = - "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; -const char iwl_ax411_killer_1690i_name[] = - "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_b0_hr_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_c0_hr1_b0 = { .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_qu_c0_hr_b0 = { .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_quz_a0_hr1_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .tx_with_siso_diversity = true, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax201_cfg_quz_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. 
*/ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg iwl_ax200_cfg_cc = { .fw_name_pre = IWL_CC_A_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = { - .fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. 
- */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { - .name = "Intel(R) Wireless-AC 9560 160MHz", - .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { - .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_hr_b0 = { - .fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = { - .fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_hr_b0 = { - .fw_name_pre = IWL_MA_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = { - .fw_name_pre = IWL_MA_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0 = { - .fw_name_pre = IWL_MA_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = { - .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = { - .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = { - .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = { - .fw_name_pre = IWL_MA_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = { - .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, - 
.uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = { - .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { - .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap * so we need to restrict the size of transmitted aggregation to the * HT size; mac80211 would otherwise pick the HE max (256) by default. */ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { - .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { - .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { - .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { - .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = { - .fw_name_pre = IWL_BZ_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = { - .fw_name_pre = IWL_GL_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = { - .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = { - .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = { - .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = { - .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = { - .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = { - .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 
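As an illustrative aside: each iwl_cfg initializer above places the IWL_DEVICE_22500 token in the middle of its designated-initializer list; the macro expands to a run of family-wide field assignments, and per-device fields such as .max_tx_agg_size or .num_rbds are written around it. That is also why this hunk moves .ucode_api_max out of IWL_DEVICE_22000_COMMON into IWL_DEVICE_22500: the AX210 family, split into its own file below, can then pin a different maximum (83 there versus 77 here). A reduced, compilable sketch of the pattern, using an invented struct with a few of the same field names:

#include <stdio.h>

struct cfg {
    const char *fw_name_pre;
    int ucode_api_max;
    int num_rbds;
};

/* Family-wide defaults, expanded inside each initializer list. */
#define DEVICE_FAMILY_DEFAULTS \
    .ucode_api_max = 77

static const struct cfg example_cfg = {
    .fw_name_pre = "iwlwifi-Qu-b0-hr-b0",
    DEVICE_FAMILY_DEFAULTS,  /* shared fields */
    .num_rbds = 2048,        /* per-device field */
};

int main(void)
{
    printf("%s: api_max=%d num_rbds=%d\n", example_cfg.fw_name_pre,
           example_cfg.ucode_api_max, example_cfg.num_rbds);
    return 0;
}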
-MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/7000.c b/sys/contrib/dev/iwlwifi/cfg/7000.c index b24dc5523a52..4e2afdedf4c6 100644 --- a/sys/contrib/dev/iwlwifi/cfg/7000.c +++ b/sys/contrib/dev/iwlwifi/cfg/7000.c @@ -1,298 +1,298 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ #include #include #include "iwl-config.h" /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 #define IWL7265D_UCODE_API_MAX 29 #define IWL3168_UCODE_API_MAX 29 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 17 #define IWL7265_UCODE_API_MIN 17 #define IWL7265D_UCODE_API_MIN 22 #define IWL3168_UCODE_API_MIN 22 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d #define IWL3160_NVM_VERSION 0x709 #define IWL3165_NVM_VERSION 0x709 #define IWL3168_NVM_VERSION 0xd01 #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265D_NVM_VERSION 0x0c11 /* DCCM offsets and lengths */ #define IWL7000_DCCM_OFFSET 0x800000 #define IWL7260_DCCM_LEN 0x14000 #define IWL3160_DCCM_LEN 0x10000 #define IWL7265_DCCM_LEN 0x17A00 -#define IWL7260_FW_PRE "iwlwifi-7260-" -#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" +#define IWL7260_FW_PRE "iwlwifi-7260" +#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE "-" __stringify(api) ".ucode" -#define IWL3160_FW_PRE "iwlwifi-3160-" -#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3160_FW_PRE "iwlwifi-3160" +#define IWL3160_MODULE_FIRMWARE(api) 
IWL3160_FW_PRE "-" __stringify(api) ".ucode" -#define IWL3168_FW_PRE "iwlwifi-3168-" -#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode" +#define IWL3168_FW_PRE "iwlwifi-3168" +#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE "-" __stringify(api) ".ucode" -#define IWL7265_FW_PRE "iwlwifi-7265-" -#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" +#define IWL7265_FW_PRE "iwlwifi-7265" +#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE "-" __stringify(api) ".ucode" -#define IWL7265D_FW_PRE "iwlwifi-7265D-" -#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" +#define IWL7265D_FW_PRE "iwlwifi-7265D" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl7000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_16K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, .apmg_wake_up_wa = true, }; static const struct iwl_tt_params iwl7000_high_temp_tt_params = { .ct_kill_entry = 118, .ct_kill_exit = 96, .ct_kill_duration = 5, .dynamic_smps_entry = 114, .dynamic_smps_exit = 110, .tx_protection_entry = 114, .tx_protection_exit = 108, .tx_backoff = { {.temperature = 112, .backoff = 300}, {.temperature = 113, .backoff = 800}, {.temperature = 114, .backoff = 1500}, {.temperature = 115, .backoff = 3000}, {.temperature = 116, .backoff = 5000}, {.temperature = 117, .backoff = 10000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; static const struct iwl_ht_params iwl7000_ht_params = { .stbc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; #define IWL_DEVICE_7000_COMMON \ .trans.device_family = IWL_DEVICE_FAMILY_7000, \ .trans.base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 0, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL7000_DCCM_OFFSET #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7260_UCODE_API_MAX, \ .ucode_api_min = IWL7260_UCODE_API_MIN #define IWL_DEVICE_7005 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265_UCODE_API_MAX, \ .ucode_api_min = IWL7265_UCODE_API_MIN #define IWL_DEVICE_3008 \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL3168_UCODE_API_MAX, \ .ucode_api_min = IWL3168_UCODE_API_MIN #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ .ucode_api_min = IWL7265D_UCODE_API_MIN const struct iwl_cfg iwl7260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { .name = "Intel(R) Dual Band Wireless AC 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .high_temp = true, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, .thermal_params = &iwl7000_high_temp_tt_params, }; const struct iwl_cfg iwl7260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = 
true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl7260_n_cfg = { .name = "Intel(R) Wireless N 7260", .fw_name_pre = IWL7260_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, }; const struct iwl_cfg iwl3160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; const struct iwl_cfg iwl3160_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; const struct iwl_cfg iwl3160_n_cfg = { .name = "Intel(R) Wireless N 3160", .fw_name_pre = IWL3160_FW_PRE, IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {.pwr = 1600, .backoff = 0}, {.pwr = 1300, .backoff = 467}, {.pwr = 900, .backoff = 1900}, {.pwr = 800, .backoff = 2630}, {.pwr = 700, .backoff = 3720}, {.pwr = 600, .backoff = 5550}, {.pwr = 500, .backoff = 9350}, {0}, }; static const struct iwl_ht_params iwl7265_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl3168_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3168", .fw_name_pre = IWL3168_FW_PRE, IWL_DEVICE_3008, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3168_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, .nvm_type = IWL_NVM_SDP, }; const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265_FW_PRE, IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = 
IWL7265_DCCM_LEN, }; const struct iwl_cfg iwl7265d_n_cfg = { .name = "Intel(R) Wireless N 7265", .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX)); MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/8000.c b/sys/contrib/dev/iwlwifi/cfg/8000.c index a6454287d506..d09cf8d7dc01 100644 --- a/sys/contrib/dev/iwlwifi/cfg/8000.c +++ b/sys/contrib/dev/iwlwifi/cfg/8000.c @@ -1,157 +1,157 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2014, 2018-2020 Intel Corporation + * Copyright (C) 2014, 2018-2020, 2023 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #include #include #include "iwl-config.h" /* Highest firmware API version supported */ #define IWL8000_UCODE_API_MAX 36 #define IWL8265_UCODE_API_MAX 36 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 22 #define IWL8265_UCODE_API_MIN 22 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL8260_DCCM_OFFSET 0x800000 #define IWL8260_DCCM_LEN 0x18000 #define IWL8260_DCCM2_OFFSET 0x880000 #define IWL8260_DCCM2_LEN 0x8000 #define IWL8260_SMEM_OFFSET 0x400000 #define IWL8260_SMEM_LEN 0x68000 -#define IWL8000_FW_PRE "iwlwifi-8000C-" +#define IWL8000_FW_PRE "iwlwifi-8000C" #define IWL8000_MODULE_FIRMWARE(api) \ - IWL8000_FW_PRE __stringify(api) ".ucode" + IWL8000_FW_PRE "-" __stringify(api) ".ucode" -#define IWL8265_FW_PRE "iwlwifi-8265-" +#define IWL8265_FW_PRE "iwlwifi-8265" #define IWL8265_MODULE_FIRMWARE(api) \ - IWL8265_FW_PRE __stringify(api) ".ucode" + IWL8265_FW_PRE "-" __stringify(api) ".ucode" #define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl8000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl8000_tt_params = { .ct_kill_entry = 115, .ct_kill_exit = 93, .ct_kill_duration = 5, .dynamic_smps_entry = 111, .dynamic_smps_exit = 107, .tx_protection_entry = 112, .tx_protection_exit = 105, .tx_backoff = { {.temperature = 110, .backoff = 200}, {.temperature = 111, .backoff = 600}, {.temperature = 112, .backoff = 1200}, {.temperature = 113, .backoff = 2000}, {.temperature = 114, .backoff = 4000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; #define IWL_DEVICE_8000_COMMON \ .trans.device_family = IWL_DEVICE_FAMILY_8000, \ .trans.base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ .dccm_len = IWL8260_DCCM_LEN, \ .dccm2_offset = IWL8260_DCCM2_OFFSET, \ .dccm2_len = 
IWL8260_DCCM2_LEN, \ .smem_offset = IWL8260_SMEM_OFFSET, \ .smem_len = IWL8260_SMEM_LEN, \ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ .thermal_params = &iwl8000_tt_params, \ .apmg_not_supported = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000 #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8260 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ .ucode_api_min = IWL8000_UCODE_API_MIN \ #define IWL_DEVICE_8265 \ IWL_DEVICE_8000_COMMON, \ .ucode_api_max = IWL8265_UCODE_API_MAX, \ .ucode_api_min = IWL8265_UCODE_API_MIN \ const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, }; const struct iwl_cfg iwl8260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8260", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, }; const struct iwl_cfg iwl8265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8265", .fw_name_pre = IWL8265_FW_PRE, IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl8275_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 8275", .fw_name_pre = IWL8265_FW_PRE, IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl4165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 4165", .fw_name_pre = IWL8000_FW_PRE, IWL_DEVICE_8000, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, }; MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/9000.c b/sys/contrib/dev/iwlwifi/cfg/9000.c index 7a7ca06d46c1..0130d9a9b78b 100644 --- a/sys/contrib/dev/iwlwifi/cfg/9000.c +++ b/sys/contrib/dev/iwlwifi/cfg/9000.c @@ -1,192 +1,192 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2021, 2023 Intel Corporation */ #include #include #include "iwl-config.h" #include "fw/file.h" #include "iwl-prph.h" /* Highest firmware API version supported */ #define IWL9000_UCODE_API_MAX 46 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d /* Memory offsets and lengths */ #define IWL9000_DCCM_OFFSET 0x800000 #define IWL9000_DCCM_LEN 0x18000 #define IWL9000_DCCM2_OFFSET 0x880000 #define IWL9000_DCCM2_LEN 0x8000 #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" -#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" +#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0" +#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0" #define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE __stringify(api) ".ucode" + IWL9000_FW_PRE "-" __stringify(api) ".ucode" #define IWL9260_MODULE_FIRMWARE(api) \ - IWL9260_FW_PRE __stringify(api) ".ucode" + IWL9260_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation 
= 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, .shadow_reg_enable = true, .pcie_l1_allowed = true, }; static const struct iwl_ht_params iwl9000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; static const struct iwl_tt_params iwl9000_tt_params = { .ct_kill_entry = 115, .ct_kill_exit = 93, .ct_kill_duration = 5, .dynamic_smps_entry = 111, .dynamic_smps_exit = 107, .tx_protection_entry = 112, .tx_protection_exit = 105, .tx_backoff = { {.temperature = 110, .backoff = 200}, {.temperature = 111, .backoff = 600}, {.temperature = 112, .backoff = 1200}, {.temperature = 113, .backoff = 2000}, {.temperature = 114, .backoff = 4000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; #define IWL_DEVICE_9000 \ .ucode_api_max = IWL9000_UCODE_API_MAX, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL9000_DCCM_OFFSET, \ .dccm_len = IWL9000_DCCM_LEN, \ .dccm2_offset = IWL9000_DCCM2_OFFSET, \ .dccm2_len = IWL9000_DCCM2_LEN, \ .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ .num_rbds = 512, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = 0x380, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ .min_umac_error_event_table = 0x800000, \ .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 92 * 1024, \ .ht_params = &iwl9000_ht_params, \ .nvm_ver = IWL9000_NVM_VERSION, \ .mon_smem_regs = { \ .write_ptr = { \ .addr = LDBG_M2S_BUF_WPTR, \ .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ }, \ .cycle_cnt = { \ .addr = LDBG_M2S_BUF_WRAP_CNT, \ .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ }, \ }, \ .mon_dram_regs = { \ .write_ptr = { \ .addr = MON_BUFF_WRPTR_VER2, \ .mask = 0xffffffff, \ }, \ .cycle_cnt = { \ .addr = MON_BUFF_CYCLE_CNT_VER2, \ .mask = 0xffffffff, \ }, \ } const struct iwl_cfg_trans_params iwl9000_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params, .mq_rx_supported = true, .rf_id = true, }; const struct iwl_cfg_trans_params iwl9560_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params, .mq_rx_supported = true, .rf_id = true, .integrated = true, .xtal_latency = 650, }; const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params, .mq_rx_supported = true, .rf_id = true, .integrated = true, .xtal_latency = 2820, }; const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_9000, .base_params = &iwl9000_base_params, .mq_rx_supported = true, .rf_id = true, .integrated = true, .xtal_latency = 670, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; const char iwl9162_name[] = "Intel(R) Wireless-AC 9162"; const char iwl9260_name[] = "Intel(R) Wireless-AC 9260"; const char iwl9260_1_name[] = "Intel(R) Wireless-AC 9260-1"; const char iwl9270_name[] = "Intel(R) Wireless-AC 9270"; const char iwl9461_name[] = "Intel(R) Wireless-AC 9461"; const char iwl9462_name[] = "Intel(R) Wireless-AC 9462"; const char iwl9560_name[] = "Intel(R) Wireless-AC 9560"; const char iwl9162_160_name[] = "Intel(R) Wireless-AC 9162 160MHz"; const char iwl9260_160_name[] = "Intel(R) Wireless-AC 9260 160MHz"; const char iwl9270_160_name[] = 
"Intel(R) Wireless-AC 9270 160MHz"; const char iwl9461_160_name[] = "Intel(R) Wireless-AC 9461 160MHz"; const char iwl9462_160_name[] = "Intel(R) Wireless-AC 9462 160MHz"; const char iwl9560_160_name[] = "Intel(R) Wireless-AC 9560 160MHz"; const char iwl9260_killer_1550_name[] = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz"; const char iwl9560_killer_1550i_name[] = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)"; const char iwl9560_killer_1550i_160_name[] = "Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz"; const char iwl9560_killer_1550s_name[] = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)"; const char iwl9560_killer_1550s_160_name[] = "Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz"; const struct iwl_cfg iwl9260_2ac_cfg = { .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9560_2ac_cfg_soc = { .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, }; MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/ax210.c b/sys/contrib/dev/iwlwifi/cfg/ax210.c new file mode 100644 index 000000000000..8d5f9dce71d5 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/cfg/ax210.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include +#include +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_AX210_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_AX210_UCODE_API_MIN 59 + +/* NVM versions */ +#define IWL_AX210_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_AX210_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_AX210_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_AX210_DCCM2_OFFSET 0x880000 +#define IWL_AX210_DCCM2_LEN 0x8000 +#define IWL_AX210_SMEM_OFFSET 0x400000 +#define IWL_AX210_SMEM_LEN 0xD0000 + +#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0" +#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0" +#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0" +#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0" +#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0" +#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0" +#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0" +#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0" +#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0" +#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0" +#define IWL_MA_B_HR_B_FW_PRE "iwlwifi-ma-b0-hr-b0" +#define IWL_MA_B_GF_A_FW_PRE "iwlwifi-ma-b0-gf-a0" +#define IWL_MA_B_GF4_A_FW_PRE "iwlwifi-ma-b0-gf4-a0" +#define IWL_MA_B_MR_A_FW_PRE "iwlwifi-ma-b0-mr-a0" + +#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_JF_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_SO_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_TY_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" 
+#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_MR_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_MR_A_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_ax210_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_AX210_COMMON \ + .ucode_api_min = IWL_AX210_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_AX210_DCCM_OFFSET, \ + .dccm_len = IWL_AX210_DCCM_LEN, \ + .dccm2_offset = IWL_AX210_DCCM2_OFFSET, \ + .dccm2_len = IWL_AX210_DCCM2_LEN, \ + .smem_offset = IWL_AX210_SMEM_OFFSET, \ + .smem_len = IWL_AX210_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x380, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_AX210_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0x400000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + } + +#define IWL_DEVICE_AX210 \ + IWL_DEVICE_AX210_COMMON, \ + .ucode_api_max = IWL_AX210_UCODE_API_MAX, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ + .trans.base_params = &iwl_ax210_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + } + +const struct iwl_cfg_trans_params iwl_so_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + /* TODO: the following values need to be checked */ + .xtal_latency = 500, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, +}; + +const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + .low_latency_xtal = true, + .xtal_latency = 12000, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + 
.gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + .low_latency_xtal = true, + .xtal_latency = 12000, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, + .imr_enabled = true, +}; + +/* + * If the device doesn't support HE, no need to have that many buffers. + * AX210 devices can split multiple frames into a single RB, so fewer are + * needed; AX210 cannot (but use smaller RBs by default) - these sizes + * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with + * additional overhead to account for processing time. + */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_AX210_HE 4096 + +const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000 +}; + +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; +const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; + +const char iwl_ax210_killer_1675w_name[] = + "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; +const char iwl_ax210_killer_1675x_name[] = + "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; +const char iwl_ax211_killer_1675s_name[] = + "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax211_killer_1675i_name[] = + "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax411_killer_1690s_name[] = + "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; +const char iwl_ax411_killer_1690i_name[] = + "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; + +const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_NON_HE, +}; + +const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { + .name = iwl_ax211_name, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { + .name = iwl_ax211_name, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, + .trans.xtal_latency = 12000, + .trans.low_latency_xtal = true, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { + .name = "Intel(R) Wi-Fi 6 AX210 160MHz", + .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { + .name = iwl_ax411_name, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { + .name = iwl_ax411_name, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, + .trans.xtal_latency = 12000, + .trans.low_latency_xtal = true, +}; + +const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = { + .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, + .uhb_supported = false, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_ma = { + .fw_name_mac = 
"ma", + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { + .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/bz.c b/sys/contrib/dev/iwlwifi/cfg/bz.c new file mode 100644 index 000000000000..b9893b22e41d --- /dev/null +++ b/sys/contrib/dev/iwlwifi/cfg/bz.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include +#include +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_BZ_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_BZ_UCODE_API_MIN 80 + +/* NVM versions */ +#define IWL_BZ_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_BZ_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_BZ_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_BZ_DCCM2_OFFSET 0x880000 +#define IWL_BZ_DCCM2_LEN 0x8000 +#define IWL_BZ_SMEM_OFFSET 0x400000 +#define IWL_BZ_SMEM_LEN 0xD0000 + +#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0" +#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0" +#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0" +#define IWL_BZ_A_FM_B_FW_PRE "iwlwifi-bz-a0-fm-b0" +#define IWL_BZ_A_FM_C_FW_PRE "iwlwifi-bz-a0-fm-c0" +#define IWL_BZ_A_FM4_B_FW_PRE "iwlwifi-bz-a0-fm4-b0" +#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0" +#define IWL_GL_C_FM_C_FW_PRE "iwlwifi-gl-c0-fm-c0" + +#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM_C_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM4_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM4_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \ + IWL_GL_B_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_GL_C_FM_C_MODULE_FIRMWARE(api) \ + IWL_GL_C_FM_C_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_bz_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = 
IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_BZ_UCODE_API_MAX, \ + .ucode_api_min = IWL_BZ_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_BZ_DCCM_OFFSET, \ + .dccm_len = IWL_BZ_DCCM_LEN, \ + .dccm2_offset = IWL_BZ_DCCM2_OFFSET, \ + .dccm2_len = IWL_BZ_DCCM2_LEN, \ + .smem_offset = IWL_BZ_SMEM_OFFSET, \ + .smem_len = IWL_BZ_SMEM_LEN, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .nvm_ver = IWL_BZ_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0xD0000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + }, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ + .trans.base_params = &iwl_bz_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + }, \ + .mon_dbgi_regs = { \ + .write_ptr = { \ + .addr = DBGI_SRAM_FIFO_POINTERS, \ + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ + }, \ + } + +#define IWL_DEVICE_BZ \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_22000_ht_params + +#define IWL_DEVICE_GL_A \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_gl_a_ht_params + +/* + * If the device doesn't support HE, no need to have that many buffers. + * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * A-MPDU, with additional overhead to account for processing time. 
+ */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_BZ_HE 4096 + +const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_bz_name[] = "Intel(R) TBD Bz device"; + +const struct iwl_cfg iwl_cfg_bz = { + .fw_name_mac = "bz", + .uhb_supported = true, + IWL_DEVICE_BZ, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_HE, +}; + +const struct iwl_cfg iwl_cfg_gl = { + .fw_name_mac = "gl", + .uhb_supported = true, + IWL_DEVICE_BZ, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_HE, +}; + + +MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/cfg/sc.c b/sys/contrib/dev/iwlwifi/cfg/sc.c new file mode 100644 index 000000000000..ad283fd22e2a --- /dev/null +++ b/sys/contrib/dev/iwlwifi/cfg/sc.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include +#include +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_SC_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_SC_UCODE_API_MIN 82 + +/* NVM versions */ +#define IWL_SC_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_SC_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_SC_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_SC_DCCM2_OFFSET 0x880000 +#define IWL_SC_DCCM2_LEN 0x8000 +#define IWL_SC_SMEM_OFFSET 0x400000 +#define IWL_SC_SMEM_LEN 0xD0000 + +#define IWL_SC_A_FM_B_FW_PRE "iwlwifi-sc-a0-fm-b0" +#define IWL_SC_A_FM_C_FW_PRE "iwlwifi-sc-a0-fm-c0" +#define IWL_SC_A_HR_A_FW_PRE "iwlwifi-sc-a0-hr-b0" +#define IWL_SC_A_HR_B_FW_PRE "iwlwifi-sc-a0-hr-b0" +#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0" +#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0" +#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0" + +#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_HR_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_sc_base_params = { + 
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_SC_UCODE_API_MAX, \ + .ucode_api_min = IWL_SC_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_SC_DCCM_OFFSET, \ + .dccm_len = IWL_SC_DCCM_LEN, \ + .dccm2_offset = IWL_SC_DCCM2_OFFSET, \ + .dccm2_len = IWL_SC_DCCM2_LEN, \ + .smem_offset = IWL_SC_SMEM_OFFSET, \ + .smem_len = IWL_SC_SMEM_LEN, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .nvm_ver = IWL_SC_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0xD0000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + }, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_SC, \ + .trans.base_params = &iwl_sc_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + }, \ + .mon_dbgi_regs = { \ + .write_ptr = { \ + .addr = DBGI_SRAM_FIFO_POINTERS, \ + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ + }, \ + } + +#define IWL_DEVICE_SC \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_22000_ht_params + +/* + * If the device doesn't support HE, no need to have that many buffers. + * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * A-MPDU, with additional overhead to account for processing time. 
+ */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_SC_HE 4096 + +const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_SC, + .base_params = &iwl_sc_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_sc_name[] = "Intel(R) TBD Sc device"; + +const struct iwl_cfg iwl_cfg_sc = { + .fw_name_mac = "sc", + .uhb_supported = true, + IWL_DEVICE_SC, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_SC_HE, +}; + +MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); diff --git a/sys/contrib/dev/iwlwifi/fw/acpi.c b/sys/contrib/dev/iwlwifi/fw/acpi.c index 33aae639ad37..dfe8357036eb 100644 --- a/sys/contrib/dev/iwlwifi/fw/acpi.c +++ b/sys/contrib/dev/iwlwifi/fw/acpi.c @@ -1,1177 +1,1287 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation */ #include #include #include "iwl-drv.h" #include "iwl-debug.h" #include "acpi.h" #include "fw/runtime.h" const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6, 0xA5, 0xB3, 0x1F, 0x73, 0x8E, 0x28, 0x5A, 0xDE); IWL_EXPORT_SYMBOL(iwl_guid); const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29, 0x81, 0x4F, 0x75, 0xE4, 0xDD, 0x26, 0xB5, 0xFD); IWL_EXPORT_SYMBOL(iwl_rfi_guid); static const struct dmi_system_id dmi_ppag_approved_list[] = { { .ident = "HP", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), }, }, { .ident = "SAMSUNG", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), }, }, { .ident = "MSFT", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), }, }, { .ident = "ASUS", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, + { .ident = "GOOGLE-ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), }, }, {} }; static int iwl_acpi_get_handle(struct device *dev, acpi_string method, acpi_handle *ret_handle) { acpi_handle root_handle; acpi_status status; root_handle = ACPI_HANDLE(dev); if (!root_handle) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: Could not retrieve root port handle\n"); return -ENOENT; } status = acpi_get_handle(root_handle, method, ret_handle); if (ACPI_FAILURE(status)) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: %s method not found\n", method); return -ENOENT; } 
return 0; } -void *iwl_acpi_get_object(struct device *dev, acpi_string method) +static void *iwl_acpi_get_object(struct device *dev, acpi_string method) { struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_handle handle; acpi_status status; int ret; ret = iwl_acpi_get_handle(dev, method, &handle); if (ret) return ERR_PTR(-ENOENT); /* Call the method with no arguments */ status = acpi_evaluate_object(handle, NULL, NULL, &buf); if (ACPI_FAILURE(status)) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: %s method invocation failed (status: 0x%x)\n", method, status); return ERR_PTR(-ENOENT); } return buf.pointer; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_object); /* * Generic function for evaluating a method defined in the device specific * method (DSM) interface. The returned acpi object must be freed by calling * function. */ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args, const guid_t *guid) { union acpi_object *obj; obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func, args); if (!obj) { IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method invocation failed (rev: %d, func:%d)\n", rev, func); return ERR_PTR(-ENOENT); } return obj; } /* * Generic function to evaluate a DSM with no arguments * and an integer return value, * (as an integer object or inside a buffer object), * verify and assign the value in the "value" parameter. * return 0 in success and the appropriate errno otherwise. */ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, const guid_t *guid, u64 *value, size_t expected_size) { union acpi_object *obj; int ret = 0; obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid); if (IS_ERR(obj)) { IWL_DEBUG_DEV_RADIO(dev, "Failed to get DSM object. func= %d\n", func); return -ENOENT; } if (obj->type == ACPI_TYPE_INTEGER) { *value = obj->integer.value; } else if (obj->type == ACPI_TYPE_BUFFER) { __le64 le_value = 0; if (WARN_ON_ONCE(expected_size > sizeof(le_value))) return -EINVAL; /* if the buffer size doesn't match the expected size */ if (obj->buffer.length != expected_size) IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM invalid buffer size, padding or truncating (%d)\n", obj->buffer.length); /* assuming LE from Intel BIOS spec */ memcpy(&le_value, obj->buffer.pointer, min_t(size_t, expected_size, (size_t)obj->buffer.length)); *value = le64_to_cpu(le_value); } else { IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method did not return a valid object, type=%d\n", obj->type); ret = -EINVAL; goto out; } IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method evaluated: func=%d, ret=%d\n", func, ret); out: ACPI_FREE(obj); return ret; } /* * Evaluate a DSM with no arguments and a u8 return value, */ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value) { int ret; u64 val; ret = iwl_acpi_get_dsm_integer(dev, rev, func, guid, &val, sizeof(u8)); if (ret < 0) return ret; /* cast val (u64) to be u8 */ *value = (u8)val; return 0; } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8); /* * Evaluate a DSM with no arguments and a u32 return value, */ int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value) { int ret; u64 val; ret = iwl_acpi_get_dsm_integer(dev, rev, func, guid, &val, sizeof(u32)); if (ret < 0) return ret; /* cast val (u64) to be u32 */ *value = (u32)val; return 0; } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32); -union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, - union acpi_object *data, - int min_data_size, - int max_data_size, - int *tbl_rev) +static union acpi_object * 
+iwl_acpi_get_wifi_pkg_range(struct device *dev, + union acpi_object *data, + int min_data_size, + int max_data_size, + int *tbl_rev) { int i; union acpi_object *wifi_pkg; /* * We need at least one entry in the wifi package that * describes the domain, and one more entry, otherwise there's * no point in reading it. */ if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size)) return ERR_PTR(-EINVAL); /* * We need at least two packages, one for the revision and one * for the data itself. Also check that the revision is valid * (i.e. it is an integer (each caller has to check by itself * if the returned revision is supported)). */ if (data->type != ACPI_TYPE_PACKAGE || data->package.count < 2 || data->package.elements[0].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n"); return ERR_PTR(-EINVAL); } *tbl_rev = data->package.elements[0].integer.value; /* loop through all the packages to find the one for WiFi */ for (i = 1; i < data->package.count; i++) { union acpi_object *domain; wifi_pkg = &data->package.elements[i]; /* skip entries that are not a package with the right size */ if (wifi_pkg->type != ACPI_TYPE_PACKAGE || wifi_pkg->package.count < min_data_size || wifi_pkg->package.count > max_data_size) continue; domain = &wifi_pkg->package.elements[0]; if (domain->type == ACPI_TYPE_INTEGER && domain->integer.value == ACPI_WIFI_DOMAIN) goto found; } return ERR_PTR(-ENOENT); found: return wifi_pkg; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range); + +static union acpi_object * +iwl_acpi_get_wifi_pkg(struct device *dev, + union acpi_object *data, + int data_size, int *tbl_rev) +{ + return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size, + tbl_rev); +} + int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev, i, block_list_size, enabled; data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* try to read wtas table revision 1 or revision 0*/ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WTAS_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (tbl_rev == 1 && wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { u32 tas_selection = (u32)wifi_pkg->package.elements[1].integer.value; u16 override_iec = (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS; u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >> ACPI_WTAS_ENABLE_IEC_POS; u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS; enabled = tas_selection & ACPI_WTAS_ENABLED_MSK; if (fw_ver <= 3) { cmd->v3.override_tas_iec = cpu_to_le16(override_iec); cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec); } else { cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb; cmd->v4.override_tas_iec = (u8)override_iec; cmd->v4.enable_tas_iec = (u8)enabled_iec; } } else if (tbl_rev == 0 && wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) { enabled = !!wifi_pkg->package.elements[1].integer.value; } else { ret = -EINVAL; goto out_free; } if (!enabled) { IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n"); ret = 0; goto out_free; } IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev); if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].integer.value > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n", wifi_pkg->package.elements[2].integer.value); ret = -EINVAL; goto out_free; } 
block_list_size = wifi_pkg->package.elements[2].integer.value; cmd->v4.block_list_size = cpu_to_le32(block_list_size); IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size); if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n", block_list_size); ret = -EINVAL; goto out_free; } for (i = 0; i < block_list_size; i++) { u32 country; if (wifi_pkg->package.elements[3 + i].type != ACPI_TYPE_INTEGER) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array elem %d\n", 3 + i); ret = -EINVAL; goto out_free; } country = wifi_pkg->package.elements[3 + i].integer.value; cmd->v4.block_list_array[i] = cpu_to_le32(country); IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } ret = 1; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_tas); int iwl_acpi_get_mcc(struct device *dev, char *mcc) { union acpi_object *wifi_pkg, *data; u32 mcc_val; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || tbl_rev != 0) { ret = -EINVAL; goto out_free; } mcc_val = wifi_pkg->package.elements[1].integer.value; mcc[0] = (mcc_val >> 8) & 0xff; mcc[1] = mcc_val & 0xff; mcc[2] = '\0'; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev) { union acpi_object *data, *wifi_pkg; u64 dflt_pwr_limit; int tbl_rev; data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD); if (IS_ERR(data)) { dflt_pwr_limit = 0; goto out; } wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg) || tbl_rev != 0 || wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) { dflt_pwr_limit = 0; goto out_free; } dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value; out_free: kfree(data); out: return dflt_pwr_limit; } IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev; data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); if (IS_ERR(data)) return PTR_ERR(data); wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE, &tbl_rev); if (IS_ERR(wifi_pkg)) { ret = PTR_ERR(wifi_pkg); goto out_free; } if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || tbl_rev != 0) { ret = -EINVAL; goto out_free; } *extl_clk = wifi_pkg->package.elements[1].integer.value; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); static int iwl_sar_set_profile(union acpi_object *table, struct iwl_sar_profile *profile, bool enabled, u8 num_chains, u8 num_sub_bands) { int i, j, idx = 0; /* * The table from ACPI is flat, but we store it in a * structured array. 
*/ for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) { for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) { /* if we don't have the values, use the default */ if (i >= num_chains || j >= num_sub_bands) { profile->chains[i].subbands[j] = 0; } else { if (table[idx].type != ACPI_TYPE_INTEGER || table[idx].integer.value > U8_MAX) return -EINVAL; profile->chains[i].subbands[j] = table[idx].integer.value; idx++; } } } /* Only if all values were valid can the profile be enabled */ profile->enabled = enabled; return 0; } static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_subbands, int prof_a, int prof_b) { int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b }; int i, j; for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) { struct iwl_sar_profile *prof; /* don't allow SAR to be disabled (profile 0 means disable) */ if (profs[i] == 0) return -EPERM; /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */ if (profs[i] > ACPI_SAR_PROFILE_NUM) return -EINVAL; /* profiles go from 1 to 4, so decrement to access the array */ prof = &fwrt->sar_profiles[profs[i] - 1]; /* if the profile is disabled, do nothing */ if (!prof->enabled) { IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n", profs[i]); /* * if one of the profiles is disabled, we * ignore all of them and return 1 to * differentiate disabled from other failures. */ return 1; } IWL_DEBUG_INFO(fwrt, "SAR EWRD: chain %d profile index %d\n", i, profs[i]); IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); for (j = 0; j < n_subbands; j++) { per_chain[i * n_subbands + j] = cpu_to_le16(prof->chains[i].subbands[j]); IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", j, prof->chains[i].subbands[j]); } } return 0; } int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b) { int i, ret = 0; for (i = 0; i < n_tables; i++) { ret = iwl_sar_fill_table(fwrt, &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0], n_subbands, prof_a, prof_b); if (ret) break; } return ret; } IWL_EXPORT_SYMBOL(iwl_sar_select_profile); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *table, *data; int ret, tbl_rev; u32 flags; u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; } /* then try revision 1 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; } /* then finally revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_WRDS_WIFI_DATA_SIZE_REV0, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; } ret = PTR_ERR(wifi_pkg); goto out_free; read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev); flags = wifi_pkg->package.elements[1].integer.value; 
fwrt->reduced_power_flags = flags >> IWL_REDUCE_POWER_FLAGS_POS; /* position of the actual table */ table = &wifi_pkg->package.elements[2]; /* The profile from WRDS is officially profile 1, but goes * into sar_profiles[0] (because we don't have a profile 0). */ ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], flags & IWL_SAR_ENABLE_MSK, num_chains, num_sub_bands); out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table); int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; int i, n_profiles, tbl_rev, pos; int ret = 0; u8 num_chains, num_sub_bands; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* start by trying to read revision 2 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 2) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV2; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2; goto read_table; } /* then try revision 1 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV1, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 1) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV1; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1; goto read_table; } /* then finally revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_EWRD_WIFI_DATA_SIZE_REV0, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = PTR_ERR(wifi_pkg); goto out_free; } num_chains = ACPI_SAR_NUM_CHAINS_REV0; num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0; goto read_table; } ret = PTR_ERR(wifi_pkg); goto out_free; read_table: if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER || wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } enabled = !!(wifi_pkg->package.elements[1].integer.value); n_profiles = wifi_pkg->package.elements[2].integer.value; /* * Check the validity of n_profiles. The EWRD profiles start * from index 1, so the maximum value allowed here is * ACPI_SAR_PROFILES_NUM - 1. */ if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } /* the tables start at element 3 */ pos = 3; for (i = 0; i < n_profiles; i++) { /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. 
*/ ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos], &fwrt->sar_profiles[i + 1], enabled, num_chains, num_sub_bands); if (ret < 0) break; /* go to the next table */ pos += num_chains * num_sub_bands; } out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; int i, j, k, ret, tbl_rev; u8 num_bands, num_profiles; static const struct { u8 revisions; u8 bands; u8 profiles; u8 min_profiles; } rev_data[] = { { .revisions = BIT(3), .bands = ACPI_GEO_NUM_BANDS_REV2, .profiles = ACPI_NUM_GEO_PROFILES_REV3, .min_profiles = 3, }, { .revisions = BIT(2), .bands = ACPI_GEO_NUM_BANDS_REV2, .profiles = ACPI_NUM_GEO_PROFILES, }, { .revisions = BIT(0) | BIT(1), .bands = ACPI_GEO_NUM_BANDS_REV0, .profiles = ACPI_NUM_GEO_PROFILES, }, }; int idx; /* start from one to skip the domain */ int entry_idx = 1; BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3); BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES); data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* read the highest revision we understand first */ for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) { /* min_profiles != 0 requires num_profiles header */ u32 hdr_size = 1 + !!rev_data[idx].min_profiles; u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE * rev_data[idx].bands; u32 max_size = hdr_size + profile_size * rev_data[idx].profiles; u32 min_size; if (!rev_data[idx].min_profiles) min_size = max_size; else min_size = hdr_size + profile_size * rev_data[idx].min_profiles; wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data, min_size, max_size, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (!(BIT(tbl_rev) & rev_data[idx].revisions)) continue; num_bands = rev_data[idx].bands; num_profiles = rev_data[idx].profiles; if (rev_data[idx].min_profiles) { /* read header that says # of profiles */ union acpi_object *entry; entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > num_profiles) { ret = -EINVAL; goto out_free; } num_profiles = entry->integer.value; /* * this also validates >= min_profiles since we * otherwise wouldn't have gotten the data when * looking up in ACPI */ if (wifi_pkg->package.count != hdr_size + profile_size * num_profiles) { ret = -EINVAL; goto out_free; } } goto read_table; } } if (idx < ARRAY_SIZE(rev_data)) ret = PTR_ERR(wifi_pkg); else ret = -ENOENT; goto out_free; read_table: fwrt->geo_rev = tbl_rev; for (i = 0; i < num_profiles; i++) { for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) { union acpi_object *entry; /* * num_bands is either 2 or 3, if it's only 2 then * fill the third band (6 GHz) with the values from * 5 GHz (second band) */ if (j >= num_bands) { fwrt->geo_profiles[i].bands[j].max = fwrt->geo_profiles[i].bands[1].max; } else { entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; goto out_free; } fwrt->geo_profiles[i].bands[j].max = entry->integer.value; } for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) { /* same here as above */ if (j >= num_bands) { fwrt->geo_profiles[i].bands[j].chains[k] = fwrt->geo_profiles[i].bands[1].chains[k]; } else { entry = &wifi_pkg->package.elements[entry_idx]; entry_idx++; if (entry->type != ACPI_TYPE_INTEGER || entry->integer.value > U8_MAX) { ret = -EINVAL; goto out_free; } fwrt->geo_profiles[i].bands[j].chains[k] = entry->integer.value; } } } } 
fwrt->geo_num_profiles = num_profiles; fwrt->geo_enabled = true; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) { /* * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on * earlier firmware versions. Unfortunately, we don't have a * TLV API flag to rely on, so rely on the major version which * is in the first byte of ucode_ver. This was implemented * initially on version 38 and then backported to 17. It was * also backported to 29, but only for 7265D devices. The * intention was to have it in 36 as well, but not all 8000 * family got this feature enabled. The 8000 family is the * only one using version 36, so skip this version entirely. */ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) || (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)); } IWL_EXPORT_SYMBOL(iwl_sar_geo_support); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands, u32 n_profiles) { int i, j; + if (!fwrt->geo_enabled) + return -ENODATA; + if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; for (i = 0; i < n_profiles; i++) { for (j = 0; j < n_bands; j++) { struct iwl_per_chain_offset *chain = &table[i * n_bands + j]; chain->max_tx_power = cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0]; chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1]; IWL_DEBUG_RADIO(fwrt, "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", i, j, fwrt->geo_profiles[i].bands[j].chains[0], fwrt->geo_profiles[i].bands[j].chains[1], fwrt->geo_profiles[i].bands[j].max); } } return 0; } IWL_EXPORT_SYMBOL(iwl_sar_geo_init); __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u8 value; __le32 config_bitmap = 0; /* ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2' */ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, DSM_FUNC_ENABLE_INDONESIA_5G2, &iwl_guid, &value); if (!ret && value == DSM_VALUE_INDONESIA_ENABLE) config_bitmap |= cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); /* ** Evaluate func 'DSM_FUNC_DISABLE_SRD' */ ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0, DSM_FUNC_DISABLE_SRD, &iwl_guid, &value); if (!ret) { if (value == DSM_VALUE_SRD_PASSIVE) config_bitmap |= cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); else if (value == DSM_VALUE_SRD_DISABLE) config_bitmap |= cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); } return config_bitmap; } IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap); int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands = 0; int idx = 2; + u8 cmd_ver; fwrt->ppag_flags = 0; + fwrt->ppag_table_valid = false; data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) return PTR_ERR(data); /* try to read ppag table rev 2 or 1 (both have the same data size) */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev == 1 || tbl_rev == 2) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v2 (tbl_rev=%d)\n", tbl_rev); goto read_table; } else { ret = -EINVAL; goto out_free; } } /* try to read ppag table revision 0 */ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, ACPI_PPAG_WIFI_DATA_SIZE_V1, 
&tbl_rev); if (!IS_ERR(wifi_pkg)) { if (tbl_rev != 0) { ret = -EINVAL; goto out_free; } num_sub_bands = IWL_NUM_SUB_BANDS_V1; IWL_DEBUG_RADIO(fwrt, "Reading PPAG table v1 (tbl_rev=0)\n"); goto read_table; } read_table: fwrt->ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; if (flags->type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK; - - if (!fwrt->ppag_flags) { + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) { + ret = -EINVAL; + goto out_free; + } + if (!fwrt->ppag_flags && cmd_ver <= 3) { ret = 0; goto out_free; } /* * read, verify gain values and save them into the PPAG table. * first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the * following sub-bands to High-Band (5GHz). */ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { union acpi_object *ent; ent = &wifi_pkg->package.elements[idx++]; if (ent->type != ACPI_TYPE_INTEGER) { ret = -EINVAL; goto out_free; } fwrt->ppag_chains[i].subbands[j] = ent->integer.value; - + /* from ver 4 the fw deals with out of range values */ + if (cmd_ver >= 4) + continue; if ((j == 0 && (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || (j != 0 && (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { - fwrt->ppag_flags = 0; ret = -EINVAL; goto out_free; } } } - + fwrt->ppag_table_valid = true; ret = 0; out_free: kfree(data); return ret; } IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table); int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, int *cmd_size) { u8 cmd_ver; int i, j, num_sub_bands; s8 *gain; + /* many firmware images for JF lie about this */ + if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == + CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) + return -EOPNOTSUPP; + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { IWL_DEBUG_RADIO(fwrt, "PPAG capability not supported by FW, command not sent.\n"); return -EINVAL; - } - if (!fwrt->ppag_flags) { - IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); - return -EINVAL; - } + } + + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + IWL_FW_CMD_VER_UNKNOWN); + if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) { + IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); + return -EINVAL; + } /* The 'flags' field is the same in v1 and in v2 so we can just * use v1 to access it. 
*/ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); - cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, - WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - IWL_FW_CMD_VER_UNKNOWN); + + IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); if (cmd_ver == 1) { num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = cmd->v1.gain[0]; *cmd_size = sizeof(cmd->v1); if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) { + /* in this case FW supports revision 0 */ IWL_DEBUG_RADIO(fwrt, - "PPAG table rev is %d but FW supports v1, sending truncated table\n", + "PPAG table rev is %d, send truncated table\n", fwrt->ppag_ver); - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } - } else if (cmd_ver == 2 || cmd_ver == 3) { + } else if (cmd_ver >= 2 && cmd_ver <= 4) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = cmd->v2.gain[0]; *cmd_size = sizeof(cmd->v2); if (fwrt->ppag_ver == 0) { + /* in this case FW supports revisions 1 or 2 */ IWL_DEBUG_RADIO(fwrt, - "PPAG table is v1 but FW supports v2, sending padded table\n"); - } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) { - IWL_DEBUG_RADIO(fwrt, - "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + "PPAG table rev is 0, send padded table\n"); } } else { IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); return -EINVAL; } + /* ppag mode */ + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits were read from bios: %d\n", + cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); + if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) || + (cmd_ver == 2 && fwrt->ppag_ver == 2)) { + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); + } else { + IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); + } + + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits going to be sent: %d\n", + cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { gain[i * num_sub_bands + j] = fwrt->ppag_chains[i].subbands[j]; IWL_DEBUG_RADIO(fwrt, "PPAG table: chain[%d] band[%d]: gain = %d\n", i, j, gain[i * num_sub_bands + j]); } } return 0; } IWL_EXPORT_SYMBOL(iwl_read_ppag_table); bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) { if (!dmi_check_system(dmi_ppag_approved_list)) { IWL_DEBUG_RADIO(fwrt, "System vendor '%s' is not in the approved list, disabling PPAG.\n", dmi_get_system_info(DMI_SYS_VENDOR)); fwrt->ppag_flags = 0; return false; } return true; } IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved); + +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters) +{ + struct iwl_phy_specific_cfg tmp = {}; + union acpi_object *wifi_pkg, *data; + int tbl_rev, i; + + data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD); + if (IS_ERR(data)) + return; + + /* try to read wtas table revision 1 or revision 0*/ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WPFC_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; + + if (tbl_rev != 0) + goto out_free; + + BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE); + + for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { + if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER) + return; + tmp.filter_cfg_chains[i] = + cpu_to_le32(wifi_pkg->package.elements[i].integer.value); + } + + IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n"); + *filters = tmp; +out_free: + kfree(data); +} 
+IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters); diff --git a/sys/contrib/dev/iwlwifi/fw/acpi.h b/sys/contrib/dev/iwlwifi/fw/acpi.h index 6f361c59106f..c36c62d6414d 100644 --- a/sys/contrib/dev/iwlwifi/fw/acpi.h +++ b/sys/contrib/dev/iwlwifi/fw/acpi.h @@ -1,349 +1,330 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ #include #include "fw/api/commands.h" #include "fw/api/power.h" #include "fw/api/phy.h" #include "fw/api/nvm-reg.h" +#include "fw/api/config.h" #include "fw/img.h" #include "iwl-trans.h" #define ACPI_WRDS_METHOD "WRDS" #define ACPI_EWRD_METHOD "EWRD" #define ACPI_WGDS_METHOD "WGDS" #define ACPI_WRDD_METHOD "WRDD" #define ACPI_SPLC_METHOD "SPLC" #define ACPI_ECKV_METHOD "ECKV" #define ACPI_PPAG_METHOD "PPAG" #define ACPI_WTAS_METHOD "WTAS" +#define ACPI_WPFC_METHOD "WPFC" #define ACPI_WIFI_DOMAIN (0x07) #define ACPI_SAR_PROFILE_NUM 4 #define ACPI_NUM_GEO_PROFILES 3 #define ACPI_NUM_GEO_PROFILES_REV3 8 #define ACPI_GEO_PER_CHAIN_SIZE 3 #define ACPI_SAR_NUM_CHAINS_REV0 2 #define ACPI_SAR_NUM_CHAINS_REV1 2 #define ACPI_SAR_NUM_CHAINS_REV2 4 #define ACPI_SAR_NUM_SUB_BANDS_REV0 5 #define ACPI_SAR_NUM_SUB_BANDS_REV1 11 #define ACPI_SAR_NUM_SUB_BANDS_REV2 11 #define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \ ACPI_SAR_NUM_SUB_BANDS_REV0 + 2) #define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \ ACPI_SAR_NUM_SUB_BANDS_REV1 + 2) #define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 2) #define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV0 * \ ACPI_SAR_NUM_SUB_BANDS_REV0 + 3) #define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV1 * \ ACPI_SAR_NUM_SUB_BANDS_REV1 + 3) #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) +#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */ /* revision 0 and 1 are identical, except for the semantics in the FW */ #define ACPI_GEO_NUM_BANDS_REV0 2 #define ACPI_GEO_NUM_BANDS_REV2 3 #define ACPI_GEO_NUM_CHAINS 2 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 #define ACPI_ECKV_WIFI_DATA_SIZE 2 /* * TAS size: 1 elelment for type, * 1 element for enabled field, * 1 element for block list size, * 16 elements for block list array */ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) #define ACPI_WTAS_ENABLED_MSK 0x1 #define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2 #define ACPI_WTAS_ENABLE_IEC_MSK 0x4 #define ACPI_WTAS_OVERRIDE_IEC_POS 0x1 #define ACPI_WTAS_ENABLE_IEC_POS 0x2 #define ACPI_WTAS_USA_UHB_MSK BIT(16) #define ACPI_WTAS_USA_UHB_POS 16 #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V1) + 2) #define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \ IWL_NUM_SUB_BANDS_V2) + 2) /* PPAG gain value bounds in 1/8 dBm */ #define ACPI_PPAG_MIN_LB -16 #define ACPI_PPAG_MAX_LB 24 #define ACPI_PPAG_MIN_HB -16 #define ACPI_PPAG_MAX_HB 40 #define ACPI_PPAG_MASK 3 #define IWL_PPAG_ETSI_MASK BIT(0) #define IWL_SAR_ENABLE_MSK BIT(0) #define IWL_REDUCE_POWER_FLAGS_POS 1 /* * The profile for revision 2 is a superset of revision 1, which is in * turn a superset of revision 0. 
So we can store all revisions * inside revision 2, which is what we represent here. */ struct iwl_sar_profile_chain { u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; }; struct iwl_sar_profile { bool enabled; struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2]; }; /* Same thing as with SAR, all revisions fit in revision 2 */ struct iwl_geo_profile_band { u8 max; u8 chains[ACPI_GEO_NUM_CHAINS]; }; struct iwl_geo_profile { struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2]; }; /* Same thing as with SAR, all revisions fit in revision 2 */ struct iwl_ppag_chain { s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2]; }; enum iwl_dsm_funcs_rev_0 { DSM_FUNC_QUERY = 0, DSM_FUNC_DISABLE_SRD = 1, DSM_FUNC_ENABLE_INDONESIA_5G2 = 2, DSM_FUNC_ENABLE_6E = 3, DSM_FUNC_11AX_ENABLEMENT = 6, DSM_FUNC_ENABLE_UNII4_CHAN = 7, DSM_FUNC_ACTIVATE_CHANNEL = 8, DSM_FUNC_FORCE_DISABLE_CHANNELS = 9 }; enum iwl_dsm_values_srd { DSM_VALUE_SRD_ACTIVE, DSM_VALUE_SRD_PASSIVE, DSM_VALUE_SRD_DISABLE, DSM_VALUE_SRD_MAX }; enum iwl_dsm_values_indonesia { DSM_VALUE_INDONESIA_DISABLE, DSM_VALUE_INDONESIA_ENABLE, DSM_VALUE_INDONESIA_RESERVED, DSM_VALUE_INDONESIA_MAX }; /* DSM RFI uses a different GUID, so need separate definitions */ #define DSM_RFI_FUNC_ENABLE 3 enum iwl_dsm_values_rfi { DSM_VALUE_RFI_ENABLE, DSM_VALUE_RFI_DISABLE, DSM_VALUE_RFI_MAX }; #ifdef CONFIG_ACPI struct iwl_fw_runtime; extern const guid_t iwl_guid; extern const guid_t iwl_rfi_guid; -void *iwl_acpi_get_object(struct device *dev, acpi_string method); - int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value); int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value); -union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, - union acpi_object *data, - int min_data_size, - int max_data_size, - int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * * @dev: the struct device * @mcc: output buffer (3 bytes) that will get the MCC * * This function tries to read the current MCC from ACPI if available. */ int iwl_acpi_get_mcc(struct device *dev, char *mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev); /* * iwl_acpi_get_eckv - read external clock validation from ACPI, if available * * @dev: the struct device * @extl_clk: output var (2 bytes) that will get the clk indication. * * This function tries to read the external clock indication * from ACPI if available. 
*/ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b); int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt); int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt); int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt); bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands, u32 n_profiles); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver); __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt); int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, int *cmd_size); bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt); -#else /* CONFIG_ACPI */ +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters); -static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) -{ - return ERR_PTR(-ENOENT); -} +#else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args) { return ERR_PTR(-ENOENT); } static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value) { return -ENOENT; } static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value) { return -ENOENT; } -static inline union acpi_object * -iwl_acpi_get_wifi_pkg_range(struct device *dev, - union acpi_object *data, - int min_data_size, int max_data_size, - int *tbl_rev) -{ - return ERR_PTR(-ENOENT); -} - static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc) { return -ENOENT; } static inline u64 iwl_acpi_get_pwr_limit(struct device *dev) { return 0; } static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) { return -ENOENT; } static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt, __le16 *per_chain, u32 n_tables, u32 n_subbands, int prof_a, int prof_b) { return -ENOENT; } static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt) { return 1; } static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) { return false; } static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver) { return -ENOENT; } static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { return 0; } static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) { return -ENOENT; } static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd, int *cmd_size) { return -ENOENT; } static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) { return false; } -#endif /* CONFIG_ACPI */ - -static inline union acpi_object * -iwl_acpi_get_wifi_pkg(struct device *dev, - union acpi_object *data, - int data_size, int *tbl_rev) +static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters) { - return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size, - tbl_rev); } +#endif /* CONFIG_ACPI */ + #endif /* __iwl_fw_acpi__ */ diff --git 
a/sys/contrib/dev/iwlwifi/fw/api/binding.h b/sys/contrib/dev/iwlwifi/fw/api/binding.h index 29e2816e7052..d9044ada6a43 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/binding.h +++ b/sys/contrib/dev/iwlwifi/fw/api/binding.h @@ -1,134 +1,126 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_binding_h__ #define __iwl_fw_api_binding_h__ #include #include #define MAX_MACS_IN_BINDING (3) #define MAX_BINDINGS (4) /** * struct iwl_binding_cmd_v1 - configuring bindings * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @macs: array of MAC id and colors which belong to the binding, * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding, * &enum iwl_ctxt_id_and_color */ struct iwl_binding_cmd_v1 { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; } __packed; /* BINDING_CMD_API_S_VER_1 */ /** * struct iwl_binding_cmd - configuring bindings * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @macs: array of MAC id and colors which belong to the binding * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding * &enum iwl_ctxt_id_and_color * @lmac_id: the lmac id the binding belongs to */ struct iwl_binding_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* BINDING_DATA_API_S_VER_1 */ __le32 macs[MAX_MACS_IN_BINDING]; __le32 phy; __le32 lmac_id; } __packed; /* BINDING_CMD_API_S_VER_2 */ #define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) #define IWL_LMAC_24G_INDEX 0 #define IWL_LMAC_5G_INDEX 1 -static inline u32 iwl_mvm_get_lmac_id(const struct iwl_fw *fw, - enum nl80211_band band){ - if (!fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) || - band == NL80211_BAND_2GHZ) - return IWL_LMAC_24G_INDEX; - return IWL_LMAC_5G_INDEX; -} - /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 /** * struct iwl_time_quota_data_v1 - configuration of time quota per binding * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color * @quota: absolute time quota in TU. The scheduler will try to divide the * remainig quota (after Time Events) according to this quota. * @max_duration: max uninterrupted context duration in TU */ struct iwl_time_quota_data_v1 { __le32 id_and_color; __le32 quota; __le32 max_duration; } __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ /** * struct iwl_time_quota_cmd - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding * Note: on non-CDB the fourth one is the auxilary mac and is * essentially zero. * On CDB the fourth one is a regular binding. 
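To show how the binding command above is assembled: the id/color words are built with FW_CMD_ID_AND_COLOR() and the action with enum iwl_ctxt_action, both from the context API header patched later in this diff, and unused MAC slots are marked with FW_CTXT_INVALID. A rough sketch, with a hypothetical helper name and caller-supplied id/color values:

static void iwl_example_fill_binding(struct iwl_binding_cmd *cmd,
				     u32 binding_id, u32 binding_color,
				     u32 mac_id_and_color,
				     u32 phy_id_and_color)
{
	int i;

	cmd->id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(binding_id, binding_color));
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);

	/* one MAC in this binding, remaining slots marked unused */
	cmd->macs[0] = cpu_to_le32(mac_id_and_color);
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		cmd->macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	cmd->phy = cpu_to_le32(phy_id_and_color);
	cmd->lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
}

On non-CDB firmware without the lmac_id field, the v1 layout (IWL_BINDING_CMD_SIZE_V1) is sent instead.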
*/ struct iwl_time_quota_cmd_v1 { struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS]; } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ enum iwl_quota_low_latency { IWL_QUOTA_LOW_LATENCY_NONE = 0, IWL_QUOTA_LOW_LATENCY_TX = BIT(0), IWL_QUOTA_LOW_LATENCY_RX = BIT(1), IWL_QUOTA_LOW_LATENCY_TX_RX = IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX, }; /** * struct iwl_time_quota_data - configuration of time quota per binding * @id_and_color: ID and color of the relevant Binding. * @quota: absolute time quota in TU. The scheduler will try to divide the * remainig quota (after Time Events) according to this quota. * @max_duration: max uninterrupted context duration in TU * @low_latency: low latency status, &enum iwl_quota_low_latency */ struct iwl_time_quota_data { __le32 id_and_color; __le32 quota; __le32 max_duration; __le32 low_latency; } __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */ /** * struct iwl_time_quota_cmd - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * Note: on non-CDB the fourth one is the auxilary mac and is essentially zero. * On CDB the fourth one is a regular binding. * * @quotas: allocations per binding */ struct iwl_time_quota_cmd { struct iwl_time_quota_data quotas[MAX_BINDINGS]; } __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */ #endif /* __iwl_fw_api_binding_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/commands.h b/sys/contrib/dev/iwlwifi/fw/api/commands.h index c78d2f1c722c..13cb0d53a1a3 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/commands.h +++ b/sys/contrib/dev/iwlwifi/fw/api/commands.h @@ -1,610 +1,625 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ /** * enum iwl_mvm_command_groups - command groups for the firmware * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds * @LONG_GROUP: legacy group with long header, also uses command IDs * from &enum iwl_legacy_cmds * @SYSTEM_GROUP: system group, uses command IDs from * &enum iwl_system_subcmd_ids * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from * &enum iwl_mac_conf_subcmd_ids * @PHY_OPS_GROUP: PHY operations group, uses command IDs from * &enum iwl_phy_ops_subcmd_ids * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids + * @SCAN_GROUP: scan group, uses command IDs from + * &enum iwl_scan_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from * &enum iwl_regulatory_and_nvm_subcmd_ids * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds */ enum iwl_mvm_command_groups { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, SYSTEM_GROUP = 0x2, MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, + SCAN_GROUP = 0x6, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, }; /** * enum iwl_legacy_cmds - legacy group command IDs */ enum iwl_legacy_cmds { /** * @UCODE_ALIVE_NTFY: * Alive data from the firmware, as described in * &struct iwl_alive_ntf_v3 or &struct 
iwl_alive_ntf_v4 or * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6. */ UCODE_ALIVE_NTFY = 0x1, /** * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. */ REPLY_ERROR = 0x2, /** * @ECHO_CMD: Send data to the device to have it returned immediately. */ ECHO_CMD = 0x3, /** * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. */ INIT_COMPLETE_NOTIF = 0x4, /** * @PHY_CONTEXT_CMD: * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd * or &struct iwl_phy_context_cmd_v1. */ PHY_CONTEXT_CMD = 0x8, /** * @DBG_CFG: Debug configuration command. */ DBG_CFG = 0x9, /** * @SCAN_ITERATION_COMPLETE_UMAC: * Firmware indicates a scan iteration completed, using * &struct iwl_umac_scan_iter_complete_notif. */ SCAN_ITERATION_COMPLETE_UMAC = 0xb5, /** * @SCAN_CFG_CMD: * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2 * or &struct iwl_scan_config */ SCAN_CFG_CMD = 0xc, /** * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac */ SCAN_REQ_UMAC = 0xd, /** * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort */ SCAN_ABORT_UMAC = 0xe, /** * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete */ SCAN_COMPLETE_UMAC = 0xf, /** * @BA_WINDOW_STATUS_NOTIFICATION_ID: * uses &struct iwl_ba_window_status_notif */ BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, /** * @ADD_STA_KEY: * &struct iwl_mvm_add_sta_key_cmd_v1 or * &struct iwl_mvm_add_sta_key_cmd. */ ADD_STA_KEY = 0x17, /** * @ADD_STA: * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. */ ADD_STA = 0x18, /** * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd */ REMOVE_STA = 0x19, - /** - * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd - */ - FW_GET_ITEM_CMD = 0x1a, - /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or * &struct iwl_mvm_tx_resp_v3 */ TX_CMD = 0x1c, /** * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd + * response in &struct iwl_tx_path_flush_cmd_rsp */ TXPATH_FLUSH = 0x1e, /** * @MGMT_MCAST_KEY: * &struct iwl_mvm_mgmt_mcast_key_cmd or * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 */ MGMT_MCAST_KEY = 0x1f, /* scheduler config */ /** * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp * for newer (22000) hardware. 
*/ SCD_QUEUE_CFG = 0x1d, /** * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd */ WEP_KEY = 0x20, /** * @SHARED_MEM_CFG: * retrieve shared memory configuration - response in * &struct iwl_shared_mem_cfg */ SHARED_MEM_CFG = 0x25, /** * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd */ TDLS_CHANNEL_SWITCH_CMD = 0x27, /** * @TDLS_CHANNEL_SWITCH_NOTIFICATION: * uses &struct iwl_tdls_channel_switch_notif */ TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, /** * @TDLS_CONFIG_CMD: * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res */ TDLS_CONFIG_CMD = 0xa7, /** * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd */ MAC_CONTEXT_CMD = 0x28, /** * @TIME_EVENT_CMD: * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp */ TIME_EVENT_CMD = 0x29, /* both CMD and response */ /** * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif */ TIME_EVENT_NOTIFICATION = 0x2a, /** * @BINDING_CONTEXT_CMD: * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 */ BINDING_CONTEXT_CMD = 0x2b, /** * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd */ TIME_QUOTA_CMD = 0x2c, /** * @NON_QOS_TX_COUNTER_CMD: * command is &struct iwl_nonqos_seq_query_cmd */ NON_QOS_TX_COUNTER_CMD = 0x2d, /** * @LEDS_CMD: command is &struct iwl_led_cmd */ LEDS_CMD = 0x48, /** * @LQ_CMD: using &struct iwl_lq_cmd */ LQ_CMD = 0x4e, /** * @FW_PAGING_BLOCK_CMD: * &struct iwl_fw_paging_cmd */ FW_PAGING_BLOCK_CMD = 0x4f, /** * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac */ SCAN_OFFLOAD_REQUEST_CMD = 0x51, /** * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents */ SCAN_OFFLOAD_ABORT_CMD = 0x52, /** * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req */ HOT_SPOT_CMD = 0x53, + /** + * @WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION: Time Sync + * measurement notification for TM/FTM. Sent on receipt of + * respective WNM action frame for TM protocol or public action + * frame for FTM protocol from peer device along with additional + * meta data specified in &struct iwl_time_msmt_notify + */ + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION = 0x67, + + /** + * @WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION: Time Sync + * measurement confirmation notification for TM/FTM. Sent on + * receipt of Ack from peer for previously Tx'ed TM/FTM + * action frame along with additional meta data specified in + * &struct iwl_time_msmt_cfm_notify + */ + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION = 0x68, + /** * @SCAN_OFFLOAD_COMPLETE: * notification, &struct iwl_periodic_scan_complete */ SCAN_OFFLOAD_COMPLETE = 0x6D, /** * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: * update scan offload (scheduled scan) profiles/blocklist/etc. 
*/ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, /** * @MATCH_FOUND_NOTIFICATION: scan match found */ MATCH_FOUND_NOTIFICATION = 0xd9, /** * @SCAN_ITERATION_COMPLETE: * uses &struct iwl_lmac_scan_complete_notif */ SCAN_ITERATION_COMPLETE = 0xe7, /* Phy */ /** * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd_v1 or &struct iwl_phy_cfg_cmd_v3 */ PHY_CONFIGURATION_CMD = 0x6a, /** * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db */ CALIB_RES_NOTIF_PHY_DB = 0x6b, /** * @PHY_DB_CMD: &struct iwl_phy_db_cmd */ PHY_DB_CMD = 0x6c, /** * @POWER_TABLE_CMD: &struct iwl_device_power_cmd */ POWER_TABLE_CMD = 0x77, /** * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: * &struct iwl_uapsd_misbehaving_ap_notif */ PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, /** * @LTR_CONFIG: &struct iwl_ltr_config_cmd */ LTR_CONFIG = 0xee, /** * @REPLY_THERMAL_MNG_BACKOFF: * Thermal throttling command */ REPLY_THERMAL_MNG_BACKOFF = 0x7e, /** * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd */ NVM_ACCESS_CMD = 0x88, /** * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif */ BEACON_NOTIFICATION = 0x90, /** * @BEACON_TEMPLATE_CMD: * Uses one of &struct iwl_mac_beacon_cmd_v6, * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd * depending on the device version. */ BEACON_TEMPLATE_CMD = 0x91, /** * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd */ TX_ANT_CONFIGURATION_CMD = 0x98, /** * @STATISTICS_CMD: * one of &struct iwl_statistics_cmd, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics, * &struct iwl_statistics_operational_ntfy_ver_14 */ STATISTICS_CMD = 0x9c, /** * @STATISTICS_NOTIFICATION: * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, * &struct iwl_notif_statistic, * &struct iwl_statistics_operational_ntfy_ver_14 * &struct iwl_statistics_operational_ntfy */ STATISTICS_NOTIFICATION = 0x9d, /** * @EOSP_NOTIFICATION: * Notify that a service period ended, * &struct iwl_mvm_eosp_notification */ EOSP_NOTIFICATION = 0x9e, /** * @REDUCE_TX_POWER_CMD: * &struct iwl_dev_tx_power_cmd */ REDUCE_TX_POWER_CMD = 0x9f, /** * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif */ MISSED_BEACONS_NOTIFICATION = 0xa2, /** * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd */ MAC_PM_POWER_TABLE = 0xa9, /** * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif */ MFUART_LOAD_NOTIFICATION = 0xb1, /** * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd */ RSS_CONFIG_CMD = 0xb3, /** * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info */ REPLY_RX_PHY_CMD = 0xc0, /** * @REPLY_RX_MPDU_CMD: * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc */ REPLY_RX_MPDU_CMD = 0xc1, /** * @BAR_FRAME_RELEASE: Frame release from BAR notification, used for * multi-TID BAR (previously, the BAR frame itself was reported * instead). Uses &struct iwl_bar_frame_release. 
*/ BAR_FRAME_RELEASE = 0xc2, /** * @FRAME_RELEASE: * Frame release (reorder helper) notification, uses * &struct iwl_frame_release */ FRAME_RELEASE = 0xc3, /** * @BA_NOTIF: * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif * or &struct iwl_mvm_ba_notif depending on the HW */ BA_NOTIF = 0xc5, /* Location Aware Regulatory */ /** * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd */ MCC_UPDATE_CMD = 0xc8, /** * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif */ MCC_CHUB_UPDATE_CMD = 0xc9, /** * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker * with &struct iwl_mvm_marker_rsp */ MARKER_CMD = 0xcb, /** * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif */ BT_PROFILE_NOTIFICATION = 0xce, /** * @BT_CONFIG: &struct iwl_bt_coex_cmd */ BT_CONFIG = 0x9b, /** * @BT_COEX_UPDATE_REDUCED_TXP: * &struct iwl_bt_coex_reduced_txp_update_cmd */ BT_COEX_UPDATE_REDUCED_TXP = 0x5c, /** * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd */ BT_COEX_CI = 0x5d, /** * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd */ REPLY_SF_CFG_CMD = 0xd1, /** * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd */ REPLY_BEACON_FILTERING_CMD = 0xd2, /** * @DTS_MEASUREMENT_NOTIFICATION: * &struct iwl_dts_measurement_notif_v1 or * &struct iwl_dts_measurement_notif_v2 */ DTS_MEASUREMENT_NOTIFICATION = 0xdd, /** * @LDBG_CONFIG_CMD: configure continuous trace recording */ LDBG_CONFIG_CMD = 0xf6, /** * @DEBUG_LOG_MSG: Debugging log data from firmware */ DEBUG_LOG_MSG = 0xf7, /** * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd */ MCAST_FILTER_CMD = 0xd0, /** * @D3_CONFIG_CMD: &struct iwl_d3_manager_config */ D3_CONFIG_CMD = 0xd3, /** * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, * &struct iwl_proto_offload_cmd_v3_small, * &struct iwl_proto_offload_cmd_v3_large */ PROT_OFFLOAD_CONFIG_CMD = 0xd4, - /** - * @OFFLOADS_QUERY_CMD: - * No data in command, response in &struct iwl_wowlan_status - */ - OFFLOADS_QUERY_CMD = 0xd5, - /** * @D0I3_END_CMD: End D0i3/D3 state, no command data */ D0I3_END_CMD = 0xed, /** * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd */ WOWLAN_PATTERNS = 0xe0, /** * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd */ WOWLAN_CONFIGURATION = 0xe1, /** * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4, * &struct iwl_wowlan_rsc_tsc_params_cmd */ WOWLAN_TSC_RSC_PARAM = 0xe2, /** * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd */ WOWLAN_TKIP_PARAM = 0xe3, /** - * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd + * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd_v2, + * &struct iwl_wowlan_kek_kck_material_cmd_v3 or + * &struct iwl_wowlan_kek_kck_material_cmd_v4 */ WOWLAN_KEK_KCK_MATERIAL = 0xe4, /** - * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status + * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status_v6, + * &struct iwl_wowlan_status_v7, &struct iwl_wowlan_status_v9 or + * &struct iwl_wowlan_status_v12 */ WOWLAN_GET_STATUSES = 0xe5, /** - * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: - * No command data, response is &struct iwl_scan_offload_profiles_query + * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: No command data, response is + * &struct iwl_scan_offload_profiles_query_v1 */ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, }; /** * enum iwl_system_subcmd_ids - system group command IDs */ enum iwl_system_subcmd_ids { /** * @SHARED_MEM_CFG_CMD: * response in &struct iwl_shared_mem_cfg or * &struct iwl_shared_mem_cfg_v2 */ 
SHARED_MEM_CFG_CMD = 0x0, /** * @SOC_CONFIGURATION_CMD: &struct iwl_soc_configuration_cmd */ SOC_CONFIGURATION_CMD = 0x01, /** * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, /** * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd */ FW_ERROR_RECOVERY_CMD = 0x7, /** * @RFI_CONFIG_CMD: &struct iwl_rfi_config_cmd */ RFI_CONFIG_CMD = 0xb, /** * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd */ RFI_GET_FREQ_TABLE_CMD = 0xc, /** * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd */ SYSTEM_FEATURES_CONTROL_CMD = 0xd, /** * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif */ RFI_DEACTIVATE_NOTIF = 0xff, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/config.h b/sys/contrib/dev/iwlwifi/fw/api/config.h index 087354b3c308..4419631604b4 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/config.h +++ b/sys/contrib/dev/iwlwifi/fw/api/config.h @@ -1,117 +1,112 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_config_h__ #define __iwl_fw_api_config_h__ /* * struct iwl_dqa_enable_cmd * @cmd_queue: the TXQ number of the command queue */ struct iwl_dqa_enable_cmd { __le32 cmd_queue; } __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ /* * struct iwl_tx_ant_cfg_cmd * @valid: valid antenna configuration */ struct iwl_tx_ant_cfg_cmd { __le32 valid; } __packed; /** * struct iwl_calib_ctrl - Calibration control struct. * Sent as part of the phy configuration command. * @flow_trigger: bitmap for which calibrations to perform according to * flow triggers, using &enum iwl_calib_cfg * @event_trigger: bitmap for which calibrations to perform according to * event triggers, using &enum iwl_calib_cfg */ struct iwl_calib_ctrl { __le32 flow_trigger; __le32 event_trigger; } __packed; /* This enum defines the bitmap of various calibrations to enable in both * init ucode and runtime ucode through CALIBRATION_CFG_CMD. */ enum iwl_calib_cfg { IWL_CALIB_CFG_XTAL_IDX = BIT(0), IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), IWL_CALIB_CFG_PAPD_IDX = BIT(3), IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), IWL_CALIB_CFG_DC_IDX = BIT(5), IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), IWL_CALIB_CFG_DAC_IDX = BIT(16), IWL_CALIB_CFG_ABS_IDX = BIT(17), IWL_CALIB_CFG_AGC_IDX = BIT(18), }; /** * struct iwl_phy_specific_cfg - specific PHY filter configuration * * Sent as part of the phy configuration command (v3) to configure specific FW * defined PHY filters that can be applied to each antenna. 
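Both trigger masks in struct iwl_calib_ctrl are plain ORs of enum iwl_calib_cfg bits; a minimal sketch (the particular calibrations chosen here are arbitrary, purely to show the encoding):

static void iwl_example_set_calib(struct iwl_calib_ctrl *ctrl)
{
	u32 calib = IWL_CALIB_CFG_XTAL_IDX |
		    IWL_CALIB_CFG_TX_PWR_IDX |
		    IWL_CALIB_CFG_RX_IQ_IDX;

	/* request the same set for flow-triggered and event-triggered runs */
	ctrl->flow_trigger = cpu_to_le32(calib);
	ctrl->event_trigger = cpu_to_le32(calib);
}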
* - * @filter_cfg_chain_a: filter config id for LMAC1 chain A - * @filter_cfg_chain_b: filter config id for LMAC1 chain B - * @filter_cfg_chain_c: filter config id for LMAC2 chain A - * @filter_cfg_chain_d: filter config id for LMAC2 chain B - * values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id + * @filter_cfg_chains: filter config id for LMAC1 chain A, LMAC1 chain B, + * LMAC2 chain A, LMAC2 chain B (in that order) + * values: 0: no filter; 0xffffffff: reserved; otherwise: filter id */ struct iwl_phy_specific_cfg { - __le32 filter_cfg_chain_a; - __le32 filter_cfg_chain_b; - __le32 filter_cfg_chain_c; - __le32 filter_cfg_chain_d; + __le32 filter_cfg_chains[4]; } __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/ /** * struct iwl_phy_cfg_cmd - Phy configuration command * * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg * @calib_control: calibration control data */ struct iwl_phy_cfg_cmd_v1 { __le32 phy_cfg; struct iwl_calib_ctrl calib_control; } __packed; /** * struct iwl_phy_cfg_cmd_v3 - Phy configuration command (v3) * * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg * @calib_control: calibration control data * @phy_specific_cfg: configure predefined PHY filters */ struct iwl_phy_cfg_cmd_v3 { __le32 phy_cfg; struct iwl_calib_ctrl calib_control; struct iwl_phy_specific_cfg phy_specific_cfg; } __packed; /* PHY_CONFIGURATION_CMD_API_S_VER_3 */ /* * enum iwl_dc2dc_config_id - flag ids * * Ids of dc2dc configuration flags */ enum iwl_dc2dc_config_id { DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ DCDC_FREQ_TUNE_SET = 0x2, }; /* MARKER_ID_API_E_VER_1 */ #endif /* __iwl_fw_api_config_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/context.h b/sys/contrib/dev/iwlwifi/fw/api/context.h index 105ba7170c3f..1fa5678c1cd6 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/context.h +++ b/sys/contrib/dev/iwlwifi/fw/api/context.h @@ -1,38 +1,43 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2012-2014, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_context_h__ #define __iwl_fw_api_context_h__ /** * enum iwl_ctxt_id_and_color - ID and color fields in context dword * @FW_CTXT_ID_POS: position of the ID * @FW_CTXT_ID_MSK: mask of the ID * @FW_CTXT_COLOR_POS: position of the color * @FW_CTXT_COLOR_MSK: mask of the color * @FW_CTXT_INVALID: value used to indicate unused/invalid */ enum iwl_ctxt_id_and_color { FW_CTXT_ID_POS = 0, FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, FW_CTXT_COLOR_POS = 8, FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, FW_CTXT_INVALID = 0xffffffff, }; #define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ ((_color) << FW_CTXT_COLOR_POS)) -/* Possible actions on PHYs, MACs and Bindings */ +/** + * enum iwl_ctxt_action - Posssible actions on PHYs, MACs, Bindings and other + * @FW_CTXT_ACTION_INVALID: unused, invalid action + * @FW_CTXT_ACTION_ADD: add the context + * @FW_CTXT_ACTION_MODIFY: modify the context + * @FW_CTXT_ACTION_REMOVE: remove the context + */ enum iwl_ctxt_action { - FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_INVALID = 0, FW_CTXT_ACTION_ADD, FW_CTXT_ACTION_MODIFY, FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ #endif /* __iwl_fw_api_context_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/d3.h b/sys/contrib/dev/iwlwifi/fw/api/d3.h index 4cd9ab23954e..72d461c47323 100644 --- 
a/sys/contrib/dev/iwlwifi/fw/api/d3.h +++ b/sys/contrib/dev/iwlwifi/fw/api/d3.h @@ -1,771 +1,867 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_d3_h__ #define __iwl_fw_api_d3_h__ #include /** * enum iwl_d0i3_flags - d0i3 flags * @IWL_D0I3_RESET_REQUIRE: FW require reset upon resume */ enum iwl_d0i3_flags { IWL_D0I3_RESET_REQUIRE = BIT(0), }; /** * enum iwl_d3_wakeup_flags - D3 manager wakeup flags * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert */ enum iwl_d3_wakeup_flags { IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), }; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ /** * struct iwl_d3_manager_config - D3 manager configuration command * @min_sleep_time: minimum sleep time (in usec) * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags * @wakeup_host_timer: force wakeup after this many seconds * * The structure is used for the D3_CONFIG_CMD command. */ struct iwl_d3_manager_config { __le32 min_sleep_time; __le32 wakeup_flags; __le32 wakeup_host_timer; } __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ /** * enum iwl_d3_proto_offloads - enabled protocol offloads * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid + * @IWL_D3_PROTO_OFFLOAD_BTM: BTM offload is enabled */ enum iwl_proto_offloads { IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), IWL_D3_PROTO_OFFLOAD_NS = BIT(1), IWL_D3_PROTO_IPV4_VALID = BIT(2), IWL_D3_PROTO_IPV6_VALID = BIT(3), + IWL_D3_PROTO_OFFLOAD_BTM = BIT(4), }; #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 #define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 #define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 /** * struct iwl_proto_offload_cmd_common - ARP/NS offload common part * @enabled: enable flags * @remote_ipv4_addr: remote address to answer to (or zero if all) * @host_ipv4_addr: our IPv4 address to respond to queries for * @arp_mac_addr: our MAC address for ARP responses * @reserved: unused */ struct iwl_proto_offload_cmd_common { __le32 enabled; __be32 remote_ipv4_addr; __be32 host_ipv4_addr; u8 arp_mac_addr[ETH_ALEN]; __le16 reserved; } __packed; /** * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration * @common: common/IPv4 configuration * @remote_ipv6_addr: remote address to answer to (or zero if all) * @solicited_node_ipv6_addr: broken -- solicited node address exists * for each target address * @target_ipv6_addr: our target addresses * @ndp_mac_addr: neighbor solicitation response MAC address * @reserved2: reserved */ struct iwl_proto_offload_cmd_v1 { struct iwl_proto_offload_cmd_common common; u8 remote_ipv6_addr[16]; u8 solicited_node_ipv6_addr[16]; u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; u8 ndp_mac_addr[ETH_ALEN]; __le16 reserved2; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ /** * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration * @common: common/IPv4 configuration * @remote_ipv6_addr: remote address to answer to (or zero if all) * @solicited_node_ipv6_addr: broken -- solicited 
node address exists * for each target address * @target_ipv6_addr: our target addresses * @ndp_mac_addr: neighbor solicitation response MAC address * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @reserved2: reserved */ struct iwl_proto_offload_cmd_v2 { struct iwl_proto_offload_cmd_common common; u8 remote_ipv6_addr[16]; u8 solicited_node_ipv6_addr[16]; u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; u8 ndp_mac_addr[ETH_ALEN]; u8 num_valid_ipv6_addrs; u8 reserved2[3]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ struct iwl_ns_config { struct in6_addr source_ipv6_addr; struct in6_addr dest_ipv6_addr; u8 target_mac_addr[ETH_ALEN]; __le16 reserved; } __packed; /* NS_OFFLOAD_CONFIG */ struct iwl_targ_addr { struct in6_addr addr; __le32 config_num; } __packed; /* TARGET_IPV6_ADDRESS */ /** * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration * @common: common/IPv4 configuration * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @targ_addrs: target IPv6 addresses * @ns_config: NS offload configurations */ struct iwl_proto_offload_cmd_v3_small { struct iwl_proto_offload_cmd_common common; __le32 num_valid_ipv6_addrs; struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ /** * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration * @common: common/IPv4 configuration * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @targ_addrs: target IPv6 addresses * @ns_config: NS offload configurations */ struct iwl_proto_offload_cmd_v3_large { struct iwl_proto_offload_cmd_common common; __le32 num_valid_ipv6_addrs; struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ /** * struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration * @sta_id: station id * @common: common/IPv4 configuration * @num_valid_ipv6_addrs: number of valid IPv6 addresses * @targ_addrs: target IPv6 addresses * @ns_config: NS offload configurations */ struct iwl_proto_offload_cmd_v4 { __le32 sta_id; struct iwl_proto_offload_cmd_common common; __le32 num_valid_ipv6_addrs; struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */ /* * WOWLAN_PATTERNS */ #define IWL_WOWLAN_MIN_PATTERN_LEN 16 #define IWL_WOWLAN_MAX_PATTERN_LEN 128 struct iwl_wowlan_pattern_v1 { u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; u8 mask_size; u8 pattern_size; __le16 reserved; } __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ #define IWL_WOWLAN_MAX_PATTERNS 20 /** * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns */ struct iwl_wowlan_patterns_cmd_v1 { /** * @n_patterns: number of patterns */ __le32 n_patterns; /** * @patterns: the patterns, array length in @n_patterns */ struct iwl_wowlan_pattern_v1 patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ #define IPV4_ADDR_SIZE 4 #define IPV6_ADDR_SIZE 16 enum iwl_wowlan_pattern_type { WOWLAN_PATTERN_TYPE_BITMASK, WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN, WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN, WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD, WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD, }; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */ /** * struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data */ struct 
iwl_wowlan_ipv4_tcp_syn { /** * @src_addr: source IP address to match */ u8 src_addr[IPV4_ADDR_SIZE]; /** * @dst_addr: destination IP address to match */ u8 dst_addr[IPV4_ADDR_SIZE]; /** * @src_port: source TCP port to match */ __le16 src_port; /** * @dst_port: destination TCP port to match */ __le16 dst_port; } __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */ /** * struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data */ struct iwl_wowlan_ipv6_tcp_syn { /** * @src_addr: source IP address to match */ u8 src_addr[IPV6_ADDR_SIZE]; /** * @dst_addr: destination IP address to match */ u8 dst_addr[IPV6_ADDR_SIZE]; /** * @src_port: source TCP port to match */ __le16 src_port; /** * @dst_port: destination TCP port to match */ __le16 dst_port; } __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */ /** * union iwl_wowlan_pattern_data - Data for the different pattern types * * If wildcard addresses/ports are to be used, the union can be left * undefined. */ union iwl_wowlan_pattern_data { /** * @bitmask: bitmask pattern data */ struct iwl_wowlan_pattern_v1 bitmask; /** * @ipv4_tcp_syn: IPv4 TCP SYN pattern data */ struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn; /** * @ipv6_tcp_syn: IPv6 TCP SYN pattern data */ struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn; }; /* WOWLAN_PATTERN_API_U_VER_1 */ /** * struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns */ struct iwl_wowlan_pattern_v2 { /** * @pattern_type: defines the struct type to be used in the union */ u8 pattern_type; /** * @reserved: reserved for alignment */ u8 reserved[3]; /** * @u: the union containing the match data, or undefined for * wildcard matches */ union iwl_wowlan_pattern_data u; } __packed; /* WOWLAN_PATTERN_API_S_VER_2 */ /** * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command */ struct iwl_wowlan_patterns_cmd { /** * @n_patterns: number of patterns */ u8 n_patterns; /** * @n_patterns: sta_id */ u8 sta_id; /** * @reserved: reserved for alignment */ __le16 reserved; /** * @patterns: the patterns, array length in @n_patterns */ struct iwl_wowlan_pattern_v2 patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */ enum iwl_wowlan_wakeup_filters { IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11), IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ enum iwl_wowlan_flags { IS_11W_ASSOC = BIT(0), ENABLE_L3_FILTERING = BIT(1), ENABLE_NBNS_FILTERING = BIT(2), ENABLE_DHCP_FILTERING = BIT(3), ENABLE_STORE_BEACON = BIT(4), }; /** * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6) * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters * @non_qos_seq: non-QoS sequence counter to use next. * Reserved if the struct has version >= 6. 
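Tying the pattern pieces together: arming a single IPv4 TCP SYN wake pattern means allocating the command with room for one struct iwl_wowlan_pattern_v2 and filling the union member that matches @pattern_type. A sketch, assuming the usual kernel allocation helpers; the function name and the choice of a single pattern are illustrative, not taken from the driver:

static struct iwl_wowlan_patterns_cmd *
iwl_example_build_syn_pattern(u8 sta_id,
			      const u8 src[IPV4_ADDR_SIZE],
			      const u8 dst[IPV4_ADDR_SIZE],
			      u16 src_port, u16 dst_port)
{
	struct iwl_wowlan_patterns_cmd *cmd;
	struct iwl_wowlan_ipv4_tcp_syn *syn;

	cmd = kzalloc(struct_size(cmd, patterns, 1), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->n_patterns = 1;
	cmd->sta_id = sta_id;

	cmd->patterns[0].pattern_type = WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN;
	syn = &cmd->patterns[0].u.ipv4_tcp_syn;
	memcpy(syn->src_addr, src, IPV4_ADDR_SIZE);
	memcpy(syn->dst_addr, dst, IPV4_ADDR_SIZE);
	syn->src_port = cpu_to_le16(src_port);
	syn->dst_port = cpu_to_le16(dst_port);

	return cmd;
}

For the wildcard pattern types the union is simply left untouched, as noted in the union documentation above.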
* @qos_seq: QoS sequence counters to use next * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down * @is_11n_connection: indicates HT connection * @offloading_tid: TID reserved for firmware use * @flags: extra flags, see &enum iwl_wowlan_flags * @sta_id: station ID for wowlan. * @reserved: reserved */ struct iwl_wowlan_config_cmd { __le32 wakeup_filter; __le16 non_qos_seq; __le16 qos_seq[8]; u8 wowlan_ba_teardown_tids; u8 is_11n_connection; u8 offloading_tid; u8 flags; u8 sta_id; u8 reserved; } __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ #define IWL_NUM_RSC 16 #define WOWLAN_KEY_MAX_SIZE 32 #define WOWLAN_GTK_KEYS_NUM 2 #define WOWLAN_IGTK_KEYS_NUM 2 +#define WOWLAN_IGTK_MIN_INDEX 4 /* * WOWLAN_TSC_RSC_PARAMS */ struct tkip_sc { __le16 iv16; __le16 pad; __le32 iv32; } __packed; /* TKIP_SC_API_U_VER_1 */ struct iwl_tkip_rsc_tsc { struct tkip_sc unicast_rsc[IWL_NUM_RSC]; struct tkip_sc multicast_rsc[IWL_NUM_RSC]; struct tkip_sc tsc; } __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ struct aes_sc { __le64 pn; } __packed; /* TKIP_AES_SC_API_U_VER_1 */ struct iwl_aes_rsc_tsc { struct aes_sc unicast_rsc[IWL_NUM_RSC]; struct aes_sc multicast_rsc[IWL_NUM_RSC]; struct aes_sc tsc; } __packed; /* AES_TSC_RSC_API_S_VER_1 */ union iwl_all_tsc_rsc { struct iwl_tkip_rsc_tsc tkip; struct iwl_aes_rsc_tsc aes; }; /* ALL_TSC_RSC_API_S_VER_2 */ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 { union iwl_all_tsc_rsc all_tsc_rsc; } __packed; /* ALL_TSC_RSC_API_S_VER_2 */ struct iwl_wowlan_rsc_tsc_params_cmd_v4 { struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params; __le32 sta_id; } __packed; /* ALL_TSC_RSC_API_S_VER_4 */ struct iwl_wowlan_rsc_tsc_params_cmd { __le64 ucast_rsc[IWL_MAX_TID_COUNT]; __le64 mcast_rsc[WOWLAN_GTK_KEYS_NUM][IWL_MAX_TID_COUNT]; __le32 sta_id; #define IWL_MCAST_KEY_MAP_INVALID 0xff u8 mcast_key_id_map[4]; } __packed; /* ALL_TSC_RSC_API_S_VER_5 */ #define IWL_MIC_KEY_SIZE 8 struct iwl_mic_keys { u8 tx[IWL_MIC_KEY_SIZE]; u8 rx_unicast[IWL_MIC_KEY_SIZE]; u8 rx_mcast[IWL_MIC_KEY_SIZE]; } __packed; /* MIC_KEYS_API_S_VER_1 */ #define IWL_P1K_SIZE 5 struct iwl_p1k_cache { __le16 p1k[IWL_P1K_SIZE]; } __packed; #define IWL_NUM_RX_P1K_CACHE 2 struct iwl_wowlan_tkip_params_cmd_ver_1 { struct iwl_mic_keys mic_keys; struct iwl_p1k_cache tx; struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; } __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ struct iwl_wowlan_tkip_params_cmd { struct iwl_mic_keys mic_keys; struct iwl_p1k_cache tx; struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; u8 reversed[2]; __le32 sta_id; } __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_2 */ #define IWL_KCK_MAX_SIZE 32 #define IWL_KEK_MAX_SIZE 32 struct iwl_wowlan_kek_kck_material_cmd_v2 { u8 kck[IWL_KCK_MAX_SIZE]; u8 kek[IWL_KEK_MAX_SIZE]; __le16 kck_len; __le16 kek_len; __le64 replay_ctr; } __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ struct iwl_wowlan_kek_kck_material_cmd_v3 { u8 kck[IWL_KCK_MAX_SIZE]; u8 kek[IWL_KEK_MAX_SIZE]; __le16 kck_len; __le16 kek_len; __le64 replay_ctr; __le32 akm; __le32 gtk_cipher; __le32 igtk_cipher; __le32 bigtk_cipher; } __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */ struct iwl_wowlan_kek_kck_material_cmd_v4 { __le32 sta_id; u8 kck[IWL_KCK_MAX_SIZE]; u8 kek[IWL_KEK_MAX_SIZE]; __le16 kck_len; __le16 kek_len; __le64 replay_ctr; __le32 akm; __le32 gtk_cipher; __le32 igtk_cipher; __le32 bigtk_cipher; } __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */ struct iwl_wowlan_get_status_cmd { __le32 sta_id; } __packed; /* 
WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */ #define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 enum iwl_wowlan_rekey_status { IWL_WOWLAN_REKEY_POST_REKEY = 0, IWL_WOWLAN_REKEY_WHILE_REKEY = 1, }; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ enum iwl_wowlan_wakeup_reason { IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17), IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18), IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19), IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20), IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21), }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ struct iwl_wowlan_gtk_status_v1 { u8 key_index; u8 reserved[3]; u8 decrypt_key[16]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ /** * struct iwl_wowlan_gtk_status_v2 - GTK status * @key: GTK material * @key_len: GTK legth, if set to 0, the key is not available * @key_flags: information about the key: * bits[0:1]: key index assigned by the AP * bits[2:6]: GTK index of the key in the internal DB * bit[7]: Set iff this is the currently used GTK * @reserved: padding * @tkip_mic_key: TKIP RX MIC key * @rsc: TSC RSC counters */ struct iwl_wowlan_gtk_status_v2 { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 key_len; u8 key_flags; u8 reserved[2]; u8 tkip_mic_key[8]; struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */ /** * struct iwl_wowlan_all_rsc_tsc_v5 - key counters * @ucast_rsc: unicast RSC values * @mcast_rsc: multicast RSC values (per key map value) * @sta_id: station ID * @mcast_key_id_map: map of key id to @mcast_rsc entry */ struct iwl_wowlan_all_rsc_tsc_v5 { __le64 ucast_rsc[IWL_MAX_TID_COUNT]; __le64 mcast_rsc[2][IWL_MAX_TID_COUNT]; __le32 sta_id; u8 mcast_key_id_map[4]; } __packed; /* ALL_TSC_RSC_API_S_VER_5 */ /** * struct iwl_wowlan_gtk_status_v3 - GTK status * @key: GTK material * @key_len: GTK length, if set to 0, the key is not available * @key_flags: information about the key: * bits[0:1]: key index assigned by the AP * bits[2:6]: GTK index of the key in the internal DB * bit[7]: Set iff this is the currently used GTK * @reserved: padding * @tkip_mic_key: TKIP RX MIC key * @sc: RSC/TSC counters */ struct iwl_wowlan_gtk_status_v3 { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 key_len; u8 key_flags; u8 reserved[2]; u8 tkip_mic_key[IWL_MIC_KEY_SIZE]; struct iwl_wowlan_all_rsc_tsc_v5 sc; } __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */ #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1)) +#define IWL_WOWLAN_IGTK_BIGTK_IDX_MASK (BIT(0)) /** * struct iwl_wowlan_igtk_status - IGTK status * @key: IGTK material * @ipn: the IGTK packet number (replay counter) * @key_len: IGTK length, if set 
to 0, the key is not available * @key_flags: information about the key: * bits[0]: key index assigned by the AP (0: index 4, 1: index 5) * bits[1:5]: IGTK index of the key in the internal DB * bit[6]: Set iff this is the currently used IGTK */ struct iwl_wowlan_igtk_status { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 ipn[6]; u8 key_len; u8 key_flags; } __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */ /** * struct iwl_wowlan_status_v6 - WoWLAN status * @gtk: GTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next * @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v6 { struct iwl_wowlan_gtk_status_v1 gtk; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ /** * struct iwl_wowlan_status_v7 - WoWLAN status * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next * @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v7 { struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_API_S_VER_7 */ /** * struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10) * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next. * Reserved if the struct has version >= 10. 
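Given one of these status replies, picking the GTK entry the firmware marks as in use comes down to checking @key_len and the "currently used" bit of @key_flags (bit 7 per the field description above). A sketch against the v7 layout, with an illustrative helper name:

static const struct iwl_wowlan_gtk_status_v2 *
iwl_example_find_current_gtk(const struct iwl_wowlan_status_v7 *status)
{
	int i;

	for (i = 0; i < WOWLAN_GTK_KEYS_NUM; i++) {
		const struct iwl_wowlan_gtk_status_v2 *gtk = &status->gtk[i];

		if (!gtk->key_len)
			continue;		/* slot not populated */
		if (gtk->key_flags & BIT(7))	/* "currently used" per @key_flags */
			return gtk;
	}

	return NULL;
}

The AP-assigned key index then comes from the low bits, e.g. gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK.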
* @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @tid_tear_down: bit mask of tids whose BA sessions were closed * in suspend state * @reserved: unused * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v9 { struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 tid_tear_down; u8 reserved[3]; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */ /** * struct iwl_wowlan_status_v12 - WoWLAN status * @gtk: GTK data * @igtk: IGTK data * @replay_ctr: GTK rekey replay counter * @pattern_number: number of the matched pattern * @non_qos_seq_ctr: non-QoS sequence counter to use next. * Reserved if the struct has version >= 10. * @qos_seq_ctr: QoS sequence counters to use next * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason * @num_of_gtk_rekeys: number of GTK rekeys * @transmitted_ndps: number of transmitted neighbor discovery packets * @received_beacons: number of received beacons * @wake_packet_length: wakeup packet length * @wake_packet_bufsize: wakeup packet buffer size * @tid_tear_down: bit mask of tids whose BA sessions were closed * in suspend state * @reserved: unused * @wake_packet: wakeup packet */ struct iwl_wowlan_status_v12 { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; __le64 replay_ctr; __le16 pattern_number; __le16 non_qos_seq_ctr; __le16 qos_seq_ctr[8]; __le32 wakeup_reasons; __le32 num_of_gtk_rekeys; __le32 transmitted_ndps; __le32 received_beacons; __le32 wake_packet_length; __le32 wake_packet_bufsize; u8 tid_tear_down; u8 reserved[3]; u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */ +/** + * struct iwl_wowlan_info_notif_v1 - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif_v1 { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 
wake_packet_bufsize; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */ + +/** + * struct iwl_wowlan_info_notif - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ + +/** + * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification + * @wake_packet_length: wakeup packet length + * @station_id: station id + * @reserved: unused + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_wake_pkt_notif { + __le32 wake_packet_length; + u8 station_id; + u8 reserved[3]; + u8 wake_packet[1]; +} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */ + +/** + * struct iwl_mvm_d3_end_notif - d3 end notification + * @flags: See &enum iwl_d0i3_flags + */ +struct iwl_mvm_d3_end_notif { + __le32 flags; +} __packed; + /* TODO: NetDetect API */ #endif /* __iwl_fw_api_d3_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/datapath.h b/sys/contrib/dev/iwlwifi/fw/api/datapath.h index 43619acc29fd..751b596ea1a5 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/datapath.h +++ b/sys/contrib/dev/iwlwifi/fw/api/datapath.h @@ -1,406 +1,673 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_datapath_h__ #define __iwl_fw_api_datapath_h__ /** * enum iwl_data_path_subcmd_ids - data path group commands */ enum iwl_data_path_subcmd_ids { /** * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd */ DQA_ENABLE_CMD = 0x0, /** * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd */ UPDATE_MU_GROUPS_CMD = 0x1, /** * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd */ TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, /** - * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd + * @WNM_PLATFORM_PTM_REQUEST_CMD: &struct iwl_time_sync_cfg_cmd + */ + WNM_PLATFORM_PTM_REQUEST_CMD = 0x3, + + /** + * @WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD: + * &struct iwl_time_sync_cfg_cmd + */ + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD = 0x4, + + /** + * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd_v1, + * &struct iwl_he_sta_context_cmd_v2 or + * &struct iwl_he_sta_context_cmd_v3 */ STA_HE_CTXT_CMD = 0x7, /** * @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd */ RLC_CONFIG_CMD = 0x8, /** * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config */ RFH_QUEUE_CONFIG_CMD = 0xD, /** * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4 */ TLC_MNG_CONFIG_CMD = 0xF, 
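One note on the D3 notifications defined just above: the end-of-D3 notification carries only the flags word, so the resume path can test it directly against enum iwl_d0i3_flags (helper name illustrative):

static bool iwl_example_d3_needs_reset(const struct iwl_mvm_d3_end_notif *notif)
{
	return le32_to_cpu(notif->flags) & IWL_D0I3_RESET_REQUIRE;
}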
/** * @HE_AIR_SNIFFER_CONFIG_CMD: &struct iwl_he_monitor_cmd */ HE_AIR_SNIFFER_CONFIG_CMD = 0x13, /** * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI * matrix collection, uses &struct iwl_channel_estimation_cfg */ CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14, /** * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX * blockack session, uses &struct iwl_rx_baid_cfg_cmd for the * command, and &struct iwl_rx_baid_cfg_resp as a response. */ RX_BAID_ALLOCATION_CONFIG_CMD = 0x16, /** * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal * command, uses &struct iwl_scd_queue_cfg_cmd and the response * is (same as before) &struct iwl_tx_queue_cfg_rsp. */ SCD_QUEUE_CONFIG_CMD = 0x17, + /** + * @SEC_KEY_CMD: security key command, uses &struct iwl_sec_key_cmd + */ + SEC_KEY_CMD = 0x18, + /** * @MONITOR_NOTIF: Datapath monitoring notification, using * &struct iwl_datapath_monitor_notif */ MONITOR_NOTIF = 0xF4, /** - * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data + * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data or &struct iwl_rx_no_data_ver_3 */ RX_NO_DATA_NOTIF = 0xF5, /** * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode, * &struct iwl_thermal_dual_chain_request */ THERMAL_DUAL_CHAIN_REQUEST = 0xF6, /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ TLC_MNG_UPDATE_NOTIF = 0xF7, /** * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification */ STA_PM_NOTIF = 0xFD, /** * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif */ MU_GROUP_MGMT_NOTIF = 0xFE, /** * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification */ RX_QUEUES_NOTIFICATION = 0xFF, }; /** * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration * * @reserved: reserved * @membership_status: a bitmap of MU groups * @user_position:the position of station in a group. If the station is in the * group then bits (group * 2) is the position -1 */ struct iwl_mu_group_mgmt_cmd { __le32 reserved; __le32 membership_status[2]; __le32 user_position[4]; } __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ /** * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification * * @membership_status: a bitmap of MU groups * @user_position: the position of station in a group. If the station is in the * group then bits (group * 2) is the position -1 */ struct iwl_mu_group_mgmt_notif { __le32 membership_status[2]; __le32 user_position[4]; } __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ enum iwl_channel_estimation_flags { IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0), IWL_CHANNEL_ESTIMATION_TIMER = BIT(1), IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2), }; +enum iwl_time_sync_protocol_type { + IWL_TIME_SYNC_PROTOCOL_TM = BIT(0), + IWL_TIME_SYNC_PROTOCOL_FTM = BIT(1), +}; /* WNM_TIMING_ENABLED_PROTOCOL_API_E_VER_1 */ + +/** + * struct iwl_time_sync_cfg_cmd - TM/FTM time sync measurement configuration + * + * @protocols: The type of frames to raise notifications for. 
A bitmap + * of @iwl_time_sync_protocol_type + * @peer_addr: peer address with which TM/FTM measurements are required + * @reserved: for alignment + */ +struct iwl_time_sync_cfg_cmd { + __le32 protocols; + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD_API_S_VER_1 */ + +/** + * enum iwl_synced_time_operation - PTM request options + * + * @IWL_SYNCED_TIME_OPERATION_READ_ARTB: read only the ARTB time + * @IWL_SYNCED_TIME_OPERATION_READ_GP2: read only the GP2 time + * @IWL_SYNCED_TIME_OPERATION_READ_BOTH: latch the ARTB and GP2 clocks and + * provide timestamps from both clocks for the same time point + */ +enum iwl_synced_time_operation { + IWL_SYNCED_TIME_OPERATION_READ_ARTB = 1, + IWL_SYNCED_TIME_OPERATION_READ_GP2, + IWL_SYNCED_TIME_OPERATION_READ_BOTH, +}; + +/** + * struct iwl_synced_time_cmd - request synced GP2/ARTB timestamps + * + * @operation: one of &enum iwl_synced_time_operation + */ +struct iwl_synced_time_cmd { + __le32 operation; +} __packed; /* WNM_80211V_TIMING_CMD_API_S_VER_1 */ + +/** + * struct iwl_synced_time_rsp - response to iwl_synced_time_cmd + * + * @operation: one of &enum iwl_synced_time_operation + * @platform_timestamp_hi: high DWORD of the ARTB clock timestamp in nanoseconds + * @platform_timestamp_lo: low DWORD of the ARTB clock timestamp in nanoseconds + * @gp2_timestamp_hi: high DWORD of the GP2 clock timestamp in 10's of + * nanoseconds + * @gp2_timestamp_lo: low DWORD of the GP2 clock timestamp in 10's of + * nanoseconds + */ +struct iwl_synced_time_rsp { + __le32 operation; + __le32 platform_timestamp_hi; + __le32 platform_timestamp_lo; + __le32 gp2_timestamp_hi; + __le32 gp2_timestamp_lo; +} __packed; /* WNM_80211V_TIMING_RSP_API_S_VER_1 */ + +/* PTP_CTX_MAX_DATA_SIZE_IN_API_D_VER_1 */ +#define PTP_CTX_MAX_DATA_SIZE 128 + +/** + * struct iwl_time_msmt_ptp_ctx - Vendor specific information element + * to allow a space for flexibility for the userspace App + * + * @element_id: element id of vendor specific ie + * @length: length of vendor specific ie + * @reserved: for alignment + * @data: vendor specific data blob + */ +struct iwl_time_msmt_ptp_ctx { + /* Differentiate between FTM and TM specific Vendor IEs */ + union { + struct { + u8 element_id; + u8 length; + __le16 reserved; + u8 data[PTP_CTX_MAX_DATA_SIZE]; + } ftm; /* FTM specific vendor IE */ + struct { + u8 element_id; + u8 length; + u8 data[PTP_CTX_MAX_DATA_SIZE]; + } tm; /* TM specific vendor IE */ + }; +} __packed /* PTP_CTX_VER_1 */; + +/** + * struct iwl_time_msmt_notify - Time Sync measurement notification + * for TM/FTM, along with additional meta data. 
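+ *
+ * Illustrative driver-side note (the expression below is a sketch, not part
+ * of the firmware interface): each _hi/_lo pair carries one 64-bit counter
+ * in units of 10 ns, e.g.
+ *	t1_ns = ((u64)le32_to_cpu(t1_hi) << 32 | le32_to_cpu(t1_lo)) * 10;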
+ * + * @peer_addr: peer address + * @reserved: for alignment + * @dialog_token: measurement flow dialog token number + * @followup_dialog_token: Measurement flow previous dialog token number + * @t1_hi: high dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_lo: low dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_max_err: maximum t1-time error in units of 10 nano seconds + * @t4_hi: high dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_lo: low dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_max_err: maximum t4-time error in units of 10 nano seconds + * @t2_hi: high dword of t2-time of the Rx'ed action frame arrival on + * receiver side in units of 10 nano seconds + * @t2_lo: low dword of t2-time of the Rx'ed action frame arrival on + * receiver side in units of 10 nano seconds + * @t2_max_err: maximum t2-time error in units of 10 nano seconds + * @t3_hi: high dword of t3-time of the Tx'ed action frame's Ack departure on + * receiver side in units of 10 nano seconds + * @t3_lo: low dword of t3-time of the Tx'ed action frame's Ack departure on + * receiver side in units of 10 nano seconds + * @t3_max_err: maximum t3-time error in units of 10 nano seconds + * @ptp: vendor specific information element + */ +struct iwl_time_msmt_notify { + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; + __le32 dialog_token; + __le32 followup_dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; + __le32 t2_hi; + __le32 t2_lo; + __le32 t2_max_err; + __le32 t3_hi; + __le32 t3_lo; + __le32 t3_max_err; + struct iwl_time_msmt_ptp_ctx ptp; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_NTFY_API_S_VER_1 */ + +/** + * struct iwl_time_msmt_cfm_notify - Time Sync measurement confirmation + * notification for TM/FTM. Sent on receipt of 802.11 Ack from peer for the + * Tx'ed TM/FTM measurement action frame. 
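+ *
+ * Illustrative note: only the local t1/t4 pair is reported here; together
+ * with the peer-reported t2/t3 values the usual timing-measurement round
+ * trip works out to (t4 - t1) - (t3 - t2), in the same 10 ns units.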
+ * + * @peer_addr: peer address + * @reserved: for alignment + * @dialog_token: measurement flow dialog token number + * @t1_hi: high dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_lo: low dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_max_err: maximum t1-time error in units of 10 nano seconds + * @t4_hi: high dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_lo: low dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_max_err: maximum t4-time error in units of 10 nano seconds + */ +struct iwl_time_msmt_cfm_notify { + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; + __le32 dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NTFY_API_S_VER_1 */ + /** * struct iwl_channel_estimation_cfg - channel estimation reporting config */ struct iwl_channel_estimation_cfg { /** * @flags: flags, see &enum iwl_channel_estimation_flags */ __le32 flags; /** * @timer: if enabled via flags, automatically disable after this many * microseconds */ __le32 timer; /** * @count: if enabled via flags, automatically disable after this many * frames with channel estimation matrix were captured */ __le32 count; /** * @rate_n_flags_mask: only try to record the channel estimation matrix * if the rate_n_flags value for the received frame (let's call * that rx_rnf) matches the mask/value given here like this: * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val. */ __le32 rate_n_flags_mask; /** * @rate_n_flags_val: see @rate_n_flags_mask */ __le32 rate_n_flags_val; /** * @reserved: reserved (for alignment) */ __le32 reserved; /** * @frame_types: bitmap of frame types to capture, the received frame's * subtype|type takes 6 bits in the frame and the corresponding bit * in this field must be set to 1 to capture channel estimation for * that frame type. Set to all-ones to enable capturing for all * frame types. 
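 * For example (illustrative), cpu_to_le64(~0ULL) enables capture for
 * every one of the 64 possible type/subtype combinations.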
*/ __le64 frame_types; } __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */ enum iwl_datapath_monitor_notif_type { IWL_DP_MON_NOTIF_TYPE_EXT_CCA, }; struct iwl_datapath_monitor_notif { __le32 type; u8 mac_id; u8 reserved[3]; } __packed; /* MONITOR_NTF_API_S_VER_1 */ /** * enum iwl_thermal_dual_chain_req_events - firmware SMPS request event * @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation * (subject to other constraints) * @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation * (static SMPS) */ enum iwl_thermal_dual_chain_req_events { THERMAL_DUAL_CHAIN_REQ_ENABLE, THERMAL_DUAL_CHAIN_REQ_DISABLE, }; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */ /** * struct iwl_thermal_dual_chain_request - SMPS request * @event: the type of request, see &enum iwl_thermal_dual_chain_req_events */ struct iwl_thermal_dual_chain_request { __le32 event; } __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */ enum iwl_rlc_chain_info { IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0), IWL_RLC_CHAIN_INFO_VALID = 0x000e, IWL_RLC_CHAIN_INFO_FORCE = 0x0070, IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380, IWL_RLC_CHAIN_INFO_COUNT = 0x0c00, IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000, }; /** * struct iwl_rlc_properties - RLC properties * @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info * @reserved: reserved */ struct iwl_rlc_properties { __le32 rx_chain_info; __le32 reserved; } __packed; /* RLC_PROPERTIES_S_VER_1 */ enum iwl_sad_mode { IWL_SAD_MODE_ENABLED = BIT(0), IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6, IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0, IWL_SAD_MODE_DEFAULT_ANT_A = 0x2, IWL_SAD_MODE_DEFAULT_ANT_B = 0x4, }; /** * struct iwl_sad_properties - SAD properties * @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode * @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode * @mac_id: MAC index * @reserved: reserved */ struct iwl_sad_properties { __le32 chain_a_sad_mode; __le32 chain_b_sad_mode; __le32 mac_id; __le32 reserved; } __packed; /** * struct iwl_rlc_config_cmd - RLC configuration * @phy_id: PHY index * @rlc: RLC properties, &struct iwl_rlc_properties * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties - * @flags: flags, &enum iwl_rlc_flags + * @flags: flags (unused) * @reserved: reserved */ struct iwl_rlc_config_cmd { __le32 phy_id; struct iwl_rlc_properties rlc; struct iwl_sad_properties sad; u8 flags; u8 reserved[3]; } __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */ #define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */ #define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */ /** * enum iwl_rx_baid_action - BAID allocation/config action * @IWL_RX_BAID_ACTION_ADD: add a new BAID session * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session */ enum iwl_rx_baid_action { IWL_RX_BAID_ACTION_ADD, IWL_RX_BAID_ACTION_MODIFY, IWL_RX_BAID_ACTION_REMOVE, }; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */ /** * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data * @sta_id_mask: station ID mask * @tid: the TID for this session * @reserved: reserved * @ssn: the starting sequence number * @win_size: RX BA session window size */ struct iwl_rx_baid_cfg_cmd_alloc { __le32 sta_id_mask; u8 tid; u8 reserved[3]; __le16 ssn; __le16 win_size; } __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */ /** * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data * @old_sta_id_mask: old station ID mask * @new_sta_id_mask: new station ID mask * @tid: TID of the BAID */ struct iwl_rx_baid_cfg_cmd_modify { __le32 old_sta_id_mask; 
__le32 new_sta_id_mask; __le32 tid; } __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */ /** * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data * @baid: the BAID to remove */ struct iwl_rx_baid_cfg_cmd_remove_v1 { __le32 baid; } __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */ /** * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data * @sta_id_mask: the station mask of the BAID to remove * @tid: the TID of the BAID to remove */ struct iwl_rx_baid_cfg_cmd_remove { __le32 sta_id_mask; __le32 tid; } __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */ /** * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command * @action: the action, from &enum iwl_rx_baid_action */ struct iwl_rx_baid_cfg_cmd { __le32 action; union { struct iwl_rx_baid_cfg_cmd_alloc alloc; struct iwl_rx_baid_cfg_cmd_modify modify; struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1; struct iwl_rx_baid_cfg_cmd_remove remove; }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */ } __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */ /** * struct iwl_rx_baid_cfg_resp - BAID allocation response * @baid: the allocated BAID */ struct iwl_rx_baid_cfg_resp { __le32 baid; }; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */ /** * enum iwl_scd_queue_cfg_operation - scheduler queue operation * @IWL_SCD_QUEUE_ADD: allocate a new queue * @IWL_SCD_QUEUE_REMOVE: remove a queue * @IWL_SCD_QUEUE_MODIFY: modify a queue */ enum iwl_scd_queue_cfg_operation { IWL_SCD_QUEUE_ADD = 0, IWL_SCD_QUEUE_REMOVE = 1, IWL_SCD_QUEUE_MODIFY = 2, }; /** * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command * @operation: the operation, see &enum iwl_scd_queue_cfg_operation * @u.add.sta_mask: station mask * @u.add.tid: TID * @u.add.reserved: reserved * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except * %TX_QUEUE_CFG_ENABLE_QUEUE is not valid * @u.add.cb_size: size code * @u.add.bc_dram_addr: byte-count table IOVA * @u.add.tfdq_dram_addr: TFD queue IOVA - * @u.remove.queue: queue ID for removal - * @u.modify.sta_mask: new station mask for modify - * @u.modify.queue: queue ID to modify + * @u.remove.sta_mask: station mask of queue to remove + * @u.remove.tid: TID of queue to remove + * @u.modify.old_sta_mask: old station mask for modify + * @u.modify.tid: TID of queue to modify + * @u.modify.new_sta_mask: new station mask for modify */ struct iwl_scd_queue_cfg_cmd { __le32 operation; union { struct { __le32 sta_mask; u8 tid; u8 reserved[3]; __le32 flags; __le32 cb_size; __le64 bc_dram_addr; __le64 tfdq_dram_addr; } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */ struct { - __le32 queue; + __le32 sta_mask; + __le32 tid; } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */ struct { - __le32 sta_mask; - __le32 queue; + __le32 old_sta_mask; + __le32 tid; + __le32 new_sta_mask; } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */ } __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */ +/** + * enum iwl_sec_key_flags - security key command key flags + * @IWL_SEC_KEY_FLAG_CIPHER_MASK: cipher mask + * @IWL_SEC_KEY_FLAG_CIPHER_WEP: WEP cipher + * @IWL_SEC_KEY_FLAG_CIPHER_CCMP: CCMP/CMAC cipher + * @IWL_SEC_KEY_FLAG_CIPHER_TKIP: TKIP cipher + * @IWL_SEC_KEY_FLAG_CIPHER_GCMP: GCMP/GMAC cipher + * @IWL_SEC_KEY_FLAG_NO_TX: don't install for TX + * @IWL_SEC_KEY_FLAG_KEY_SIZE: large key size (WEP-104, GCMP-256, GMAC-256) + * @IWL_SEC_KEY_FLAG_MFP: MFP is in used for this key + * @IWL_SEC_KEY_FLAG_MCAST_KEY: this is a multicast key + * 
@IWL_SEC_KEY_FLAG_SPP_AMSDU: SPP A-MSDU should be used + */ +enum iwl_sec_key_flags { + IWL_SEC_KEY_FLAG_CIPHER_MASK = 0x07, + IWL_SEC_KEY_FLAG_CIPHER_WEP = 0x01, + IWL_SEC_KEY_FLAG_CIPHER_CCMP = 0x02, + IWL_SEC_KEY_FLAG_CIPHER_TKIP = 0x03, + IWL_SEC_KEY_FLAG_CIPHER_GCMP = 0x05, + IWL_SEC_KEY_FLAG_NO_TX = 0x08, + IWL_SEC_KEY_FLAG_KEY_SIZE = 0x10, + IWL_SEC_KEY_FLAG_MFP = 0x20, + IWL_SEC_KEY_FLAG_MCAST_KEY = 0x40, + IWL_SEC_KEY_FLAG_SPP_AMSDU = 0x80, +}; + +#define IWL_SEC_WEP_KEY_OFFSET 3 + +/** + * struct iwl_sec_key_cmd - security key command + * @action: action from &enum iwl_ctxt_action + * @u.add.sta_mask: station mask for the new key + * @u.add.key_id: key ID (0-7) for the new key + * @u.add.key_flags: key flags per &enum iwl_sec_key_flags + * @u.add.key: key material. WEP keys should start from &IWL_SEC_WEP_KEY_OFFSET. + * @u.add.tkip_mic_rx_key: TKIP MIC RX key + * @u.add.tkip_mic_tx_key: TKIP MIC TX key + * @u.add.rx_seq: RX sequence counter value + * @u.add.tx_seq: TX sequence counter value + * @u.modify.old_sta_mask: old station mask + * @u.modify.new_sta_mask: new station mask + * @u.modify.key_id: key ID + * @u.modify.key_flags: new key flags + * @u.remove.sta_mask: station mask + * @u.remove.key_id: key ID + * @u.remove.key_flags: key flags + */ +struct iwl_sec_key_cmd { + __le32 action; + union { + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + u8 key[32]; + u8 tkip_mic_rx_key[8]; + u8 tkip_mic_tx_key[8]; + __le64 rx_seq; + __le64 tx_seq; + } __packed add; /* SEC_KEY_ADD_CMD_API_S_VER_1 */ + struct { + __le32 old_sta_mask; + __le32 new_sta_mask; + __le32 key_id; + __le32 key_flags; + } __packed modify; /* SEC_KEY_MODIFY_CMD_API_S_VER_1 */ + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + } __packed remove; /* SEC_KEY_REMOVE_CMD_API_S_VER_1 */ + } __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */ +} __packed; /* SEC_KEY_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/dbg-tlv.h b/sys/contrib/dev/iwlwifi/fw/api/dbg-tlv.h index 52bf96585fc6..ba538d70985f 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/dbg-tlv.h +++ b/sys/contrib/dev/iwlwifi/fw/api/dbg-tlv.h @@ -1,528 +1,528 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ #include #define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 #define IWL_FW_INI_MAX_CFG_NAME 64 #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0 #define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0) #define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16) /** * struct iwl_fw_ini_hcmd * * @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC * @group: the desired cmd group * @reserved: to align to FW struct * @data: all of the relevant command data to be sent */ struct iwl_fw_ini_hcmd { u8 id; u8 group; __le16 reserved; - u8 data[0]; + u8 data[]; } __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */ /** * struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures * * @version: TLV version * @domain: domain of the TLV. 
One of &enum iwl_fw_ini_dbg_domain */ struct iwl_fw_ini_header { __le32 version; __le32 domain; /* followed by the data */ } __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */ /** * struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses * * @size: size of each memory chunk * @offset: offset to add to the base address of each chunk */ struct iwl_fw_ini_region_dev_addr { __le32 size; __le32 offset; } __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */ /** * struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos * * @fid: fifos ids array. Used to determine what fifos to collect * @hdr_only: if non zero, collect only the registers * @offset: offset to add to the registers addresses */ struct iwl_fw_ini_region_fifos { __le32 fid[2]; __le32 hdr_only; __le32 offset; } __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */ /** * struct iwl_fw_ini_region_err_table - error table region data * * Configuration to read Umac/Lmac error table * * @version: version of the error table * @base_addr: base address of the error table * @size: size of the error table * @offset: offset to add to &base_addr */ struct iwl_fw_ini_region_err_table { __le32 version; __le32 base_addr; __le32 size; __le32 offset; } __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */ /** * struct iwl_fw_ini_region_special_device_memory - special device memory * * Configuration to read a special memory * * @type: type of the special memory * @version: version of the special memory * @base_addr: base address of the error table * @size: size of the error table * @offset: offset to add to &base_addr */ struct iwl_fw_ini_region_special_device_memory { __le16 type; __le16 version; __le32 base_addr; __le32 size; __le32 offset; } __packed; /* FW_TLV_DEBUG_REGION_SPECIAL_DEVICE_ADDR_API_S_VER_1 */ /** * struct iwl_fw_ini_region_internal_buffer - internal buffer region data * * Configuration to read internal monitor buffer * * @alloc_id: allocation id one of &enum iwl_fw_ini_allocation_id * @base_addr: internal buffer base address * @size: size internal buffer size */ struct iwl_fw_ini_region_internal_buffer { __le32 alloc_id; __le32 base_addr; __le32 size; } __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */ /** * struct iwl_fw_ini_region_tlv - region TLV * * Configures parameters for region data collection * * @hdr: debug header * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID * @type: region type. One of &enum iwl_fw_ini_region_type * @sub_type: region sub type * @sub_type_ver: region sub type version * @reserved: not in use * @name: region name * @dev_addr: device address configuration. Used by * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and * &IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and * IWL_FW_INI_REGION_UMAC_ERROR_TABLE * @internal_buffer: internal monitor buffer configuration. Used by * &IWL_FW_INI_REGION_INTERNAL_BUFFER * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id. * Used by &IWL_FW_INI_REGION_DRAM_BUFFER * @tlv_mask: tlv collection mask. 
Used by &IWL_FW_INI_REGION_TLV * @addrs: array of addresses attached to the end of the region tlv */ struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header hdr; __le32 id; u8 type; u8 sub_type; u8 sub_type_ver; u8 reserved; u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_dev_addr dev_addr; struct iwl_fw_ini_region_fifos fifos; struct iwl_fw_ini_region_err_table err_table; struct iwl_fw_ini_region_internal_buffer internal_buffer; struct iwl_fw_ini_region_special_device_memory special_mem; __le32 dram_alloc_id; __le32 tlv_mask; }; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */ __le32 addrs[]; } __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */ /** * struct iwl_fw_ini_debug_info_tlv * * debug configuration name for a specific image * * @hdr: debug header * @image_type: image type * @debug_cfg_name: debug configuration name */ struct iwl_fw_ini_debug_info_tlv { struct iwl_fw_ini_header hdr; __le32 image_type; u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME]; } __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */ /** * struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers * * @hdr: debug header * @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id * @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location * @req_size: requested buffer size * @max_frags_num: maximum number of fragments * @min_size: minimum buffer size */ struct iwl_fw_ini_allocation_tlv { struct iwl_fw_ini_header hdr; __le32 alloc_id; __le32 buf_location; __le32 req_size; __le32 max_frags_num; __le32 min_size; } __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */ /** * struct iwl_fw_ini_trigger_tlv - trigger TLV * * Trigger that upon firing, determines what regions to collect * * @hdr: debug header * @time_point: time point. One of &enum iwl_fw_ini_time_point * @trigger_reason: trigger reason * @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy * @dump_delay: delay from trigger fire to dump, in usec * @occurrences: max trigger fire occurrences allowed * @reserved: unused * @ignore_consec: ignore consecutive triggers, in usec * @reset_fw: if non zero, will reset and reload the FW * @multi_dut: initiate debug dump data on several DUTs * @regions_mask: mask of regions to collect * @data: trigger data */ struct iwl_fw_ini_trigger_tlv { struct iwl_fw_ini_header hdr; __le32 time_point; __le32 trigger_reason; __le32 apply_policy; __le32 dump_delay; __le32 occurrences; __le32 reserved; __le32 ignore_consec; __le32 reset_fw; __le32 multi_dut; __le64 regions_mask; __le32 data[]; } __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */ /** * struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV * * @hdr: debug header * @time_point: time point. One of &enum iwl_fw_ini_time_point * @period_msec: interval at which the hcmd will be sent to the FW. * Measured in msec (0 = one time command) * @hcmd: a variable length host-command to be sent to apply the configuration */ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_header hdr; __le32 time_point; __le32 period_msec; struct iwl_fw_ini_hcmd hcmd; } __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */ /** * struct iwl_fw_ini_addr_val - Address and value to set it to * * @address: the base address * @value: value to set at address */ struct iwl_fw_ini_addr_val { __le32 address; __le32 value; } __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */ /** * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory. * * @hdr: debug header * @time_point: time point to apply config. 
One of &enum iwl_fw_ini_time_point * @set_type: write access type preset token for time point. * one of &enum iwl_fw_ini_config_set_type * @addr_offset: the offset to add to any item in address[0] field * @addr_val: address value pair */ struct iwl_fw_ini_conf_set_tlv { struct iwl_fw_ini_header hdr; __le32 time_point; __le32 set_type; __le32 addr_offset; - struct iwl_fw_ini_addr_val addr_val[0]; + struct iwl_fw_ini_addr_val addr_val[]; } __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */ /** * enum iwl_fw_ini_config_set_type * * @IWL_FW_INI_CONFIG_SET_TYPE_INVALID: invalid config set * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: for PERIPHERY MAC configuration * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY: for PERIPHERY PHY configuration * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX: for PERIPHERY AUX configuration * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: for DEVICE MEMORY configuration * @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration * @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration * @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported */ enum iwl_fw_ini_config_set_type { IWL_FW_INI_CONFIG_SET_TYPE_INVALID = 0, IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC, IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY, IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX, IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY, IWL_FW_INI_CONFIG_SET_TYPE_CSR, IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR, IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM, IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM, } __packed; /** * enum iwl_fw_ini_allocation_id * * @IWL_FW_INI_ALLOCATION_INVALID: invalid * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_INVALID, IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, IWL_FW_INI_ALLOCATION_ID_DBGC4, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ /** * enum iwl_fw_ini_buffer_location * * @IWL_FW_INI_LOCATION_INVALID: invalid * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location */ enum iwl_fw_ini_buffer_location { IWL_FW_INI_LOCATION_INVALID, IWL_FW_INI_LOCATION_SRAM_PATH, IWL_FW_INI_LOCATION_DRAM_PATH, IWL_FW_INI_LOCATION_NPK_PATH, IWL_FW_INI_LOCATION_NUM, }; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** * enum iwl_fw_ini_region_type * * @IWL_FW_INI_REGION_INVALID: invalid * @IWL_FW_INI_REGION_TLV: uCode and debug TLVs * @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer * @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer * @IWL_FW_INI_REGION_TXF: TX fifos * @IWL_FW_INI_REGION_RXF: RX fifo * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table * @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC * @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY * @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX * 
@IWL_FW_INI_REGION_PAGING: paging memory * @IWL_FW_INI_REGION_CSR: CSR registers * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory * @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory * @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM * @IWL_FW_INI_REGION_NUM: number of region types */ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_INVALID, IWL_FW_INI_REGION_TLV, IWL_FW_INI_REGION_INTERNAL_BUFFER, IWL_FW_INI_REGION_DRAM_BUFFER, IWL_FW_INI_REGION_TXF, IWL_FW_INI_REGION_RXF, IWL_FW_INI_REGION_LMAC_ERROR_TABLE, IWL_FW_INI_REGION_UMAC_ERROR_TABLE, IWL_FW_INI_REGION_RSP_OR_NOTIF, IWL_FW_INI_REGION_DEVICE_MEMORY, IWL_FW_INI_REGION_PERIPHERY_MAC, IWL_FW_INI_REGION_PERIPHERY_PHY, IWL_FW_INI_REGION_PERIPHERY_AUX, IWL_FW_INI_REGION_PAGING, IWL_FW_INI_REGION_CSR, IWL_FW_INI_REGION_DRAM_IMR, IWL_FW_INI_REGION_PCI_IOSF_CONFIG, IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY, IWL_FW_INI_REGION_DBGI_SRAM, IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ enum iwl_fw_ini_region_device_memory_subtype { IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18, IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20, }; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */ /** * enum iwl_fw_ini_time_point * * Hard coded time points in which the driver can send hcmd or perform dump * collection * * @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW * @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif * @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence * @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert * @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error * @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: DHC cmd response and notif * @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification. * data field holds id and group * @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point * @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant * intervals. 
data field holds the interval time in msec * @IWL_FW_INI_TIME_POINT_RESERVED: reserved * @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused * @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable * @IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE: device disable * @IWL_FW_INI_TIME_POINT_HOST_D3_START: D3 start * @IWL_FW_INI_TIME_POINT_HOST_D3_END: D3 end * @IWL_FW_INI_TIME_POINT_MISSED_BEACONS: missed beacons * @IWL_FW_INI_TIME_POINT_ASSOC_FAILED: association failure * @IWL_FW_INI_TIME_POINT_TX_FAILED: Tx frame failed * @IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED: wifi direct action * frame failed * @IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD: Tx latency threshold * @IWL_FW_INI_TIME_POINT_HANG_OCCURRED: hang occurred * @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed * @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx * @IWL_FW_INI_TIME_POINT_DEASSOC: de association * @IWL_FW_INI_TIME_POINT_NUM: number of time points */ enum iwl_fw_ini_time_point { IWL_FW_INI_TIME_POINT_INVALID, IWL_FW_INI_TIME_POINT_EARLY, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, IWL_FW_INI_TIME_POINT_POST_INIT, IWL_FW_INI_TIME_POINT_FW_ASSERT, IWL_FW_INI_TIME_POINT_FW_HW_ERROR, IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG, IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION, IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, IWL_FW_INI_TIME_POINT_USER_TRIGGER, IWL_FW_INI_TIME_POINT_PERIODIC, IWL_FW_INI_TIME_POINT_RESERVED, IWL_FW_INI_TIME_POINT_HOST_ASSERT, IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE, IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, IWL_FW_INI_TIME_POINT_HOST_D3_START, IWL_FW_INI_TIME_POINT_HOST_D3_END, IWL_FW_INI_TIME_POINT_MISSED_BEACONS, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, IWL_FW_INI_TIME_POINT_TX_FAILED, IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED, IWL_FW_INI_TIME_POINT_TX_LATENCY_THRESHOLD, IWL_FW_INI_TIME_POINT_HANG_OCCURRED, IWL_FW_INI_TIME_POINT_EAPOL_FAILED, IWL_FW_INI_TIME_POINT_FAKE_TX, IWL_FW_INI_TIME_POINT_DEASSOC, IWL_FW_INI_TIME_POINT_NUM, }; /* FW_TLV_DEBUG_TIME_POINT_API_E */ /** * enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers * * @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point * @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask. * Append otherwise * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data. 
* Append otherwise * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected */ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0), IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1), IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8), IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9), IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10), IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16), }; /** * enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset * * @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop FW and reload (default) * @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop FW without reload FW * @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop FW with reload FW */ enum iwl_fw_ini_trigger_reset_fw_policy { IWL_FW_INI_RESET_FW_MODE_NOTHING = 0, IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY, IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW }; /** * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags * * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump */ enum iwl_fw_ini_dump_policy { IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0), IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1), IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2), }; /** * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW. * * @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions * @IWL_FW_INI_DUMP_VERBOSE : dump all regions */ enum iwl_fw_ini_dump_type { IWL_FW_INI_DUMP_BRIEF, IWL_FW_INI_DUMP_MEDIUM, IWL_FW_INI_DUMP_VERBOSE, }; #endif diff --git a/sys/contrib/dev/iwlwifi/fw/api/debug.h b/sys/contrib/dev/iwlwifi/fw/api/debug.h index 6255257ddebe..8fef38139bf6 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/debug.h +++ b/sys/contrib/dev/iwlwifi/fw/api/debug.h @@ -1,424 +1,520 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_debug_h__ #define __iwl_fw_api_debug_h__ /** * enum iwl_debug_cmds - debug commands */ enum iwl_debug_cmds { /** * @LMAC_RD_WR: * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and * &struct iwl_dbg_mem_access_rsp */ LMAC_RD_WR = 0x0, /** * @UMAC_RD_WR: * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and * &struct iwl_dbg_mem_access_rsp */ UMAC_RD_WR = 0x1, /** * @HOST_EVENT_CFG: * updates the enabled event severities * &struct iwl_dbg_host_event_cfg_cmd */ HOST_EVENT_CFG = 0x3, /** * @DBGC_SUSPEND_RESUME: * DBGC suspend/resume commad. 
Uses a single dword as data: * 0 - resume DBGC recording * 1 - suspend DBGC recording */ DBGC_SUSPEND_RESUME = 0x7, /** * @BUFFER_ALLOCATION: * passes DRAM buffers to a DBGC * &struct iwl_buf_alloc_cmd */ BUFFER_ALLOCATION = 0x8, + /** + * @GET_TAS_STATUS: + * sends command to fw to get TAS status + * the response is &struct iwl_mvm_tas_status_resp + */ + GET_TAS_STATUS = 0xA, /** * @FW_DUMP_COMPLETE_CMD: * sends command to fw once dump collection completed * &struct iwl_dbg_dump_complete_cmd */ FW_DUMP_COMPLETE_CMD = 0xB, /** * @MFU_ASSERT_DUMP_NTF: * &struct iwl_mfu_assert_dump_notif */ MFU_ASSERT_DUMP_NTF = 0xFE, }; /* Error response/notification */ enum { FW_ERR_UNKNOWN_CMD = 0x0, FW_ERR_INVALID_CMD_PARAM = 0x1, FW_ERR_SERVICE = 0x2, FW_ERR_ARC_MEMORY = 0x3, FW_ERR_ARC_CODE = 0x4, FW_ERR_WATCH_DOG = 0x5, FW_ERR_WEP_GRP_KEY_INDX = 0x10, FW_ERR_WEP_KEY_SIZE = 0x11, FW_ERR_OBSOLETE_FUNC = 0x12, FW_ERR_UNEXPECTED = 0xFE, FW_ERR_FATAL = 0xFF }; /** enum iwl_dbg_suspend_resume_cmds - dbgc suspend resume operations * dbgc suspend resume command operations * @DBGC_RESUME_CMD: resume dbgc recording * @DBGC_SUSPEND_CMD: stop dbgc recording */ enum iwl_dbg_suspend_resume_cmds { DBGC_RESUME_CMD, DBGC_SUSPEND_CMD, }; /** * struct iwl_error_resp - FW error indication * ( REPLY_ERROR = 0x2 ) * @error_type: one of FW_ERR_* * @cmd_id: the command ID for which the error occurred * @reserved1: reserved * @bad_cmd_seq_num: sequence number of the erroneous command * @error_service: which service created the error, applicable only if * error_type = 2, otherwise 0 * @timestamp: TSF in usecs. */ struct iwl_error_resp { __le32 error_type; u8 cmd_id; u8 reserved1; __le16 bad_cmd_seq_num; __le32 error_service; __le64 timestamp; } __packed; #define TX_FIFO_MAX_NUM_9000 8 #define TX_FIFO_MAX_NUM 15 #define RX_FIFO_MAX_NUM 2 #define TX_FIFO_INTERNAL_MAX_NUM 6 /** * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information * * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not * accessible) * @shared_mem_size: shared memory size * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to * 0x0 as accessible only via DBGM RDAT) * @sample_buff_size: internal sample buff size * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre * 8000 HW set to 0x0 as not accessible) * @txfifo_size: size of TXF0 ... TXF7 * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr * @rxfifo_addr: Start address of rxFifo * @internal_txfifo_addr: start address of internalFifo * @internal_txfifo_size: internal fifos' size * * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG * set, the last 3 members don't exist. 
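 *
 * (Illustrative: a driver can tell the two layouts apart by comparing the
 * received payload length against sizeof(struct iwl_shared_mem_cfg_v2).)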
*/ struct iwl_shared_mem_cfg_v2 { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; __le32 txfifo_addr; __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; __le32 rxfifo_size[RX_FIFO_MAX_NUM]; __le32 page_buff_addr; __le32 page_buff_size; __le32 rxfifo_addr; __le32 internal_txfifo_addr; __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ /** * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration * * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) * @txfifo_size: size of TX FIFOs * @rxfifo1_addr: RXF1 addr * @rxfifo1_size: RXF1 size */ struct iwl_shared_mem_lmac_cfg { __le32 txfifo_addr; __le32 txfifo_size[TX_FIFO_MAX_NUM]; __le32 rxfifo1_addr; __le32 rxfifo1_size; } __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ /** * struct iwl_shared_mem_cfg - Shared memory configuration information * * @shared_mem_addr: shared memory address * @shared_mem_size: shared memory size * @sample_buff_addr: internal sample (mon/adc) buff addr * @sample_buff_size: internal sample buff size * @rxfifo2_addr: start addr of RXF2 * @rxfifo2_size: size of RXF2 * @page_buff_addr: used by UMAC and performance debug (page miss analysis), * when paging is not supported this should be 0 * @page_buff_size: size of %page_buff_addr * @lmac_num: number of LMACs (1 or 2) * @lmac_smem: per - LMAC smem data * @rxfifo2_control_addr: start addr of RXF2C * @rxfifo2_control_size: size of RXF2C */ struct iwl_shared_mem_cfg { __le32 shared_mem_addr; __le32 shared_mem_size; __le32 sample_buff_addr; __le32 sample_buff_size; __le32 rxfifo2_addr; __le32 rxfifo2_size; __le32 page_buff_addr; __le32 page_buff_size; __le32 lmac_num; struct iwl_shared_mem_lmac_cfg lmac_smem[3]; __le32 rxfifo2_control_addr; __le32 rxfifo2_control_size; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */ /** * struct iwl_mfuart_load_notif_v1 - mfuart image version & status * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) * @installed_ver: installed image version * @external_ver: external image version * @status: MFUART loading status * @duration: MFUART loading time */ struct iwl_mfuart_load_notif_v1 { __le32 installed_ver; __le32 external_ver; __le32 status; __le32 duration; } __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */ /** * struct iwl_mfuart_load_notif - mfuart image version & status * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) * @installed_ver: installed image version * @external_ver: external image version * @status: MFUART loading status * @duration: MFUART loading time * @image_size: MFUART image size in bytes */ struct iwl_mfuart_load_notif { __le32 installed_ver; __le32 external_ver; __le32 status; __le32 duration; /* image size valid only in v2 of the command */ __le32 image_size; } __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */ /** * struct iwl_mfu_assert_dump_notif - mfuart dump logs * ( MFU_ASSERT_DUMP_NTF = 0xfe ) * @assert_id: mfuart assert id that cause the notif * @curr_reset_num: number of asserts since uptime * @index_num: current chunk id * @parts_num: total number of chunks * @data_size: number of data bytes sent * @data: data buffer */ struct iwl_mfu_assert_dump_notif { __le32 assert_id; __le32 curr_reset_num; __le16 index_num; __le16 parts_num; __le32 data_size; - __le32 data[0]; + __le32 data[]; } __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ /** * enum iwl_mvm_marker_id - marker ids * * The ids for different type of markers to insert into the usniffer logs * * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker * @MARKER_ID_SYNC_CLOCK: sync FW 
time and systime */ enum iwl_mvm_marker_id { MARKER_ID_TX_FRAME_LATENCY = 1, MARKER_ID_SYNC_CLOCK = 2, }; /* MARKER_ID_API_E_VER_2 */ /** * struct iwl_mvm_marker - mark info into the usniffer logs * * (MARKER_CMD = 0xcb) * * Mark the UTC time stamp into the usniffer logs together with additional * metadata, so the usniffer output can be parsed. * In the command response the ucode will return the GP2 time. * * @dw_len: The amount of dwords following this byte including this byte. * @marker_id: A unique marker id (iwl_mvm_marker_id). * @reserved: reserved. * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC * @metadata: additional meta data that will be written to the unsiffer log */ struct iwl_mvm_marker { u8 dw_len; u8 marker_id; __le16 reserved; __le64 timestamp; - __le32 metadata[0]; + __le32 metadata[]; } __packed; /* MARKER_API_S_VER_1 */ /** * struct iwl_mvm_marker_rsp - Response to marker cmd * * @gp2: The gp2 clock value in the FW */ struct iwl_mvm_marker_rsp { __le32 gp2; } __packed; /* Operation types for the debug mem access */ enum { DEBUG_MEM_OP_READ = 0, DEBUG_MEM_OP_WRITE = 1, DEBUG_MEM_OP_WRITE_BYTES = 2, }; #define DEBUG_MEM_MAX_SIZE_DWORDS 32 /** * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory * @op: DEBUG_MEM_OP_* * @addr: address to read/write from/to * @len: in dwords, to read/write * @data: for write opeations, contains the source buffer */ struct iwl_dbg_mem_access_cmd { __le32 op; __le32 addr; __le32 len; __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ /* Status responses for the debug mem access */ enum { DEBUG_MEM_STATUS_SUCCESS = 0x0, DEBUG_MEM_STATUS_FAILED = 0x1, DEBUG_MEM_STATUS_LOCKED = 0x2, DEBUG_MEM_STATUS_HIDDEN = 0x3, DEBUG_MEM_STATUS_LENGTH = 0x4, }; /** * struct iwl_dbg_mem_access_rsp - Response to debug mem commands * @status: DEBUG_MEM_STATUS_* * @len: read dwords (0 for write operations) * @data: contains the read DWs */ struct iwl_dbg_mem_access_rsp { __le32 status; __le32 len; __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ /** * struct iwl_dbg_suspend_resume_cmd - dbgc suspend resume command * @operation: suspend or resume operation, uses * &enum iwl_dbg_suspend_resume_cmds */ struct iwl_dbg_suspend_resume_cmd { __le32 operation; } __packed; #define BUF_ALLOC_MAX_NUM_FRAGS 16 /** * struct iwl_buf_alloc_frag - a DBGC fragment * @addr: base address of the fragment * @size: size of the fragment */ struct iwl_buf_alloc_frag { __le64 addr; __le32 size; } __packed; /* FRAGMENT_STRUCTURE_API_S_VER_1 */ /** * struct iwl_buf_alloc_cmd - buffer allocation command * @alloc_id: &enum iwl_fw_ini_allocation_id * @buf_location: &enum iwl_fw_ini_buffer_location * @num_frags: number of fragments * @frags: fragments array */ struct iwl_buf_alloc_cmd { __le32 alloc_id; __le32 buf_location; __le32 num_frags; struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */ #define DRAM_INFO_FIRST_MAGIC_WORD 0x76543210 #define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF /** * struct iwL_dram_info - DRAM fragments allocation struct * * Driver will fill in the first 1K(+) of the pointed DRAM fragment * * @first_word: magic word value * @second_word: magic word value * @framfrags: DRAM fragmentaion detail */ struct iwl_dram_info { __le32 first_word; __le32 second_word; struct iwl_buf_alloc_cmd dram_frags[IWL_FW_INI_ALLOCATION_NUM - 1]; } __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */ /** * struct iwl_dbgc1_info - DBGC1 address and size * * Driver will 
fill the dbcg1 address and size at address based on config TLV. * * @first_word: all 0 set as identifier * @dbgc1_add_lsb: LSB bits of DBGC1 physical address * @dbgc1_add_msb: MSB bits of DBGC1 physical address * @dbgc1_size: DBGC1 size */ struct iwl_dbgc1_info { __le32 first_word; __le32 dbgc1_add_lsb; __le32 dbgc1_add_msb; __le32 dbgc1_size; } __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */ /** * struct iwl_dbg_host_event_cfg_cmd * @enabled_severities: enabled severities */ struct iwl_dbg_host_event_cfg_cmd { __le32 enabled_severities; } __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */ /** * struct iwl_dbg_dump_complete_cmd - dump complete cmd * * @tp: timepoint whose dump has completed * @tp_data: timepoint data */ struct iwl_dbg_dump_complete_cmd { __le32 tp; __le32 tp_data; } __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */ +#define TAS_LMAC_BAND_HB 0 +#define TAS_LMAC_BAND_LB 1 +#define TAS_LMAC_BAND_UHB 2 +#define TAS_LMAC_BAND_INVALID 3 + +/** + * struct iwl_mvm_tas_status_per_mac - tas status per lmac + * @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE + * @static_dis_reason: TAS static disable reason, uses + * &enum iwl_mvm_tas_statically_disabled_reason + * @dynamic_status: Current TAS status. uses + * &enum iwl_mvm_tas_dyna_status + * @near_disconnection: is TAS currently near disconnection per lmac? - TRUE/FALSE + * @max_reg_pwr_limit: Regulatory power limits in dBm + * @sar_limit: SAR limits per lmac in dBm + * @band: Band per lmac + * @reserved: reserved + */ +struct iwl_mvm_tas_status_per_mac { + u8 static_status; + u8 static_dis_reason; + u8 dynamic_status; + u8 near_disconnection; + __le16 max_reg_pwr_limit; + __le16 sar_limit; + u8 band; + u8 reserved[3]; +} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/ + +/** + * struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS + * @tas_fw_version: TAS FW version + * @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE + * @curr_mcc: current mcc + * @block_list: country block list + * @tas_status_mac: TAS status per lmac, uses + * &struct iwl_mvm_tas_status_per_mac + * @in_dual_radio: is TAS in dual radio? - TRUE/FALSE + * @reserved: reserved + */ +struct iwl_mvm_tas_status_resp { + u8 tas_fw_version; + u8 is_uhb_for_usa_enable; + __le16 curr_mcc; + __le16 block_list[16]; + struct iwl_mvm_tas_status_per_mac tas_status_mac[2]; + u8 in_dual_radio; + u8 reserved[3]; +} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/ + +/** + * enum iwl_mvm_tas_dyna_status - TAS current running status + * @TAS_DYNA_INACTIVE: TAS status is inactive + * @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled due because FW is in MVM mode + * or is in softap mode. 
+ * @TAS_DYNA_INACTIVE_TRIGGER_MODE: TAS is disabled because FW is in + * multi user trigger mode + * @TAS_DYNA_INACTIVE_BLOCK_LISTED: TAS is disabled because current mcc + * is blocklisted mcc + * @TAS_DYNA_INACTIVE_UHB_NON_US: TAS is disabled because current band is UHB + * and current mcc is USA + * @TAS_DYNA_ACTIVE: TAS is currently active + * @TAS_DYNA_STATUS_MAX: TAS status max value + */ +enum iwl_mvm_tas_dyna_status { + TAS_DYNA_INACTIVE, + TAS_DYNA_INACTIVE_MVM_MODE, + TAS_DYNA_INACTIVE_TRIGGER_MODE, + TAS_DYNA_INACTIVE_BLOCK_LISTED, + TAS_DYNA_INACTIVE_UHB_NON_US, + TAS_DYNA_ACTIVE, + + TAS_DYNA_STATUS_MAX, +}; /*_TAS_DYNA_STATUS_E*/ + +/** + * enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason + * @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS + * @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 Dbm + * @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid + * @TAS_DISABLED_REASON_MAX: TAS disable reason max value + */ +enum iwl_mvm_tas_statically_disabled_reason { + TAS_DISABLED_DUE_TO_BIOS, + TAS_DISABLED_DUE_TO_SAR_6DBM, + TAS_DISABLED_REASON_INVALID, + + TAS_DISABLED_REASON_MAX, +}; /*_TAS_STATICALLY_DISABLED_REASON_E*/ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/filter.h b/sys/contrib/dev/iwlwifi/fw/api/filter.h index e44c70b7c790..88fe61d144d4 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/filter.h +++ b/sys/contrib/dev/iwlwifi/fw/api/filter.h @@ -1,39 +1,39 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_filter_h__ #define __iwl_fw_api_filter_h__ #include "fw/api/mac.h" #define MAX_PORT_ID_NUM 2 #define MAX_MCAST_FILTERING_ADDRESSES 256 /** * struct iwl_mcast_filter_cmd - configure multicast filter. * @filter_own: Set 1 to filter out multicast packets sent by station itself * @port_id: Multicast MAC addresses array specifier. This is a strange way * to identify network interface adopted in host-device IF. * It is used by FW as index in array of addresses. This array has * MAX_PORT_ID_NUM members. * @count: Number of MAC addresses in the array * @pass_all: Set 1 to pass all multicast packets. * @bssid: current association BSSID. * @reserved: reserved * @addr_list: Place holder for array of MAC addresses. * IMPORTANT: add padding if necessary to ensure DWORD alignment. 
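 *
 * For example (an illustrative size calculation on the driver side, not
 * mandated by this interface):
 *	len = roundup(sizeof(struct iwl_mcast_filter_cmd) + count * ETH_ALEN, 4);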
*/ struct iwl_mcast_filter_cmd { u8 filter_own; u8 port_id; u8 count; u8 pass_all; u8 bssid[6]; u8 reserved[2]; - u8 addr_list[0]; + u8 addr_list[]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_filter_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/location.h b/sys/contrib/dev/iwlwifi/fw/api/location.h index 12af94e166ed..b044990c7b87 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/location.h +++ b/sys/contrib/dev/iwlwifi/fw/api/location.h @@ -1,1694 +1,1700 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ /** * enum iwl_location_subcmd_ids - location group command IDs */ enum iwl_location_subcmd_ids { /** * @TOF_RANGE_REQ_CMD: TOF ranging request, * uses one of &struct iwl_tof_range_req_cmd_v5, * &struct iwl_tof_range_req_cmd_v7, * &struct iwl_tof_range_req_cmd_v8, * &struct iwl_tof_range_req_cmd_v9, * &struct iwl_tof_range_req_cmd_v11, * &struct iwl_tof_range_req_cmd_v7 */ TOF_RANGE_REQ_CMD = 0x0, /** * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd */ TOF_CONFIG_CMD = 0x1, /** * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses * &struct iwl_tof_range_abort_cmd */ TOF_RANGE_ABORT_CMD = 0x2, /** * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config, * uses &struct iwl_tof_range_req_ext_cmd */ TOF_RANGE_REQ_EXT_CMD = 0x3, /** - * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, - * uses &struct iwl_tof_responder_config_cmd + * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, one of + * &struct iwl_tof_responder_config_cmd_v6, + * &struct iwl_tof_responder_config_cmd_v7, + * &struct iwl_tof_responder_config_cmd_v8 or + * &struct iwl_tof_responder_config_cmd_v9 */ TOF_RESPONDER_CONFIG_CMD = 0x4, /** * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration, * uses &struct iwl_tof_responder_dyn_config_cmd */ TOF_RESPONDER_DYN_CONFIG_CMD = 0x5, /** * @CSI_HEADER_NOTIFICATION: CSI header */ CSI_HEADER_NOTIFICATION = 0xFA, /** * @CSI_CHUNKS_NOTIFICATION: CSI chunk, * uses &struct iwl_csi_chunk_notification */ CSI_CHUNKS_NOTIFICATION = 0xFB, /** * @TOF_LC_NOTIF: used for LCI/civic location, contains just * the action frame */ TOF_LC_NOTIF = 0xFC, /** * @TOF_RESPONDER_STATS: FTM responder statistics notification, * uses &struct iwl_ftm_responder_stats */ TOF_RESPONDER_STATS = 0xFD, /** * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses * &struct iwl_tof_mcsi_notif */ TOF_MCSI_DEBUG_NOTIF = 0xFE, /** - * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using - * &struct iwl_tof_range_rsp_ntfy + * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using one of + * &struct iwl_tof_range_rsp_ntfy_v5, + * &struct iwl_tof_range_rsp_ntfy_v6, + * &struct iwl_tof_range_rsp_ntfy_v7 or + * &struct iwl_tof_range_rsp_ntfy_v8 */ TOF_RANGE_RESPONSE_NOTIF = 0xFF, }; /** * struct iwl_tof_config_cmd - ToF configuration * @tof_disabled: indicates if ToF is disabled (or not) * @one_sided_disabled: indicates if one-sided is disabled (or not) * @is_debug_mode: indiciates if debug mode is active * @is_buf_required: indicates if channel estimation buffer is required */ struct iwl_tof_config_cmd { u8 tof_disabled; u8 one_sided_disabled; u8 is_debug_mode; u8 is_buf_required; } __packed; /** * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT * @IWL_TOF_BW_20_HT: 20 MHz HT * 
@IWL_TOF_BW_40: 40 MHz * @IWL_TOF_BW_80: 80 MHz * @IWL_TOF_BW_160: 160 MHz * @IWL_TOF_BW_NUM: number of tof bandwidths */ enum iwl_tof_bandwidth { IWL_TOF_BW_20_LEGACY, IWL_TOF_BW_20_HT, IWL_TOF_BW_40, IWL_TOF_BW_80, IWL_TOF_BW_160, IWL_TOF_BW_NUM, }; /* LOCAT_BW_TYPE_E */ /* * enum iwl_tof_algo_type - Algorithym type for range measurement request */ enum iwl_tof_algo_type { IWL_TOF_ALGO_TYPE_MAX_LIKE = 0, IWL_TOF_ALGO_TYPE_LINEAR_REG = 1, IWL_TOF_ALGO_TYPE_FFT = 2, /* Keep last */ IWL_TOF_ALGO_TYPE_INVALID, }; /* ALGO_TYPE_E */ /* * enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications */ enum iwl_tof_mcsi_enable { IWL_TOF_MCSI_DISABLED = 0, IWL_TOF_MCSI_ENABLED = 1, }; /* MCSI_ENABLE_E */ /** * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: spefici calibration mode is * valid * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report * support is valid * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support * is valid * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support * is valid * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure * is valid * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid * @IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT: enable/disable NDP ranging support * is valid * @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid * @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid * @IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID: session id flag is valid * @IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR: the bss_color field is valid * @IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR: the * min_time_between_msr and max_time_between_msr fields are valid */ enum iwl_tof_responder_cmd_valid_field { IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1), IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2), IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3), IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4), IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5), IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6), IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7), IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8), IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9), IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22), IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23), IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24), IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = BIT(25), IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = BIT(26), IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = BIT(27), }; /** * enum iwl_tof_responder_cfg_flags - responder configuration flags * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI * 
@IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support * @IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask * @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging * @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the * initiator supports it * @IWL_TOF_RESPONDER_FLAGS_SESSION_ID: send the session id in the initial FTM * frame. */ enum iwl_tof_responder_cfg_flags { IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1), IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2), IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5), IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6), IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7), IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8), IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_AB_MSK, IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24), IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25), IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27), }; /** * struct iwl_tof_responder_config_cmd_v6 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth * @rate: current AP rate * @channel_num: current AP Channel * @ctrl_ch_position: coding of the control channel position relative to * the center frequency, see iwl_mvm_get_ctrl_pos() * @sta_id: index of the AP STA when in AP mode * @reserved1: reserved * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug * purposes, simulating station movement by adding various values * to this field * @common_calib: XVT: common calibration value * @specific_calib: XVT: specific calibration value * @bssid: Current AP BSSID * @reserved2: reserved */ struct iwl_tof_responder_config_cmd_v6 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 bandwidth; u8 rate; u8 channel_num; u8 ctrl_ch_position; u8 sta_id; u8 reserved1; __le16 toa_offset; __le16 common_calib; __le16 specific_calib; u8 bssid[ETH_ALEN]; __le16 reserved2; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ /** * struct iwl_tof_responder_config_cmd_v7 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. 
* @rate: current AP rate * @channel_num: current AP Channel * @ctrl_ch_position: coding of the control channel position relative to * the center frequency, see iwl_mvm_get_ctrl_pos() * @sta_id: index of the AP STA when in AP mode * @reserved1: reserved * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug * purposes, simulating station movement by adding various values * to this field * @common_calib: XVT: common calibration value * @specific_calib: XVT: specific calibration value * @bssid: Current AP BSSID * @reserved2: reserved */ struct iwl_tof_responder_config_cmd_v7 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 format_bw; u8 rate; u8 channel_num; u8 ctrl_ch_position; u8 sta_id; u8 reserved1; __le16 toa_offset; __le16 common_calib; __le16 specific_calib; u8 bssid[ETH_ALEN]; __le16 reserved2; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_7 */ #define IWL_RESPONDER_STS_POS 3 #define IWL_RESPONDER_TOTAL_LTF_POS 6 /** * struct iwl_tof_responder_config_cmd_v8 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @rate: current AP rate * @channel_num: current AP Channel * @ctrl_ch_position: coding of the control channel position relative to * the center frequency, see iwl_mvm_get_ctrl_pos() * @sta_id: index of the AP STA when in AP mode * @reserved1: reserved * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug * purposes, simulating station movement by adding various values * to this field * @common_calib: XVT: common calibration value * @specific_calib: XVT: specific calibration value * @bssid: Current AP BSSID * @r2i_ndp_params: parameters for R2I NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) * bits 6 - 7: max number of total LTFs see * &enum ieee80211_range_params_max_total_ltf * @i2r_ndp_params: parameters for I2R NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams * bits 6 - 7: max number of total LTFs see * &enum ieee80211_range_params_max_total_ltf */ struct iwl_tof_responder_config_cmd_v8 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 format_bw; u8 rate; u8 channel_num; u8 ctrl_ch_position; u8 sta_id; u8 reserved1; __le16 toa_offset; __le16 common_calib; __le16 specific_calib; u8 bssid[ETH_ALEN]; u8 r2i_ndp_params; u8 i2r_ndp_params; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ /** * struct iwl_tof_responder_config_cmd_v9 - ToF AP mode (for debug) * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field * @responder_cfg_flags: &iwl_tof_responder_cfg_flags * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @bss_color: current AP bss_color * @channel_num: current AP Channel * @ctrl_ch_position: coding of the control channel position relative to * the center frequency, see iwl_mvm_get_ctrl_pos() * @sta_id: index of the AP STA when in AP mode * @reserved1: reserved * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug * purposes, simulating station movement by adding various values * to this field * @common_calib: XVT: common calibration value * @specific_calib: XVT: specific calibration value * @bssid: Current AP BSSID * @r2i_ndp_params: parameters for R2I NDP. 
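The r2i_ndp_params/i2r_ndp_params bytes described above are plain bit fields. A minimal sketch of how a caller could assemble one, using the IWL_RESPONDER_STS_POS (3) and IWL_RESPONDER_TOTAL_LTF_POS (6) shift macros defined just above; the helper name is illustrative and not part of the driver:

	static inline u8 iwl_tof_ndp_params(u8 max_ltf_rep, u8 max_sts,
					    u8 max_total_ltf)
	{
		/* bits 0-2: LTF repetitions, bits 3-5: spatial streams,
		 * bits 6-7: total LTFs
		 */
		return (max_ltf_rep & 0x7) |
		       (max_sts << IWL_RESPONDER_STS_POS) |
		       (max_total_ltf << IWL_RESPONDER_TOTAL_LTF_POS);
	}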
* bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) * bits 6 - 7: max number of total LTFs see * &enum ieee80211_range_params_max_total_ltf * @i2r_ndp_params: parameters for I2R NDP. * bits 0 - 2: max number of LTF repetitions * bits 3 - 5: max number of spatial streams * bits 6 - 7: max number of total LTFs see * &enum ieee80211_range_params_max_total_ltf * @min_time_between_msr: for non trigger based NDP ranging, minimum time * between measurements in milliseconds. * @max_time_between_msr: for non trigger based NDP ranging, maximum time * between measurements in milliseconds. */ struct iwl_tof_responder_config_cmd_v9 { __le32 cmd_valid_fields; __le32 responder_cfg_flags; u8 format_bw; u8 bss_color; u8 channel_num; u8 ctrl_ch_position; u8 sta_id; u8 reserved1; __le16 toa_offset; __le16 common_calib; __le16 specific_calib; u8 bssid[ETH_ALEN]; u8 r2i_ndp_params; u8 i2r_ndp_params; __le16 min_time_between_msr; __le16 max_time_between_msr; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if * needed, 0-padding such that the next part is dword-aligned, then CIVIC * data (if exists) follows, and then 0-padding again to complete a * 4-multiple long buffer. */ struct iwl_tof_responder_dyn_config_cmd_v2 { __le32 lci_len; __le32 civic_len; u8 lci_civic[]; } __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ #define IWL_LCI_MAX_SIZE 160 #define IWL_CIVIC_MAX_SIZE 160 #define HLTK_11AZ_LEN 32 /** * enum iwl_responder_dyn_cfg_valid_flags - valid flags for dyn_config_cmd * @IWL_RESPONDER_DYN_CFG_VALID_LCI: LCI data is valid * @IWL_RESPONDER_DYN_CFG_VALID_CIVIC: Civic data is valid * @IWL_RESPONDER_DYN_CFG_VALID_PASN_STA: the pasn_addr, HLTK and cipher fields * are valid. */ enum iwl_responder_dyn_cfg_valid_flags { IWL_RESPONDER_DYN_CFG_VALID_LCI = BIT(0), IWL_RESPONDER_DYN_CFG_VALID_CIVIC = BIT(1), IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = BIT(2), }; /** * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings * @cipher: The negotiated cipher. see &enum iwl_location_cipher. * @valid_flags: flags indicating which fields in the command are valid. see * &enum iwl_responder_dyn_cfg_valid_flags. * @lci_len: length of the LCI data in bytes * @civic_len: length of the Civic data in bytes * @lci_buf: the LCI buffer * @civic_buf: the Civic buffer * @hltk_buf: HLTK for secure LTF bits generation for the specified station * @addr: mac address of the station for which to use the HLTK * @reserved: for alignment */ struct iwl_tof_responder_dyn_config_cmd { u8 cipher; u8 valid_flags; u8 lci_len; u8 civic_len; u8 lci_buf[IWL_LCI_MAX_SIZE]; u8 civic_buf[IWL_LCI_MAX_SIZE]; u8 hltk_buf[HLTK_11AZ_LEN]; u8 addr[ETH_ALEN]; u8 reserved[2]; } __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_3 */ /** * struct iwl_tof_range_req_ext_cmd - extended range req for WLS * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF * @reserved: reserved * @min_delta_ftm: Minimal time between two consecutive measurements, * in units of 100us. 
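A minimal sketch of filling the fixed-size v3 dynamic configuration above with LCI data only; the helper name and calling convention are illustrative, and the caller is assumed to keep lci_len within IWL_LCI_MAX_SIZE:

	static void
	iwl_responder_dyn_cfg_set_lci(struct iwl_tof_responder_dyn_config_cmd *cmd,
				      const u8 *lci, u8 lci_len)
	{
		cmd->valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI;
		cmd->lci_len = lci_len;		/* <= IWL_LCI_MAX_SIZE */
		memcpy(cmd->lci_buf, lci, lci_len);
	}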
0 means no preference by station * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended * value be sent to the AP * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended * value to be sent to the AP * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended * value to be sent to the AP */ struct iwl_tof_range_req_ext_cmd { __le16 tsf_timer_offset_msec; __le16 reserved; u8 min_delta_ftm; u8 ftm_format_and_bw20M; u8 ftm_format_and_bw40M; u8 ftm_format_and_bw80M; } __packed; /** * enum iwl_tof_location_query - values for query bitmap * @IWL_TOF_LOC_LCI: query LCI * @IWL_TOF_LOC_CIVIC: query civic */ enum iwl_tof_location_query { IWL_TOF_LOC_LCI = 0x01, IWL_TOF_LOC_CIVIC = 0x02, }; /** * struct iwl_tof_range_req_ap_entry_v2 - AP configuration parameters * @channel_num: Current AP Channel * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth. * @tsf_delta_direction: TSF relatively to the subject AP * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @bssid: AP's BSSID * @measure_type: Measurement type: 0 - two sided, 1 - One sided * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the * number of measurement iterations (min 2^0 = 1, max 2^14) * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts = 0 * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31); * 1-sided: how many rts/cts pairs should be used per burst. * @retries_per_sample: Max number of retries that the LMAC should send * in case of no replies by the AP. * @tsf_delta: TSF Delta in units of microseconds. * The difference between the AP TSF and the device local clock. * @location_req: Location Request Bit[0] LCI should be sent in the FTMR; * Bit[1] Civic should be sent in the FTMR * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) * @enable_dyn_ack: Enable Dynamic ACK BW. * 0: Initiator interact with regular AP; * 1: Initiator interact with Responder machine: need to send the * Initiator Acks with HT 40MHz / 80MHz, since the Responder should * use it for its ch est measurement (this flag will be set when we * configure the opposite machine to be Responder). * @rssi: Last received value * legal values: -128-0 (0x7f). above 0x0 indicating an invalid value. * @algo_type: &enum iwl_tof_algo_type * @notify_mcsi: &enum iwl_tof_mcsi_ntfy. * @reserved: For alignment and future use */ struct iwl_tof_range_req_ap_entry_v2 { u8 channel_num; u8 bandwidth; u8 tsf_delta_direction; u8 ctrl_ch_position; u8 bssid[ETH_ALEN]; u8 measure_type; u8 num_of_bursts; __le16 burst_period; u8 samples_per_burst; u8 retries_per_sample; __le32 tsf_delta; u8 location_req; u8 asap_mode; u8 enable_dyn_ack; s8 rssi; u8 algo_type; u8 notify_mcsi; __le16 reserved; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_2 */ /** * enum iwl_initiator_ap_flags - per responder FTM configuration flags * @IWL_INITIATOR_AP_FLAGS_ASAP: Request for ASAP measurement. * @IWL_INITIATOR_AP_FLAGS_LCI_REQUEST: Request for LCI information * @IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST: Request for CIVIC information * @IWL_INITIATOR_AP_FLAGS_DYN_ACK: Send HT/VHT ack for FTM frames. If not set, * 20Mhz dup acks will be sent. * @IWL_INITIATOR_AP_FLAGS_ALGO_LR: Use LR algo type for rtt calculation. * Default algo type is ML. * @IWL_INITIATOR_AP_FLAGS_ALGO_FFT: Use FFT algo type for rtt calculation. 
* Default algo type is ML. * @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the * driver. * @IWL_INITIATOR_AP_FLAGS_NON_TB: Use non trigger based flow * @IWL_INITIATOR_AP_FLAGS_TB: Use trigger based flow * @IWL_INITIATOR_AP_FLAGS_SECURED: request secure LTF measurement * @IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK: Send LMR feedback * @IWL_INITIATOR_AP_FLAGS_USE_CALIB: Use calibration values from the request * instead of fw internal values. * @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR * frames with protected management frames. * @IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK: terminate the session if * the responder asked for LMR feedback although the initiator did not set * the LMR feedback bit in the FTM request. If not set, the initiator will * continue with the session and will provide the LMR feedback. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = BIT(2), IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = BIT(3), IWL_INITIATOR_AP_FLAGS_DYN_ACK = BIT(4), IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5), IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6), IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8), IWL_INITIATOR_AP_FLAGS_NON_TB = BIT(9), IWL_INITIATOR_AP_FLAGS_TB = BIT(10), IWL_INITIATOR_AP_FLAGS_SECURED = BIT(11), IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12), IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), }; /** * struct iwl_tof_range_req_ap_entry_v3 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @reserved: For alignment and future use * @tsf_delta: not in use */ struct iwl_tof_range_req_ap_entry_v3 { __le32 initiator_ap_flags; u8 channel_num; u8 bandwidth; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; __le16 reserved; __le32 tsf_delta; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */ /** * enum iwl_location_frame_format - location frame formats * @IWL_LOCATION_FRAME_FORMAT_LEGACY: legacy * @IWL_LOCATION_FRAME_FORMAT_HT: HT * @IWL_LOCATION_FRAME_FORMAT_VHT: VHT * @IWL_LOCATION_FRAME_FORMAT_HE: HE */ enum iwl_location_frame_format { IWL_LOCATION_FRAME_FORMAT_LEGACY, IWL_LOCATION_FRAME_FORMAT_HT, IWL_LOCATION_FRAME_FORMAT_VHT, IWL_LOCATION_FRAME_FORMAT_HE, }; /** * enum iwl_location_bw - location bandwidth selection * @IWL_LOCATION_BW_20MHZ: 20MHz * @IWL_LOCATION_BW_40MHZ: 40MHz * @IWL_LOCATION_BW_80MHZ: 80MHz */ enum iwl_location_bw { IWL_LOCATION_BW_20MHZ, IWL_LOCATION_BW_40MHZ, IWL_LOCATION_BW_80MHZ, IWL_LOCATION_BW_160MHZ, }; #define TK_11AZ_LEN 32 #define LOCATION_BW_POS 4 /** * struct iwl_tof_range_req_ap_entry_v4 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. 
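The format_bw byte used throughout the entry structs that follow packs the frame format into bits 0-3 and the bandwidth into bits 4-7. With the LOCATION_BW_POS macro defined just above, a hypothetical helper (not part of the driver) would look like:

	static inline u8 iwl_location_format_bw(enum iwl_location_frame_format format,
						enum iwl_location_bw bw)
	{
		/* bits 0-3: frame format, bits 4-7: bandwidth */
		return (format & 0xf) | (bw << LOCATION_BW_POS);
	}

	/* e.g. iwl_location_format_bw(IWL_LOCATION_FRAME_FORMAT_HE,
	 *			       IWL_LOCATION_BW_80MHZ)
	 */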
* @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @reserved: For alignment and future use * @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement */ struct iwl_tof_range_req_ap_entry_v4 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; __le16 reserved; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_4 */ /** * enum iwl_location_cipher - location cipher selection * @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128 * @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128 * @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256 * @IWL_LOCATION_CIPHER_INVALID: security is not used. * @IWL_LOCATION_CIPHER_MAX: maximum value for this enum. */ enum iwl_location_cipher { IWL_LOCATION_CIPHER_CCMP_128, IWL_LOCATION_CIPHER_GCMP_128, IWL_LOCATION_CIPHER_GCMP_256, IWL_LOCATION_CIPHER_INVALID, IWL_LOCATION_CIPHER_MAX, }; /** * struct iwl_tof_range_req_ap_entry_v6 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, * otherwise should be set to &IWL_MVM_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement * @calib: An array of calibration values per FTM rx bandwidth. * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the * calibration value that corresponds to the rx bandwidth of the FTM * frame. * @beacon_interval: beacon interval of the AP in TUs. Only required if * &IWL_INITIATOR_AP_FLAGS_TB is set. 
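For a secured (802.11az) measurement against the v6 entry defined right below, the relevant fields are the SECURED flag, the negotiated cipher and the HLTK/TK keys. A minimal sketch under the assumption that the keys were already derived elsewhere; the helper and the GCMP-256 choice are illustrative only:

	static void
	iwl_ftm_entry_set_secured(struct iwl_tof_range_req_ap_entry_v6 *entry,
				  const u8 *hltk, const u8 *tk)
	{
		entry->initiator_ap_flags |=
			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
		entry->cipher = IWL_LOCATION_CIPHER_GCMP_256;
		memcpy(entry->hltk, hltk, HLTK_11AZ_LEN);
		memcpy(entry->tk, tk, TK_11AZ_LEN);
	}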
*/ struct iwl_tof_range_req_ap_entry_v6 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; u8 sta_id; u8 cipher; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; __le16 calib[IWL_TOF_BW_NUM]; __le16 beacon_interval; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */ /** * struct iwl_tof_range_req_ap_entry_v7 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, * otherwise should be set to &IWL_MVM_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement * @calib: An array of calibration values per FTM rx bandwidth. * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the * calibration value that corresponds to the rx bandwidth of the FTM * frame. * @beacon_interval: beacon interval of the AP in TUs. Only required if * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. */ struct iwl_tof_range_req_ap_entry_v7 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; u8 sta_id; u8 cipher; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; __le16 calib[IWL_TOF_BW_NUM]; __le16 beacon_interval; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ #define IWL_LOCATION_MAX_STS_POS 3 /** * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: Recommended value to be sent to the AP. Measurement * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 
2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, * otherwise should be set to &IWL_MVM_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement * @calib: An array of calibration values per FTM rx bandwidth. * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the * calibration value that corresponds to the rx bandwidth of the FTM * frame. * @beacon_interval: beacon interval of the AP in TUs. Only required if * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams * bits 6 - 7: reserved * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) * bits 6 - 7: reserved * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. * One of &enum ieee80211_range_params_max_total_ltf. * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. * One of &enum ieee80211_range_params_max_total_ltf. */ struct iwl_tof_range_req_ap_entry_v8 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; u8 sta_id; u8 cipher; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; __le16 calib[IWL_TOF_BW_NUM]; __le16 beacon_interval; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; u8 r2i_ndp_params; u8 i2r_ndp_params; u8 r2i_max_total_ltf; u8 i2r_max_total_ltf; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */ /** * struct iwl_tof_range_req_ap_entry_v9 - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @channel_num: AP Channel number * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. * bits 4 - 7: &enum iwl_location_bw. * @ctrl_ch_position: Coding of the control channel position relative to the * center frequency, see iwl_mvm_get_ctrl_pos(). * @ftmr_max_retries: Max number of retries to send the FTMR in case of no * reply from the AP. * @bssid: AP's BSSID * @burst_period: For EDCA based ranging: Recommended value to be sent to the * AP. Measurement periodicity In units of 100ms. ignored if * num_of_bursts_exp = 0. * For non trigger based NDP ranging, the maximum time between * measurements in units of milliseconds. * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, * otherwise should be set to &IWL_MVM_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. 
* @hltk: HLTK to be used for secured 11az measurement * @tk: TK to be used for secured 11az measurement * @calib: An array of calibration values per FTM rx bandwidth. * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the * calibration value that corresponds to the rx bandwidth of the FTM * frame. * @beacon_interval: beacon interval of the AP in TUs. Only required if * &IWL_INITIATOR_AP_FLAGS_TB is set. * @bss_color: the BSS color of the responder. Only valid if * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id * is set to &IWL_MVM_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams * bits 6 - 7: reserved * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams (supported values are < 2) * bits 6 - 7: reserved * @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation. * One of &enum ieee80211_range_params_max_total_ltf. * @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation. * One of &enum ieee80211_range_params_max_total_ltf. * @bss_color: the BSS color of the responder. Only valid if * &IWL_INITIATOR_AP_FLAGS_NON_TB or &IWL_INITIATOR_AP_FLAGS_TB is set. * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz * @min_time_between_msr: For non trigger based NDP ranging, the minimum time * between measurements in units of milliseconds */ struct iwl_tof_range_req_ap_entry_v9 { __le32 initiator_ap_flags; u8 channel_num; u8 format_bw; u8 ctrl_ch_position; u8 ftmr_max_retries; u8 bssid[ETH_ALEN]; __le16 burst_period; u8 samples_per_burst; u8 num_of_bursts; u8 sta_id; u8 cipher; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; __le16 calib[IWL_TOF_BW_NUM]; u16 beacon_interval; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; u8 r2i_ndp_params; u8 i2r_ndp_params; u8 r2i_max_total_ltf; u8 i2r_max_total_ltf; u8 bss_color; u8 band; __le16 min_time_between_msr; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ /** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as * possible (not supported for this release) * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon * timeout expiration * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the * earlier of: measurements completion / timeout * expiration. */ enum iwl_tof_response_mode { IWL_MVM_TOF_RESPONSE_ASAP, IWL_MVM_TOF_RESPONSE_TIMEOUT, IWL_MVM_TOF_RESPONSE_COMPLETE, }; /** * enum iwl_tof_initiator_flags * * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run * the algo on ant A+B, instead of only one of them. 
* @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A fo TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B fo TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C fo TX ACKs during FTM * @IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM: use random mac address for FTM * @IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB: use the specific calib value from * the range request command * @IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB: use the common calib value from the * ragne request command * @IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT: support non-asap measurements */ enum iwl_tof_initiator_flags { IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2), IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5), IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6), IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = BIT(7), IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = BIT(15), IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = BIT(16), IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20), }; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ #define IWL_MVM_TOF_MAX_APS 5 #define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5 /** * struct iwl_tof_range_req_cmd_v5 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @initiator: 0- NW initiated, 1 - Client Initiated * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, * '1' - run ML-Algo for ToF only * @req_timeout: Requested timeout of the response in units of 100ms. * This is equivalent to the session time configured to the * LMAC in Initiator Request * @report_policy: Supported partially for this release: For current release - * the range report will be uploaded as a batch when ready or * when the session is done (successfully / partially). * one of iwl_tof_response_mode. * @reserved0: reserved * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), * '1' Use MAC Address randomization according to the below * @range_req_bssid: ranging request BSSID * @macaddr_template: MAC address template to use for non-randomized bits * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
* Bits set to 1 shall be randomized by the UMAC * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT) * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT) * @common_calib: The common calib value to inject to this measurement calc * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data */ struct iwl_tof_range_req_cmd_v5 { __le32 initiator_flags; u8 request_id; u8 initiator; u8 one_sided_los_disable; u8 req_timeout; u8 report_policy; u8 reserved0; u8 num_of_ap; u8 macaddr_random; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 ftm_rx_chains; u8 ftm_tx_chains; __le16 common_calib; __le16 specific_calib; struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ /** * struct iwl_tof_range_req_cmd_v7 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @common_calib: The common calib value to inject to this measurement calc * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ struct iwl_tof_range_req_cmd_v7 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */ /** * struct iwl_tof_range_req_cmd_v8 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @common_calib: The common calib value to inject to this measurement calc * @specific_calib: The specific calib value to inject to this measurement calc * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. 
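The macaddr_template/macaddr_mask rule quoted in these commands (mask bit 0: copy from the template, mask bit 1: randomized by the UMAC) amounts to the per-byte combination below; the helper is only an illustration of the documented semantics, not driver code, and rnd stands for any source of random bytes:

	static void iwl_tof_apply_addr_mask(u8 addr[ETH_ALEN],
					    const u8 tmpl[ETH_ALEN],
					    const u8 mask[ETH_ALEN],
					    const u8 rnd[ETH_ALEN])
	{
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			addr[i] = (tmpl[i] & ~mask[i]) | (rnd[i] & mask[i]);
	}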
*/ struct iwl_tof_range_req_cmd_v8 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */ /** * struct iwl_tof_range_req_cmd_v9 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ struct iwl_tof_range_req_cmd_v9 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */ /** * struct iwl_tof_range_req_cmd_v11 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ struct iwl_tof_range_req_cmd_v11 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */ /** * struct iwl_tof_range_req_cmd_v12 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. 
* @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. */ struct iwl_tof_range_req_cmd_v12 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ /** * struct iwl_tof_range_req_cmd_v13 - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC * @macaddr_template: MAC address template to use for non-randomized bits * @req_timeout_ms: Requested timeout of the response in units of milliseconds. * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v9. */ struct iwl_tof_range_req_cmd_v13 { __le32 initiator_flags; u8 request_id; u8 num_of_ap; u8 range_req_bssid[ETH_ALEN]; u8 macaddr_mask[ETH_ALEN]; u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the * request * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the * sent request will not be handled */ enum iwl_tof_range_request_status { IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS, IWL_TOF_RANGE_REQUEST_STATUS_BUSY, }; /** * enum iwl_tof_entry_status * * @IWL_TOF_ENTRY_SUCCESS: successful measurement. * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure. * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request. * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request. * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet. * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no * measurement was completed. * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due inability to switch * from the primary channel. * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM. * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown * reason. * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid * T1/T4. * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame * structure. * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled. * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the * initiator for some period, period supplied in @refusal_period. * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments. * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled. * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder override the original * parameters within the current session. 
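Putting the pieces together, a single-responder v13 request (defined just above) might be initialized as in the sketch below; the values are placeholders, and byte order follows the __le32 fields of the struct:

	struct iwl_tof_range_req_cmd_v13 cmd = {
		.request_id = 1,
		.num_of_ap = 1,			/* must not exceed IWL_MVM_TOF_MAX_APS */
		.req_timeout_ms = cpu_to_le32(500),	/* session time in ms */
		.tsf_mac_id = cpu_to_le32(0xff),	/* 0xff: no TSF reporting */
	};
	/* cmd.ap[0] is then filled as a struct iwl_tof_range_req_ap_entry_v9 */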
*/ enum iwl_tof_entry_status { IWL_TOF_ENTRY_SUCCESS = 0, IWL_TOF_ENTRY_GENERAL_FAILURE = 1, IWL_TOF_ENTRY_NO_RESPONSE = 2, IWL_TOF_ENTRY_REQUEST_REJECTED = 3, IWL_TOF_ENTRY_NOT_SCHEDULED = 4, IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5, IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6, IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7, IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8, IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9, IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10, IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11, IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12, IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13, IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14, IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15, }; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v3 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time that took for the last measurement for * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @reserved: reserved * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @range: Measured range [cm] * @range_variance: Measured range variance [cm] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib val that was used in for this AP measurement * @specific_calib: val that was used in for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. */ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 reserved; u8 refusal_period; __le32 range; __le32 range_variance; __le32 timestamp; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. 
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time that took for the last measurement for * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @last_burst: 1 if no more FTM sessions are scheduled for this responder * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @start_tsf: measurement start time in TSF of the mac specified in the range * request * @rx_rate_n_flags: rate and flags of the last FTM frame received from this * responder * @tx_rate_n_flags: rate and flags of the last ack sent to this responder * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib val that was used in for this AP measurement * @specific_calib: val that was used in for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. */ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 last_burst; u8 refusal_period; __le32 timestamp; __le32 start_tsf; __le32 rx_rate_n_flags; __le32 tx_rate_n_flags; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v5 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time that took for the last measurement for * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @last_burst: 1 if no more FTM sessions are scheduled for this responder * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @start_tsf: measurement start time in TSF of the mac specified in the range * request * @rx_rate_n_flags: rate and flags of the last FTM frame received from this * responder * @tx_rate_n_flags: rate and flags of the last ack sent to this responder * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib val that was used in for this AP measurement * @specific_calib: val that was used in for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. 
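Since the rtt fields in these notifications are round-trip times in picoseconds, the corresponding one-way distance (which the older v3 entry also reported directly as range in cm) is roughly rtt * c / 2, with c about 0.03 cm per picosecond. A back-of-the-envelope helper, ignoring calibration offsets and assuming the caller already converted from little endian:

	static inline u32 iwl_rtt_psec_to_cm(u32 rtt_psec)
	{
		/* 0.0299792458 cm/psec / 2 ~= 3/200 */
		return rtt_psec * 3 / 200;
	}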
* @rttConfidence: a value between 0 - 31 that represents the rtt accuracy. * @reserved: for alignment */ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 last_burst; u8 refusal_period; __le32 timestamp; __le32 start_tsf; __le32 rx_rate_n_flags; __le32 tx_rate_n_flags; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; u8 rttConfidence; u8 reserved[3]; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */ /** * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz * @rtt: The Round Trip Time that took for the last measurement for * current AP [pSec] * @rtt_variance: The Variance of the RTT values measured for current AP * @rtt_spread: The Difference between the maximum and the minimum RTT * values measured for current AP in the current session [pSec] * @rssi: RSSI as uploaded in the Channel Estimation notification * @rssi_spread: The Difference between the maximum and the minimum RSSI values * measured for current AP in the current session * @last_burst: 1 if no more FTM sessions are scheduled for this responder * @refusal_period: refusal period in case of * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was * uploaded by the LMAC * @start_tsf: measurement start time in TSF of the mac specified in the range * request * @rx_rate_n_flags: rate and flags of the last FTM frame received from this * responder * @tx_rate_n_flags: rate and flags of the last ack sent to this responder * @t2t3_initiator: as calculated from the algo in the initiator * @t1t4_responder: as calculated from the algo in the responder * @common_calib: Calib val that was used in for this AP measurement * @specific_calib: val that was used in for this AP measurement * @papd_calib_output: The result of the tof papd calibration that was injected * into the algorithm. * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy. * @reserved: for alignment * @rx_pn: the last PN used for this responder Rx in case PMF is configured in * LE byte order. * @tx_pn: the last PN used for this responder Tx in case PMF is configured in * LE byte order. */ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; __le32 rtt; __le32 rtt_variance; __le32 rtt_spread; s8 rssi; u8 rssi_spread; u8 last_burst; u8 refusal_period; __le32 timestamp; __le32 start_tsf; __le32 rx_rate_n_flags; __le32 tx_rate_n_flags; __le32 t2t3_initiator; __le32 t1t4_responder; __le16 common_calib; __le16 specific_calib; __le32 papd_calib_output; u8 rttConfidence; u8 reserved[3]; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6, LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */ /** * enum iwl_tof_response_status - tof response status * * @IWL_TOF_RESPONSE_SUCCESS: successful range. * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration. * partial result of ranges done so far is included in the response. * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command. * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed. 
*/ enum iwl_tof_response_status { IWL_TOF_RESPONSE_SUCCESS = 0, IWL_TOF_RESPONSE_TIMEOUT = 1, IWL_TOF_RESPONSE_ABORTED = 4, IWL_TOF_RESPONSE_FAILED = 5, }; /* LOCATION_RNG_RSP_STATUS */ /** * struct iwl_tof_range_rsp_ntfy_v5 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @request_status: status of current measurement session, one of * &enum iwl_tof_response_status. * @last_in_batch: reprot policy (when not all responses are uploaded at once) * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v5 { u8 request_id; u8 request_status; u8 last_in_batch; u8 num_of_aps; struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */ /** * struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. * @reserved: reserved * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v6 { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */ /** * struct iwl_tof_range_rsp_ntfy_v7 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. * @reserved: reserved * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v7 { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */ /** * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. 
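A minimal sketch of consuming the v8 notification defined right below: iterate the reported entries and skip those whose per-AP status is not successful. The handler name and locals are illustrative:

	static void
	iwl_handle_range_rsp_v8(const struct iwl_tof_range_rsp_ntfy_v8 *rsp)
	{
		int i;

		for (i = 0; i < rsp->num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
			const struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *ap =
				&rsp->ap[i];

			if (ap->measure_status != IWL_TOF_ENTRY_SUCCESS)
				continue;
			/* le32_to_cpu(ap->rtt) is the round-trip time in psec */
		}
	}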
* @reserved: reserved * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v8 { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8, LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */ #define IWL_MVM_TOF_MCSI_BUF_SIZE (245) /** * struct iwl_tof_mcsi_notif - used for debug * @token: token ID for the current session * @role: '0' - initiator, '1' - responder * @reserved: reserved * @initiator_bssid: initiator machine * @responder_bssid: responder machine * @mcsi_buffer: debug data */ struct iwl_tof_mcsi_notif { u8 token; u8 role; __le16 reserved; u8 initiator_bssid[ETH_ALEN]; u8 responder_bssid[ETH_ALEN]; u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; } __packed; /** * struct iwl_tof_range_abort_cmd * @request_id: corresponds to a range request * @reserved: reserved */ struct iwl_tof_range_abort_cmd { u8 request_id; u8 reserved[3]; } __packed; enum ftm_responder_stats_flags { FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0), FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1), FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2), FTM_RESP_STAT_TRIGGER_DUP = BIT(3), FTM_RESP_STAT_DUP = BIT(4), FTM_RESP_STAT_DUP_IN_WIN = BIT(5), FTM_RESP_STAT_DUP_OUT_WIN = BIT(6), FTM_RESP_STAT_SCHED_SUCCESS = BIT(7), FTM_RESP_STAT_ASAP_REQ = BIT(8), FTM_RESP_STAT_NON_ASAP_REQ = BIT(9), FTM_RESP_STAT_ASAP_RESP = BIT(10), FTM_RESP_STAT_NON_ASAP_RESP = BIT(11), FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12), FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13), FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14), FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15), FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16), FTM_RESP_STAT_FAIL_GC = BIT(17), FTM_RESP_STAT_SUCCESS = BIT(18), FTM_RESP_STAT_INTEL_IE = BIT(19), FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20), FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21), FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22), FTM_RESP_STAT_PROCESS_FAIL = BIT(23), FTM_RESP_STAT_ACK = BIT(24), FTM_RESP_STAT_NACK = BIT(25), FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26), FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27), FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28), FTM_RESP_STAT_INITIATOR_ADDED = BIT(29), FTM_RESP_STAT_ERR_LIST_FULL = BIT(30), FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31), }; /* RESP_IND_E */ /** * struct iwl_ftm_responder_stats - FTM responder statistics * @addr: initiator address * @success_ftm: number of successful ftm frames * @ftm_per_burst: num of FTM frames that were received * @flags: &enum ftm_responder_stats_flags * @duration: actual duration of FTM * @allocated_duration: time that was allocated for this FTM session * @bw: FTM request bandwidth * @rate: FTM request rate * @reserved: for alingment and future use */ struct iwl_ftm_responder_stats { u8 addr[ETH_ALEN]; u8 success_ftm; u8 ftm_per_burst; __le32 flags; __le32 duration; __le32 allocated_duration; u8 bw; u8 rate; __le16 reserved; } __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */ #define IWL_CSI_MAX_EXPECTED_CHUNKS 16 #define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_1 0x0003 #define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_1 0x000c #define IWL_CSI_CHUNK_CTL_NUM_MASK_VER_2 0x00ff #define IWL_CSI_CHUNK_CTL_IDX_MASK_VER_2 0xff00 struct iwl_csi_chunk_notification { __le32 token; __le16 seq; __le16 ctl; __le32 size; u8 data[]; } __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1/VER_2 */ #endif /* __iwl_fw_api_location_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/mac-cfg.h b/sys/contrib/dev/iwlwifi/fw/api/mac-cfg.h index 712532f17630..184db5a6f06f 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/mac-cfg.h 
+++ b/sys/contrib/dev/iwlwifi/fw/api/mac-cfg.h @@ -1,185 +1,634 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_cfg_h__ #define __iwl_fw_api_mac_cfg_h__ +#include "mac.h" + /** * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs */ enum iwl_mac_conf_subcmd_ids { /** * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd */ LOW_LATENCY_CMD = 0x3, /** * @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd */ CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4, /** * @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif */ MISSED_VAP_NOTIF = 0xFA, /** * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd */ SESSION_PROTECTION_CMD = 0x5, /** * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd */ CANCEL_CHANNEL_SWITCH_CMD = 0x6, - + /** + * @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd + */ + MAC_CONFIG_CMD = 0x8, + /** + * @LINK_CONFIG_CMD: &struct iwl_link_config_cmd + */ + LINK_CONFIG_CMD = 0x9, + /** + * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd + */ + STA_CONFIG_CMD = 0xA, + /** + * @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd + */ + AUX_STA_CMD = 0xB, + /** + * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd + */ + STA_REMOVE_CMD = 0xC, + /** + * @STA_DISABLE_TX_CMD: &struct iwl_mvm_sta_disable_tx_cmd + */ + STA_DISABLE_TX_CMD = 0xD, /** * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif */ SESSION_PROTECTION_NOTIF = 0xFB, /** * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif */ PROBE_RESPONSE_DATA_NOTIF = 0xFC, /** * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif */ CHANNEL_SWITCH_START_NOTIF = 0xFF, /** *@CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif */ CHANNEL_SWITCH_ERROR_NOTIF = 0xF9, }; #define IWL_P2P_NOA_DESC_COUNT (2) /** * struct iwl_p2p_noa_attr - NOA attr contained in probe resp FW notification * * @id: attribute id * @len_low: length low half * @len_high: length high half * @idx: instance of NoA timing * @ctwin: GO's ct window and pwer save capability * @desc: NoA descriptor * @reserved: reserved for alignment purposes */ struct iwl_p2p_noa_attr { u8 id; u8 len_low; u8 len_high; u8 idx; u8 ctwin; struct ieee80211_p2p_noa_desc desc[IWL_P2P_NOA_DESC_COUNT]; u8 reserved; } __packed; #define IWL_PROBE_RESP_DATA_NO_CSA (0xff) /** * struct iwl_probe_resp_data_notif - notification with NOA and CSA counter * * @mac_id: the mac which should send the probe response * @noa_active: notifies if the noa attribute should be handled * @noa_attr: P2P NOA attribute * @csa_counter: current csa counter * @reserved: reserved for alignment purposes */ struct iwl_probe_resp_data_notif { __le32 mac_id; __le32 noa_active; struct iwl_p2p_noa_attr noa_attr; u8 csa_counter; u8 reserved[3]; } __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */ /** * struct iwl_missed_vap_notif - notification of missing vap detection * * @mac_id: the mac for which the ucode sends the notification for * @num_beacon_intervals_elapsed: beacons elpased with no vap profile inside * @profile_periodicity: beacons period to have our profile inside * @reserved: reserved for alignment purposes */ struct iwl_missed_vap_notif { __le32 mac_id; u8 num_beacon_intervals_elapsed; u8 profile_periodicity; u8 reserved[2]; } __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */ /** * struct 
iwl_channel_switch_start_notif - Channel switch start notification * * @id_and_color: ID and color of the MAC */ -struct iwl_channel_switch_start_notif { +struct iwl_channel_switch_start_notif_v1 { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ +/** + * struct iwl_channel_switch_start_notif - Channel switch start notification + * + * @link_id: FW link id + */ +struct iwl_channel_switch_start_notif { + __le32 link_id; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_3 */ + #define CS_ERR_COUNT_ERROR BIT(0) #define CS_ERR_LONG_DELAY_AFTER_CS BIT(1) #define CS_ERR_LONG_TX_BLOCK BIT(2) #define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3) /** - * struct iwl_channel_switch_error_notif - Channel switch error notification + * struct iwl_channel_switch_error_notif_v1 - Channel switch error notification * * @mac_id: the mac for which the ucode sends the notification for * @csa_err_mask: mask of channel switch error that can occur */ -struct iwl_channel_switch_error_notif { +struct iwl_channel_switch_error_notif_v1 { __le32 mac_id; __le32 csa_err_mask; } __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */ +/** + * struct iwl_channel_switch_error_notif - Channel switch error notification + * + * @link_id: FW link id + * @csa_err_mask: mask of channel switch error that can occur + */ +struct iwl_channel_switch_error_notif { + __le32 link_id; + __le32 csa_err_mask; +} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_2 */ + /** * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command * - * @mac_id: the mac that should cancel the channel switch + * @id: the id of the link or mac that should cancel the channel switch */ struct iwl_cancel_channel_switch_cmd { - __le32 mac_id; + __le32 id; } __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */ /** * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command * * @mac_id: MAC ID for channel switch - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @tsf: beacon tsf * @cs_count: channel switch count from CSA/eCSA IE * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals * at the new channel after the channel switch, otherwise (N == 0) expect * beacon right after the channel switch. * @cs_mode: 1 - quiet, 0 - otherwise * @reserved: reserved for alignment purposes */ struct iwl_chan_switch_te_cmd { __le32 mac_id; __le32 action; __le32 tsf; u8 cs_count; u8 cs_delayed_bcn_count; u8 cs_mode; u8 reserved; } __packed; /* MAC_CHANNEL_SWITCH_TIME_EVENT_S_VER_2 */ /** * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode' * * @mac_id: MAC ID to whom to apply the low-latency configurations * @low_latency_rx: 1/0 to set/clear Rx low latency direction * @low_latency_tx: 1/0 to set/clear Tx low latency direction * @reserved: reserved for alignment purposes */ struct iwl_mac_low_latency_cmd { __le32 mac_id; u8 low_latency_rx; u8 low_latency_tx; __le16 reserved; } __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */ +/** + * struct iwl_mac_client_data - configuration data for client MAC context + * + * @is_assoc: 1 for associated state, 0 otherwise + * @esr_transition_timeout: the timeout required by the AP for the + * eSR transition. + * Available only from version 2 of the command. + * This values comes from the EMLSR transition delay in the EML + * Capabilities subfield. + * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j. 
+ * @assoc_id: unique ID assigned by the AP during association + * @reserved1: alignment + * @data_policy: see &enum iwl_mac_data_policy + * @reserved2: alignment + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + */ +struct iwl_mac_client_data { + u8 is_assoc; + u8 esr_transition_timeout; + __le16 medium_sync_delay; + + __le16 assoc_id; + __le16 reserved1; + __le16 data_policy; + __le16 reserved2; + __le32 ctwin; +} __packed; /* MAC_CONTEXT_CONFIG_CLIENT_DATA_API_S_VER_2 */ + +/** + * struct iwl_mac_p2p_dev_data - configuration data for P2P device MAC context + * + * @is_disc_extended: if set to true, P2P Device discoverability is enabled on + * other channels as well. This should be to true only in case that the + * device is discoverable and there is an active GO. Note that setting this + * field when not needed, will increase the number of interrupts and have + * effect on the platform power, as this setting opens the Rx filters on + * all macs. + */ +struct iwl_mac_p2p_dev_data { + __le32 is_disc_extended; +} __packed; /* MAC_CONTEXT_CONFIG_P2P_DEV_DATA_API_S_VER_1 */ + +/** + * enum iwl_mac_config_filter_flags - MAC context configuration filter flags + * + * @MAC_CFG_FILTER_PROMISC: accept all data frames + * @MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT: pass all management and + * control frames to the host + * @MAC_CFG_FILTER_ACCEPT_GRP: accept multicast frames + * @MAC_CFG_FILTER_ACCEPT_BEACON: accept beacon frames + * @MAC_CFG_FILTER_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe response + * @MAC_CFG_FILTER_ACCEPT_PROBE_REQ: accept probe requests + */ +enum iwl_mac_config_filter_flags { + MAC_CFG_FILTER_PROMISC = BIT(0), + MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT = BIT(1), + MAC_CFG_FILTER_ACCEPT_GRP = BIT(2), + MAC_CFG_FILTER_ACCEPT_BEACON = BIT(3), + MAC_CFG_FILTER_ACCEPT_BCAST_PROBE_RESP = BIT(4), + MAC_CFG_FILTER_ACCEPT_PROBE_REQ = BIT(5), +}; /* MAC_FILTER_FLAGS_MASK_E_VER_1 */ + +/** + * struct iwl_mac_config_cmd - command structure to configure MAC contexts in + * MLD API + * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, see &enum iwl_ctxt_action + * @mac_type: one of &enum iwl_mac_types + * @local_mld_addr: mld address + * @reserved_for_local_mld_addr: reserved + * @filter_flags: combination of &enum iwl_mac_config_filter_flags + * @he_support: does this MAC support HE + * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling + * @eht_support: does this MAC support EHT. Requires he_support + * @nic_not_ack_enabled: mark that the NIC doesn't support receiving + * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). + * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 + * len delim to determine if AGG or single. 
+ * @client: client mac data + * @go_ibss: mac data for go or ibss + * @p2p_dev: mac data for p2p device + */ +struct iwl_mac_config_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* MAC_CONTEXT_TYPE_API_E */ + __le32 mac_type; + u8 local_mld_addr[6]; + __le16 reserved_for_local_mld_addr; + __le32 filter_flags; + __le16 he_support; + __le16 he_ap_support; + __le32 eht_support; + __le32 nic_not_ack_enabled; + /* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_2 */ + union { + struct iwl_mac_client_data client; + struct iwl_mac_p2p_dev_data p2p_dev; + }; +} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2 */ + +/** + * enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being + * modified in &iwl_link_ctx_cfg_cmd + * + * @LINK_CONTEXT_MODIFY_ACTIVE: covers iwl_link_ctx_cfg_cmd::active + * @LINK_CONTEXT_MODIFY_RATES_INFO: covers iwl_link_ctx_cfg_cmd::cck_rates, + * iwl_link_ctx_cfg_cmd::ofdm_rates, + * iwl_link_ctx_cfg_cmd::cck_short_preamble, + * iwl_link_ctx_cfg_cmd::short_slot + * @LINK_CONTEXT_MODIFY_PROTECT_FLAGS: covers + * iwl_link_ctx_cfg_cmd::protection_flags + * @LINK_CONTEXT_MODIFY_QOS_PARAMS: covers iwl_link_ctx_cfg_cmd::qos_flags, + * iwl_link_ctx_cfg_cmd::ac, + * @LINK_CONTEXT_MODIFY_BEACON_TIMING: covers iwl_link_ctx_cfg_cmd::bi, + * iwl_link_ctx_cfg_cmd::dtim_interval, + * iwl_link_ctx_cfg_cmd::dtim_time, + * iwl_link_ctx_cfg_cmd::dtim_tsf, + * iwl_link_ctx_cfg_cmd::assoc_beacon_arrive_time. + * This flag can be set only once after assoc. + * @LINK_CONTEXT_MODIFY_HE_PARAMS: covers + * iwl_link_ctx_cfg_cmd::htc_trig_based_pkt_ext + * iwl_link_ctx_cfg_cmd::rand_alloc_ecwmin, + * iwl_link_ctx_cfg_cmd::rand_alloc_ecwmax, + * iwl_link_ctx_cfg_cmd::trig_based_txf, + * iwl_link_ctx_cfg_cmd::bss_color, + * iwl_link_ctx_cfg_cmd::ndp_fdbk_buff_th_exp, + * iwl_link_ctx_cfg_cmd::ref_bssid_addr + * iwl_link_ctx_cfg_cmd::bssid_index, + * iwl_link_ctx_cfg_cmd::frame_time_rts_th. + * This flag can be set any time. + * @LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE: covers + * iwl_link_ctx_cfg_cmd::bss_color_disable + * @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask. + * This flag can be set only if the MAC that this link relates to has + * eht_support set to true. + * @LINK_CONTEXT_MODIFY_ALL: set all above flags + */ +enum iwl_link_ctx_modify_flags { + LINK_CONTEXT_MODIFY_ACTIVE = BIT(0), + LINK_CONTEXT_MODIFY_RATES_INFO = BIT(1), + LINK_CONTEXT_MODIFY_PROTECT_FLAGS = BIT(2), + LINK_CONTEXT_MODIFY_QOS_PARAMS = BIT(3), + LINK_CONTEXT_MODIFY_BEACON_TIMING = BIT(4), + LINK_CONTEXT_MODIFY_HE_PARAMS = BIT(5), + LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6), + LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7), + LINK_CONTEXT_MODIFY_ALL = 0xff, +}; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */ + +/** + * enum iwl_link_ctx_protection_flags - link protection flags + * @LINK_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, + * this will require CCK RTS/CTS2self. + * RTS/CTS will protect full burst time. 
+ * @LINK_PROT_FLG_HT_PROT: enable HT protection + * @LINK_PROT_FLG_FAT_PROT: protect 40 MHz transmissions + * @LINK_PROT_FLG_SELF_CTS_EN: allow CTS2self + */ +enum iwl_link_ctx_protection_flags { + LINK_PROT_FLG_TGG_PROTECT = BIT(0), + LINK_PROT_FLG_HT_PROT = BIT(1), + LINK_PROT_FLG_FAT_PROT = BIT(2), + LINK_PROT_FLG_SELF_CTS_EN = BIT(3), +}; /* LINK_PROTECT_FLAGS_E_VER_1 */ + +/** + * enum iwl_link_ctx_flags - link context flags + * + * @LINK_FLG_BSS_COLOR_DIS: BSS color disable, don't use the BSS + * color for RX filter but use MAC header + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG + * @LINK_FLG_MU_EDCA_CW: indicates that there is an element of MU EDCA + * parameter set, i.e. the backoff counters for trig-based ACs + * @LINK_FLG_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are + * not allowed (as there are OBSS that might classify such transmissions as + * radar pulses). + * @LINK_FLG_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change + * of threshold + */ +enum iwl_link_ctx_flags { + LINK_FLG_BSS_COLOR_DIS = BIT(0), + LINK_FLG_MU_EDCA_CW = BIT(1), + LINK_FLG_RU_2MHZ_BLOCK = BIT(2), + LINK_FLG_NDP_FEEDBACK_ENABLED = BIT(3), +}; /* LINK_CONTEXT_FLAG_E_VER_1 */ + +/** + * struct iwl_link_config_cmd - command structure to configure the LINK context + * in MLD API + * ( LINK_CONFIG_CMD =0x9 ) + * + * @action: action to perform, see &enum iwl_ctxt_action + * @link_id: the id of the link that this cmd configures + * @mac_id: interface ID. Relevant only if action is FW_CTXT_ACTION_ADD + * @phy_id: PHY index. Can be changed only if the link was inactive + * (and stays inactive). If the link is active (or becomes active), + * this field is ignored. + * @local_link_addr: the links MAC address. Can be changed only if the link was + * inactive (and stays inactive). If the link is active + * (or becomes active), this field is ignored. + * @reserved_for_local_link_addr: reserved + * @modify_mask: from &enum iwl_link_ctx_modify_flags, selects what to change. + * Relevant only if action is FW_CTXT_ACTION_MODIFY + * @active: indicates whether the link is active or not + * @listen_lmac: indicates whether the link should be allocated on the Listen + * Lmac or on the Main Lmac. Cannot be changed on an active Link. + * Relevant only for eSR. + * @cck_rates: basic rates available for CCK + * @ofdm_rates: basic rates available for OFDM + * @cck_short_preamble: 1 for enabling short preamble, 0 otherwise + * @short_slot: 1 for enabling short slots, 0 otherwise + * @protection_flags: combination of &enum iwl_link_ctx_protection_flags + * @qos_flags: from &enum iwl_mac_qos_flags + * @ac: one iwl_mac_qos configuration for each AC + * @htc_trig_based_pkt_ext: default PE in 4us units + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @ndp_fdbk_buff_th_exp: set exponent for the NDP feedback buffered threshold + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + * @bi: beacon interval in TU, applicable only when associated + * @dtim_interval: DTIM interval in TU. + * Relevant only for GO, otherwise this is offloaded. + * @puncture_mask: puncture mask for EHT + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @flags: a combination from &enum iwl_link_ctx_flags + * @flags_mask: what of %flags have changed. 
Also &enum iwl_link_ctx_flags + * Below fields are for multi-bssid: + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved_for_ref_bssid_addr: reserved + * @bssid_index: index of the associated VAP + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @spec_link_id: link_id as the AP knows it + * @reserved: alignment + * @ibss_bssid_addr: bssid for ibss + * @reserved_for_ibss_bssid_addr: reserved + * @reserved1: reserved for future use + */ +struct iwl_link_config_cmd { + __le32 action; + __le32 link_id; + __le32 mac_id; + __le32 phy_id; + u8 local_link_addr[6]; + __le16 reserved_for_local_link_addr; + __le32 modify_mask; + __le32 active; + __le32 listen_lmac; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 protection_flags; + /* MAC_QOS_PARAM_API_S_VER_1 */ + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM + 1]; + u8 htc_trig_based_pkt_ext; + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + u8 ndp_fdbk_buff_th_exp; + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; + __le32 bi; + __le32 dtim_interval; + __le16 puncture_mask; + __le16 frame_time_rts_th; + __le32 flags; + __le32 flags_mask; + /* The below fields are for multi-bssid */ + u8 ref_bssid_addr[6]; + __le16 reserved_for_ref_bssid_addr; + u8 bssid_index; + u8 bss_color; + u8 spec_link_id; + u8 reserved; + u8 ibss_bssid_addr[6]; + __le16 reserved_for_ibss_bssid_addr; + __le32 reserved1[8]; +} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */ + +/* Currently FW supports link ids in the range 0-3 and can have + * at most two active links for each vif. + */ +#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2 +#define IWL_MVM_FW_MAX_LINK_ID 3 + +/** + * enum iwl_fw_sta_type - FW station types + * @STATION_TYPE_PEER: represents a peer - AP in BSS, a TDLS sta, a client in + * P2P. + * @STATION_TYPE_BCAST_MGMT: The station used to send beacons and + * probe responses. Also used for traffic injection in sniffer mode + * @STATION_TYPE_MCAST: the station used for BCAST / MCAST in GO. Will be + * suspended / resumed at the right timing depending on the clients' + * power save state and the DTIM timing + * @STATION_TYPE_AUX: aux sta. In the FW there is no need for a special type + * for the aux sta, so this type is only for driver - internal use. + */ +enum iwl_fw_sta_type { + STATION_TYPE_PEER, + STATION_TYPE_BCAST_MGMT, + STATION_TYPE_MCAST, + STATION_TYPE_AUX, +}; /* STATION_TYPE_E_VER_1 */ + +/** + * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's + * station table + * ( STA_CONFIG_CMD = 0xA ) + * + * @sta_id: index of station in uCode's station table + * @link_id: the id of the link that is used to communicate with this sta + * @peer_mld_address: the peers mld address + * @reserved_for_peer_mld_address: reserved + * @peer_link_address: the address of the link that is used to communicate + * with this sta + * @reserved_for_peer_link_address: reserved + * @station_type: type of this station. See &enum iwl_fw_sta_type + * @assoc_id: for GO only + * @beamform_flags: beam forming controls + * @mfp: indicates whether the STA uses management frame protection or not. + * @mimo: indicates whether the sta uses mimo or not + * @mimo_protection: indicates whether the sta uses mimo protection or not + * @ack_enabled: indicates that the AP supports receiving ACK- + * enabled AGG, i.e. 
both BACK and non-BACK frames in a single AGG + * @trig_rnd_alloc: indicates that trigger based random allocation + * is enabled according to UORA element existence + * @tx_ampdu_spacing: minimum A-MPDU spacing: + * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density + * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K, + * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K. + * @sp_length: the size of the SP in actual number of frames + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs. + * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY + * capa + * @htc_flags: which features are supported in HTC + */ +struct iwl_mvm_sta_cfg_cmd { + __le32 sta_id; + __le32 link_id; + u8 peer_mld_address[ETH_ALEN]; + __le16 reserved_for_peer_mld_address; + u8 peer_link_address[ETH_ALEN]; + __le16 reserved_for_peer_link_address; + __le32 station_type; + __le32 assoc_id; + __le32 beamform_flags; + __le32 mfp; + __le32 mimo; + __le32 mimo_protection; + __le32 ack_enabled; + __le32 trig_rnd_alloc; + __le32 tx_ampdu_spacing; + __le32 tx_ampdu_max_size; + __le32 sp_length; + __le32 uapsd_acs; + struct iwl_he_pkt_ext_v2 pkt_ext; + __le32 htc_flags; +} __packed; /* STA_CMD_API_S_VER_1 */ + +/** + * struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration + * ( AUX_STA_CMD = 0xB ) + * + * @sta_id: index of aux sta to configure + * @lmac_id: ? + * @mac_addr: mac addr of the auxilary sta + * @reserved_for_mac_addr: reserved + */ +struct iwl_mvm_aux_sta_cmd { + __le32 sta_id; + __le32 lmac_id; + u8 mac_addr[ETH_ALEN]; + __le16 reserved_for_mac_addr; + +} __packed; /* AUX_STA_CMD_API_S_VER_1 */ + +/** + * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by + * STA_CONFIG_CMD or AUX_STA_CONFIG_CMD + * ( STA_REMOVE_CMD = 0xC ) + * + * @sta_id: index of station to remove + */ +struct iwl_mvm_remove_sta_cmd { + __le32 sta_id; +} __packed; /* REMOVE_STA_API_S_VER_1 */ + +/** + * struct iwl_mvm_sta_disable_tx_cmd - disable / re-enable tx to a sta + * ( STA_DISABLE_TX_CMD = 0xD ) + * + * @sta_id: index of the station to disable tx to + * @disable: indicates if to disable or re-enable tx + */ +struct iwl_mvm_sta_disable_tx_cmd { + __le32 sta_id; + __le32 disable; +} __packed; /* STA_DISABLE_TX_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/mac.h b/sys/contrib/dev/iwlwifi/fw/api/mac.h index 9b7caf968346..55882190251c 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/mac.h +++ b/sys/contrib/dev/iwlwifi/fw/api/mac.h @@ -1,768 +1,786 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ #define __iwl_fw_api_mac_h__ /* * The first MAC indices (starting from 0) are available to the driver, * AUX indices follows - 1 for non-CDB, 2 for CDB. */ #define MAC_INDEX_AUX 4 #define MAC_INDEX_MIN_DRIVER 0 #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX #define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) #define IWL_MVM_STATION_COUNT_MAX 16 #define IWL_MVM_INVALID_STA 0xFF enum iwl_ac { AC_BK, AC_BE, AC_VI, AC_VO, AC_NUM, }; /** * enum iwl_mac_protection_flags - MAC context flags * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, * this will require CCK RTS/CTS2self. * RTS/CTS will protect full burst time. 
* @MAC_PROT_FLG_HT_PROT: enable HT protection * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self */ enum iwl_mac_protection_flags { MAC_PROT_FLG_TGG_PROTECT = BIT(3), MAC_PROT_FLG_HT_PROT = BIT(23), MAC_PROT_FLG_FAT_PROT = BIT(24), MAC_PROT_FLG_SELF_CTS_EN = BIT(30), }; #define MAC_FLG_SHORT_SLOT BIT(4) #define MAC_FLG_SHORT_PREAMBLE BIT(5) /** * enum iwl_mac_types - Supported MAC types * @FW_MAC_TYPE_FIRST: lowest supported MAC type * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS * @FW_MAC_TYPE_IBSS: IBSS * @FW_MAC_TYPE_BSS_STA: BSS (managed) station * @FW_MAC_TYPE_P2P_DEVICE: P2P Device * @FW_MAC_TYPE_P2P_STA: P2P client * @FW_MAC_TYPE_GO: P2P GO * @FW_MAC_TYPE_TEST: ? * @FW_MAC_TYPE_MAX: highest support MAC type */ enum iwl_mac_types { FW_MAC_TYPE_FIRST = 1, FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, FW_MAC_TYPE_LISTENER, FW_MAC_TYPE_PIBSS, FW_MAC_TYPE_IBSS, FW_MAC_TYPE_BSS_STA, FW_MAC_TYPE_P2P_DEVICE, FW_MAC_TYPE_P2P_STA, FW_MAC_TYPE_GO, FW_MAC_TYPE_TEST, FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST }; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ /** * enum iwl_tsf_id - TSF hw timer ID * @TSF_ID_A: use TSF A * @TSF_ID_B: use TSF B * @TSF_ID_C: use TSF C * @TSF_ID_D: use TSF D * @NUM_TSF_IDS: number of TSF timers available */ enum iwl_tsf_id { TSF_ID_A = 0, TSF_ID_B = 1, TSF_ID_C = 2, TSF_ID_D = 3, NUM_TSF_IDS = 4, }; /* TSF_ID_API_E_VER_1 */ /** * struct iwl_mac_data_ap - configuration data for AP MAC context * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU * @reserved1: reserved * @dtim_interval: dtim transmit time in TU * @reserved2: reserved * @mcast_qid: queue ID for multicast traffic. * NOTE: obsolete from VER2 and on * @beacon_template: beacon template ID */ struct iwl_mac_data_ap { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; __le32 reserved1; __le32 dtim_interval; __le32 reserved2; __le32 mcast_qid; __le32 beacon_template; } __packed; /* AP_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_ibss - configuration data for IBSS MAC context * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU * @reserved: reserved * @beacon_template: beacon template ID */ struct iwl_mac_data_ibss { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; __le32 reserved; __le32 beacon_template; } __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ /** * enum iwl_mac_data_policy - policy of the data path for this MAC * @TWT_SUPPORTED: twt is supported * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT * early termination detection. 
* @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w) * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used * during 802.1X negotiation (and allowed during 4-way-HS) */ enum iwl_mac_data_policy { TWT_SUPPORTED = BIT(0), MORE_DATA_ACK_SUPPORTED = BIT(1), FLEXIBLE_TWT_SUPPORTED = BIT(2), PROTECTED_TWT_SUPPORTED = BIT(3), BROADCAST_TWT_SUPPORTED = BIT(4), COEX_HIGH_PRIORITY_ENABLE = BIT(5), }; /** * struct iwl_mac_data_sta - configuration data for station MAC context * @is_assoc: 1 for associated state, 0 otherwise * @dtim_time: DTIM arrival time in system time * @dtim_tsf: DTIM arrival time in TSF * @bi: beacon interval in TU, applicable only when associated * @reserved1: reserved * @dtim_interval: DTIM interval in TU, applicable only when associated * @data_policy: see &enum iwl_mac_data_policy * @listen_interval: in beacon intervals, applicable only when associated * @assoc_id: unique ID assigned by the AP during association * @assoc_beacon_arrive_time: TSF of first beacon after association */ struct iwl_mac_data_sta { __le32 is_assoc; __le32 dtim_time; __le64 dtim_tsf; __le32 bi; __le32 reserved1; __le32 dtim_interval; __le32 data_policy; __le32 listen_interval; __le32 assoc_id; __le32 assoc_beacon_arrive_time; } __packed; /* STA_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_go - configuration data for P2P GO MAC context * @ap: iwl_mac_data_ap struct with most config data * @ctwin: client traffic window in TU (period after TBTT when GO is present). * 0 indicates that there is no CT window. * @opp_ps_enabled: indicate that opportunistic PS allowed */ struct iwl_mac_data_go { struct iwl_mac_data_ap ap; __le32 ctwin; __le32 opp_ps_enabled; } __packed; /* GO_MAC_DATA_API_S_VER_1 */ /** * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context * @sta: iwl_mac_data_sta struct with most config data * @ctwin: client traffic window in TU (period after TBTT when GO is present). * 0 indicates that there is no CT window. */ struct iwl_mac_data_p2p_sta { struct iwl_mac_data_sta sta; __le32 ctwin; } __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_pibss - Pseudo IBSS config data * @stats_interval: interval in TU between statistics notifications to host. */ struct iwl_mac_data_pibss { __le32 stats_interval; } __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ /* * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC * context. * @is_disc_extended: if set to true, P2P Device discoverability is enabled on * other channels as well. This should be to true only in case that the * device is discoverable and there is an active GO. Note that setting this * field when not needed, will increase the number of interrupts and have * effect on the platform power, as this setting opens the Rx filters on * all macs. 
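 *
 * Illustrative example (not part of the firmware API; a hypothetical usage
 * sketch): per the note above, extended discoverability would normally only
 * be requested while a GO is active. Assuming "cmd" is a struct
 * iwl_mac_ctx_cmd and "p2p_discoverable"/"go_active" are hypothetical
 * driver-side booleans:
 *
 *	/* only open the extra Rx filters when it is actually needed */
 *	cmd.p2p_dev.is_disc_extended =
 *		cpu_to_le32(p2p_discoverable && go_active ? 1 : 0);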
*/ struct iwl_mac_data_p2p_dev { __le32 is_disc_extended; } __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ /** * enum iwl_mac_filter_flags - MAC context filter flags * @MAC_FILTER_IN_PROMISC: accept all data frames * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and * control frames to the host * @MAC_FILTER_ACCEPT_GRP: accept multicast frames * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host * (in station mode when associated) * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host */ enum iwl_mac_filter_flags { MAC_FILTER_IN_PROMISC = BIT(0), MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), MAC_FILTER_ACCEPT_GRP = BIT(2), MAC_FILTER_DIS_DECRYPT = BIT(3), MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), MAC_FILTER_IN_BEACON = BIT(6), MAC_FILTER_OUT_BCAST = BIT(8), MAC_FILTER_IN_CRC32 = BIT(11), MAC_FILTER_IN_PROBE_REQUEST = BIT(12), /** * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax */ MAC_FILTER_IN_11AX = BIT(14), }; /** * enum iwl_mac_qos_flags - QoS flags * @MAC_QOS_FLG_UPDATE_EDCA: ? * @MAC_QOS_FLG_TGN: HT is enabled * @MAC_QOS_FLG_TXOP_TYPE: ? * */ enum iwl_mac_qos_flags { MAC_QOS_FLG_UPDATE_EDCA = BIT(0), MAC_QOS_FLG_TGN = BIT(1), MAC_QOS_FLG_TXOP_TYPE = BIT(4), }; /** * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD * @cw_min: Contention window, start value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x0f. * @cw_max: Contention window, max value in numbers of slots. * Should be a power-of-2, minus 1. Device's default is 0x3f. * @aifsn: Number of slots in Arbitration Interframe Space (before * performing random backoff timing prior to Tx). Device default 1. * @fifos_mask: FIFOs used by this MAC for this AC * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. * * One instance of this config struct for each of 4 EDCA access categories * in struct iwl_qosparam_cmd. * * Device will automatically increase contention window by (2*CW) + 1 for each * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW * value, to cap the CW value. 
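 *
 * Illustrative example (not part of the firmware API; a hypothetical usage
 * sketch): filling one AC entry from EDCA parameters, assuming hypothetical
 * locals ecw_min/ecw_max (ECW exponents), aifs, txf (destination FIFO index)
 * and txop (TXOP in units of 32 usec):
 *
 *	struct iwl_ac_qos ac = {
 *		.cw_min = cpu_to_le16((1 << ecw_min) - 1),	/* e.g. 4 -> 0x0f */
 *		.cw_max = cpu_to_le16((1 << ecw_max) - 1),	/* e.g. 6 -> 0x3f */
 *		.aifsn = aifs,
 *		.fifos_mask = BIT(txf),
 *		.edca_txop = cpu_to_le16(txop * 32),	/* convert to usec */
 *	};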
*/ struct iwl_ac_qos { __le16 cw_min; __le16 cw_max; u8 aifsn; u8 fifos_mask; __le16 edca_txop; } __packed; /* AC_QOS_API_S_VER_2 */ /** * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts * ( MAC_CONTEXT_CMD = 0x28 ) * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @mac_type: one of &enum iwl_mac_types * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id * @node_addr: MAC address * @reserved_for_node_addr: reserved * @bssid_addr: BSSID * @reserved_for_bssid_addr: reserved * @cck_rates: basic rates available for CCK * @ofdm_rates: basic rates available for OFDM * @protection_flags: combination of &enum iwl_mac_protection_flags * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise * @short_slot: 0x10 for enabling short slots, 0 otherwise * @filter_flags: combination of &enum iwl_mac_filter_flags * @qos_flags: from &enum iwl_mac_qos_flags * @ac: one iwl_mac_qos configuration for each AC */ struct iwl_mac_ctx_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ __le32 mac_type; __le32 tsf_id; u8 node_addr[6]; __le16 reserved_for_node_addr; u8 bssid_addr[6]; __le16 reserved_for_bssid_addr; __le32 cck_rates; __le32 ofdm_rates; __le32 protection_flags; __le32 cck_short_preamble; __le32 short_slot; __le32 filter_flags; /* MAC_QOS_PARAM_API_S_VER_1 */ __le32 qos_flags; struct iwl_ac_qos ac[AC_NUM+1]; /* MAC_CONTEXT_COMMON_DATA_API_S */ union { struct iwl_mac_data_ap ap; struct iwl_mac_data_go go; struct iwl_mac_data_sta sta; struct iwl_mac_data_p2p_sta p2p_sta; struct iwl_mac_data_p2p_dev p2p_dev; struct iwl_mac_data_pibss pibss; struct iwl_mac_data_ibss ibss; }; } __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ #define IWL_NONQOS_SEQ_GET 0x1 #define IWL_NONQOS_SEQ_SET 0x2 struct iwl_nonqos_seq_query_cmd { __le32 get_set_flag; __le32 mac_id_n_color; __le16 value; __le16 reserved; } __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ /** - * struct iwl_missed_beacons_notif - information on missed beacons + * struct iwl_missed_beacons_notif_ver_3 - information on missed beacons * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) * @mac_id: interface ID * @consec_missed_beacons_since_last_rx: number of consecutive missed * beacons since last RX. * @consec_missed_beacons: number of consecutive missed beacons * @num_expected_beacons: number of expected beacons * @num_recvd_beacons: number of received beacons */ -struct iwl_missed_beacons_notif { +struct iwl_missed_beacons_notif_ver_3 { __le32 mac_id; __le32 consec_missed_beacons_since_last_rx; __le32 consec_missed_beacons; __le32 num_expected_beacons; __le32 num_recvd_beacons; } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ +/** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @link_id: fw link ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. 
+ * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_4 */ + /** * struct iwl_he_backoff_conf - used for backoff configuration * Per each trigger-based AC, (set by MU EDCA Parameter set info-element) * used for backoff configuration of TXF5..TXF8 trigger based. * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via * trigger-based TX. * @cwmin: CW min * @cwmax: CW max * @aifsn: AIFSN * AIFSN=0, means that no backoff from the specified TRIG-BASED AC is * allowed till the MU-TIMER is 0 * @mu_time: MU time in 8TU units */ struct iwl_he_backoff_conf { __le16 cwmin; __le16 cwmax; __le16 aifsn; __le16 mu_time; } __packed; /* AC_QOS_DOT11AX_API_S */ /** * enum iwl_he_pkt_ext_constellations - PPE constellation indices * @IWL_HE_PKT_EXT_BPSK: BPSK * @IWL_HE_PKT_EXT_QPSK: QPSK * @IWL_HE_PKT_EXT_16QAM: 16-QAM * @IWL_HE_PKT_EXT_64QAM: 64-QAM * @IWL_HE_PKT_EXT_256QAM: 256-QAM * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM - * @IWL_HE_PKT_EXT_RESERVED: reserved value + * @IWL_HE_PKT_EXT_4096QAM: 4096-QAM, for EHT only * @IWL_HE_PKT_EXT_NONE: not defined */ enum iwl_he_pkt_ext_constellations { IWL_HE_PKT_EXT_BPSK = 0, IWL_HE_PKT_EXT_QPSK, IWL_HE_PKT_EXT_16QAM, IWL_HE_PKT_EXT_64QAM, IWL_HE_PKT_EXT_256QAM, IWL_HE_PKT_EXT_1024QAM, - IWL_HE_PKT_EXT_RESERVED, + IWL_HE_PKT_EXT_4096QAM, IWL_HE_PKT_EXT_NONE, }; #define MAX_HE_SUPP_NSS 2 #define MAX_CHANNEL_BW_INDX_API_D_VER_2 4 #define MAX_CHANNEL_BW_INDX_API_D_VER_3 5 /** * struct iwl_he_pkt_ext_v1 - QAM thresholds * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS * The IE is organized in the following way: * Support for Nss x BW (or RU) matrix: * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) * Each entry contains 2 QAM thresholds for 8us and 16us: * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx: * QAM_tx < QAM_th1 --> PPE=0us * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us * QAM_th2 <= QAM_tx --> PPE=16us * @pkt_ext_qam_th: QAM thresholds * For each Nss/Bw define 2 QAM thresholds (0..5) * For rates below the low_th, no need for PPE * For rates between low_th and high_th, need 8us PPE * For rates equal or higher than the high_th, need 16us PPE * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x * (0-low_th, 1-high_th) */ struct iwl_he_pkt_ext_v1 { u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2]; } __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */ /** * struct iwl_he_pkt_ext_v2 - QAM thresholds * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS * The IE is organized in the following way: * Support for Nss x BW (or RU) matrix: * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) * Each entry contains 2 QAM thresholds for 8us and 16us: * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE * i.e. 
QAM_th1 < QAM_th2 such that if TX uses QAM_tx: * QAM_tx < QAM_th1 --> PPE=0us * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us * QAM_th2 <= QAM_tx --> PPE=16us * @pkt_ext_qam_th: QAM thresholds * For each Nss/Bw define 2 QAM thresholds (0..5) * For rates below the low_th, no need for PPE * For rates between low_th and high_th, need 8us PPE * For rates equal or higher than the high_th, need 16us PPE * Nss (0-siso, 1-mimo2) x * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x * (0-low_th, 1-high_th) */ struct iwl_he_pkt_ext_v2 { u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2]; } __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */ /** * enum iwl_he_sta_ctxt_flags - HE STA context flags * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific * control frames such as TRIG, NDPA, BACK) * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS * color for RX filter but use MAC header * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap * of 32-bits * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid * and should be used * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation * is enabled according to UORA element existence * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK- * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA * parameter set, i.e. the backoff counters for trig-based ACs * @STA_CTXT_HE_NIC_NOT_ACK_ENABLED: mark that the NIC doesn't support receiving * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 * len delim to determine if AGG or single. * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmissions are * not allowed (as there are OBSS that might classify such transmissions as * radar pulses). * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change * of threshold * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be * extended to 20us for BW > 160 MHz or for MCS w/ 4096-QAM. 
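 *
 * Illustrative example (not part of the firmware API; a hypothetical usage
 * sketch): these bits are OR'ed into the flags word of one of the
 * iwl_he_sta_context_cmd versions below, e.g. with hypothetical booleans
 * describing the connection ("ppe_thresholds_present",
 * "ref_bssid_known") and "cmd" being the context command:
 *
 *	u32 sta_flags = 0;
 *	if (ppe_thresholds_present)
 *		sta_flags |= STA_CTXT_HE_PACKET_EXT;
 *	if (ref_bssid_known)
 *		sta_flags |= STA_CTXT_HE_REF_BSSID_VALID;
 *	cmd.flags = cpu_to_le32(sta_flags);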
*/ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_REF_BSSID_VALID = BIT(4), STA_CTXT_HE_BSS_COLOR_DIS = BIT(5), STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6), STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7), STA_CTXT_HE_PACKET_EXT = BIT(8), STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9), STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10), STA_CTXT_HE_ACK_ENABLED = BIT(11), STA_CTXT_HE_MU_EDCA_CW = BIT(12), STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13), STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14), STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15), STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16), STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17), }; /** * enum iwl_he_htc_flags - HE HTC support flags * @IWL_HE_HTC_SUPPORT: HE-HTC support * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule * support via A-control field * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field */ enum iwl_he_htc_flags { IWL_HE_HTC_SUPPORT = BIT(0), IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3), IWL_HE_HTC_BSR_SUPP = BIT(4), IWL_HE_HTC_OMI_SUPP = BIT(5), IWL_HE_HTC_BQR_SUPP = BIT(6), }; /* * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in * response to HE MRQ and if the STA provides unsolicited HE MFB */ #define IWL_HE_HTC_LINK_ADAP_POS (1) #define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0) #define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS) #define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS) /** * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit * @reserved1: reserved byte for future use * @reserved2: reserved byte for future use * @flags: see %iwl_11ax_sta_ctxt_flags * @ref_bssid_addr: reference BSSID used by the AP * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes * @htc_flags: which features are supported in HTC * @frag_flags: frag support in A-MSDU * @frag_level: frag support level * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) * @frag_min_size: min frag size (except last frag) * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @htc_trig_based_pkt_ext: default PE in 4us units * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @reserved3: reserved byte for future use * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues */ struct iwl_he_sta_context_cmd_v1 { u8 sta_id; u8 tid_limit; u8 reserved1; u8 reserved2; __le32 flags; /* The below fields are set via Multiple BSSID IE */ u8 ref_bssid_addr[6]; __le16 reserved0; /* The below fields are set via HE-capabilities IE */ __le32 htc_flags; u8 frag_flags; u8 frag_level; u8 frag_max_num; u8 frag_min_size; /* The below fields are set via PPE thresholds element */ struct iwl_he_pkt_ext_v1 pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; u8 htc_trig_based_pkt_ext; __le16 frame_time_rts_th; /* Random access parameter set (i.e. 
RAPS) */ u8 rand_alloc_ecwmin; u8 rand_alloc_ecwmax; __le16 reserved3; /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */ /** * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit * @reserved1: reserved byte for future use * @reserved2: reserved byte for future use * @flags: see %iwl_11ax_sta_ctxt_flags * @ref_bssid_addr: reference BSSID used by the AP * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes * @htc_flags: which features are supported in HTC * @frag_flags: frag support in A-MSDU * @frag_level: frag support level * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) * @frag_min_size: min frag size (except last frag) * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @htc_trig_based_pkt_ext: default PE in 4us units * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @reserved3: reserved byte for future use * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues * @max_bssid_indicator: indicator of the max bssid supported on the associated * bss * @bssid_index: index of the associated VAP * @ema_ap: AP supports enhanced Multi BSSID advertisement * @profile_periodicity: number of Beacon periods that are needed to receive the * complete VAPs info * @bssid_count: actual number of VAPs in the MultiBSS Set * @reserved4: alignment */ struct iwl_he_sta_context_cmd_v2 { u8 sta_id; u8 tid_limit; u8 reserved1; u8 reserved2; __le32 flags; /* The below fields are set via Multiple BSSID IE */ u8 ref_bssid_addr[6]; __le16 reserved0; /* The below fields are set via HE-capabilities IE */ __le32 htc_flags; u8 frag_flags; u8 frag_level; u8 frag_max_num; u8 frag_min_size; /* The below fields are set via PPE thresholds element */ struct iwl_he_pkt_ext_v1 pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; u8 htc_trig_based_pkt_ext; __le16 frame_time_rts_th; /* Random access parameter set (i.e. 
RAPS) */ u8 rand_alloc_ecwmin; u8 rand_alloc_ecwmax; __le16 reserved3; /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; u8 max_bssid_indicator; u8 bssid_index; u8 ema_ap; u8 profile_periodicity; u8 bssid_count; u8 reserved4[3]; } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ /** * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit * @reserved1: reserved byte for future use * @reserved2: reserved byte for future use * @flags: see %iwl_11ax_sta_ctxt_flags * @ref_bssid_addr: reference BSSID used by the AP * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes * @htc_flags: which features are supported in HTC * @frag_flags: frag support in A-MSDU * @frag_level: frag support level * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) * @frag_min_size: min frag size (except last frag) * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame * @htc_trig_based_pkt_ext: default PE in 4us units * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 * @puncture_mask: puncture mask for EHT * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues * @max_bssid_indicator: indicator of the max bssid supported on the associated * bss * @bssid_index: index of the associated VAP * @ema_ap: AP supports enhanced Multi BSSID advertisement * @profile_periodicity: number of Beacon periods that are needed to receive the * complete VAPs info * @bssid_count: actual number of VAPs in the MultiBSS Set * @reserved4: alignment */ struct iwl_he_sta_context_cmd_v3 { u8 sta_id; u8 tid_limit; u8 reserved1; u8 reserved2; __le32 flags; /* The below fields are set via Multiple BSSID IE */ u8 ref_bssid_addr[6]; __le16 reserved0; /* The below fields are set via HE-capabilities IE */ __le32 htc_flags; u8 frag_flags; u8 frag_level; u8 frag_max_num; u8 frag_min_size; /* The below fields are set via PPE thresholds element */ struct iwl_he_pkt_ext_v2 pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; u8 htc_trig_based_pkt_ext; __le16 frame_time_rts_th; /* Random access parameter set (i.e. 
RAPS) */ u8 rand_alloc_ecwmin; u8 rand_alloc_ecwmax; __le16 puncture_mask; /* The below fields are set via MU EDCA parameter set element */ struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; u8 max_bssid_indicator; u8 bssid_index; u8 ema_ap; u8 profile_periodicity; u8 bssid_count; u8 reserved4[3]; } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ /** * struct iwl_he_monitor_cmd - configure air sniffer for HE * @bssid: the BSSID to sniff for * @reserved1: reserved for dword alignment * @aid: the AID to track on for HE MU * @reserved2: reserved for future use */ struct iwl_he_monitor_cmd { u8 bssid[6]; __le16 reserved1; __le16 aid; u8 reserved2[6]; } __packed; /* HE_AIR_SNIFFER_CONFIG_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_mac_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/nvm-reg.h b/sys/contrib/dev/iwlwifi/fw/api/nvm-reg.h index 91bfde6d5367..28bfabb399b2 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/nvm-reg.h +++ b/sys/contrib/dev/iwlwifi/fw/api/nvm-reg.h @@ -1,575 +1,611 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_nvm_reg_h__ #define __iwl_fw_api_nvm_reg_h__ /** * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands */ enum iwl_regulatory_and_nvm_subcmd_ids { /** * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd */ NVM_ACCESS_COMPLETE = 0x0, /** - * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd + * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd_v1, + * &struct iwl_lari_config_change_cmd_v2, + * &struct iwl_lari_config_change_cmd_v3, + * &struct iwl_lari_config_change_cmd_v4, + * &struct iwl_lari_config_change_cmd_v5 or + * &struct iwl_lari_config_change_cmd_v6 */ LARI_CONFIG_CHANGE = 0x1, /** * @NVM_GET_INFO: * Command is &struct iwl_nvm_get_info, * response is &struct iwl_nvm_get_info_rsp */ NVM_GET_INFO = 0x2, /** - * @TAS_CONFIG: &struct iwl_tas_config_cmd + * @TAS_CONFIG: &union iwl_tas_config_cmd */ TAS_CONFIG = 0x3, /** - * @SAR_OFFSET_MAPPING_TABLE_CMD: &iwl_sar_offset_mapping_cmd + * @SAR_OFFSET_MAPPING_TABLE_CMD: &struct iwl_sar_offset_mapping_cmd */ SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy */ PNVM_INIT_COMPLETE_NTFY = 0xFE, }; /** * enum iwl_nvm_access_op - NVM access opcode * @IWL_NVM_READ: read NVM * @IWL_NVM_WRITE: write NVM */ enum iwl_nvm_access_op { IWL_NVM_READ = 0, IWL_NVM_WRITE = 1, }; /** * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD * @NVM_ACCESS_TARGET_CACHE: access the cache * @NVM_ACCESS_TARGET_OTP: access the OTP * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM */ enum iwl_nvm_access_target { NVM_ACCESS_TARGET_CACHE = 0, NVM_ACCESS_TARGET_OTP = 1, NVM_ACCESS_TARGET_EEPROM = 2, }; /** * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD * @NVM_SECTION_TYPE_SW: software section * @NVM_SECTION_TYPE_REGULATORY: regulatory section * @NVM_SECTION_TYPE_CALIBRATION: calibration section * @NVM_SECTION_TYPE_PRODUCTION: production section * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section * @NVM_MAX_NUM_SECTIONS: number of sections */ enum iwl_nvm_section_type { NVM_SECTION_TYPE_SW = 1, NVM_SECTION_TYPE_REGULATORY = 3, NVM_SECTION_TYPE_CALIBRATION = 4, NVM_SECTION_TYPE_PRODUCTION = 5, NVM_SECTION_TYPE_REGULATORY_SDP = 8, 
NVM_SECTION_TYPE_MAC_OVERRIDE = 11, NVM_SECTION_TYPE_PHY_SKU = 12, NVM_MAX_NUM_SECTIONS = 13, }; /** * struct iwl_nvm_access_cmd - Request the device to send an NVM section * @op_code: &enum iwl_nvm_access_op * @target: &enum iwl_nvm_access_target * @type: &enum iwl_nvm_section_type * @offset: offset in bytes into the section * @length: in bytes, to read/write * @data: if write operation, the data to write. On read its empty */ struct iwl_nvm_access_cmd { u8 op_code; u8 target; __le16 type; __le16 offset; __le16 length; u8 data[]; } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ /** * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section * @length: in bytes, either how much was written or read * @type: NVM_SECTION_TYPE_* * @status: 0 for success, fail otherwise * @data: if read operation, the data returned. Empty on write. */ struct iwl_nvm_access_resp { __le16 offset; __le16 length; __le16 type; __le16 status; u8 data[]; } __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ /* * struct iwl_nvm_get_info - request to get NVM data */ struct iwl_nvm_get_info { __le32 reserved; } __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */ /** * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty */ enum iwl_nvm_info_general_flags { NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0), }; /** * struct iwl_nvm_get_info_general - general NVM data * @flags: bit 0: 1 - empty, 0 - non-empty * @nvm_version: nvm version * @board_type: board type * @n_hw_addrs: number of reserved MAC addresses */ struct iwl_nvm_get_info_general { __le32 flags; __le16 nvm_version; u8 board_type; u8 n_hw_addrs; } __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */ /** * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled */ enum iwl_nvm_mac_sku_flags { NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0), NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), /** * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled */ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14), NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15), }; /** * struct iwl_nvm_get_info_sku - mac information * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags */ struct iwl_nvm_get_info_sku { __le32 mac_sku_flags; } __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */ /** * struct iwl_nvm_get_info_phy - phy information * @tx_chains: BIT 0 chain A, BIT 1 chain B * @rx_chains: BIT 0 chain A, BIT 1 chain B */ struct iwl_nvm_get_info_phy { __le32 tx_chains; __le32 rx_chains; } __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ #define IWL_NUM_CHANNELS_V1 51 #define IWL_NUM_CHANNELS 110 /** * struct iwl_nvm_get_info_regulatory - regulatory information * @lar_enabled: is LAR enabled * @channel_profile: regulatory data of this channel * @reserved: 
reserved */ struct iwl_nvm_get_info_regulatory_v1 { __le32 lar_enabled; __le16 channel_profile[IWL_NUM_CHANNELS_V1]; __le16 reserved; } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ /** * struct iwl_nvm_get_info_regulatory - regulatory information * @lar_enabled: is LAR enabled * @n_channels: number of valid channels in the array * @channel_profile: regulatory data of this channel */ struct iwl_nvm_get_info_regulatory { __le32 lar_enabled; __le32 n_channels; __le32 channel_profile[IWL_NUM_CHANNELS]; } __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */ /** * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data * @general: general NVM data * @mac_sku: data relating to MAC sku * @phy_sku: data relating to PHY sku * @regulatory: regulatory data */ struct iwl_nvm_get_info_rsp_v3 { struct iwl_nvm_get_info_general general; struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory_v1 regulatory; } __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */ /** * struct iwl_nvm_get_info_rsp - response to get NVM data * @general: general NVM data * @mac_sku: data relating to MAC sku * @phy_sku: data relating to PHY sku * @regulatory: regulatory data */ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_general general; struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; } __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed * @reserved: reserved */ struct iwl_nvm_access_complete_cmd { __le32 reserved; } __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ /** * struct iwl_mcc_update_cmd - Request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the * MCC in the cmd response will be the relevant MCC in the NVM. * @mcc: given mobile country code * @source_id: the source from where we got the MCC, see iwl_mcc_source * @reserved: reserved for alignment * @key: integrity key for MCC API OEM testing * @reserved2: reserved */ struct iwl_mcc_update_cmd { __le16 mcc; u8 source_id; u8 reserved; __le32 key; u8 reserved2[20]; } __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ /** * enum iwl_geo_information - geographic information. * @GEO_NO_INFO: no special info for this geo profile. * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params * for the 5 GHz band. */ enum iwl_geo_information { GEO_NO_INFO = 0, GEO_WMM_ETSI_5GHZ_INFO = BIT(0), }; /** * struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @source_id: the MCC source, see iwl_mcc_source * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. 
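 *
 * Illustrative example (not part of the firmware API; a hypothetical usage
 * sketch): walking the returned channel map, where only the low 16 bits of
 * each DWORD carry flags. "resp" is assumed to point at a received response
 * and "i" is a hypothetical loop index:
 *
 *	for (i = 0; i < le32_to_cpu(resp->n_channels); i++) {
 *		u16 ch_flags = le32_to_cpu(resp->channels[i]) & 0xffff;
 *		/* ch_flags holds the regulatory flags for channel index i */
 *	}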
*/ struct iwl_mcc_update_resp_v3 { __le32 status; __le16 mcc; u8 cap; u8 source_id; __le16 time; __le16 geo_info; __le32 n_channels; __le32 channels[]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** - * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. + * struct iwl_mcc_update_resp_v4 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. * @status: see &enum iwl_mcc_update_status * @mcc: the new applied MCC * @cap: capabilities for all channels which matches the MCC * @time: time elapsed from the MCC test start (in units of 30 seconds) * @geo_info: geographic specific profile information * see &enum iwl_geo_information. * @source_id: the MCC source, see iwl_mcc_source * @reserved: for four bytes alignment. * @n_channels: number of channels in @channels_data. * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ -struct iwl_mcc_update_resp { +struct iwl_mcc_update_resp_v4 { __le32 status; __le16 mcc; __le16 cap; __le16 time; __le16 geo_info; u8 source_id; u8 reserved[3]; __le32 n_channels; __le32 channels[]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ +/** + * struct iwl_mcc_update_resp_v8 - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @padding: padding for 2 bytes. + * @cap: capabilities for all channels which matches the MCC + * @time: time elapsed from the MCC test start (in units of 30 seconds) + * @geo_info: geographic specific profile information + * see &enum iwl_geo_information. + * @source_id: the MCC source, see iwl_mcc_source + * @reserved: for four bytes alignment. + * @n_channels: number of channels in @channels_data. + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp_v8 { + __le32 status; + __le16 mcc; + u8 padding[2]; + __le32 cap; + __le16 time; + __le16 geo_info; + u8 source_id; + u8 reserved[3]; + __le32 n_channels; + __le32 channels[]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_8 */ + /** * struct iwl_mcc_chub_notif - chub notifies of mcc change * (MCC_CHUB_UPDATE_CMD = 0xc9) * The Chub (Communication Hub, CommsHUB) is a HW component that connects to * the cellular and connectivity cores that gets updates of the mcc, and * notifies the ucode directly of any mcc change. * The ucode requests the driver to request the device to update geographic * regulatory profile according to the given MCC (Mobile Country Code). * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the * MCC in the cmd response will be the relevant MCC in the NVM. 
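Across these MCC response versions only the fixed header changes shape (@cap grows from u8 in v3 to __le16 in v4 and __le32 in v8, with explicit padding added), while the channel map stays a flexible array of @n_channels DWORDs of which only the low 16 bits carry flags. A minimal sketch of walking the v8 map, assuming the structures above; a real caller would first validate @n_channels against the received payload length:

static void walk_mcc_channels_v8(const struct iwl_mcc_update_resp_v8 *resp)
{
	u32 n = le32_to_cpu(resp->n_channels);
	u32 i;

	for (i = 0; i < n; i++) {
		/* only the low 16 bits of each DWORD are meaningful */
		u16 ch_flags = le32_to_cpu(resp->channels[i]) & 0xffff;

		(void)ch_flags;	/* per-channel control flags for index i */
	}
}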
* @mcc: given mobile country code * @source_id: identity of the change originator, see iwl_mcc_source * @reserved1: reserved for alignment */ struct iwl_mcc_chub_notif { __le16 mcc; u8 source_id; u8 reserved1; } __packed; /* LAR_MCC_NOTIFY_S */ enum iwl_mcc_update_status { MCC_RESP_NEW_CHAN_PROFILE, MCC_RESP_SAME_CHAN_PROFILE, MCC_RESP_INVALID, MCC_RESP_NVM_DISABLED, MCC_RESP_ILLEGAL, MCC_RESP_LOW_PRIORITY, MCC_RESP_TEST_MODE_ACTIVE, MCC_RESP_TEST_MODE_NOT_ACTIVE, MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, }; enum iwl_mcc_source { MCC_SOURCE_OLD_FW = 0, MCC_SOURCE_ME = 1, MCC_SOURCE_BIOS = 2, MCC_SOURCE_3G_LTE_HOST = 3, MCC_SOURCE_3G_LTE_DEVICE = 4, MCC_SOURCE_WIFI = 5, MCC_SOURCE_RESERVED = 6, MCC_SOURCE_DEFAULT = 7, MCC_SOURCE_UNINITIALIZED = 8, MCC_SOURCE_MCC_API = 9, MCC_SOURCE_GET_CURRENT = 0x10, MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; #define IWL_TAS_BLOCK_LIST_MAX 16 /** * struct iwl_tas_config_cmd_v2 - configures the TAS * @block_list_size: size of relevant field in block_list_array * @block_list_array: list of countries where TAS must be disabled */ struct iwl_tas_config_cmd_v2 { __le32 block_list_size; __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ /** * struct iwl_tas_config_cmd_v3 - configures the TAS * @block_list_size: size of relevant field in block_list_array * @block_list_array: list of countries where TAS must be disabled * @override_tas_iec: indicates whether to override default value of IEC regulatory * @enable_tas_iec: in case override_tas_iec is set - * indicates whether IEC regulatory is enabled or disabled */ struct iwl_tas_config_cmd_v3 { __le32 block_list_size; __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; __le16 override_tas_iec; __le16 enable_tas_iec; } __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */ /** * struct iwl_tas_config_cmd_v3 - configures the TAS * @block_list_size: size of relevant field in block_list_array * @block_list_array: list of countries where TAS must be disabled * @override_tas_iec: indicates whether to override default value of IEC regulatory * @enable_tas_iec: in case override_tas_iec is set - * indicates whether IEC regulatory is enabled or disabled * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA * @reserved: reserved */ struct iwl_tas_config_cmd_v4 { __le32 block_list_size; __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; u8 override_tas_iec; u8 enable_tas_iec; u8 usa_tas_uhb_allowed; u8 reserved; } __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */ union iwl_tas_config_cmd { struct iwl_tas_config_cmd_v2 v2; struct iwl_tas_config_cmd_v3 v3; struct iwl_tas_config_cmd_v4 v4; }; /** * enum iwl_lari_configs - bit masks for the various LARI config operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan * @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled * @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in * Indonesia */ enum iwl_lari_config_masks { LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0), LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1), LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2), LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3), }; #define IWL_11AX_UKRAINE_MASK 3 #define IWL_11AX_UKRAINE_SHIFT 8 /** * struct iwl_lari_config_change_cmd_v1 - change LARI configuration * @config_bitmap: bit map of the config commands. 
each bit will trigger a * different predefined FW config operation */ struct iwl_lari_config_change_cmd_v1 { __le32 config_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */ /** * struct iwl_lari_config_change_cmd_v2 - change LARI configuration * @config_bitmap: bit map of the config commands. each bit will trigger a * different predefined FW config operation * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets */ struct iwl_lari_config_change_cmd_v2 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_2 */ /** * struct iwl_lari_config_change_cmd_v3 - change LARI configuration * @config_bitmap: bit map of the config commands. each bit will trigger a * different predefined FW config operation * @oem_uhb_allow_bitmap: bitmap of UHB enabled MCC sets * @oem_11ax_allow_bitmap: bitmap of 11ax allowed MCCs. * For each supported country, a pair of regulatory override bit and 11ax mode exist * in the bit field. */ struct iwl_lari_config_change_cmd_v3 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */ /** * struct iwl_lari_config_change_cmd_v4 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits * per country, one to indicate whether to override and the other to * indicate the value to use. * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. */ struct iwl_lari_config_change_cmd_v4 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; __le32 oem_unii4_allow_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */ /** * struct iwl_lari_config_change_cmd_v5 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits * per country, one to indicate whether to override and the other to * indicate the value to use. * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. * @chan_state_active_bitmap: Bitmap for overriding channel state to active. * Each bit represents a country or region to activate, according to the BIOS * definitions. */ struct iwl_lari_config_change_cmd_v5 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; __le32 oem_unii4_allow_bitmap; __le32 chan_state_active_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */ /** * struct iwl_lari_config_change_cmd_v6 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits * per country, one to indicate whether to override and the other to * indicate the value to use. * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits * per country, one to indicate whether to override and the other to * indicate allow/disallow unii4 channels. 
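Each LARI command version appends one trailing __le32 to the previous layout (v6, described here and defined just below, adds one more bitmap on top of v5), so drivers typically size the payload from the API version the firmware advertises. An illustrative helper under that assumption, limited to the versions defined so far:

static size_t lari_cmd_size(unsigned int cmd_ver)
{
	switch (cmd_ver) {
	case 1: return sizeof(struct iwl_lari_config_change_cmd_v1);
	case 2: return sizeof(struct iwl_lari_config_change_cmd_v2);
	case 3: return sizeof(struct iwl_lari_config_change_cmd_v3);
	case 4: return sizeof(struct iwl_lari_config_change_cmd_v4);
	case 5: return sizeof(struct iwl_lari_config_change_cmd_v5);
	default: return 0;	/* unknown/newer version, handle separately */
	}
}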
* @chan_state_active_bitmap: Bitmap for overriding channel state to active. * Each bit represents a country or region to activate, according to the BIOS * definitions. * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. * Each bit represents a set of channels in a specific band that should be disabled */ struct iwl_lari_config_change_cmd_v6 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; __le32 oem_unii4_allow_bitmap; __le32 chan_state_active_bitmap; __le32 force_disable_channels_bitmap; } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */ /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status */ struct iwl_pnvm_init_complete_ntfy { __le32 status; } __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */ #endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/offload.h b/sys/contrib/dev/iwlwifi/fw/api/offload.h index 5204aa94e72a..898bf351f6e4 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/offload.h +++ b/sys/contrib/dev/iwlwifi/fw/api/offload.h @@ -1,70 +1,86 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ /** * enum iwl_prot_offload_subcmd_ids - protocol offload commands */ enum iwl_prot_offload_subcmd_ids { /** - * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif + * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif + */ + WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC, + + /** + * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif + */ + WOWLAN_INFO_NOTIFICATION = 0xFD, + + /** + * @D3_END_NOTIFICATION: End D3 state notification + */ + D3_END_NOTIFICATION = 0xFE, + + /** + * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif_v2 or + * &struct iwl_stored_beacon_notif_v3 */ STORED_BEACON_NTF = 0xFF, }; #define MAX_STORED_BEACON_SIZE 600 /** * struct iwl_stored_beacon_notif_common - Stored beacon notif common fields * * @system_time: system time on air rise * @tsf: TSF on air rise * @beacon_timestamp: beacon on air rise * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition * @channel: channel this beacon was received on * @rates: rate in ucode internal format * @byte_count: frame's byte count */ struct iwl_stored_beacon_notif_common { __le32 system_time; __le64 tsf; __le32 beacon_timestamp; __le16 band; __le16 channel; __le32 rates; __le32 byte_count; } __packed; /** * struct iwl_stored_beacon_notif - Stored beacon notification * * @common: fields common for all versions * @data: beacon data, length in @byte_count */ struct iwl_stored_beacon_notif_v2 { struct iwl_stored_beacon_notif_common common; u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ /** * struct iwl_stored_beacon_notif_v3 - Stored beacon notification * * @common: fields common for all versions * @sta_id: station for which the beacon was received * @reserved: reserved for alignment * @data: beacon data, length in @byte_count */ struct iwl_stored_beacon_notif_v3 { struct iwl_stored_beacon_notif_common common; u8 sta_id; u8 reserved[3]; u8 data[MAX_STORED_BEACON_SIZE]; } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */ #endif /* __iwl_fw_api_offload_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/phy-ctxt.h 
b/sys/contrib/dev/iwlwifi/fw/api/phy-ctxt.h index e66f77924f83..8fe42cff1102 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/phy-ctxt.h +++ b/sys/contrib/dev/iwlwifi/fw/api/phy-ctxt.h @@ -1,161 +1,160 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_phy_ctxt_h__ #define __iwl_fw_api_phy_ctxt_h__ /* Supported bands */ #define PHY_BAND_5 (0) #define PHY_BAND_24 (1) #define PHY_BAND_6 (2) /* Supported channel width, vary if there is VHT support */ -#define PHY_VHT_CHANNEL_MODE20 (0x0) -#define PHY_VHT_CHANNEL_MODE40 (0x1) -#define PHY_VHT_CHANNEL_MODE80 (0x2) -#define PHY_VHT_CHANNEL_MODE160 (0x3) +#define IWL_PHY_CHANNEL_MODE20 0x0 +#define IWL_PHY_CHANNEL_MODE40 0x1 +#define IWL_PHY_CHANNEL_MODE80 0x2 +#define IWL_PHY_CHANNEL_MODE160 0x3 +/* and 320 MHz for EHT */ +#define IWL_PHY_CHANNEL_MODE320 0x4 /* * Control channel position: * For legacy set bit means upper channel, otherwise lower. * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. * center_freq - * | - * 40Mhz |_______|_______| - * 80Mhz |_______|_______|_______|_______| - * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| - * code 011 010 001 000 | 100 101 110 111 + * For EHT - bit-3 is used for extended distance + * | + * 40Mhz |____|____| + * 80Mhz |____|____|____|____| + * 160Mhz |____|____|____|____|____|____|____|____| + * 320MHz |____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|____| + * code 1011 1010 1001 1000 0011 0010 0001 0000 0100 0101 0110 0111 1100 1101 1110 1111 */ -#define PHY_VHT_CTRL_POS_1_BELOW (0x0) -#define PHY_VHT_CTRL_POS_2_BELOW (0x1) -#define PHY_VHT_CTRL_POS_3_BELOW (0x2) -#define PHY_VHT_CTRL_POS_4_BELOW (0x3) -#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) -#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) -#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) -#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) +#define IWL_PHY_CTRL_POS_ABOVE 0x4 +#define IWL_PHY_CTRL_POS_OFFS_EXT 0x8 +#define IWL_PHY_CTRL_POS_OFFS_MSK 0x3 /* * struct iwl_fw_channel_info_v1 - channel information * * @band: PHY_BAND_* * @channel: channel number * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* */ struct iwl_fw_channel_info_v1 { u8 band; u8 channel; u8 width; u8 ctrl_pos; } __packed; /* CHANNEL_CONFIG_API_S_VER_1 */ /* * struct iwl_fw_channel_info - channel information * * @channel: channel number * @band: PHY_BAND_* * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* * @reserved: for future use and alignment */ struct iwl_fw_channel_info { __le32 channel; u8 band; u8 width; u8 ctrl_pos; u8 reserved; } __packed; /*CHANNEL_CONFIG_API_S_VER_2 */ #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) #define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) #define PHY_RX_CHAIN_VALID_POS (1) #define PHY_RX_CHAIN_VALID_MSK \ (0x7 << PHY_RX_CHAIN_VALID_POS) #define PHY_RX_CHAIN_FORCE_SEL_POS (4) #define PHY_RX_CHAIN_FORCE_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) #define PHY_RX_CHAIN_CNT_POS (10) #define PHY_RX_CHAIN_CNT_MSK \ (0x3 << PHY_RX_CHAIN_CNT_POS) #define 
PHY_RX_CHAIN_MIMO_CNT_POS (12) #define PHY_RX_CHAIN_MIMO_CNT_MSK \ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) #define PHY_RX_CHAIN_MIMO_FORCE_POS (14) #define PHY_RX_CHAIN_MIMO_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) /* TODO: fix the value, make it depend on firmware at runtime? */ #define NUM_PHY_CTX 3 /* TODO: complete missing documentation */ /** * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with * various channel structures. * * @txchain_info: ??? * @rxchain_info: ??? * @acquisition_data: ??? * @dsp_cfg_flags: set to 0 */ struct iwl_phy_context_cmd_tail { __le32 txchain_info; __le32 rxchain_info; __le32 acquisition_data; __le32 dsp_cfg_flags; } __packed; /** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @apply_time: 0 means immediate apply and context switch. * other value means apply new params after X usecs * @tx_param_color: ??? * @ci: channel info * @tail: command tail */ struct iwl_phy_context_cmd_v1 { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* PHY_CONTEXT_DATA_API_S_VER_3 */ __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ /** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @lmac_id: the lmac id the phy context belongs to * @ci: channel info * @rxchain_info: ??? * @dsp_cfg_flags: set to 0 * @reserved: reserved to align to 64 bit */ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */ struct iwl_fw_channel_info ci; __le32 lmac_id; __le32 rxchain_info; /* reserved in _VER_4 */ __le32 dsp_cfg_flags; __le32 reserved; } __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/phy.h b/sys/contrib/dev/iwlwifi/fw/api/phy.h index b1b9c29859c1..5a3f30e5e06d 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/phy.h +++ b/sys/contrib/dev/iwlwifi/fw/api/phy.h @@ -1,226 +1,230 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2021 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_phy_h__ #define __iwl_fw_api_phy_h__ /** * enum iwl_phy_ops_subcmd_ids - PHY group commands */ enum iwl_phy_ops_subcmd_ids { /** * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: * Uses either &struct iwl_dts_measurement_cmd or * &struct iwl_ext_dts_measurement_cmd */ CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, /** * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd */ CTDP_CONFIG_CMD = 0x03, /** * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd */ TEMP_REPORTING_THRESHOLDS_CMD = 0x04, /** - * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd + * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd_v1, + * &struct iwl_geo_tx_power_profiles_cmd_v2, + * &struct iwl_geo_tx_power_profiles_cmd_v3, + * &struct 
iwl_geo_tx_power_profiles_cmd_v4 or + * &struct iwl_geo_tx_power_profiles_cmd_v5 */ PER_CHAIN_LIMIT_OFFSET_CMD = 0x05, /** - * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd + * @PER_PLATFORM_ANT_GAIN_CMD: &union iwl_ppag_table_cmd */ PER_PLATFORM_ANT_GAIN_CMD = 0x07, /** * @CT_KILL_NOTIFICATION: &struct ct_kill_notif */ CT_KILL_NOTIFICATION = 0xFE, /** * @DTS_MEASUREMENT_NOTIF_WIDE: * &struct iwl_dts_measurement_notif_v1 or * &struct iwl_dts_measurement_notif_v2 */ DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; /* DTS measurements */ enum iwl_dts_measurement_flags { DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), }; /** * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements * * @flags: indicates which measurements we want as specified in * &enum iwl_dts_measurement_flags */ struct iwl_dts_measurement_cmd { __le32 flags; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ /** * enum iwl_dts_control_measurement_mode - DTS measurement type * @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read * back (latest value. Not waiting for new value). Use automatic * SW DTS configuration. * @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, * trigger DTS reading and provide read back temperature read * when available. * @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read * @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, * without measurement trigger. */ enum iwl_dts_control_measurement_mode { DTS_AUTOMATIC = 0, DTS_REQUEST_READ = 1, DTS_OVER_WRITE = 2, DTS_DIRECT_WITHOUT_MEASURE = 3, }; /** * enum iwl_dts_used - DTS to use or used for measurement in the DTS request * @DTS_USE_TOP: Top * @DTS_USE_CHAIN_A: chain A * @DTS_USE_CHAIN_B: chain B * @DTS_USE_CHAIN_C: chain C * @XTAL_TEMPERATURE: read temperature from xtal */ enum iwl_dts_used { DTS_USE_TOP = 0, DTS_USE_CHAIN_A = 1, DTS_USE_CHAIN_B = 2, DTS_USE_CHAIN_C = 3, XTAL_TEMPERATURE = 4, }; /** * enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode * @DTS_BIT6_MODE: bit 6 mode * @DTS_BIT8_MODE: bit 8 mode */ enum iwl_dts_bit_mode { DTS_BIT6_MODE = 0, DTS_BIT8_MODE = 1, }; /** * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements * @control_mode: see &enum iwl_dts_control_measurement_mode * @temperature: used when over write DTS mode is selected * @sensor: set temperature sensor to use. See &enum iwl_dts_used * @avg_factor: average factor to DTS in request DTS read mode * @bit_mode: value defines the DTS bit mode to use. 
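CMD_DTS_MEASUREMENT_TRIGGER_WIDE accepts either the simple command (just the flag bits) or the extended one described here; the readings come back via DTS_MEASUREMENT_NOTIF_WIDE as one of the notification structures below. A minimal sketch of the simple form, assuming the definitions above:

static void build_dts_trigger(struct iwl_dts_measurement_cmd *cmd)
{
	/* request both temperature and voltage readbacks */
	cmd->flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP |
				 DTS_TRIGGER_CMD_FLAGS_VOLT);
}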
See &enum iwl_dts_bit_mode * @step_duration: step duration for the DTS */ struct iwl_ext_dts_measurement_cmd { __le32 control_mode; __le32 temperature; __le32 sensor; __le32 avg_factor; __le32 bit_mode; __le32 step_duration; } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ /** * struct iwl_dts_measurement_notif_v1 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage */ struct iwl_dts_measurement_notif_v1 { __le32 temp; __le32 voltage; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ /** * struct iwl_dts_measurement_notif_v2 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage * @threshold_idx: the trip index that was crossed */ struct iwl_dts_measurement_notif_v2 { __le32 temp; __le32 voltage; __le32 threshold_idx; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ /** * struct iwl_dts_measurement_resp - measurements response * * @temp: the measured temperature */ struct iwl_dts_measurement_resp { __le32 temp; } __packed; /* CMD_DTS_MEASUREMENT_RSP_API_S_VER_1 */ /** * struct ct_kill_notif - CT-kill entry notification * This structure represent both versions of this notification. * * @temperature: the current temperature in celsius * @dts: only in v2: DTS that trigger the CT Kill bitmap: * bit 0: ToP master * bit 1: PA chain A master * bit 2: PA chain B master * bit 3: ToP slave * bit 4: PA chain A slave * bit 5: PA chain B slave) * bits 6,7: reserved (set to 0) * @scheme: only for v2: scheme that trigger the CT Kill (0-SW, 1-HW) */ struct ct_kill_notif { __le16 temperature; u8 dts; u8 scheme; } __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */ /** * enum ctdp_cmd_operation - CTDP command operations * @CTDP_CMD_OPERATION_START: update the current budget * @CTDP_CMD_OPERATION_STOP: stop ctdp * @CTDP_CMD_OPERATION_REPORT: get the average budget */ enum iwl_mvm_ctdp_cmd_operation { CTDP_CMD_OPERATION_START = 0x1, CTDP_CMD_OPERATION_STOP = 0x2, CTDP_CMD_OPERATION_REPORT = 0x4, };/* CTDP_CMD_OPERATION_TYPE_E */ /** * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget * * @operation: see &enum iwl_mvm_ctdp_cmd_operation * @budget: the budget in milliwatt * @window_size: defined in API but not used */ struct iwl_mvm_ctdp_cmd { __le32 operation; __le32 budget; __le32 window_size; } __packed; #define IWL_MAX_DTS_TRIPS 8 /** * struct temp_report_ths_cmd - set temperature thresholds * * @num_temps: number of temperature thresholds passed * @thresholds: array with the thresholds to be configured */ struct temp_report_ths_cmd { __le32 num_temps; __le16 thresholds[IWL_MAX_DTS_TRIPS]; } __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ #endif /* __iwl_fw_api_phy_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/power.h b/sys/contrib/dev/iwlwifi/fw/api/power.h index f92cac1da764..85d89f559f6c 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/power.h +++ b/sys/contrib/dev/iwlwifi/fw/api/power.h @@ -1,670 +1,670 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_power_h__ #define __iwl_fw_api_power_h__ /* Power Management Commands, Responses, Notifications */ /** * enum iwl_ltr_config_flags - masks for LTR config command flags * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow 
LTR change on shadow * memory access * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR * reg change * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from * D0 to D3 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short * idle timeout */ enum iwl_ltr_config_flags { LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), LTR_CFG_FLAG_SW_SET_LONG = BIT(5), LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), LTR_CFG_FLAG_UPDATE_VALUES = BIT(7), }; /** * struct iwl_ltr_config_cmd_v1 - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. */ struct iwl_ltr_config_cmd_v1 { __le32 flags; __le32 static_long; __le32 static_short; } __packed; /* LTR_CAPABLE_API_S_VER_1 */ #define LTR_VALID_STATES_NUM 4 /** * struct iwl_ltr_config_cmd - configures the LTR * @flags: See &enum iwl_ltr_config_flags * @static_long: static LTR Long register value. * @static_short: static LTR Short register value. * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order: * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES * is set. * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if * %LTR_CFG_FLAG_UPDATE_VALUES is set. */ struct iwl_ltr_config_cmd { __le32 flags; __le32 static_long; __le32 static_short; __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; __le32 ltr_short_idle_timeout; } __packed; /* LTR_CAPABLE_API_S_VER_2 */ /* Radio LP RX Energy Threshold measured in dBm */ #define POWER_LPRX_RSSI_THRESHOLD 75 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30 /** * enum iwl_power_flags - masks for power table command flags * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, * '1' Driver enables PM (use rest of parameters) * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, * '1' PM could sleep over DTIM till listen Interval. * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all * access categories are both delivery and trigger enabled. * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and * PBW Snoozing enabled * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving * detection enablement */ enum iwl_power_flags { POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), POWER_FLAGS_BT_SCO_ENA = BIT(8), POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), POWER_FLAGS_LPRX_ENA_MSK = BIT(11), POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), }; #define IWL_POWER_VEC_SIZE 5 /** * struct iwl_powertable_cmd - legacy power command. Beside old API support this * is used also with a new power API for device wide power settings. 
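Note that the v2 LTR command only consumes @ltr_cfg_values and @ltr_short_idle_timeout when %LTR_CFG_FLAG_UPDATE_VALUES is set; otherwise only the static registers apply. A hedged sketch of enabling LTR with fixed static values, assuming the definitions above (the register values themselves are placeholders):

static void build_ltr_cmd(struct iwl_ltr_config_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE |
				 LTR_CFG_FLAG_SW_SET_SHORT |
				 LTR_CFG_FLAG_SW_SET_LONG);
	cmd->static_long = cpu_to_le32(0);	/* placeholder register value */
	cmd->static_short = cpu_to_le32(0);	/* placeholder register value */
	/* ltr_cfg_values[] and ltr_short_idle_timeout are ignored unless
	 * LTR_CFG_FLAG_UPDATE_VALUES is also set. */
}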
* POWER_TABLE_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed:- 3 * DTIM. Keep alive period must be * set regardless of power scheme or current power state. * FW use this value also when PM is disabled. * @debug_flags: debug flags * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @sleep_interval: not in use * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value need to be set to 2 (DTIM periods). * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm */ struct iwl_powertable_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; u8 keep_alive_seconds; u8 debug_flags; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 sleep_interval[IWL_POWER_VEC_SIZE]; __le32 skip_dtim_periods; __le32 lprx_rssi_threshold; } __packed; /** * enum iwl_device_power_flags - masks for device power command flags * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: * '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: * Device Retention indication, '1' indicate retention is enabled. * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: * 32Khz external slow clock valid indication, '1' indicate cloack is * valid. */ enum iwl_device_power_flags { DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), }; /** * struct iwl_device_power_cmd - device wide power command. * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) * * @flags: Power table command flags from &enum iwl_device_power_flags * @reserved: reserved (padding) */ struct iwl_device_power_cmd { /* PM_POWER_TABLE_CMD_API_S_VER_6 */ __le16 flags; __le16 reserved; } __packed; /** * struct iwl_mac_power_cmd - New power command containing uAPSD support * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) * @id_and_color: MAC contex identifier, &enum iwl_ctxt_id_and_color * @flags: Power table command flags from POWER_FLAGS_* * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. * Minimum allowed:- 3 * DTIM. Keep alive period must be * set regardless of power scheme or current power state. * FW use this value also when PM is disabled. * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to * PSM transition - legacy PM * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to * PSM transition - legacy PM * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag * is set. For example, if it is required to skip over * one DTIM, this value need to be set to 2 (DTIM periods). * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to * PSM transition - uAPSD * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to * PSM transition - uAPSD * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. * Default: 80dbm * @snooze_interval: Maximum time between attempts to retrieve buffered data * from the AP [msec] * @snooze_window: A window of time in which PBW snoozing insures that all * packets received. 
It is also the minimum time from last * received unicast RX packet, before client stops snoozing * for data. [msec] * @snooze_step: TBD * @qndp_tid: TID client shall use for uAPSD QNDP triggers * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for * each corresponding AC. * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct * values. * @heavy_tx_thld_packets: TX threshold measured in number of packets * @heavy_rx_thld_packets: RX threshold measured in number of packets * @heavy_tx_thld_percentage: TX threshold measured in load's percentage * @heavy_rx_thld_percentage: RX threshold measured in load's percentage * @limited_ps_threshold: (unused) * @reserved: reserved (padding) */ struct iwl_mac_power_cmd { /* CONTEXT_DESC_API_T_VER_1 */ __le32 id_and_color; /* CLIENT_PM_POWER_TABLE_S_VER_1 */ __le16 flags; __le16 keep_alive_seconds; __le32 rx_data_timeout; __le32 tx_data_timeout; __le32 rx_data_timeout_uapsd; __le32 tx_data_timeout_uapsd; u8 lprx_rssi_threshold; u8 skip_dtim_periods; __le16 snooze_interval; __le16 snooze_window; u8 snooze_step; u8 qndp_tid; u8 uapsd_ac_flags; u8 uapsd_max_sp; u8 heavy_tx_thld_packets; u8 heavy_rx_thld_packets; u8 heavy_tx_thld_percentage; u8 heavy_rx_thld_percentage; u8 limited_ps_threshold; u8 reserved; } __packed; /* * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when * associated AP is identified as improperly implementing uAPSD protocol. * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 * @sta_id: index of station in uCode's station table - associated AP ID in * this context. */ struct iwl_uapsd_misbehaving_ap_notif { __le32 sta_id; u8 mac_id; u8 reserved[3]; } __packed; /** * struct iwl_reduce_tx_power_cmd - TX power reduction command * REDUCE_TX_POWER_CMD = 0x9f * @flags: (reserved for future implementation) * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in dBms. */ struct iwl_reduce_tx_power_cmd { u8 flags; u8 mac_context_id; __le16 pwr_restriction; } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_MAC = 0, IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, IWL_TX_POWER_MODE_SET_ACK = 3, IWL_TX_POWER_MODE_SET_SAR_TIMER = 4, IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5, }; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */; #define IWL_NUM_CHAIN_TABLES 1 #define IWL_NUM_CHAIN_TABLES_V2 2 #define IWL_NUM_CHAIN_LIMITS 2 #define IWL_NUM_SUB_BANDS_V1 5 #define IWL_NUM_SUB_BANDS_V2 11 /** * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd * @set_mode: see &enum iwl_dev_tx_power_cmd_mode * @mac_context_id: id of the mac ctx for which we are reducing TX power. * @pwr_restriction: TX power restriction in 1/8 dBms. 
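Continuing the MAC power command above: uAPSD is enabled by setting POWER_FLAGS_ADVANCE_PM_ENA_MSK and programming the QoS-info fields with the mac80211 WMM constants the field comments call out. A hedged sketch (the TID choice is illustrative):

static void enable_uapsd(struct iwl_mac_power_cmd *cmd)
{
	cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
	cmd->uapsd_ac_flags = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
			      IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |
			      IEEE80211_WMM_IE_STA_QOSINFO_AC_BE |
			      IEEE80211_WMM_IE_STA_QOSINFO_AC_BK;
	cmd->uapsd_max_sp = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
	cmd->qndp_tid = 6;	/* example: voice TID for QNDP triggers */
}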
* @dev_24: device TX power restriction in 1/8 dBms * @dev_52_low: device TX power restriction upper band - low * @dev_52_high: device TX power restriction upper band - high */ struct iwl_dev_tx_power_common { __le32 set_mode; __le32 mac_context_id; __le16 pwr_restriction; __le16 dev_24; __le16 dev_52_low; __le16 dev_52_high; }; /** * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3 * @per_chain: per chain restrictions */ struct iwl_dev_tx_power_cmd_v3 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; } __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ #define IWL_DEV_MAX_TX_POWER 0x7FFF /** * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @reserved: reserved (padding) */ struct iwl_dev_tx_power_cmd_v4 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 reserved[3]; } __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ /** * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: is per_chain_restriction has changed * from last command. used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * note: if not changed, the command is used for keep alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v5 { __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_5 */ /** * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: is per_chain_restriction has changed * from last command. used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * note: if not changed, the command is used for keep alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER */ struct iwl_dev_tx_power_cmd_v6 { __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; } __packed; /* TX_REDUCED_POWER_API_S_VER_6 */ /** * struct iwl_dev_tx_power_cmd_v7 - TX power reduction command version 7 * @per_chain: per chain restrictions * @enable_ack_reduction: enable or disable close range ack TX power * reduction. * @per_chain_restriction_changed: is per_chain_restriction has changed * from last command. used if set_mode is * IWL_TX_POWER_MODE_SET_SAR_TIMER. * note: if not changed, the command is used for keep alive only. * @reserved: reserved (padding) * @timer_period: timer in milliseconds. if expires FW will change to default * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER * @flags: reduce power flags. 
*/ struct iwl_dev_tx_power_cmd_v7 { __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; u8 enable_ack_reduction; u8 per_chain_restriction_changed; u8 reserved[2]; __le32 timer_period; __le32 flags; } __packed; /* TX_REDUCED_POWER_API_S_VER_7 */ /** * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion) * @common: common part of the command * @v3: version 3 part of the command * @v4: version 4 part of the command * @v5: version 5 part of the command * @v6: version 6 part of the command */ struct iwl_dev_tx_power_cmd { struct iwl_dev_tx_power_common common; union { struct iwl_dev_tx_power_cmd_v3 v3; struct iwl_dev_tx_power_cmd_v4 v4; struct iwl_dev_tx_power_cmd_v5 v5; struct iwl_dev_tx_power_cmd_v6 v6; struct iwl_dev_tx_power_cmd_v7 v7; }; }; #define IWL_NUM_GEO_PROFILES 3 #define IWL_NUM_GEO_PROFILES_V3 8 #define IWL_NUM_BANDS_PER_CHAIN_V1 2 #define IWL_NUM_BANDS_PER_CHAIN_V2 3 /** * enum iwl_geo_per_chain_offset_operation - type of operation * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. */ enum iwl_geo_per_chain_offset_operation { IWL_PER_CHAIN_OFFSET_SET_TABLES, IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, }; /* PER_CHAIN_OFFSET_OPERATION_E */ /** * struct iwl_per_chain_offset - embedded struct for PER_CHAIN_LIMIT_OFFSET_CMD. * @max_tx_power: maximum allowed tx power. * @chain_a: tx power offset for chain a. * @chain_b: tx power offset for chain b. */ struct iwl_per_chain_offset { __le16 max_tx_power; u8 chain_a; u8 chain_b; } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ /** * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. */ struct iwl_geo_tx_power_profiles_cmd_v1 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */ /** * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v2 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */ /** * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */ /** * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. 
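Because the TX power reduction command is sent as the common header followed by exactly one versioned tail, the payload length depends on the API version the firmware advertises. An illustrative helper, assuming the multiversion wrapper defined above:

static size_t dev_tx_power_cmd_len(const struct iwl_dev_tx_power_cmd *cmd,
				   unsigned int cmd_ver)
{
	size_t common = sizeof(cmd->common);

	switch (cmd_ver) {
	case 3: return common + sizeof(cmd->v3);
	case 4: return common + sizeof(cmd->v4);
	case 5: return common + sizeof(cmd->v5);
	case 6: return common + sizeof(cmd->v6);
	case 7: return common + sizeof(cmd->v7);
	default: return 0;	/* unknown version */
	}
}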
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v4 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */ /** * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v5 { __le32 ops; struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2]; __le32 table_revision; } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */ union iwl_geo_tx_power_profiles_cmd { struct iwl_geo_tx_power_profiles_cmd_v1 v1; struct iwl_geo_tx_power_profiles_cmd_v2 v2; struct iwl_geo_tx_power_profiles_cmd_v3 v3; struct iwl_geo_tx_power_profiles_cmd_v4 v4; struct iwl_geo_tx_power_profiles_cmd_v5 v5; }; /** * struct iwl_geo_tx_power_profiles_resp - response to PER_CHAIN_LIMIT_OFFSET_CMD cmd * @profile_idx: current geo profile in use */ struct iwl_geo_tx_power_profiles_resp { __le32 profile_idx; } __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */ /** * union iwl_ppag_table_cmd - union for all versions of PPAG command * @v1: version 1 * @v2: version 2 * * @flags: bit 0 - indicates enablement of PPAG for ETSI * bit 1 - indicates enablement of PPAG for CHINA BIOS * bit 1 can be used only in v3 (identical to v2) * @gain: table of antenna gain values per chain and sub-band * @reserved: reserved */ union iwl_ppag_table_cmd { struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V1]; s8 reserved[2]; } v1; struct { __le32 flags; s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2]; s8 reserved[2]; } v2; } __packed; #define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26 #define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13 /** * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD * @offset_map: mapping a mcc to a geo sar group * @reserved: reserved */ struct iwl_sar_offset_mapping_cmd { u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE] [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE]; - u16 reserved; + __le16 reserved; } __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/ /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon * to driver if delta in Energy values calculated for this and last * passed beacon is greater than this threshold. Zero value means that * the Energy change is ignored for beacon filtering, and beacon will * not be forced to be sent to driver regardless of this delta. Typical * energy delta 5dB. * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. * Send beacon to driver if delta in Energy values calculated for this * and last passed beacon is greater than this threshold. Zero value * means that the Energy change is ignored for beacon filtering while in * Roaming state, typical energy delta 1dB. * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values * calculated for current beacon is less than the threshold, use * Roaming Energy Delta Threshold, otherwise use normal Energy Delta * Threshold. Typical energy threshold is -72dBm. 
* @bf_temp_threshold: This threshold determines the type of temperature * filtering (Slow or Fast) that is selected (Units are in Celsuis): * If the current temperature is above this threshold - Fast filter * will be used, If the current temperature is below this threshold - * Slow filter will be used. * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether its temerature has been changed. * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values * calculated for this and the last passed beacon is greater than this * threshold. Zero value means that the temperature change is ignored for * beacon filtering; beacons will not be forced to be sent to driver * regardless of whether its temerature has been changed. * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. * @bf_debug_flag: beacon filtering debug configuration * @bf_escape_timer: Send beacons to to driver if no beacons were passed * for a specific period of time. Units: Beacons. * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed * for a longer period of time then this escape-timeout. Units: Beacons. * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. * @bf_threshold_absolute_low: See below. * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated * for this beacon crossed this absolute threshold. For the 'Increase' * direction the bf_energy_absolute_low[i] is used. For the 'Decrease' * direction the bf_energy_absolute_high[i] is used. Zero value means * that this specific threshold is ignored for beacon filtering, and * beacon will not be forced to be sent to driver due to this setting. 
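On the driver side this command is typically seeded from the default thresholds; a sketch using the IWL_BF_CMD_CONFIG_DEFAULTS initializer and the structure defined just below, with filtering and beacon abort switched on:

static const struct iwl_beacon_filter_cmd bf_cmd_defaults = {
	IWL_BF_CMD_CONFIG_DEFAULTS,
	.bf_enable_beacon_filter = cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
	.ba_enable_beacon_abort = cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
};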
*/ struct iwl_beacon_filter_cmd { __le32 bf_energy_delta; __le32 bf_roaming_energy_delta; __le32 bf_roaming_state; __le32 bf_temp_threshold; __le32 bf_temp_fast_filter; __le32 bf_temp_slow_filter; __le32 bf_enable_beacon_filter; __le32 bf_debug_flag; __le32 bf_escape_timer; __le32 ba_escape_timer; __le32 ba_enable_beacon_abort; __le32 bf_threshold_absolute_low[2]; __le32 bf_threshold_absolute_high[2]; } __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */ /* Beacon filtering and beacon abort */ #define IWL_BF_ENERGY_DELTA_DEFAULT 5 #define IWL_BF_ENERGY_DELTA_D0I3 20 #define IWL_BF_ENERGY_DELTA_MAX 255 #define IWL_BF_ENERGY_DELTA_MIN 0 #define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 #define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 #define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 #define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 #define IWL_BF_ROAMING_STATE_DEFAULT 72 #define IWL_BF_ROAMING_STATE_D0I3 72 #define IWL_BF_ROAMING_STATE_MAX 255 #define IWL_BF_ROAMING_STATE_MIN 0 #define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 #define IWL_BF_TEMP_THRESHOLD_D0I3 112 #define IWL_BF_TEMP_THRESHOLD_MAX 255 #define IWL_BF_TEMP_THRESHOLD_MIN 0 #define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 #define IWL_BF_TEMP_FAST_FILTER_D0I3 1 #define IWL_BF_TEMP_FAST_FILTER_MAX 255 #define IWL_BF_TEMP_FAST_FILTER_MIN 0 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 #define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0 #define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 #define IWL_BF_DEBUG_FLAG_DEFAULT 0 #define IWL_BF_DEBUG_FLAG_D0I3 0 #define IWL_BF_ESCAPE_TIMER_DEFAULT 0 #define IWL_BF_ESCAPE_TIMER_D0I3 0 #define IWL_BF_ESCAPE_TIMER_MAX 1024 #define IWL_BF_ESCAPE_TIMER_MIN 0 #define IWL_BA_ESCAPE_TIMER_DEFAULT 6 #define IWL_BA_ESCAPE_TIMER_D0I3 6 #define IWL_BA_ESCAPE_TIMER_D3 9 #define IWL_BA_ESCAPE_TIMER_MAX 1024 #define IWL_BA_ESCAPE_TIMER_MIN 0 #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 #define IWL_BF_CMD_CONFIG(mode) \ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ .bf_roaming_energy_delta = \ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) #define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) #define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) #endif /* __iwl_fw_api_power_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/rs.h b/sys/contrib/dev/iwlwifi/fw/api/rs.h index 687f804c46b7..a1a272433b09 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/rs.h +++ b/sys/contrib/dev/iwlwifi/fw/api/rs.h @@ -1,761 +1,768 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rs_h__ #define __iwl_fw_api_rs_h__ #include "mac.h" /** * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. 
For HE this enables STBC for * bandwidths <= 80MHz * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC * @IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz * bandwidth * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 1 spatial * stream * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 2 spatial * streams + * @IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK: enable support for EHT extra LTF */ enum iwl_tlc_mng_cfg_flags { IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4), + IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = BIT(6), }; /** * enum iwl_tlc_mng_cfg_cw - channel width options * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel - * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value + * @IWL_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel */ enum iwl_tlc_mng_cfg_cw { IWL_TLC_MNG_CH_WIDTH_20MHZ, IWL_TLC_MNG_CH_WIDTH_40MHZ, IWL_TLC_MNG_CH_WIDTH_80MHZ, IWL_TLC_MNG_CH_WIDTH_160MHZ, - IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_320MHZ, }; /** * enum iwl_tlc_mng_cfg_chains - possible chains * @IWL_TLC_MNG_CHAIN_A_MSK: chain A * @IWL_TLC_MNG_CHAIN_B_MSK: chain B */ enum iwl_tlc_mng_cfg_chains { IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), }; /** * enum iwl_tlc_mng_cfg_mode - supported modes * @IWL_TLC_MNG_MODE_CCK: enable CCK * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT) * @IWL_TLC_MNG_MODE_NON_HT: enable non HT * @IWL_TLC_MNG_MODE_HT: enable HT * @IWL_TLC_MNG_MODE_VHT: enable VHT * @IWL_TLC_MNG_MODE_HE: enable HE - * @IWL_TLC_MNG_MODE_INVALID: invalid value - * @IWL_TLC_MNG_MODE_NUM: a count of possible modes + * @IWL_TLC_MNG_MODE_EHT: enable EHT */ enum iwl_tlc_mng_cfg_mode { IWL_TLC_MNG_MODE_CCK = 0, IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK, IWL_TLC_MNG_MODE_HT, IWL_TLC_MNG_MODE_VHT, IWL_TLC_MNG_MODE_HE, - IWL_TLC_MNG_MODE_INVALID, - IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID, + IWL_TLC_MNG_MODE_EHT, }; /** * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3 * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4 * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5 * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6 * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10 * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11 * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT */ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MCS0 = 0, IWL_TLC_MNG_HT_RATE_MCS1, IWL_TLC_MNG_HT_RATE_MCS2, IWL_TLC_MNG_HT_RATE_MCS3, IWL_TLC_MNG_HT_RATE_MCS4, IWL_TLC_MNG_HT_RATE_MCS5, IWL_TLC_MNG_HT_RATE_MCS6, IWL_TLC_MNG_HT_RATE_MCS7, IWL_TLC_MNG_HT_RATE_MCS8, IWL_TLC_MNG_HT_RATE_MCS9, IWL_TLC_MNG_HT_RATE_MCS10, IWL_TLC_MNG_HT_RATE_MCS11, IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; enum IWL_TLC_MNG_NSS { IWL_TLC_NSS_1, IWL_TLC_NSS_2, IWL_TLC_NSS_MAX }; /** * enum IWL_TLC_MCS_PER_BW - mcs index per BW * @IWL_TLC_MCS_PER_BW_80: mcs for bw - 
20Hhz, 40Hhz, 80Hhz * @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160Mhz * @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320Mhz * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3 * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4 */ enum IWL_TLC_MCS_PER_BW { IWL_TLC_MCS_PER_BW_80, IWL_TLC_MCS_PER_BW_160, IWL_TLC_MCS_PER_BW_320, IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1, IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1, }; /** * struct iwl_tlc_config_cmd_v3 - TLC configuration * @sta_id: station id * @reserved1: reserved * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @amsdu: TX amsdu is supported * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW * pair (0 - 80mhz width and below, 1 - 160mhz). * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(@enum iwl_tlc_mng_cfg_cw) * @reserved2: reserved * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. */ struct iwl_tlc_config_cmd_v3 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; u8 mode; u8 chains; u8 amsdu; __le16 flags; __le16 non_ht_rates; __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2; __le32 max_tx_op; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */ /** * struct iwl_tlc_config_cmd_v4 - TLC configuration * @sta_id: station id * @reserved1: reserved * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(&enum iwl_tlc_mng_cfg_cw) * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per * pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz). * @max_mpdu_len: max MPDU length, in bytes * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. 
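For the v4 structure closed just below, @ht_rates is indexed by NSS and by the IWL_TLC_MCS_PER_BW column, each entry being a bitmap over &enum iwl_tlc_mng_ht_rates. An illustrative fill for a two-stream HE peer limited to 80 MHz, assuming the definitions above:

static void set_he_rates(struct iwl_tlc_config_cmd_v4 *cmd)
{
	/* bitmap of MCS 0..11 */
	u16 mcs_0_11 = GENMASK(IWL_TLC_MNG_HT_RATE_MCS11, IWL_TLC_MNG_HT_RATE_MCS0);

	cmd->mode = IWL_TLC_MNG_MODE_HE;
	cmd->max_ch_width = IWL_TLC_MNG_CH_WIDTH_80MHZ;
	cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(mcs_0_11);
	cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(mcs_0_11);
	/* 160/320 MHz columns left at zero: not supported in this example */
}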
*/ struct iwl_tlc_config_cmd_v4 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; u8 mode; u8 chains; u8 sgi_ch_width_supp; __le16 flags; __le16 non_ht_rates; __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4]; __le16 max_mpdu_len; __le16 max_tx_op; } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */ /** * enum iwl_tlc_update_flags - updated fields * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update * @IWL_TLC_NOTIF_FLAG_AMSDU: umsdu parameters update */ enum iwl_tlc_update_flags { IWL_TLC_NOTIF_FLAG_RATE = BIT(0), IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1), }; /** * struct iwl_tlc_update_notif - TLC notification from FW * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported * @rate: current initial rate * @amsdu_size: Max AMSDU size, in bytes * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ struct iwl_tlc_update_notif { u8 sta_id; u8 reserved[3]; __le32 flags; __le32 rate; __le32 amsdu_size; __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ #define IWL_MAX_MCS_DISPLAY_SIZE 12 struct iwl_rate_mcs_info { char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; }; /* * These serve as indexes into * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; * TODO: avoid overlap between legacy and HT rates */ enum { IWL_RATE_1M_INDEX = 0, IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, IWL_RATE_2M_INDEX, IWL_RATE_5M_INDEX, IWL_RATE_11M_INDEX, IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, IWL_RATE_6M_INDEX, IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, IWL_RATE_9M_INDEX, IWL_RATE_12M_INDEX, IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, IWL_RATE_24M_INDEX, IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, IWL_RATE_48M_INDEX, IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX, IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, IWL_RATE_60M_INDEX, IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, IWL_RATE_MCS_8_INDEX, IWL_RATE_MCS_9_INDEX, IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, IWL_RATE_MCS_10_INDEX, IWL_RATE_MCS_11_INDEX, IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, IWL_RATE_INVALID = IWL_RATE_COUNT, }; #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) /* fw API values for legacy bit rates, both OFDM and CCK */ enum { IWL_RATE_6M_PLCP = 13, IWL_RATE_9M_PLCP = 15, IWL_RATE_12M_PLCP = 5, IWL_RATE_18M_PLCP = 7, IWL_RATE_24M_PLCP = 9, IWL_RATE_36M_PLCP = 11, IWL_RATE_48M_PLCP = 1, IWL_RATE_54M_PLCP = 3, IWL_RATE_1M_PLCP = 10, IWL_RATE_2M_PLCP = 20, IWL_RATE_5M_PLCP = 55, IWL_RATE_11M_PLCP = 110, IWL_RATE_INVM_PLCP = -1, }; /* * rate_n_flags bit fields version 1 * * The 32-bit value has different layouts in the low 8 bites depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats * for CCK and OFDM). 
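As a quick illustration of how the version-1 formats are told apart, here is a standalone sketch; the four masks are local copies of RATE_MCS_HT_MSK_V1, RATE_MCS_CCK_MSK_V1, RATE_MCS_HE_MSK_V1 and RATE_MCS_VHT_MSK_V1 defined just below, rate_v1_format is a made-up helper (not a function from this file), and the sample legacy value uses IWL_RATE_6M_PLCP (13) from the table above.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the version-1 format bits defined below. */
#define RATE_MCS_HT_MSK_V1   (1u << 8)
#define RATE_MCS_CCK_MSK_V1  (1u << 9)
#define RATE_MCS_HE_MSK_V1   (1u << 10)
#define RATE_MCS_VHT_MSK_V1  (1u << 26)

static const char *rate_v1_format(uint32_t rate_n_flags)
{
	if (rate_n_flags & RATE_MCS_HE_MSK_V1)
		return "HE";
	if (rate_n_flags & RATE_MCS_HT_MSK_V1)
		return "HT";
	if (rate_n_flags & RATE_MCS_VHT_MSK_V1)
		return "VHT";
	if (rate_n_flags & RATE_MCS_CCK_MSK_V1)
		return "legacy CCK";
	return "legacy OFDM";
}

int main(void)
{
	printf("%s\n", rate_v1_format(1u << 26)); /* VHT */
	printf("%s\n", rate_v1_format(0x0du));    /* legacy OFDM, 6 Mbps PLCP */
	return 0;
}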
* * High-throughput (HT) rate format * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) * Very High-throughput (VHT) rate format * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) * Legacy OFDM rate format for bits 7:0 * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) */ /* Bit 8: (1) HT format, (0) legacy or VHT format */ #define RATE_MCS_HT_POS 8 #define RATE_MCS_HT_MSK_V1 BIT(RATE_MCS_HT_POS) /* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ #define RATE_MCS_CCK_POS_V1 9 #define RATE_MCS_CCK_MSK_V1 BIT(RATE_MCS_CCK_POS_V1) /* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ #define RATE_MCS_VHT_POS_V1 26 #define RATE_MCS_VHT_MSK_V1 BIT(RATE_MCS_VHT_POS_V1) /* * High-throughput (HT) rate format for bits 7:0 * * 2-0: MCS rate base * 0) 6 Mbps * 1) 12 Mbps * 2) 18 Mbps * 3) 24 Mbps * 4) 36 Mbps * 5) 48 Mbps * 6) 54 Mbps * 7) 60 Mbps * 4-3: 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data * (bits 7-6 are zero) * * Together the low 5 bits work out to the MCS index because we don't * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two * streams and 16-23 have three streams. We could also support MCS 32 * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) */ #define RATE_HT_MCS_RATE_CODE_MSK_V1 0x7 #define RATE_HT_MCS_NSS_POS_V1 3 #define RATE_HT_MCS_NSS_MSK_V1 (3 << RATE_HT_MCS_NSS_POS_V1) #define RATE_HT_MCS_MIMO2_MSK BIT(RATE_HT_MCS_NSS_POS_V1) /* Bit 10: (1) Use Green Field preamble */ #define RATE_HT_MCS_GF_POS 10 #define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) #define RATE_HT_MCS_INDEX_MSK_V1 0x3f /* * Very High-throughput (VHT) rate format for bits 7:0 * * 3-0: VHT MCS (0-9) * 5-4: number of streams - 1: * 0) Single stream (SISO) * 1) Dual stream (MIMO) * 2) Triple stream (MIMO) */ /* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ #define RATE_VHT_MCS_RATE_CODE_MSK 0xf -#define RATE_VHT_MCS_NSS_POS 4 -#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) -#define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS) /* * Legacy OFDM rate format for bits 7:0 * * 3-0: 0xD) 6 Mbps * 0xF) 9 Mbps * 0x5) 12 Mbps * 0x7) 18 Mbps * 0x9) 24 Mbps * 0xB) 36 Mbps * 0x1) 48 Mbps * 0x3) 54 Mbps * (bits 7-4 are 0) * * Legacy CCK rate format for bits 7:0: * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): * * 6-0: 10) 1 Mbps * 20) 2 Mbps * 55) 5.5 Mbps * 110) 11 Mbps * (bit 7 is 0) */ #define RATE_LEGACY_RATE_MSK_V1 0xff /* Bit 10 - OFDM HE */ #define RATE_MCS_HE_POS_V1 10 #define RATE_MCS_HE_MSK_V1 BIT(RATE_MCS_HE_POS_V1) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT */ #define RATE_MCS_CHAN_WIDTH_POS 11 #define RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << RATE_MCS_CHAN_WIDTH_POS) /* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ #define RATE_MCS_SGI_POS_V1 13 #define RATE_MCS_SGI_MSK_V1 BIT(RATE_MCS_SGI_POS_V1) /* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ #define RATE_MCS_ANT_POS 14 #define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) #define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ RATE_MCS_ANT_B_MSK) #define RATE_MCS_ANT_MSK RATE_MCS_ANT_AB_MSK /* Bit 17: (0) SS, (1) SS*2 */ #define RATE_MCS_STBC_POS 17 #define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) /* Bit 18: OFDM-HE dual carrier mode */ #define RATE_HE_DUAL_CARRIER_MODE 18 
#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE) /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ #define RATE_MCS_BF_POS 19 #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) /* * Bit 20-21: HE LTF type and guard interval * HE (ext) SU: * 0 1xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 & SGI (bit 13) clear 4xLTF+3.2us * 3 & SGI (bit 13) set 4xLTF+0.8us * HE MU: * 0 4xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us - * HE TRIG: + * HE-EHT TRIG: * 0 1xLTF+1.6us * 1 2xLTF+1.6us * 2 4xLTF+3.2us * 3 (does not occur) + * EHT MU: + * 0 2xLTF+0.8us + * 1 2xLTF+1.6us + * 2 4xLTF+0.8us + * 3 4xLTF+3.2us */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS) /* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS_V1 22 #define RATE_MCS_HE_TYPE_SU_V1 (0 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_EXT_SU_V1 BIT(RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_MU_V1 (2 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_TRIG_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) #define RATE_MCS_HE_TYPE_MSK_V1 (3 << RATE_MCS_HE_TYPE_POS_V1) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ #define RATE_MCS_DUP_POS_V1 24 #define RATE_MCS_DUP_MSK_V1 (3 << RATE_MCS_DUP_POS_V1) /* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ #define RATE_MCS_LDPC_POS_V1 27 #define RATE_MCS_LDPC_MSK_V1 BIT(RATE_MCS_LDPC_POS_V1) /* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ #define RATE_MCS_HE_106T_POS_V1 28 #define RATE_MCS_HE_106T_MSK_V1 BIT(RATE_MCS_HE_106T_POS_V1) /* Bit 30-31: (1) RTS, (2) CTS */ #define RATE_MCS_RTS_REQUIRED_POS (30) #define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS) #define RATE_MCS_CTS_REQUIRED_POS (31) #define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS) /* rate_n_flags bit field version 2 * * The 32-bit value has different layouts in the low 8 bits depending on the * format. There are three formats, HT, VHT and legacy (11abg, with subformats * for CCK and OFDM). 
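The version-2 layout described next packs the format, MCS, NSS and bandwidth into fixed fields, so decoding is plain mask-and-shift. The sketch below is standalone (rate_v2_decode is a made-up name); its masks mirror RATE_MCS_MOD_TYPE_MSK, RATE_MCS_CODE_MSK, RATE_MCS_NSS_MSK and RATE_MCS_CHAN_WIDTH_MSK defined below, and for HT the full 0-15 MCS index would additionally fold in the NSS bit the way RATE_HT_MCS_INDEX() does.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the version-2 fields defined below. */
#define RATE_MCS_MOD_TYPE_POS    8
#define RATE_MCS_MOD_TYPE_MSK    (0x7u << RATE_MCS_MOD_TYPE_POS)
#define RATE_MCS_CODE_MSK        0xfu
#define RATE_MCS_NSS_POS         4
#define RATE_MCS_NSS_MSK         (1u << RATE_MCS_NSS_POS)
#define RATE_MCS_CHAN_WIDTH_POS  11
#define RATE_MCS_CHAN_WIDTH_MSK  (0x7u << RATE_MCS_CHAN_WIDTH_POS)

static void rate_v2_decode(uint32_t r)
{
	static const char *mod[] = { "CCK", "legacy OFDM", "HT", "VHT", "HE", "EHT" };
	static const unsigned int bw[] = { 20, 40, 80, 160, 320 };
	unsigned int m = (r & RATE_MCS_MOD_TYPE_MSK) >> RATE_MCS_MOD_TYPE_POS;
	unsigned int w = (r & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS;

	printf("format=%s mcs=%u nss=%u bw=%u MHz\n",
	       m < 6 ? mod[m] : "reserved",
	       (unsigned)(r & RATE_MCS_CODE_MSK),
	       (unsigned)((r & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1,
	       w < 5 ? bw[w] : 0);
}

int main(void)
{
	/* EHT (format 5), MCS 11, 2 spatial streams, 320 MHz */
	rate_v2_decode((5u << 8) | 11u | (1u << 4) | (4u << 11));
	return 0;
}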
* */ /* Bits 10-8: rate format * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT) * (3) Very High-throughput (VHT) (4) High-efficiency (HE) * (5) Extremely High-throughput (EHT) */ #define RATE_MCS_MOD_TYPE_POS 8 #define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS) #define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS) /* * Legacy CCK rate format for bits 0:3: * * (0) 0xa - 1 Mbps * (1) 0x14 - 2 Mbps * (2) 0x37 - 5.5 Mbps * (3) 0x6e - 11 nbps * * Legacy OFDM rate format for bis 3:0: * * (0) 6 Mbps * (1) 9 Mbps * (2) 12 Mbps * (3) 18 Mbps * (4) 24 Mbps * (5) 36 Mbps * (6) 48 Mbps * (7) 54 Mbps * */ #define RATE_LEGACY_RATE_MSK 0x7 /* * HT, VHT, HE, EHT rate format for bits 3:0 * 3-0: MCS * */ #define RATE_HT_MCS_CODE_MSK 0x7 #define RATE_MCS_NSS_POS 4 #define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS) #define RATE_MCS_CODE_MSK 0xf #define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \ ((r) & RATE_HT_MCS_CODE_MSK)) /* Bits 7-5: reserved */ /* * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz */ -#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20_VAL 0 +#define RATE_MCS_CHAN_WIDTH_20 (RATE_MCS_CHAN_WIDTH_20_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40_VAL 1 +#define RATE_MCS_CHAN_WIDTH_40 (RATE_MCS_CHAN_WIDTH_40_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80_VAL 2 +#define RATE_MCS_CHAN_WIDTH_80 (RATE_MCS_CHAN_WIDTH_80_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160_VAL 3 +#define RATE_MCS_CHAN_WIDTH_160 (RATE_MCS_CHAN_WIDTH_160_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_320_VAL 4 +#define RATE_MCS_CHAN_WIDTH_320 (RATE_MCS_CHAN_WIDTH_320_VAL << RATE_MCS_CHAN_WIDTH_POS) /* Bit 15-14: Antenna selection: * Bit 14: Ant A active * Bit 15: Ant B active * * All relevant definitions are same as in v1 */ /* Bit 16 (1) LDPC enables, (0) LDPC disabled */ #define RATE_MCS_LDPC_POS 16 #define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) /* Bit 17: (0) SS, (1) SS*2 (same as v1) */ /* Bit 18: OFDM-HE dual carrier mode (same as v1) */ /* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */ /* * Bit 22-20: HE LTF type and guard interval * CCK: * 0 long preamble * 1 short preamble * HT/VHT: * 0 0.8us * 1 0.4us * HE (ext) SU: * 0 1xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us * 4 4xLTF+0.8us * HE MU: * 0 4xLTF+0.8us * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us * HE TRIG: * 0 1xLTF+1.6us * 1 2xLTF+1.6us * 2 4xLTF+3.2us * */ #define RATE_MCS_HE_GI_LTF_MSK (0x7 << RATE_MCS_HE_GI_LTF_POS) #define RATE_MCS_SGI_POS RATE_MCS_HE_GI_LTF_POS #define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) #define RATE_MCS_HE_SU_4_LTF 3 #define RATE_MCS_HE_SU_4_LTF_08_GI 4 /* Bit 24-23: HE type. 
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS 23 #define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) /* Bit 25: duplicate channel enabled * * if this bit is set, duplicate is according to BW (bits 11-13): * * CCK: 2x 20MHz * OFDM Legacy: N x 20Mhz, (N = BW \ 2 , either 2, 4, 8, 16) * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160) * */ #define RATE_MCS_DUP_POS 25 #define RATE_MCS_DUP_MSK (1 << RATE_MCS_DUP_POS) /* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ #define RATE_MCS_HE_106T_POS 26 #define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) /* Bit 27: EHT extra LTF: * instead of 1 LTF for SISO use 2 LTFs, * instead of 2 LTFs for NSTS=2 use 4 LTFs*/ #define RATE_MCS_EHT_EXTRA_LTF_POS 27 #define RATE_MCS_EHT_EXTRA_LTF_MSK (1 << RATE_MCS_EHT_EXTRA_LTF_POS) /* Bit 31-28: reserved */ /* Link Quality definitions */ /* # entries in rate scale table to support Tx retries */ #define LQ_MAX_RETRY_NUM 16 /* Link quality command flags bit fields */ /* Bit 0: (0) Don't use RTS (1) Use RTS */ #define LQ_FLAG_USE_RTS_POS 0 #define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS) /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ #define LQ_FLAG_COLOR_POS 1 #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) #define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ LQ_FLAG_COLOR_POS) #define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ LQ_FLAG_COLOR_MSK) #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) /* Bit 4-5: Tx RTS BW Signalling * (0) No RTS BW signalling * (1) Static BW signalling * (2) Dynamic BW signalling */ #define LQ_FLAG_RTS_BW_SIG_POS 4 #define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) #define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) #define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS) /* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection * Dyanmic BW selection allows Tx with narrower BW then requested in rates */ #define LQ_FLAG_DYNAMIC_BW_POS 6 #define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) /* Single Stream Tx Parameters (lq_cmd->ss_params) * Flags to control a smart FW decision about whether BFER/STBC/SISO will be * used for single stream Tx. */ /* Bit 0-1: Max STBC streams allowed. Can be 0-3. * (0) - No STBC allowed * (1) - 2x1 STBC allowed (HT/VHT) * (2) - 4x2 STBC allowed (HT/VHT) * (3) - 3x2 STBC allowed (HT only) * All our chips are at most 2 antennas so only (1) is valid for now. */ #define LQ_SS_STBC_ALLOWED_POS 0 #define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK) /* 2x1 STBC is allowed */ #define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS) /* Bit 2: Beamformer (VHT only) is allowed */ #define LQ_SS_BFER_ALLOWED_POS 2 #define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS) /* Bit 3: Force BFER or STBC for testing * If this is set: * If BFER is allowed then force the ucode to choose BFER else * If STBC is allowed then force the ucode to choose STBC over SISO */ #define LQ_SS_FORCE_POS 3 #define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS) /* Bit 31: ss_params field is valid. 
Used for FW backward compatibility * with other drivers which don't support the ss_params API yet */ #define LQ_SS_PARAMS_VALID_POS 31 #define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) /** * struct iwl_lq_cmd - link quality command * @sta_id: station to update * @reduced_tpc: reduced transmit power control value * @control: not used * @flags: combination of LQ_FLAG_* * @mimo_delim: the first SISO index in rs_table, which separates MIMO * and SISO rates * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). * Should be ANT_[ABC] * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] * @initial_rate_index: first index from rs_table per AC category * @agg_time_limit: aggregation max time threshold in usec/100, meaning * value of 100 is one usec. Range is 100 to 8000 * @agg_disable_start_th: try-count threshold for starting aggregation. * If a frame has higher try-count, it should not be selected for * starting an aggregation sequence. * @agg_frame_cnt_limit: max frame count in an aggregation. * 0: no limit * 1: no aggregation (one frame per aggregation) * 2 - 0x3f: maximal number of frames (up to 3f == 63) * @reserved2: reserved * @rs_table: array of rates for each TX try, each is rate_n_flags, * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP * @ss_params: single stream features. declare whether STBC or BFER are allowed. */ struct iwl_lq_cmd { u8 sta_id; u8 reduced_tpc; __le16 control; /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ u8 flags; u8 mimo_delim; u8 single_stream_ant_msk; u8 dual_stream_ant_msk; u8 initial_rate_index[AC_NUM]; /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ __le16 agg_time_limit; u8 agg_disable_start_th; u8 agg_frame_cnt_limit; __le32 reserved2; __le32 rs_table[LQ_MAX_RETRY_NUM]; __le32 ss_params; }; /* LINK_QUALITY_CMD_API_S_VER_1 */ u8 iwl_fw_rate_idx_to_plcp(int idx); u32 iwl_new_rate_from_v1(u32 rate_v1); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx); const char *iwl_rs_pretty_ant(u8 ant); const char *iwl_rs_pretty_bw(int bw); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); bool iwl_he_is_sgi(u32 rate_n_flags); #endif /* __iwl_fw_api_rs_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/rx.h b/sys/contrib/dev/iwlwifi/fw/api/rx.h index 9f20f1432963..c8ae4d8005c7 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/rx.h +++ b/sys/contrib/dev/iwlwifi/fw/api/rx.h @@ -1,864 +1,1027 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rx_h__ #define __iwl_fw_api_rx_h__ /* API for pre-9000 hardware */ #define IWL_RX_INFO_PHY_CNT 8 #define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 #define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff #define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 #define IWL_RX_INFO_ENERGY_ANT_A_POS 0 #define IWL_RX_INFO_ENERGY_ANT_B_POS 8 #define IWL_RX_INFO_ENERGY_ANT_C_POS 16 enum iwl_mac_context_info { MAC_CONTEXT_INFO_NONE, MAC_CONTEXT_INFO_GSCAN, }; /** * struct iwl_rx_phy_info - phy info * (REPLY_RX_PHY_CMD = 0xc0) * @non_cfg_phy_cnt: non configurable DSP phy data byte count * @cfg_phy_cnt: configurable DSP phy data byte count * @stat_id: configurable DSP phy data set ID * @reserved1: reserved * @system_timestamp: GP2 at on air rise * @timestamp: TSF at on air rise * @beacon_time_stamp: beacon at on-air rise * @phy_flags: general phy flags: band, 
modulation, ... * @channel: channel number * @non_cfg_phy: for various implementations of non_cfg_phy * @rate_n_flags: RATE_MCS_* * @byte_count: frame's byte-count * @frame_time: frame's time on the air, based on byte count and frame rate * calculation * @mac_active_msk: what MACs were active when the frame was received * @mac_context_info: additional info on the context in which the frame was * received as defined in &enum iwl_mac_context_info * * Before each Rx, the device sends this data. It contains PHY information * about the reception of the packet. */ struct iwl_rx_phy_info { u8 non_cfg_phy_cnt; u8 cfg_phy_cnt; u8 stat_id; u8 reserved1; __le32 system_timestamp; __le64 timestamp; __le32 beacon_time_stamp; __le16 phy_flags; __le16 channel; __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; __le32 rate_n_flags; __le32 byte_count; u8 mac_active_msk; u8 mac_context_info; __le16 frame_time; } __packed; /* * TCP offload Rx assist info * * bits 0:3 - reserved * bits 4:7 - MIC CRC length * bits 8:12 - MAC header length * bit 13 - Padding indication * bit 14 - A-AMSDU indication * bit 15 - Offload enabled */ enum iwl_csum_rx_assist_info { CSUM_RXA_RESERVED_MASK = 0x000f, CSUM_RXA_MICSIZE_MASK = 0x00f0, CSUM_RXA_HEADERLEN_MASK = 0x1f00, CSUM_RXA_PADD = BIT(13), CSUM_RXA_AMSDU = BIT(14), CSUM_RXA_ENA = BIT(15) }; /** * struct iwl_rx_mpdu_res_start - phy info * @byte_count: byte count of the frame * @assist: see &enum iwl_csum_rx_assist_info */ struct iwl_rx_mpdu_res_start { __le16 byte_count; __le16 assist; } __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ /** * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame */ enum iwl_rx_phy_flags { RX_RES_PHY_FLAGS_BAND_24 = BIT(0), RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), RX_RES_PHY_FLAGS_ANTENNA_POS = 4, RX_RES_PHY_FLAGS_AGG = BIT(7), RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), }; /** * enum iwl_mvm_rx_status - written by fw for each Rx packet * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked * in the driver. * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or * alg = CCM only. Checks replay attack for 11w frames. 
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension * algorithm * @RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: this frame is protected using * CMAC or GMAC * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift */ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CRC_OK = BIT(0), RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), RX_MPDU_RES_STATUS_ICV_OK = BIT(5), RX_MPDU_RES_STATUS_MIC_OK = BIT(6), RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC = (6 << 8), RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, }; /* 9000 series API */ enum iwl_rx_mpdu_mac_flags1 { IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03, IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0, /* shift should be 4, but the length is measured in 2-byte * words, so shifting only by 3 gives a byte result */ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, }; enum iwl_rx_mpdu_mac_flags2 { /* in 2-byte words */ IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f, IWL_RX_MPDU_MFLG2_PAD = 0x20, IWL_RX_MPDU_MFLG2_AMSDU = 0x40, }; enum iwl_rx_mpdu_amsdu_info { IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; #define RX_MPDU_BAND_POS 6 #define RX_MPDU_BAND_MASK 0xC0 #define BAND_IN_RX_STATUS(_val) \ (((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS) enum iwl_rx_l3_proto_values { IWL_RX_L3_TYPE_NONE, IWL_RX_L3_TYPE_IPV4, IWL_RX_L3_TYPE_IPV4_FRAG, IWL_RX_L3_TYPE_IPV6_FRAG, IWL_RX_L3_TYPE_IPV6, IWL_RX_L3_TYPE_IPV6_IN_IPV4, IWL_RX_L3_TYPE_ARP, IWL_RX_L3_TYPE_EAPOL, }; #define IWL_RX_L3_PROTO_POS 4 enum iwl_rx_l3l4_flags { IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), IWL_RX_L3L4_TCP_ACK = BIT(3), IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, }; enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_CRC_OK = BIT(0), IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), /* overlayed since IWL_UCODE_TLV_API_DEPRECATE_TTAK */ IWL_RX_MPDU_STATUS_REPLAY_ERROR = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, IWL_RX_MPDU_STATUS_SEC_UNKNOWN = 
IWL_RX_MPDU_STATUS_SEC_MASK, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, #if defined(__FreeBSD__) IWL_RX_MPDU_STATUS_SEC_ENC_ERR = 0x7 << 8, #endif IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22), IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000, }; #define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f enum iwl_rx_mpdu_reorder_data { IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff, IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000, IWL_RX_MPDU_REORDER_SN_SHIFT = 12, IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000, IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, }; enum iwl_rx_mpdu_phy_info { IWL_RX_MPDU_PHY_AMPDU = BIT(5), IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), /* short preamble is only for CCK, for non-CCK overridden by this */ IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7), IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), }; enum iwl_rx_mpdu_mac_info { IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f, IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, }; /* TSF overload low dword */ -enum iwl_rx_phy_data0 { +enum iwl_rx_phy_he_data0 { /* info type: HE any */ IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001, IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002, IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc, IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00, /* 1 bit reserved */ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000, IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000, IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000, /* 6 bits reserved */ IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000, }; +/* TSF overload low dword */ +enum iwl_rx_phy_eht_data0 { + /* info type: EHT any */ + IWL_RX_PHY_DATA0_EHT_VALIDATE = BIT(0), + IWL_RX_PHY_DATA0_EHT_UPLINK = BIT(1), + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK = 0x000000fc, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK = 0x00000f00, + IWL_RX_PHY_DATA0_EHT_PS160 = BIT(12), + IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK = 0x000fe000, + IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM = BIT(20), + IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK = 0x00600000, + IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG = BIT(23), + IWL_RX_PHY_DATA0_EHT_BW320_SLOT = BIT(24), + IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK = BIT(25), + IWL_RX_PHY_DATA0_EHT_PHY_VER = 0x1c000000, + /* 2 bits reserved */ + IWL_RX_PHY_DATA0_EHT_DELIM_EOF = BIT(31), +}; + enum iwl_rx_phy_info_type { IWL_RX_PHY_INFO_TYPE_NONE = 0, IWL_RX_PHY_INFO_TYPE_CCK = 1, IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2, IWL_RX_PHY_INFO_TYPE_HT = 3, IWL_RX_PHY_INFO_TYPE_VHT_SU = 4, IWL_RX_PHY_INFO_TYPE_VHT_MU = 5, IWL_RX_PHY_INFO_TYPE_HE_SU = 6, IWL_RX_PHY_INFO_TYPE_HE_MU = 7, IWL_RX_PHY_INFO_TYPE_HE_TB = 8, IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9, IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10, + IWL_RX_PHY_INFO_TYPE_EHT_MU = 11, + IWL_RX_PHY_INFO_TYPE_EHT_TB = 12, + IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT = 13, + IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT = 14, }; /* TSF overload high dword */ -enum iwl_rx_phy_data1 { +enum iwl_rx_phy_common_data1 { /* * check this first - if TSF overload is set, * see &enum iwl_rx_phy_info_type */ IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000, - /* info type: HT/VHT/HE any */ + /* info type: HT/VHT/HE/EHT any */ IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000, +}; +/* TSF overload high dword For HE rates*/ +enum iwl_rx_phy_he_data1 { /* info type: HE 
MU/MU-EXT */ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e, /* info type: HE any */ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0, IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100, /* trigger encoded */ IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00, /* info type: HE TB/TX-EXT */ IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001, IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e, }; +/* TSF overload high dword For EHT-MU/TB rates*/ +enum iwl_rx_phy_eht_data1 { + /* info type: EHT-MU */ + IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2 = 0x0000001f, + /* info type: EHT-TB */ + IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE = BIT(0), + IWL_RX_PHY_DATA1_EHT_TB_LOW_SS = 0x0000001e, + + /* info type: EHT any */ + /* number of EHT-LTF symbols 0 - 1 EHT-LTF, 1 - 2 EHT-LTFs, 2 - 4 EHT-LTFs, + * 3 - 6 EHT-LTFs, 4 - 8 EHT-LTFs */ + IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM = 0x000000e0, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0 = 0x00000100, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 0x0000fe00, +}; + /* goes into Metadata DW 7 */ -enum iwl_rx_phy_data2 { +enum iwl_rx_phy_he_data2 { /* info type: HE MU-EXT */ /* the a1/a2/... is what the PHY/firmware calls the values */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */ /* info type: HE TB-EXT */ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000, }; /* goes into Metadata DW 8 */ -enum iwl_rx_phy_data3 { +enum iwl_rx_phy_he_data3 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */ }; /* goes into Metadata DW 4 high 16 bits */ -enum iwl_rx_phy_data4 { +enum iwl_rx_phy_he_he_data4 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; +/* goes into Metadata DW 7 */ +enum iwl_rx_phy_eht_data2 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */ + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 0x07fc0000, + + /* info type: EHT-TB-EXT */ + IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff, +}; + +/* goes into Metadata DW 8 */ +enum iwl_rx_phy_eht_data3 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */ + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x000001ff, + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 0x0003fe00, + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 0x07fc0000, +}; + +/* goes into Metadata DW 4 */ +enum iwl_rx_phy_eht_data4 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */ + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 0x000001ff, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 0x0003fe00, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x000c0000, +}; + +/* goes into Metadata DW 16 */ +enum 
iwl_rx_phy_data5 { + /* info type: EHT any */ + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP = 0x00000003, + /* info type: EHT-TB */ + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1 = 0x0000003c, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2 = 0x000003c0, + /* info type: EHT-MU */ + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE = 0x0000007c, + IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR = 0x0003ff80, + IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA = 0x001c0000, + IWL_RX_PHY_DATA5_EHT_MU_SPATIAL_CONF_USR_FIELD = 0x0fe00000, +}; + /** * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor */ struct iwl_rx_mpdu_desc_v1 { /* DW7 - carries rss_hash only when rpa_en == 1 */ union { /** * @rss_hash: RSS hash value */ __le32 rss_hash; /** * @phy_data2: depends on info type (see @phy_data1) */ __le32 phy_data2; }; /* DW8 - carries filter_match only when rpa_en == 1 */ union { /** * @filter_match: filter match value */ __le32 filter_match; /** * @phy_data3: depends on info type (see @phy_data1) */ __le32 phy_data3; }; /* DW9 */ /** * @rate_n_flags: RX rate/flags encoding */ __le32 rate_n_flags; /* DW10 */ /** * @energy_a: energy chain A */ u8 energy_a; /** * @energy_b: energy chain B */ u8 energy_b; /** * @channel: channel number */ u8 channel; /** * @mac_context: MAC context mask */ u8 mac_context; /* DW11 */ /** * @gp2_on_air_rise: GP2 timer value on air rise (INA) */ __le32 gp2_on_air_rise; /* DW12 & DW13 */ union { /** * @tsf_on_air_rise: * TSF value on air rise (INA), only valid if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; struct { /** * @phy_data0: depends on info_type, see @phy_data1 */ __le32 phy_data0; /** * @phy_data1: valid only if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, - * see &enum iwl_rx_phy_data1. + * see &enum iwl_rx_phy_common_data1 or + * &enum iwl_rx_phy_he_data1 or + * &enum iwl_rx_phy_eht_data1. */ __le32 phy_data1; }; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_4 */ /** * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor */ struct iwl_rx_mpdu_desc_v3 { /* DW7 - carries filter_match only when rpa_en == 1 */ union { /** * @filter_match: filter match value */ __le32 filter_match; /** * @phy_data3: depends on info type (see @phy_data1) */ __le32 phy_data3; }; /* DW8 - carries rss_hash only when rpa_en == 1 */ union { /** * @rss_hash: RSS hash value */ __le32 rss_hash; /** * @phy_data2: depends on info type (see @phy_data1) */ __le32 phy_data2; }; /* DW9 */ /** * @partial_hash: 31:0 ip/tcp header hash * w/o some fields (such as IP SRC addr) */ __le32 partial_hash; /* DW10 */ /** * @raw_xsum: raw xsum value */ __be16 raw_xsum; /** * @reserved_xsum: reserved high bits in the raw checksum */ __le16 reserved_xsum; /* DW11 */ /** * @rate_n_flags: RX rate/flags encoding */ __le32 rate_n_flags; /* DW12 */ /** * @energy_a: energy chain A */ u8 energy_a; /** * @energy_b: energy chain B */ u8 energy_b; /** * @channel: channel number */ u8 channel; /** * @mac_context: MAC context mask */ u8 mac_context; /* DW13 */ /** * @gp2_on_air_rise: GP2 timer value on air rise (INA) */ __le32 gp2_on_air_rise; /* DW14 & DW15 */ union { /** * @tsf_on_air_rise: * TSF value on air rise (INA), only valid if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; struct { /** * @phy_data0: depends on info_type, see @phy_data1 */ __le32 phy_data0; /** * @phy_data1: valid only if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, * see &enum iwl_rx_phy_data1. */ __le32 phy_data1; }; }; - /* DW16 & DW17 */ + /* DW16 */ + /** + * @phy_data5: valid only if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, + * see &enum iwl_rx_phy_data5. 
+ */ + __le32 phy_data5; + /* DW17 */ /** * @reserved: reserved */ - __le32 reserved[2]; + __le32 reserved[1]; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, RX_MPDU_RES_START_API_S_VER_5 */ /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor */ struct iwl_rx_mpdu_desc { /* DW2 */ /** * @mpdu_len: MPDU length */ __le16 mpdu_len; /** * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1 */ u8 mac_flags1; /** * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2 */ u8 mac_flags2; /* DW3 */ /** * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info */ u8 amsdu_info; /** * @phy_info: &enum iwl_rx_mpdu_phy_info */ __le16 phy_info; /** * @mac_phy_idx: MAC/PHY index */ u8 mac_phy_idx; - /* DW4 - carries csum data only when rpa_en == 1 */ - /** - * @raw_csum: raw checksum (alledgedly unreliable) - */ - __le16 raw_csum; - + /* DW4 */ union { + struct { + /* carries csum data only when rpa_en == 1 */ + /** + * @raw_csum: raw checksum (alledgedly unreliable) + */ + __le16 raw_csum; + + union { + /** + * @l3l4_flags: &enum iwl_rx_l3l4_flags + */ + __le16 l3l4_flags; + + /** + * @phy_data4: depends on info type, see phy_data1 + */ + __le16 phy_data4; + }; + }; /** - * @l3l4_flags: &enum iwl_rx_l3l4_flags - */ - __le16 l3l4_flags; - - /** - * @phy_data4: depends on info type, see phy_data1 + * @phy_eht_data4: depends on info type, see phy_data1 */ - __le16 phy_data4; + __le32 phy_eht_data4; }; /* DW5 */ /** * @status: &enum iwl_rx_mpdu_status */ __le32 status; /* DW6 */ /** * @reorder_data: &enum iwl_rx_mpdu_reorder_data */ __le32 reorder_data; union { struct iwl_rx_mpdu_desc_v1 v1; struct iwl_rx_mpdu_desc_v3 v3; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, RX_MPDU_RES_START_API_S_VER_4, RX_MPDU_RES_START_API_S_VER_5 */ #define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) #define RX_NO_DATA_CHAIN_A_POS 0 #define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS) #define RX_NO_DATA_CHAIN_B_POS 8 #define RX_NO_DATA_CHAIN_B_MSK (0xff << RX_NO_DATA_CHAIN_B_POS) #define RX_NO_DATA_CHANNEL_POS 16 #define RX_NO_DATA_CHANNEL_MSK (0xff << RX_NO_DATA_CHANNEL_POS) #define RX_NO_DATA_INFO_TYPE_POS 0 #define RX_NO_DATA_INFO_TYPE_MSK (0xff << RX_NO_DATA_INFO_TYPE_POS) #define RX_NO_DATA_INFO_TYPE_NONE 0 #define RX_NO_DATA_INFO_TYPE_RX_ERR 1 #define RX_NO_DATA_INFO_TYPE_NDP 2 #define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3 -#define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4 +#define RX_NO_DATA_INFO_TYPE_TB_UNMATCHED 4 #define RX_NO_DATA_INFO_ERR_POS 8 #define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS) #define RX_NO_DATA_INFO_ERR_NONE 0 #define RX_NO_DATA_INFO_ERR_BAD_PLCP 1 #define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2 #define RX_NO_DATA_INFO_ERR_NO_DELIM 3 #define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4 +#define RX_NO_DATA_INFO_LOW_ENERGY 5 #define RX_NO_DATA_FRAME_TIME_POS 0 #define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS) #define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000 #define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000 +#define RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK 0x00f00000 + +/* content of OFDM_RX_VECTOR_USIG_A1_OUT */ +enum iwl_rx_usig_a1 { + IWL_RX_USIG_A1_ENHANCED_WIFI_VER_ID = 0x00000007, + IWL_RX_USIG_A1_BANDWIDTH = 0x00000038, + IWL_RX_USIG_A1_UL_FLAG = 0x00000040, + IWL_RX_USIG_A1_BSS_COLOR = 0x00001f80, + IWL_RX_USIG_A1_TXOP_DURATION = 0x000fe000, + IWL_RX_USIG_A1_DISREGARD = 0x01f00000, + IWL_RX_USIG_A1_VALIDATE = 0x02000000, + IWL_RX_USIG_A1_EHT_BW320_SLOT = 0x04000000, + IWL_RX_USIG_A1_EHT_TYPE = 0x18000000, + IWL_RX_USIG_A1_RDY = 0x80000000, +}; + +/* content of 
OFDM_RX_VECTOR_USIG_A2_EHT_OUT */ +enum iwl_rx_usig_a2_eht { + IWL_RX_USIG_A2_EHT_PPDU_TYPE = 0x00000003, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2 = 0x00000004, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL = 0x000000f8, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8 = 0x00000100, + IWL_RX_USIG_A2_EHT_SIG_MCS = 0x00000600, + IWL_RX_USIG_A2_EHT_SIG_SYM_NUM = 0x0000f800, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1 = 0x000f0000, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2 = 0x00f00000, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD = 0x1f000000, + IWL_RX_USIG_A2_EHT_CRC_OK = 0x40000000, + IWL_RX_USIG_A2_EHT_RDY = 0x80000000, +}; /** * struct iwl_rx_no_data - RX no data descriptor * @info: 7:0 frame type, 15:8 RX error type * @rssi: 7:0 energy chain-A, * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel * @on_air_rise_time: GP2 during on air rise * @fr_time: frame time * @rate: rate/mcs of frame - * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type + * @phy_info: &enum iwl_rx_phy_he_data0 or &enum iwl_rx_phy_eht_data0 + * based on &enum iwl_rx_phy_info_type * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT */ struct iwl_rx_no_data { __le32 info; __le32 rssi; __le32 on_air_rise_time; __le32 fr_time; __le32 rate; __le32 phy_info[2]; __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, - TX_NO_DATA_NTFY_API_S_VER_2 */ + RX_NO_DATA_NTFY_API_S_VER_2 */ + +/** + * struct iwl_rx_no_data_ver_3 - RX no data descriptor + * @info: 7:0 frame type, 15:8 RX error type + * @rssi: 7:0 energy chain-A, + * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel + * @on_air_rise_time: GP2 during on air rise + * @fr_time: frame time + * @rate: rate/mcs of frame + * @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type + * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. + * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT + * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT + * for EHT: OFDM_RX_VECTOR_USIG_A1_OUT, OFDM_RX_VECTOR_USIG_A2_EHT_OUT, + * OFDM_RX_VECTOR_EHT_OUT, OFDM_RX_VECTOR_EHT_USER_FIELD_OUT + */ +struct iwl_rx_no_data_ver_3 { + __le32 info; + __le32 rssi; + __le32 on_air_rise_time; + __le32 fr_time; + __le32 rate; + __le32 phy_info[2]; + __le32 rx_vec[4]; +} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, + RX_NO_DATA_NTFY_API_S_VER_2 + RX_NO_DATA_NTFY_API_S_VER_3 */ struct iwl_frame_release { u8 baid; u8 reserved; __le16 nssn; }; /** * enum iwl_bar_frame_release_sta_tid - STA/TID information for BAR release * @IWL_BAR_FRAME_RELEASE_TID_MASK: TID mask * @IWL_BAR_FRAME_RELEASE_STA_MASK: STA mask */ enum iwl_bar_frame_release_sta_tid { IWL_BAR_FRAME_RELEASE_TID_MASK = 0x0000000f, IWL_BAR_FRAME_RELEASE_STA_MASK = 0x000001f0, }; /** * enum iwl_bar_frame_release_ba_info - BA information for BAR release * @IWL_BAR_FRAME_RELEASE_NSSN_MASK: NSSN mask * @IWL_BAR_FRAME_RELEASE_SN_MASK: SN mask (ignored by driver) * @IWL_BAR_FRAME_RELEASE_BAID_MASK: BAID mask */ enum iwl_bar_frame_release_ba_info { IWL_BAR_FRAME_RELEASE_NSSN_MASK = 0x00000fff, IWL_BAR_FRAME_RELEASE_SN_MASK = 0x00fff000, IWL_BAR_FRAME_RELEASE_BAID_MASK = 0x3f000000, }; /** * struct iwl_bar_frame_release - frame release from BAR info * @sta_tid: STA & TID information, see &enum iwl_bar_frame_release_sta_tid. * @ba_info: BA information, see &enum iwl_bar_frame_release_ba_info. 
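Pulling the station, TID, BAID and NSSN back out of a BAR frame release is simple masking. The sketch below is standalone; BAR_REL_* are local shorthand for the IWL_BAR_FRAME_RELEASE_* masks in the two enums above, bar_release_decode is a made-up helper, and the real notification carries these fields as little-endian __le32.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the IWL_BAR_FRAME_RELEASE_* masks above. */
#define BAR_REL_TID_MASK   0x0000000fu
#define BAR_REL_STA_MASK   0x000001f0u
#define BAR_REL_NSSN_MASK  0x00000fffu
#define BAR_REL_BAID_MASK  0x3f000000u

static void bar_release_decode(uint32_t sta_tid, uint32_t ba_info)
{
	printf("sta=%u tid=%u baid=%u nssn=%u\n",
	       (unsigned)((sta_tid & BAR_REL_STA_MASK) >> 4),
	       (unsigned)(sta_tid & BAR_REL_TID_MASK),
	       (unsigned)((ba_info & BAR_REL_BAID_MASK) >> 24),
	       (unsigned)(ba_info & BAR_REL_NSSN_MASK));
}

int main(void)
{
	/* station 3, TID 5, BAID 2, NSSN 100 */
	bar_release_decode((3u << 4) | 5u, (2u << 24) | 100u);
	return 0;
}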
*/ struct iwl_bar_frame_release { __le32 sta_tid; __le32 ba_info; } __packed; /* RX_BAR_TO_FRAME_RELEASE_API_S_VER_1 */ enum iwl_rss_hash_func_en { IWL_RSS_HASH_TYPE_IPV4_TCP, IWL_RSS_HASH_TYPE_IPV4_UDP, IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, IWL_RSS_HASH_TYPE_IPV6_TCP, IWL_RSS_HASH_TYPE_IPV6_UDP, IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; #define IWL_RSS_HASH_KEY_CNT 10 #define IWL_RSS_INDIRECTION_TABLE_SIZE 128 #define IWL_RSS_ENABLE 1 /** * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration * * @flags: 1 - enable, 0 - disable * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en * @reserved: reserved * @secret_key: 320 bit input of random key configuration from driver * @indirection_table: indirection table */ struct iwl_rss_config_cmd { __le32 flags; u8 hash_mask; u8 reserved[3]; __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ #define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 #define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf /** * struct iwl_rxq_sync_cmd - RXQ notification trigger * * @flags: flags of the notification. bit 0:3 are the sender queue * @rxq_mask: rx queues to send the notification on * @count: number of bytes in payload, should be DWORD aligned * @payload: data to send to rx queues */ struct iwl_rxq_sync_cmd { __le32 flags; __le32 rxq_mask; __le32 count; #if defined(__linux__) u8 payload[]; #elif defined(__FreeBSD__) u8 payload[0]; #endif } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** * struct iwl_rxq_sync_notification - Notification triggered by RXQ * sync command * * @count: number of bytes in payload * @payload: data to send to rx queues */ struct iwl_rxq_sync_notification { __le32 count; #if defined(__linux__) u8 payload[]; #elif defined(__FreeBSD__) u8 payload[0]; #endif } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** * enum iwl_mvm_pm_event - type of station PM event * @IWL_MVM_PM_EVENT_AWAKE: station woke up * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll */ enum iwl_mvm_pm_event { IWL_MVM_PM_EVENT_AWAKE, IWL_MVM_PM_EVENT_ASLEEP, IWL_MVM_PM_EVENT_UAPSD, IWL_MVM_PM_EVENT_PS_POLL, }; /* PEER_PM_NTFY_API_E_VER_1 */ /** * struct iwl_mvm_pm_state_notification - station PM state notification * @sta_id: station ID of the station changing state * @type: the new powersave state, see &enum iwl_mvm_pm_event */ struct iwl_mvm_pm_state_notification { u8 sta_id; u8 type; /* private: */ __le16 reserved; } __packed; /* PEER_PM_NTFY_API_S_VER_1 */ #define BA_WINDOW_STREAMS_MAX 16 #define BA_WINDOW_STATUS_TID_MSK 0x000F #define BA_WINDOW_STATUS_STA_ID_POS 4 #define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 #define BA_WINDOW_STATUS_VALID_MSK BIT(9) /** * struct iwl_ba_window_status_notif - reordering window's status notification * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid * @start_seq_num: the start sequence number of the bitmap * @mpdu_rx_count: the number of received MPDUs since entering D0i3 */ struct iwl_ba_window_status_notif { __le64 bitmap[BA_WINDOW_STREAMS_MAX]; __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ /** * struct iwl_rfh_queue_config - RX queue configuration * @q_num: Q num * @enable: enable queue * @reserved: alignment * 
@urbd_stts_wrptr: DMA address of urbd_stts_wrptr * @fr_bd_cb: DMA address of freeRB table * @ur_bd_cb: DMA address of used RB table * @fr_bd_wid: Initial index of the free table */ struct iwl_rfh_queue_data { u8 q_num; u8 enable; __le16 reserved; __le64 urbd_stts_wrptr; __le64 fr_bd_cb; __le64 ur_bd_cb; __le32 fr_bd_wid; } __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */ /** * struct iwl_rfh_queue_config - RX queue configuration * @num_queues: number of queues configured * @reserved: alignment * @data: DMA addresses per-queue */ struct iwl_rfh_queue_config { u8 num_queues; u8 reserved[3]; #if defined(__linux__) struct iwl_rfh_queue_data data[]; #elif defined(__FreeBSD__) struct iwl_rfh_queue_data data[0]; #endif } __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */ #endif /* __iwl_fw_api_rx_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/scan.h b/sys/contrib/dev/iwlwifi/fw/api/scan.h index 5413087ae909..93078f8cc08c 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/scan.h +++ b/sys/contrib/dev/iwlwifi/fw/api/scan.h @@ -1,1237 +1,1261 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_scan_h__ #define __iwl_fw_api_scan_h__ /* Scan Commands, Responses, Notifications */ +/** + * enum iwl_scan_subcmd_ids - scan commands + */ +enum iwl_scan_subcmd_ids { + /** + * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info + */ + OFFLOAD_MATCH_INFO_NOTIF = 0xFC, +}; + /* Max number of IEs for direct SSID scans in a command */ #define PROBE_OPTION_MAX 20 #define SCAN_SHORT_SSID_MAX_SIZE 8 #define SCAN_BSSID_MAX_SIZE 16 /** * struct iwl_ssid_ie - directed scan network information element * * Up to 20 of these may appear in REPLY_SCAN_CMD, * selected by "type" bit field in struct iwl_scan_channel; * each channel may select different ssids from among the 20 entries. * SSID IEs get transmitted in reverse order of entry. 
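For illustration, filling one directed-scan entry amounts to writing a plain SSID information element. The sketch below is standalone and only mirrors struct iwl_ssid_ie with fixed-width types: struct ssid_ie, fill_direct_ssid and MAX_SSID_LEN are made-up names, and element ID 0 is the standard SSID element ID.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32 /* mirrors IEEE80211_MAX_SSID_LEN */

/* Mirror of struct iwl_ssid_ie, for illustration only. */
struct ssid_ie {
	uint8_t id;
	uint8_t len;
	uint8_t ssid[MAX_SSID_LEN];
};

static void fill_direct_ssid(struct ssid_ie *ie, const char *ssid)
{
	ie->id = 0;                      /* element ID 0 == SSID */
	ie->len = (uint8_t)strlen(ssid);
	memset(ie->ssid, 0, sizeof(ie->ssid));
	memcpy(ie->ssid, ssid, ie->len);
}

int main(void)
{
	struct ssid_ie ie;

	fill_direct_ssid(&ie, "example-net");
	printf("id=%u len=%u ssid=%.*s\n", ie.id, ie.len, ie.len, ie.ssid);
	return 0;
}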
* * @id: element ID * @len: element length * @ssid: element (SSID) data */ struct iwl_ssid_ie { u8 id; u8 len; u8 ssid[IEEE80211_MAX_SSID_LEN]; } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ /* scan offload */ #define IWL_SCAN_MAX_BLACKLIST_LEN 64 #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 #define IWL_SCAN_MAX_PROFILES 11 #define IWL_SCAN_MAX_PROFILES_V2 8 #define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 #define SCAN_NUM_BAND_PROBE_DATA_V_1 2 #define SCAN_NUM_BAND_PROBE_DATA_V_2 3 /* Default watchdog (in MS) for scheduled scan iteration */ #define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) #define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) #define CAN_ABORT_STATUS 1 #define IWL_FULL_SCAN_MULTIPLIER 5 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3 #define IWL_MAX_SCHED_SCAN_PLANS 2 enum scan_framework_client { SCAN_CLIENT_SCHED_SCAN = BIT(0), SCAN_CLIENT_NETDETECT = BIT(1), SCAN_CLIENT_ASSET_TRACKING = BIT(2), }; /** * struct iwl_scan_offload_blocklist - SCAN_OFFLOAD_BLACKLIST_S * @ssid: MAC address to filter out * @reported_rssi: AP rssi reported to the host * @client_bitmap: clients ignore this entry - enum scan_framework_client */ struct iwl_scan_offload_blocklist { u8 ssid[ETH_ALEN]; u8 reported_rssi; u8 client_bitmap; } __packed; enum iwl_scan_offload_network_type { IWL_NETWORK_TYPE_BSS = 1, IWL_NETWORK_TYPE_IBSS = 2, IWL_NETWORK_TYPE_ANY = 3, }; enum iwl_scan_offload_band_selection { IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, }; enum iwl_scan_offload_auth_alg { IWL_AUTH_ALGO_UNSUPPORTED = 0x00, IWL_AUTH_ALGO_NONE = 0x01, IWL_AUTH_ALGO_PSK = 0x02, IWL_AUTH_ALGO_8021X = 0x04, IWL_AUTH_ALGO_SAE = 0x08, IWL_AUTH_ALGO_8021X_SHA384 = 0x10, IWL_AUTH_ALGO_OWE = 0x20, }; /** * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S * @ssid_index: index to ssid list in fixed part * @unicast_cipher: encryption algorithm to match - bitmap * @auth_alg: authentication algorithm to match - bitmap * @network_type: enum iwl_scan_offload_network_type * @band_selection: enum iwl_scan_offload_band_selection * @client_bitmap: clients waiting for match - enum scan_framework_client * @reserved: reserved */ struct iwl_scan_offload_profile { u8 ssid_index; u8 unicast_cipher; u8 auth_alg; u8 network_type; u8 band_selection; u8 client_bitmap; u8 reserved[2]; } __packed; /** * struct iwl_scan_offload_profile_cfg_data * @blocklist_len: length of blocklist * @num_profiles: num of profiles in the list * @match_notify: clients waiting for match found notification * @pass_match: clients waiting for the results * @active_clients: active clients bitmap - enum scan_framework_client * @any_beacon_notify: clients waiting for match notification without match * @reserved: reserved */ struct iwl_scan_offload_profile_cfg_data { u8 blocklist_len; u8 num_profiles; u8 match_notify; u8 pass_match; u8 active_clients; u8 any_beacon_notify; u8 reserved[2]; } __packed; /** * struct iwl_scan_offload_profile_cfg * @profiles: profiles to search for match * @data: the rest of the data for profile_cfg */ struct iwl_scan_offload_profile_cfg_v1 { struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; struct iwl_scan_offload_profile_cfg_data data; } __packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1-2*/ /** * struct iwl_scan_offload_profile_cfg * @profiles: profiles to search for match * @data: the rest of the data for profile_cfg */ struct iwl_scan_offload_profile_cfg { struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES_V2]; struct iwl_scan_offload_profile_cfg_data data; } 
__packed; /* SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_3*/ /** * struct iwl_scan_schedule_lmac - schedule of scan offload * @delay: delay between iterations, in seconds. * @iterations: num of scan iterations * @full_scan_mul: number of partial scans before each full scan */ struct iwl_scan_schedule_lmac { __le16 delay; u8 iterations; u8 full_scan_mul; } __packed; /* SCAN_SCHEDULE_API_S */ enum iwl_scan_offload_complete_status { IWL_SCAN_OFFLOAD_COMPLETED = 1, IWL_SCAN_OFFLOAD_ABORTED = 2, }; enum iwl_scan_ebs_status { IWL_SCAN_EBS_SUCCESS, IWL_SCAN_EBS_FAILED, IWL_SCAN_EBS_CHAN_NOT_FOUND, IWL_SCAN_EBS_INACTIVE, }; /** * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S * @tx_flags: combination of TX_CMD_FLG_* * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @sta_id: index of destination station in FW station table * @reserved: for alignment and future use */ struct iwl_scan_req_tx_cmd { __le32 tx_flags; __le32 rate_n_flags; u8 sta_id; u8 reserved[3]; } __packed; enum iwl_scan_channel_flags_lmac { IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), }; /** * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 * @flags: bits 1-20: directed scan to i'th ssid * other bits &enum iwl_scan_channel_flags_lmac * @channel_num: channel number 1-13 etc * @iter_count: scan iteration on this channel * @iter_interval: interval in seconds between iterations on one channel */ struct iwl_scan_channel_cfg_lmac { __le32 flags; __le16 channel_num; __le16 iter_count; __le32 iter_interval; } __packed; /** * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 * @offset: offset in the data block * @len: length of the segment */ struct iwl_scan_probe_segment { __le16 offset; __le16 len; } __packed; /** * struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe * @buf: raw data block */ struct iwl_scan_probe_req_v1 { struct iwl_scan_probe_segment mac_header; struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_1]; struct iwl_scan_probe_segment common_data; u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; /** * struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe * @buf: raw data block */ struct iwl_scan_probe_req { struct iwl_scan_probe_segment mac_header; struct iwl_scan_probe_segment band_data[SCAN_NUM_BAND_PROBE_DATA_V_2]; struct iwl_scan_probe_segment common_data; u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; enum iwl_scan_channel_flags { IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), IWL_SCAN_CHANNEL_FLAG_EBS_FRAG = BIT(3), IWL_SCAN_CHANNEL_FLAG_FORCE_EBS = BIT(4), IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER = BIT(5), IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6), }; /** * struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S * @flags: enum iwl_scan_channel_flags * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is * involved. * 1 - EBS is disabled. * 2 - every second scan will be full scan(and so on). 
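As an example of how the two fields described above combine, the values below enable EBS with the "accurate" and "cache add" options and ask for a full scan every second iteration. The CH_FLAG_* macros are local copies of the corresponding IWL_SCAN_CHANNEL_FLAG_* bits above, and this particular combination is illustrative rather than the driver's actual tuning.

#include <stdint.h>
#include <stdio.h>

/* Local copies of bits from enum iwl_scan_channel_flags above. */
#define CH_FLAG_EBS           (1u << 0)
#define CH_FLAG_EBS_ACCURATE  (1u << 1)
#define CH_FLAG_CACHE_ADD     (1u << 2)

int main(void)
{
	/* flags/non_ebs_ratio as they would go into struct iwl_scan_channel_opt
	 * (the command itself stores both as __le16) */
	uint16_t flags = CH_FLAG_EBS | CH_FLAG_EBS_ACCURATE | CH_FLAG_CACHE_ADD;
	uint16_t non_ebs_ratio = 2; /* every second iteration is a full scan */

	printf("channel_opt: flags=0x%04x non_ebs_ratio=%u\n", flags, non_ebs_ratio);
	return 0;
}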
*/ struct iwl_scan_channel_opt { __le16 flags; __le16 non_ebs_ratio; } __packed; /** * enum iwl_mvm_lmac_scan_flags - LMAC scan flags * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses * without filtering. * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * and DS parameter set IEs into probe requests. * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels * 1, 6 and 11. * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches */ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7), IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), }; enum iwl_scan_priority { IWL_SCAN_PRIORITY_LOW, IWL_SCAN_PRIORITY_MEDIUM, IWL_SCAN_PRIORITY_HIGH, }; enum iwl_scan_priority_ext { IWL_SCAN_PRIORITY_EXT_0_LOWEST, IWL_SCAN_PRIORITY_EXT_1, IWL_SCAN_PRIORITY_EXT_2, IWL_SCAN_PRIORITY_EXT_3, IWL_SCAN_PRIORITY_EXT_4, IWL_SCAN_PRIORITY_EXT_5, IWL_SCAN_PRIORITY_EXT_6, IWL_SCAN_PRIORITY_EXT_7_HIGHEST, }; /** * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 * @reserved1: for alignment and future use * @n_channels: num of channels to scan * @active_dwell: dwell time for active channels * @passive_dwell: dwell time for passive channels * @fragmented_dwell: dwell time for fragmented passive scan * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases) * @reserved2: for alignment and future use * @rx_chain_select: PHY_RX_CHAIN_* flags * @scan_flags: &enum iwl_mvm_lmac_scan_flags * @max_out_time: max time (in TU) to be out of associated channel * @suspend_time: pause scan this long (TUs) when returning to service channel * @flags: RXON flags * @filter_flags: RXON filter * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz * @direct_scan: list of SSIDs for directed active scan * @scan_prio: enum iwl_scan_priority * @iter_num: number of scan iterations * @delay: delay in seconds before first iteration * @schedule: two scheduling plans. The first one is finite, the second one can * be infinite. * @channel_opt: channel optimization options, for full and partial scan * @data: channel configuration and probe request packet. 
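Because the command ends in a flexible data[] block, a caller has to size the buffer itself. The fragment below is only a sketch of that arithmetic, assuming the structures in this header are in scope and that the per-channel configs are followed by the probe request template, as the field description suggests; lmac_scan_cmd_size is a made-up helper and <stddef.h> provides size_t.

/* Sketch only: fixed part, then n_channels channel configs, then the
 * probe request template, all contiguous in data[]. */
static size_t lmac_scan_cmd_size(unsigned int n_channels)
{
	return sizeof(struct iwl_scan_req_lmac) +
	       n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
	       sizeof(struct iwl_scan_probe_req_v1);
}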
*/ struct iwl_scan_req_lmac { /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ __le32 reserved1; u8 n_channels; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; u8 extended_dwell; u8 reserved2; __le16 rx_chain_select; __le32 scan_flags; __le32 max_out_time; __le32 suspend_time; /* RX_ON_FLAGS_API_S_VER_1 */ __le32 flags; __le32 filter_flags; struct iwl_scan_req_tx_cmd tx_cmd[2]; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 scan_prio; /* SCAN_REQ_PERIODIC_PARAMS_API_S */ __le32 iter_num; __le32 delay; struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; struct iwl_scan_channel_opt channel_opt[2]; u8 data[]; } __packed; /** * struct iwl_scan_results_notif - scan results for one channel - * SCAN_RESULT_NTF_API_S_VER_3 * @channel: which channel the results are from * @band: 0 for 5.2 GHz, 1 for 2.4 GHz * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request * @num_probe_not_sent: # of request that weren't sent due to not enough time * @duration: duration spent in channel, in usecs */ struct iwl_scan_results_notif { u8 channel; u8 band; u8 probe_status; u8 num_probe_not_sent; __le32 duration; } __packed; /** * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) * SCAN_COMPLETE_NTF_API_S_VER_3 * @scanned_channels: number of channels scanned (and number of valid results) * @status: one of SCAN_COMP_STATUS_* * @bt_status: BT on/off status * @last_channel: last channel that was scanned * @tsf_low: TSF timer (lower half) in usecs * @tsf_high: TSF timer (higher half) in usecs * @results: an array of scan results, only "scanned_channels" of them are valid */ struct iwl_lmac_scan_complete_notif { u8 scanned_channels; u8 status; u8 bt_status; u8 last_channel; __le32 tsf_low; __le32 tsf_high; struct iwl_scan_results_notif results[]; } __packed; /** * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 * @last_schedule_line: last schedule line executed (fast or regular) * @last_schedule_iteration: last scan iteration executed before scan abort * @status: &enum iwl_scan_offload_complete_status * @ebs_status: EBS success status &enum iwl_scan_ebs_status * @time_after_last_iter: time in seconds elapsed after last iteration * @reserved: reserved */ struct iwl_periodic_scan_complete { u8 last_schedule_line; u8 last_schedule_iteration; u8 status; u8 ebs_status; __le32 time_after_last_iter; __le32 reserved; } __packed; /* UMAC Scan API */ /* The maximum of either of these cannot exceed 8, because we use an * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
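 *
 * An illustrative compile-time guard for that limit (not present in the
 * driver sources) could be written as
 *
 *	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > 8 || IWL_MVM_MAX_LMAC_SCANS > 8);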
*/ #define IWL_MVM_MAX_UMAC_SCANS 4 #define IWL_MVM_MAX_LMAC_SCANS 1 enum scan_config_flags { SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), /* Bits 26-31 are for num of channels in channel_array */ #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) }; enum scan_config_rates { /* OFDM basic rates */ SCAN_CONFIG_RATE_6M = BIT(0), SCAN_CONFIG_RATE_9M = BIT(1), SCAN_CONFIG_RATE_12M = BIT(2), SCAN_CONFIG_RATE_18M = BIT(3), SCAN_CONFIG_RATE_24M = BIT(4), SCAN_CONFIG_RATE_36M = BIT(5), SCAN_CONFIG_RATE_48M = BIT(6), SCAN_CONFIG_RATE_54M = BIT(7), /* CCK basic rates */ SCAN_CONFIG_RATE_1M = BIT(8), SCAN_CONFIG_RATE_2M = BIT(9), SCAN_CONFIG_RATE_5M = BIT(10), SCAN_CONFIG_RATE_11M = BIT(11), /* Bits 16-27 are for supported rates */ #define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) }; enum iwl_channel_flags { IWL_CHANNEL_FLAG_EBS = BIT(0), IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), }; enum iwl_uhb_chan_cfg_flags { IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES = BIT(24), IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN = BIT(25), IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE = BIT(26), }; /** * struct iwl_scan_dwell * @active: default dwell time for active scan * @passive: default dwell time for passive scan * @fragmented: default dwell time for fragmented scan * @extended: default dwell time for channels 1, 6 and 11 */ struct iwl_scan_dwell { u8 active; u8 passive; u8 fragmented; u8 extended; } __packed; /** * struct iwl_scan_config_v1 - scan configuration command * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions * @legacy_rates: default legacy rates - enum scan_config_rates * @out_of_channel_time: default max out of serving channel time * @suspend_time: default max suspend time * @dwell: dwells for the scan * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags * scan_config_channel_flag * @channel_array: default supported channels */ struct iwl_scan_config_v1 { __le32 flags; __le32 tx_chains; __le32 rx_chains; __le32 legacy_rates; __le32 out_of_channel_time; __le32 suspend_time; struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 #define SCAN_LB_LMAC_IDX 0 #define SCAN_HB_LMAC_IDX 1 /** * struct iwl_scan_config_v2 - scan configuration command * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions * @legacy_rates: default legacy 
rates - enum scan_config_rates * @out_of_channel_time: default max out of serving channel time * @suspend_time: default max suspend time * @dwell: dwells for the scan * @mac_addr: default mac address to be used in probes * @bcast_sta_id: the index of the station in the fw * @channel_flags: default channel flags - enum iwl_channel_flags * scan_config_channel_flag * @channel_array: default supported channels */ struct iwl_scan_config_v2 { __le32 flags; __le32 tx_chains; __le32 rx_chains; __le32 legacy_rates; __le32 out_of_channel_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; struct iwl_scan_dwell dwell; u8 mac_addr[ETH_ALEN]; u8 bcast_sta_id; u8 channel_flags; u8 channel_array[]; } __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */ /** * struct iwl_scan_config - scan configuration command * @enable_cam_mode: whether to enable CAM mode. * @enable_promiscouos_mode: whether to enable promiscuous mode * @bcast_sta_id: the index of the station in the fw. Deprecated starting with * API version 5. * @reserved: reserved * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions */ struct iwl_scan_config { u8 enable_cam_mode; u8 enable_promiscouos_mode; u8 bcast_sta_id; u8 reserved; __le32 tx_chains; __le32 rx_chains; } __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */ /** * enum iwl_umac_scan_flags - UMAC scan flags * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request * can be preempted by other scan requests with higher priority. * The low priority scan will be resumed when the higher priority scan is * completed. * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver * when scan starts. */ enum iwl_umac_scan_flags { IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), }; enum iwl_umac_scan_uid_offsets { IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, }; enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), /* Extended dwell is obsolete when adaptive dwell is used, making this * bit reusable. Hence, probe request defer is used only when adaptive * dwell is supported. */ IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME = BIT(14), IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE = BIT(15), }; /** * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2 * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete * notification per channel or not. * @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel * reorder optimization or not.
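 *
 * As an illustrative sketch only (not taken from the driver sources, with
 * "req" standing for a hypothetical &struct iwl_scan_req_umac in its v8/v9
 * layout), a request that wants a completion notification after each channel
 * could set
 *
 *	req->v9.general_flags2 = IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL;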
*/ enum iwl_umac_scan_general_flags2 { IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1), }; /** * enum iwl_umac_scan_general_flags_v2 - UMAC scan general flags version 2 * * The FW flags were reordered and hence the driver introduced version 2 * * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons * during scan iterations * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification * on every iteration instead of only once after the last iteration * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1 * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2 * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH: does this scan check for profile matching * @IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX * @IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell * for active channel * @IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests * @IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start * @IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs * @IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels scanned * as passive * @IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of 2.4GHz and * 5.2Ghz bands scan, trigger scan on 6GHz band to discover * the reported collocated APs * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN: at the end of 2.4GHz and 5GHz * bands scan, if no APs were discovered, allow the scan to continue and scan * 6GHz PSC channels in order to discover country information. * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is * activated over 6GHz PSC channels, filter in beacons and probe responses. * @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests at a minimum * rate of 5.5 Mbps, filter in broadcast probe responses and set the max * channel time indication field in the FILS request parameters element * (if included by the driver in the probe request IEs). */ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0), IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL = BIT(1), IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE = BIT(2), IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 = BIT(3), IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 = BIT(4), IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH = BIT(5), IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS = BIT(6), IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL = BIT(7), IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE = BIT(8), IWL_UMAC_SCAN_GEN_FLAGS_V2_NTF_START = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE = BIT(11), IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14), IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15), }; /** * enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2 * * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling * should be aware of a P2P GO operation on the 2GHz band. * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling * should be aware of a P2P GO operation on the 5GHz or 6GHz band.
+ * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT: don't toggle between + * valid antennas, and use the same antenna as in previous scan */ enum iwl_umac_scan_general_params_flags2 { IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0), IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1), + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT = BIT(2), }; /** * struct iwl_scan_channel_cfg_umac * @flags: bitmap - 0-19: directed scan to i'th ssid. * @channel_num: channel number 1-13 etc. * @band: band of channel: 0 for 2GHz, 1 for 5GHz * @iter_count: repetition count for the channel. * @iter_interval: interval between two scan iterations on one channel. */ struct iwl_scan_channel_cfg_umac { +#define IWL_CHAN_CFG_FLAGS_BAND_POS 30 __le32 flags; - /* Both versions are of the same size, so use a union without adjusting + + /* All versions are of the same size, so use a union without adjusting * the command size later */ union { struct { u8 channel_num; u8 iter_count; __le16 iter_interval; } v1; /* SCAN_CHANNEL_CONFIG_API_S_VER_1 */ struct { u8 channel_num; u8 band; u8 iter_count; u8 iter_interval; } v2; /* SCAN_CHANNEL_CONFIG_API_S_VER_2 * SCAN_CHANNEL_CONFIG_API_S_VER_3 * SCAN_CHANNEL_CONFIG_API_S_VER_4 */ + struct { + u8 channel_num; + u8 psd_20; + u8 iter_count; + u8 iter_interval; + } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ }; } __packed; /** * struct iwl_scan_umac_schedule * @interval: interval in seconds between scan iterations * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop * @reserved: for alignment and future use */ struct iwl_scan_umac_schedule { __le16 interval; u8 iter_count; u8 reserved; } __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ struct iwl_scan_req_umac_tail_v1 { /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; /* SCAN_PROBE_PARAMS_API_S_VER_1 */ struct iwl_scan_probe_req_v1 preq; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; } __packed; /** * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command * parameters following channels configuration array. * @schedule: two scheduling plans. 
* @delay: delay in TUs before starting the first scan iteration * @reserved: for future use and alignment * @preq: probe request with IEs blocks * @direct_scan: list of SSIDs for directed active scan */ struct iwl_scan_req_umac_tail_v2 { /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; /* SCAN_PROBE_PARAMS_API_S_VER_2 */ struct iwl_scan_probe_req preq; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; } __packed; /** * struct iwl_scan_umac_chan_param * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @reserved: for future use and alignment */ struct iwl_scan_umac_chan_param { u8 flags; u8 count; __le16 reserved; } __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */ /** * struct iwl_scan_req_umac * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan per LMAC * @passive_dwell: dwell time for passive scan per LMAC * @fragmented_dwell: dwell time for fragmented passive scan * @adwell_default_n_aps: for adaptive dwell the default number of APs * per channel * @adwell_default_n_aps_social: for adaptive dwell the default * number of APs per social (1,6,11) channel * @general_flags2: &enum iwl_umac_scan_general_flags2 * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs * @scan_priority: scan internal prioritization &enum iwl_scan_priority * @num_of_fragments: Number of fragments needed for full coverage per band. * Relevant only for fragmented scan. 
* @channel: &struct iwl_scan_umac_chan_param * @reserved: for future use and alignment * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; __le16 general_flags; u8 reserved; u8 scan_start_mac_id; union { struct { u8 extended_dwell; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { u8 extended_dwell; u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ struct { u8 active_dwell; u8 passive_dwell; u8 fragmented_dwell; u8 adwell_default_n_aps; u8 adwell_default_n_aps_social; u8 reserved3; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; struct iwl_scan_umac_chan_param channel; u8 data[]; } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ struct { u8 active_dwell[SCAN_TWO_LMACS]; u8 reserved2; u8 adwell_default_n_aps; u8 adwell_default_n_aps_social; u8 general_flags2; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; struct iwl_scan_umac_chan_param channel; u8 data[]; } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */ struct { u8 active_dwell[SCAN_TWO_LMACS]; u8 adwell_default_hb_n_aps; u8 adwell_default_lb_n_aps; u8 adwell_default_n_aps_social; u8 general_flags2; __le16 adwell_max_budget; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; struct iwl_scan_umac_chan_param channel; u8 data[]; } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */ }; } __packed; #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) #define IWL_SCAN_REQ_UMAC_SIZE_V7 48 #define IWL_SCAN_REQ_UMAC_SIZE_V6 44 #define IWL_SCAN_REQ_UMAC_SIZE_V1 36 /** * struct iwl_scan_probe_params_v3 * @preq: scan probe request params * @ssid_num: number of valid SSIDs in direct scan array * @short_ssid_num: number of valid short SSIDs in short ssid array * @bssid_num: number of valid bssid in bssids array * @reserved: reserved * @direct_scan: list of ssids * @short_ssid: array of short ssids * @bssid_array: array of bssids */ struct iwl_scan_probe_params_v3 { struct iwl_scan_probe_req preq; u8 ssid_num; u8 short_ssid_num; u8 bssid_num; u8 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */ /** * struct iwl_scan_probe_params_v4 * @preq: scan probe request params * @short_ssid_num: number of valid short SSIDs in short ssid array * @bssid_num: number of valid bssid in bssids array * @reserved: reserved * @direct_scan: list of ssids * @short_ssid: array of short ssids * @bssid_array: array of bssids */ struct iwl_scan_probe_params_v4 { struct iwl_scan_probe_req preq; u8 short_ssid_num; u8 bssid_num; __le16 reserved; struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; __le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE]; u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN]; } 
__packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */ #define SCAN_MAX_NUM_CHANS_V3 67 /** * struct iwl_scan_channel_params_v4 * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @num_of_aps_override: override the number of APs the FW uses to calculate * dwell time when adaptive dwell is used * @reserved: for future use and alignment * @channel_config: array of explicit channel configurations * for 2.4Ghz and 5.2Ghz bands * @adwell_ch_override_bitmap: when using adaptive dwell, override the number * of APs value with &num_of_aps_override for the channel. * To cast channel to index, use &iwl_mvm_scan_ch_and_band_to_idx */ struct iwl_scan_channel_params_v4 { u8 flags; u8 count; u8 num_of_aps_override; u8 reserved; struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3]; u8 adwell_ch_override_bitmap[16]; } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_4 also SCAN_CHANNEL_PARAMS_API_S_VER_5 */ /** - * struct iwl_scan_channel_params_v6 + * struct iwl_scan_channel_params_v7 * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @n_aps_override: override the number of APs the FW uses to calculate dwell * time when adaptive dwell is used. * Channel k will use n_aps_override[i] when BIT(20 + i) is set in * channel_config[k].flags * @channel_config: array of explicit channel configurations * for 2.4Ghz and 5.2Ghz bands */ -struct iwl_scan_channel_params_v6 { +struct iwl_scan_channel_params_v7 { u8 flags; u8 count; u8 n_aps_override[2]; struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3]; } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */ /** * struct iwl_scan_general_params_v11 * @flags: &enum iwl_umac_scan_general_flags_v2 * @reserved: reserved for future - * @scan_start_mac_id: report the scan start TSF time according to this mac TSF + * @scan_start_mac_or_link_id: report the scan start TSF time according to this + * mac (up to version 11) or link (starting with version 12) TSF * @active_dwell: dwell time for active scan per LMAC * @adwell_default_2g: adaptive dwell default number of APs * for 2.4GHz channel * @adwell_default_5g: adaptive dwell default number of APs * for 5GHz channels * @adwell_default_social_chn: adaptive dwell default number of * APs per social channel * @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2. * Otherwise reserved. * @adwell_max_budget: the maximal number of TUs that adaptive dwell * can add to the total scan time * @max_out_of_time: max out of serving channel time, per LMAC * @suspend_time: max suspend time, per LMAC * @scan_priority: priority of the request * @passive_dwell: continuous dwell time for passive channel * (without adaptive dwell) * @num_of_fragments: number of fragments needed for full fragmented * scan coverage.
*/ struct iwl_scan_general_params_v11 { __le16 flags; u8 reserved; - u8 scan_start_mac_id; + u8 scan_start_mac_or_link_id; u8 active_dwell[SCAN_TWO_LMACS]; u8 adwell_default_2g; u8 adwell_default_5g; u8 adwell_default_social_chn; u8 flags2; __le16 adwell_max_budget; __le32 max_out_of_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; -} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */ +} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_12, *_VER_11 and *_VER_10 */ /** * struct iwl_scan_periodic_parms_v1 * @schedule: can scheduling parameter * @delay: initial delay of the periodic scan in seconds * @reserved: reserved for future */ struct iwl_scan_periodic_parms_v1 { struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; __le16 delay; __le16 reserved; } __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ /** * struct iwl_scan_req_params_v12 * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v4 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v3 */ struct iwl_scan_req_params_v12 { struct iwl_scan_general_params_v11 general_params; struct iwl_scan_channel_params_v4 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v3 probe_params; } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v15 + * struct iwl_scan_req_params_v16 * @general_params: &struct iwl_scan_general_params_v11 - * @channel_params: &struct iwl_scan_channel_params_v6 + * @channel_params: &struct iwl_scan_channel_params_v7 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v4 */ -struct iwl_scan_req_params_v15 { +struct iwl_scan_req_params_v17 { struct iwl_scan_general_params_v11 general_params; - struct iwl_scan_channel_params_v6 channel_params; + struct iwl_scan_channel_params_v7 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v4 probe_params; -} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */ +} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_17 - 14 */ /** * struct iwl_scan_req_umac_v12 * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters */ struct iwl_scan_req_umac_v12 { __le32 uid; __le32 ooc_priority; struct iwl_scan_req_params_v12 scan_params; } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v15 + * struct iwl_scan_req_umac_v16 * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters */ -struct iwl_scan_req_umac_v15 { +struct iwl_scan_req_umac_v17 { __le32 uid; __le32 ooc_priority; - struct iwl_scan_req_params_v15 scan_params; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */ + struct iwl_scan_req_params_v17 scan_params; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_17 - 14 */ /** * struct iwl_umac_scan_abort * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @flags: reserved */ struct iwl_umac_scan_abort { __le32 uid; __le32 flags; } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ /** * struct iwl_umac_scan_complete * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @last_schedule: last scheduling line * @last_iter: last scan iteration number * @status: &enum 
iwl_scan_offload_complete_status * @ebs_status: &enum iwl_scan_ebs_status * @time_from_last_iter: time elapsed from last iteration * @reserved: for future use */ struct iwl_umac_scan_complete { __le32 uid; u8 last_schedule; u8 last_iter; u8 status; u8 ebs_status; __le32 time_from_last_iter; __le32 reserved; } __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ #define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5 #define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7 /** * struct iwl_scan_offload_profile_match_v1 - match information * @bssid: matched bssid * @reserved: reserved * @channel: channel where the match occurred * @energy: energy * @matching_feature: feature matches * @matching_channels: bitmap of channels that matched, referencing * the channels passed in the scan offload request. */ struct iwl_scan_offload_profile_match_v1 { u8 bssid[ETH_ALEN]; __le16 reserved; u8 channel; u8 energy; u8 matching_feature; u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1]; } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ /** * struct iwl_scan_offload_profiles_query_v1 - match results query response * @matched_profiles: bitmap of matched profiles, referencing the * matches passed in the scan offload request * @last_scan_age: age of the last offloaded scan * @n_scans_done: number of offloaded scans done * @gp2_d0u: GP2 when D0U occurred * @gp2_invoked: GP2 when scan offload was invoked * @resume_while_scanning: not used * @self_recovery: obsolete * @reserved: reserved * @matches: array of match information, one for each match */ struct iwl_scan_offload_profiles_query_v1 { __le32 matched_profiles; __le32 last_scan_age; __le32 n_scans_done; __le32 gp2_d0u; __le32 gp2_invoked; u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match_v1 matches[0]; + struct iwl_scan_offload_profile_match_v1 matches[]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ /** * struct iwl_scan_offload_profile_match - match information * @bssid: matched bssid * @reserved: reserved * @channel: channel where the match occurred * @energy: energy * @matching_feature: feature matches * @matching_channels: bitmap of channels that matched, referencing * the channels passed in the scan offload request. 
*/ struct iwl_scan_offload_profile_match { u8 bssid[ETH_ALEN]; __le16 reserved; u8 channel; u8 energy; u8 matching_feature; u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */ /** - * struct iwl_scan_offload_profiles_query - match results query response + * struct iwl_scan_offload_match_info - match results information * @matched_profiles: bitmap of matched profiles, referencing the * matches passed in the scan offload request * @last_scan_age: age of the last offloaded scan * @n_scans_done: number of offloaded scans done * @gp2_d0u: GP2 when D0U occurred * @gp2_invoked: GP2 when scan offload was invoked * @resume_while_scanning: not used * @self_recovery: obsolete * @reserved: reserved * @matches: array of match information, one for each match */ -struct iwl_scan_offload_profiles_query { +struct iwl_scan_offload_match_info { __le32 matched_profiles; __le32 last_scan_age; __le32 n_scans_done; __le32 gp2_d0u; __le32 gp2_invoked; u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match matches[0]; -} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */ + struct iwl_scan_offload_profile_match matches[]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and + * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1 + */ /** * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @scanned_channels: number of channels scanned and number of valid elements in * results array * @status: one of SCAN_COMP_STATUS_* * @bt_status: BT on/off status * @last_channel: last channel that was scanned * @start_tsf: TSF timer in usecs of the scan start time for the mac specified * in &struct iwl_scan_req_umac. 
* @results: array of scan results, length in @scanned_channels */ struct iwl_umac_scan_iter_complete_notif { __le32 uid; u8 scanned_channels; u8 status; u8 bt_status; u8 last_channel; __le64 start_tsf; struct iwl_scan_results_notif results[]; } __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ #endif /* __iwl_fw_api_scan_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/sta.h b/sys/contrib/dev/iwlwifi/fw/api/sta.h index 5edbe27c0922..d62fed543276 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/sta.h +++ b/sys/contrib/dev/iwlwifi/fw/api/sta.h @@ -1,494 +1,494 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_sta_h__ #define __iwl_fw_api_sta_h__ /** * enum iwl_sta_flags - flags for the ADD_STA host command * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames) * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames) * @STA_FLG_DISABLE_TX: set if TX should be disabled * @STA_FLG_PS: set if STA is in Power Save * @STA_FLG_DRAIN_FLOW: drain flow * @STA_FLG_PAN: STA is for PAN interface * @STA_FLG_CLASS_AUTH: station is authenticated * @STA_FLG_CLASS_ASSOC: station is associated * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS) * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask) * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift) * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported) * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported) * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported) * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported) * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported) * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported) * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported) * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported) * @STA_FLG_MAX_AGG_SIZE_2M: maximal size for A-MPDU (2M supported) * @STA_FLG_MAX_AGG_SIZE_4M: maximal size for A-MPDU (4M supported) * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is * initialised by driver and can be updated by fw upon reception of * action frames that can change the channel width. When cleared the fw * will send all the frames in 20MHz even when FAT channel is requested. * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the * driver and can be updated by fw upon reception of action frames. 
* @STA_FLG_MIMO_EN_SISO: no support for MIMO * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask) * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift) * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap) * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap) * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap) * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap) */ enum iwl_sta_flags { STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), STA_FLG_DISABLE_TX = BIT(4), STA_FLG_PS = BIT(8), STA_FLG_DRAIN_FLOW = BIT(12), STA_FLG_PAN = BIT(13), STA_FLG_CLASS_AUTH = BIT(14), STA_FLG_CLASS_ASSOC = BIT(15), STA_FLG_RTS_MIMO_PROT = BIT(17), STA_FLG_MAX_AGG_SIZE_SHIFT = 19, STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_2M = (8 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_4M = (9 << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_MAX_AGG_SIZE_MSK = (0xf << STA_FLG_MAX_AGG_SIZE_SHIFT), STA_FLG_AGG_MPDU_DENS_SHIFT = 23, STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), STA_FLG_FAT_EN_20MHZ = (0 << 26), STA_FLG_FAT_EN_40MHZ = (1 << 26), STA_FLG_FAT_EN_80MHZ = (2 << 26), STA_FLG_FAT_EN_160MHZ = (3 << 26), STA_FLG_FAT_EN_MSK = (3 << 26), STA_FLG_MIMO_EN_SISO = (0 << 28), STA_FLG_MIMO_EN_MIMO2 = (1 << 28), STA_FLG_MIMO_EN_MIMO3 = (2 << 28), STA_FLG_MIMO_EN_MSK = (3 << 28), }; /** * enum iwl_sta_key_flag - key flags for the ADD_STA host command * @STA_KEY_FLG_NO_ENC: no encryption * @STA_KEY_FLG_WEP: WEP encryption algorithm * @STA_KEY_FLG_CCM: CCMP encryption algorithm * @STA_KEY_FLG_TKIP: TKIP encryption algorithm * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) * @STA_KEY_FLG_GCMP: GCMP encryption algorithm * @STA_KEY_FLG_CMAC: CMAC encryption algorithm * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from * station info array (1 - n 1X mode) * @STA_KEY_FLG_KEYID_MSK: the index of the key * @STA_KEY_FLG_KEYID_POS: key index bit position * @STA_KEY_NOT_VALID: key is invalid * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key * @STA_KEY_FLG_KEY_32BYTES: for non-wep key set for 32 bytes key * @STA_KEY_MULTICAST: set for multical key * @STA_KEY_MFP: key is used for Management Frame Protection */ enum iwl_sta_key_flag { STA_KEY_FLG_NO_ENC = (0 << 0), STA_KEY_FLG_WEP = (1 << 0), STA_KEY_FLG_CCM = (2 << 0), STA_KEY_FLG_TKIP = (3 << 0), STA_KEY_FLG_EXT = (4 << 0), STA_KEY_FLG_GCMP = (5 << 0), STA_KEY_FLG_CMAC = (6 << 0), STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), STA_KEY_FLG_EN_MSK = (7 << 0), STA_KEY_FLG_WEP_KEY_MAP = BIT(3), STA_KEY_FLG_KEYID_POS = 8, STA_KEY_FLG_KEYID_MSK = (3 << 
STA_KEY_FLG_KEYID_POS), STA_KEY_NOT_VALID = BIT(11), STA_KEY_FLG_WEP_13BYTES = BIT(12), STA_KEY_FLG_KEY_32BYTES = BIT(12), STA_KEY_MULTICAST = BIT(14), STA_KEY_MFP = BIT(15), }; /** * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count * @STA_MODIFY_PROT_TH: modify RTS threshold * @STA_MODIFY_QUEUES: modify the queues used by this station */ enum iwl_sta_modify_flag { STA_MODIFY_QUEUE_REMOVAL = BIT(0), STA_MODIFY_TID_DISABLE_TX = BIT(1), STA_MODIFY_UAPSD_ACS = BIT(2), STA_MODIFY_ADD_BA_TID = BIT(3), STA_MODIFY_REMOVE_BA_TID = BIT(4), STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), STA_MODIFY_PROT_TH = BIT(6), STA_MODIFY_QUEUES = BIT(7), }; /** * enum iwl_sta_mode - station command mode * @STA_MODE_ADD: add new station * @STA_MODE_MODIFY: modify the station */ enum iwl_sta_mode { STA_MODE_ADD = 0, STA_MODE_MODIFY = 1, }; /** * enum iwl_sta_sleep_flag - type of sleep of the station * @STA_SLEEP_STATE_AWAKE: station is awake * @STA_SLEEP_STATE_PS_POLL: station is PS-polling * @STA_SLEEP_STATE_UAPSD: station uses U-APSD * @STA_SLEEP_STATE_MOREDATA: set more-data bit on * (last) released frame */ enum iwl_sta_sleep_flag { STA_SLEEP_STATE_AWAKE = 0, STA_SLEEP_STATE_PS_POLL = BIT(0), STA_SLEEP_STATE_UAPSD = BIT(1), STA_SLEEP_STATE_MOREDATA = BIT(2), }; #define STA_KEY_MAX_NUM (16) #define STA_KEY_IDX_INVALID (0xff) #define STA_KEY_MAX_DATA_KEY_NUM (4) #define IWL_MAX_GLOBAL_KEYS (4) #define STA_KEY_LEN_WEP40 (5) #define STA_KEY_LEN_WEP104 (13) #define IWL_ADD_STA_STATUS_MASK 0xFF #define IWL_ADD_STA_BAID_VALID_MASK 0x8000 #define IWL_ADD_STA_BAID_MASK 0x7F00 #define IWL_ADD_STA_BAID_SHIFT 8 /** * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) * @add_modify: see &enum iwl_sta_mode * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change * @reserved3: reserved * @station_flags: look at &enum iwl_sta_flags * @station_flags_msk: what of %station_flags have changed, * also &enum iwl_sta_flags * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set * add_immediate_ba_ssn. * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) * Set %STA_MODIFY_REMOVE_BA_TID to use this field * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with * add_immediate_ba_tid. * @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. 
* @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). * * ADD_STA sets up the table entry for one station, either creating a new * entry, or modifying a pre-existing one. */ struct iwl_mvm_add_sta_cmd_v7 { u8 add_modify; u8 awake_acs; __le16 tid_disable_tx; __le32 mac_id_n_color; u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ __le16 reserved2; u8 sta_id; u8 modify_mask; __le16 reserved3; __le32 station_flags; __le32 station_flags_msk; u8 add_immediate_ba_tid; u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; __le16 sleep_state_flags; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; } __packed; /* ADD_STA_CMD_API_S_VER_7 */ /** * enum iwl_sta_type - FW station types * ( REPLY_ADD_STA = 0x18 ) * @IWL_STA_LINK: Link station - normal RX and TX traffic. * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons * and probe responses. * @IWL_STA_MULTICAST: multicast traffic, * @IWL_STA_TDLS_LINK: TDLS link station * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on). */ enum iwl_sta_type { IWL_STA_LINK, IWL_STA_GENERAL_PURPOSE, IWL_STA_MULTICAST, IWL_STA_TDLS_LINK, IWL_STA_AUX_ACTIVITY, }; /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) * @add_modify: see &enum iwl_sta_mode * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. * @mac_id_n_color: the Mac context this station belongs to, * see &enum iwl_ctxt_id_and_color * @addr: station's MAC address * @reserved2: reserved * @sta_id: index of station in uCode's station table * @modify_mask: from &enum iwl_sta_modify_flag, selects what to change * @reserved3: reserved * @station_flags: look at &enum iwl_sta_flags * @station_flags_msk: what of %station_flags have changed, * also &enum iwl_sta_flags * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set * add_immediate_ba_ssn. * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) * Set %STA_MODIFY_REMOVE_BA_TID to use this field * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with * add_immediate_ba_tid. * @sleep_tx_count: number of packets to transmit to station even though it is * asleep. Used to synchronise PS-poll and u-APSD responses while ucode * keeps track of STA sleep state. * @station_type: type of this station. See &enum iwl_sta_type. * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP * mac-addr. * @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station. * Obselete for new TX API (9 and above). * @rx_ba_window: aggregation window size * @sp_length: the size of the SP in actual number of frames * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver * enabled ACs. * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). 
* * ADD_STA sets up the table entry for one station, either creating a new * entry, or modifying a pre-existing one. */ struct iwl_mvm_add_sta_cmd { u8 add_modify; u8 awake_acs; __le16 tid_disable_tx; __le32 mac_id_n_color; /* can be used for lmac id when using cmd v12 */ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ __le16 reserved2; u8 sta_id; u8 modify_mask; __le16 reserved3; __le32 station_flags; __le32 station_flags_msk; u8 add_immediate_ba_tid; u8 remove_immediate_ba_tid; __le16 add_immediate_ba_ssn; __le16 sleep_tx_count; u8 sleep_state_flags; u8 station_type; __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; __le16 rx_ba_window; u8 sp_length; u8 uapsd_acs; } __packed; /* ADD_STA_CMD_API_S_VER_10 */ /** * struct iwl_mvm_add_sta_key_common - add/modify sta key common part * ( REPLY_ADD_STA_KEY = 0x17 ) * @sta_id: index of station in uCode's station table * @key_offset: key offset in key storage * @key_flags: type &enum iwl_sta_key_flag * @key: key material data * @rx_secur_seq_cnt: RX security sequence counter for the key */ struct iwl_mvm_add_sta_key_common { u8 sta_id; u8 key_offset; __le16 key_flags; u8 key[32]; u8 rx_secur_seq_cnt[16]; } __packed; /** * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key * @common: see &struct iwl_mvm_add_sta_key_common * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection * @reserved: reserved * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx */ struct iwl_mvm_add_sta_key_cmd_v1 { struct iwl_mvm_add_sta_key_common common; u8 tkip_rx_tsc_byte2; u8 reserved; __le16 tkip_rx_ttak[5]; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ /** * struct iwl_mvm_add_sta_key_cmd - add/modify sta key * @common: see &struct iwl_mvm_add_sta_key_common * @rx_mic_key: TKIP RX unicast or multicast key * @tx_mic_key: TKIP TX key * @transmit_seq_cnt: TSC, transmit packet number * * Note: This is used for both v2 and v3, the difference being * in the way the common.rx_secur_seq_cnt is used, in v2 that's * the strange hole format, in v3 it's just a u64. */ struct iwl_mvm_add_sta_key_cmd { struct iwl_mvm_add_sta_key_common common; __le64 rx_mic_key; __le64 tx_mic_key; __le64 transmit_seq_cnt; } __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2, ADD_MODIFY_STA_KEY_API_S_VER_3 */ /** * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command * @ADD_STA_SUCCESS: operation was executed successfully * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that * doesn't exist. 
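 *
 * Since IWL_ADD_STA_BAID_VALID_MASK and IWL_ADD_STA_BAID_MASK occupy the
 * upper bits of the same status word, a caller would typically mask the
 * response before comparing it, e.g. (illustrative sketch only, "status"
 * being a hypothetical u32 holding the firmware response):
 *
 *	if ((status & IWL_ADD_STA_STATUS_MASK) != ADD_STA_SUCCESS)
 *		return -EIO;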
*/ enum iwl_mvm_add_sta_rsp_status { ADD_STA_SUCCESS = 0x1, ADD_STA_STATIONS_OVERLOAD = 0x2, ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, }; /** * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table * ( REMOVE_STA = 0x19 ) * @sta_id: the station id of the station to be removed * @reserved: reserved */ struct iwl_mvm_rm_sta_cmd { u8 sta_id; u8 reserved[3]; } __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ /** * struct iwl_mvm_mgmt_mcast_key_cmd_v1 * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK key material * @k1: unused * @k2: unused * @sta_id: station ID that support IGTK * @key_id: key ID * @receive_seq_cnt: initial RSC/PN needed for replay check */ struct iwl_mvm_mgmt_mcast_key_cmd_v1 { __le32 ctrl_flags; u8 igtk[16]; u8 k1[16]; u8 k2[16]; __le32 key_id; __le32 sta_id; __le64 receive_seq_cnt; } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ /** * struct iwl_mvm_mgmt_mcast_key_cmd * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: &enum iwl_sta_key_flag * @igtk: IGTK master key * @sta_id: station ID that support IGTK * @key_id: key ID * @receive_seq_cnt: initial RSC/PN needed for replay check */ struct iwl_mvm_mgmt_mcast_key_cmd { __le32 ctrl_flags; u8 igtk[32]; __le32 key_id; __le32 sta_id; __le64 receive_seq_cnt; } __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */ struct iwl_mvm_wep_key { u8 key_index; u8 key_offset; __le16 reserved1; u8 key_size; u8 reserved2[3]; u8 key[16]; } __packed; struct iwl_mvm_wep_key_cmd { __le32 mac_id_n_color; u8 num_keys; u8 decryption_type; u8 flags; u8 reserved; - struct iwl_mvm_wep_key wep_key[0]; + struct iwl_mvm_wep_key wep_key[]; } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ /** * struct iwl_mvm_eosp_notification - EOSP notification from firmware * @remain_frame_count: # of frames remaining, non-zero if SP was cut * short by GO absence * @sta_id: station ID */ struct iwl_mvm_eosp_notification { __le32 remain_frame_count; __le32 sta_id; } __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ #endif /* __iwl_fw_api_sta_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/tdls.h b/sys/contrib/dev/iwlwifi/fw/api/tdls.h index 14d35000abed..893438aadab0 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/tdls.h +++ b/sys/contrib/dev/iwlwifi/fw/api/tdls.h @@ -1,161 +1,161 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tdls_h__ #define __iwl_fw_api_tdls_h__ #include "fw/api/tx.h" #include "fw/api/phy-ctxt.h" #define IWL_MVM_TDLS_STA_COUNT 4 /* Type of TDLS request */ enum iwl_tdls_channel_switch_type { TDLS_SEND_CHAN_SW_REQ = 0, TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, TDLS_MOVE_CH, }; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ /** * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch * @frame_timestamp: GP2 timestamp of channel-switch request/response packet * received from peer * @max_offchan_duration: What amount of microseconds out of a DTIM is given * to the TDLS off-channel communication. For instance if the DTIM is * 200TU and the TDLS peer is to be given 25% of the time, the value * given will be 50TU, or 50 * 1024 if translated into microseconds. 
* @switch_time: switch time the peer sent in its channel switch timing IE * @switch_timeout: switch timeout the peer sent in its channel switch timing IE */ struct iwl_tdls_channel_switch_timing { __le32 frame_timestamp; /* GP2 time of peer packet Rx */ __le32 max_offchan_duration; /* given in micro-seconds */ __le32 switch_time; /* given in micro-seconds */ __le32 switch_timeout; /* given in micro-seconds */ } __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ #define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 /** * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template * * A template representing a TDLS channel-switch request or response frame * * @switch_time_offset: offset to the channel switch timing IE in the template * @tx_cmd: Tx parameters for the frame * @data: frame data */ struct iwl_tdls_channel_switch_frame { __le32 switch_time_offset; struct iwl_tx_cmd tx_cmd; u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ /** * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd * * @timing: timing related data for command * @frame: channel-switch request/response template, depending on switch_type */ struct iwl_tdls_channel_switch_cmd_tail { struct iwl_tdls_channel_switch_timing timing; struct iwl_tdls_channel_switch_frame frame; } __packed; /** * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command * * The command is sent to initiate a channel switch and also in response to * incoming TDLS channel-switch request/response packets from remote peers. * * @switch_type: see &enum iwl_tdls_channel_switch_type * @peer_sta_id: station id of TDLS peer * @ci: channel we switch to * @tail: command tail */ struct iwl_tdls_channel_switch_cmd { u8 switch_type; __le32 peer_sta_id; struct iwl_fw_channel_info ci; struct iwl_tdls_channel_switch_cmd_tail tail; } __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ /** * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification * * @status: non-zero on success * @offchannel_duration: duration given in microseconds * @sta_id: peer currently performing the channel-switch with */ struct iwl_tdls_channel_switch_notif { __le32 status; __le32 offchannel_duration; __le32 sta_id; } __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ /** * struct iwl_tdls_sta_info - TDLS station info * * @sta_id: station id of the TDLS peer * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise */ struct iwl_tdls_sta_info { u8 sta_id; u8 tx_to_peer_tid; __le16 tx_to_peer_ssn; __le32 is_initiator; } __packed; /* TDLS_STA_INFO_VER_1 */ /** * struct iwl_tdls_config_cmd - TDLS basic config command * * @id_and_color: MAC id and color being configured * @tdls_peer_count: amount of currently connected TDLS peers * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP * @sta_info: per-station info.
Only the first tdls_peer_count entries are set * @pti_req_data_offset: offset of network-level data for the PTI template * @pti_req_tx_cmd: Tx parameters for PTI request template * @pti_req_template: PTI request template data */ struct iwl_tdls_config_cmd { __le32 id_and_color; /* mac id and color */ u8 tdls_peer_count; u8 tx_to_ap_tid; __le16 tx_to_ap_ssn; struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; __le32 pti_req_data_offset; struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; + u8 pti_req_template[]; } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ /** * struct iwl_tdls_config_sta_info_res - TDLS per-station config information * * @sta_id: station id of the TDLS peer * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to * the peer */ struct iwl_tdls_config_sta_info_res { __le16 sta_id; __le16 tx_to_peer_last_seq; } __packed; /* TDLS_STA_INFO_RSP_VER_1 */ /** * struct iwl_tdls_config_res - TDLS config information from FW * * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP * @sta_info: per-station TDLS config information */ struct iwl_tdls_config_res { __le32 tx_to_ap_last_seq; struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ #endif /* __iwl_fw_api_tdls_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/time-event.h b/sys/contrib/dev/iwlwifi/fw/api/time-event.h index 904cd78a9fa0..7cc706731d70 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/time-event.h +++ b/sys/contrib/dev/iwlwifi/fw/api/time-event.h @@ -1,418 +1,419 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_time_event_h__ #define __iwl_fw_api_time_event_h__ #include "fw/api/phy-ctxt.h" /* Time Event types, according to MAC type */ enum iwl_time_event_type { /* BSS Station Events */ TE_BSS_STA_AGGRESSIVE_ASSOC, TE_BSS_STA_ASSOC, TE_BSS_EAP_DHCP_PROT, TE_BSS_QUIET_PERIOD, /* P2P Device Events */ TE_P2P_DEVICE_DISCOVERABLE, TE_P2P_DEVICE_LISTEN, TE_P2P_DEVICE_ACTION_SCAN, TE_P2P_DEVICE_FULL_SCAN, /* P2P Client Events */ TE_P2P_CLIENT_AGGRESSIVE_ASSOC, TE_P2P_CLIENT_ASSOC, TE_P2P_CLIENT_QUIET_PERIOD, /* P2P GO Events */ TE_P2P_GO_ASSOC_PROT, TE_P2P_GO_REPETITIVET_NOA, TE_P2P_GO_CT_WINDOW, /* WiDi Sync Events */ TE_WIDI_TX_SYNC, /* Channel Switch NoA */ TE_CHANNEL_SWITCH_PERIOD, TE_MAX }; /* MAC_EVENT_TYPE_API_E_VER_1 */ /* Time event - defines for command API v1 */ /* * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only * the first fragment is scheduled. * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only * the first 2 fragments are scheduled. * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any * number of fragments are valid. * * Other than the constant defined above, specifying a fragmentation value 'x' * means that the event can be fragmented but only the first 'x' will be * scheduled. 
*/ enum { TE_V1_FRAG_NONE = 0, TE_V1_FRAG_SINGLE = 1, TE_V1_FRAG_DUAL = 2, TE_V1_FRAG_ENDLESS = 0xffffffff }; /* If a Time Event can be fragmented, this is the max number of fragments */ #define TE_V1_FRAG_MAX_MSK 0x0fffffff /* Repeat the time event endlessly (until removed) */ #define TE_V1_REPEAT_ENDLESS 0xffffffff /* If a Time Event has bounded repetitions, this is the maximal value */ #define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff /* Time Event dependencies: none, on another TE, or in a specific time */ enum { TE_V1_INDEPENDENT = 0, TE_V1_DEP_OTHER = BIT(0), TE_V1_DEP_TSF = BIT(1), TE_V1_EVENT_SOCIOPATHIC = BIT(2), }; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ /* * @TE_V1_NOTIF_NONE: no notifications * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. * * Supported Time event notifications configuration. * A notification (both event and fragment) includes a status indicating weather * the FW was able to schedule the event or not. For fragment start/end * notification the status is always success. There is no start/end fragment * notification for monolithic events. */ enum { TE_V1_NOTIF_NONE = 0, TE_V1_NOTIF_HOST_EVENT_START = BIT(0), TE_V1_NOTIF_HOST_EVENT_END = BIT(1), TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), TE_V1_NOTIF_HOST_FRAG_START = BIT(4), TE_V1_NOTIF_HOST_FRAG_END = BIT(5), TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), }; /* MAC_EVENT_ACTION_API_E_VER_2 */ /* Time event - defines for command API */ /* * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only * the first fragment is scheduled. * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only * the first 2 fragments are scheduled. * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any * number of fragments are valid. * * Other than the constant defined above, specifying a fragmentation value 'x' * means that the event can be fragmented but only the first 'x' will be * scheduled. */ enum { TE_V2_FRAG_NONE = 0, TE_V2_FRAG_SINGLE = 1, TE_V2_FRAG_DUAL = 2, TE_V2_FRAG_MAX = 0xfe, TE_V2_FRAG_ENDLESS = 0xff }; /* Repeat the time event endlessly (until removed) */ #define TE_V2_REPEAT_ENDLESS 0xff /* If a Time Event has bounded repetitions, this is the maximal value */ #define TE_V2_REPEAT_MAX 0xfe #define TE_V2_PLACEMENT_POS 12 #define TE_V2_ABSENCE_POS 15 /** * enum iwl_time_event_policy - Time event policy values * A notification (both event and fragment) includes a status indicating weather * the FW was able to schedule the event or not. For fragment start/end * notification the status is always success. There is no start/end fragment * notification for monolithic events. * * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. 
* @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. * @TE_V2_START_IMMEDIATELY: start time event immediately * @TE_V2_DEP_OTHER: depends on another time event * @TE_V2_DEP_TSF: depends on a specific time * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC * @TE_V2_ABSENCE: are we present or absent during the Time Event. */ enum iwl_time_event_policy { TE_V2_DEFAULT_POLICY = 0x0, /* notifications (event start/stop, fragment start/stop) */ TE_V2_NOTIF_HOST_EVENT_START = BIT(0), TE_V2_NOTIF_HOST_EVENT_END = BIT(1), TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), TE_V2_NOTIF_HOST_FRAG_START = BIT(4), TE_V2_NOTIF_HOST_FRAG_END = BIT(5), TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), TE_V2_START_IMMEDIATELY = BIT(11), /* placement characteristics */ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), /* are we present or absent during the Time Event. */ TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), }; /** * struct iwl_time_event_cmd - configuring Time Events * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also * with version 1. determined by IWL_UCODE_TLV_FLAGS) * ( TIME_EVENT_CMD = 0x29 ) * @id_and_color: ID and color of the relevant MAC, * &enum iwl_ctxt_id_and_color * @action: action to perform, one of &enum iwl_ctxt_action * @id: this field has two meanings, depending on the action: * If the action is ADD, then it means the type of event to add. * For all other actions it is the unique event ID assigned when the * event was added by the FW.
* @apply_time: When to start the Time Event (in GP2) * @max_delay: maximum delay to event's start (apply time), in TU * @depends_on: the unique ID of the event we depend on (if any) * @interval: interval between repetitions, in TU * @duration: duration of event in TU * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS * @max_frags: maximal number of fragments the Time Event can be divided to * @policy: defines whether uCode shall notify the host or other uCode modules * on event and/or fragment start and/or end * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF * TE_EVENT_SOCIOPATHIC * using TE_ABSENCE and using TE_NOTIF_*, * &enum iwl_time_event_policy */ struct iwl_time_event_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; __le32 id; /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ __le32 apply_time; __le32 max_delay; __le32 depends_on; __le32 interval; __le32 duration; u8 repeat; u8 max_frags; __le16 policy; } __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ /** * struct iwl_time_event_resp - response structure to iwl_time_event_cmd * @status: bit 0 indicates success, all others specify errors * @id: the Time Event type * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE * @id_and_color: ID and color of the relevant MAC, * &enum iwl_ctxt_id_and_color */ struct iwl_time_event_resp { __le32 status; __le32 id; __le32 unique_id; __le32 id_and_color; } __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ /** * struct iwl_time_event_notif - notifications of time event start/stop * ( TIME_EVENT_NOTIFICATION = 0x2a ) * @timestamp: action timestamp in GP2 * @session_id: session's unique id * @unique_id: unique id of the Time Event itself * @id_and_color: ID and color of the relevant MAC * @action: &enum iwl_time_event_policy * @status: true if scheduled, false otherwise (not executed) */ struct iwl_time_event_notif { __le32 timestamp; __le32 session_id; __le32 unique_id; __le32 id_and_color; __le32 action; __le32 status; } __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ /* * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req * * @node_addr: Our MAC Address * @reserved: reserved for alignment * @apply_time: GP2 value to start (should always be the current GP2 value) * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max * time by which start of the event is allowed to be postponed. * @duration: event duration in TU To calculate event duration: * timeEventDuration = min(duration, remainingQuota) */ struct iwl_hs20_roc_req_tail { u8 node_addr[ETH_ALEN]; __le16 reserved; __le32 apply_time; __le32 apply_time_max_delay; __le32 duration; } __packed; /* * Aux ROC command * * Command requests the firmware to create a time event for a certain duration * and remain on the given channel. This is done by using the Aux framework in * the FW. * The command was first used for Hot Spot issues - but can be used regardless * to Hot Spot. * * ( HOT_SPOT_CMD 0x53 ) * * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the * event_unique_id should be the id of the time event assigned by ucode. * Otherwise ignore the event_unique_id. * @sta_id_and_color: station id and color, resumed during "Remain On Channel" * activity. 
* @channel_info: channel info */ struct iwl_hs20_roc_req { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ __le32 id_and_color; __le32 action; __le32 event_unique_id; __le32 sta_id_and_color; struct iwl_fw_channel_info channel_info; struct iwl_hs20_roc_req_tail tail; } __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ /* * values for AUX ROC result values */ enum iwl_mvm_hot_spot { HOT_SPOT_RSP_STATUS_OK, HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, HOT_SPOT_MAX_NUM_OF_SESSIONS, }; /* * Aux ROC command response * * In response to iwl_hs20_roc_req the FW sends this command to notify the * driver of the uid of the time event. * * ( HOT_SPOT_CMD 0x53 ) * * @event_unique_id: Unique ID of time event assigned by ucode * @status: Return status 0 is success, all the rest used for specific errors */ struct iwl_hs20_roc_res { __le32 event_unique_id; __le32 status; } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ /** * enum iwl_mvm_session_prot_conf_id - session protection's configurations * @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association. * The firmware will allocate two events. * Valid for BSS_STA and P2P_STA. * * A rather short event that can't be fragmented and with a very * high priority. If everything goes well (99% of the cases) the * association should complete within this first event. During * that event, no other activity will happen in the firmware, * which is why it can't be too long. * The length of this event is hard-coded in the firmware: 300TUs. * * Another event which can be much longer (its duration is * configurable by the driver) which has a slightly lower * priority and that can be fragmented allowing other activities * to run while this event is running. * The firmware will automatically remove both events once the driver sets * the BSS MAC as associated. Neither of the events will be removed * for the P2P_STA MAC. * Only the duration is configurable for this protection. * @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used * @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in * listen mode. Will be fragmented. Valid only on the P2P Device MAC. * The firmware will take into account * the duration, the interval and the repetition count. * @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be * able to run the GO Negotiation. Will not be fragmented and not * repetitive. Valid only on the P2P Device MAC. Only the duration will * be taken into account. * @SESSION_PROTECT_CONF_MAX_ID: not used */ enum iwl_mvm_session_prot_conf_id { SESSION_PROTECT_CONF_ASSOC, SESSION_PROTECT_CONF_GO_CLIENT_ASSOC, SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV, SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION, SESSION_PROTECT_CONF_MAX_ID, }; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */ /** * struct iwl_mvm_session_prot_cmd - configure a session protection * @id_and_color: the id and color of the mac for which this session protection * is sent - * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE + * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE, + * see &enum iwl_ctxt_action * @conf_id: see &enum iwl_mvm_session_prot_conf_id * @duration_tu: the duration of the whole protection in TUs. * @repetition_count: not used * @interval: not used * * Note: the session protection will always be scheduled to start as * early as possible, but the maximum delay is configuration dependent. * The firmware supports only one concurrent session protection per vif. * Adding a new session protection will remove any currently running session.
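 *
 * For example, a driver could ask for an association protection with a
 * command filled roughly as sketched below. This is only an illustrative
 * sketch: FW_CMD_ID_AND_COLOR() and MSEC_TO_TU() are assumed driver-side
 * helpers, and mac_id/color are hypothetical values describing the MAC
 * context; only the structure layout and the enum values come from this
 * file.
 *
 *	struct iwl_mvm_session_prot_cmd cmd = {
 *		.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)),
 *		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
 *		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
 *		.duration_tu = cpu_to_le32(MSEC_TO_TU(900)),
 *	};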
*/ struct iwl_mvm_session_prot_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ __le32 id_and_color; __le32 action; __le32 conf_id; __le32 duration_tu; __le32 repetition_count; __le32 interval; } __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */ /** * struct iwl_mvm_session_prot_notif - session protection started / ended * @mac_id: the mac id for which the session protection started / ended * @status: 1 means success, 0 means failure * @start: 1 means the session protection started, 0 means it ended * @conf_id: see &enum iwl_mvm_session_prot_conf_id * * Note that any session protection will always get two notifications: start * and end even the firmware could not schedule it. */ struct iwl_mvm_session_prot_notif { __le32 mac_id; __le32 status; __le32 start; __le32 conf_id; } __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */ #endif /* __iwl_fw_api_time_event_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/api/tx.h b/sys/contrib/dev/iwlwifi/fw/api/tx.h index ecc6706f66ed..842360b1e995 100644 --- a/sys/contrib/dev/iwlwifi/fw/api/tx.h +++ b/sys/contrib/dev/iwlwifi/fw/api/tx.h @@ -1,966 +1,957 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ #define __iwl_fw_api_tx_h__ #include /** * enum iwl_tx_flags - bitmasks for tx_flags in TX command * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame * @TX_CMD_FLG_ACK: expect ACK from receiving station * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. * Otherwise, use rate_n_flags from the TX command * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected * Must set TX_CMD_FLG_ACK with this flag. * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) * @TX_CMD_FLG_BT_PRIO_MASK: BT priority value * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored * on old firmwares). * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame * Should be set for beacons and probe responses * @TX_CMD_FLG_CALIB: activate PA TX power calibrations * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. 
* Should be set for 26/30 length MAC headers * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped * @TX_CMD_FLG_EXEC_PAPD: execute PAPD * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk */ enum iwl_tx_flags { TX_CMD_FLG_PROT_REQUIRE = BIT(0), TX_CMD_FLG_WRITE_TX_POWER = BIT(1), TX_CMD_FLG_ACK = BIT(3), TX_CMD_FLG_STA_RATE = BIT(4), TX_CMD_FLG_BAR = BIT(6), TX_CMD_FLG_TXOP_PROT = BIT(7), TX_CMD_FLG_VHT_NDPA = BIT(8), TX_CMD_FLG_HT_NDPA = BIT(9), TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), TX_CMD_FLG_BT_PRIO_POS = 11, TX_CMD_FLG_BT_PRIO_MASK = BIT(11) | BIT(12), TX_CMD_FLG_BT_DIS = BIT(12), TX_CMD_FLG_SEQ_CTL = BIT(13), TX_CMD_FLG_MORE_FRAG = BIT(14), TX_CMD_FLG_TSF = BIT(16), TX_CMD_FLG_CALIB = BIT(17), TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), TX_CMD_FLG_MH_PAD = BIT(20), TX_CMD_FLG_RESP_TO_DRV = BIT(21), TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), TX_CMD_FLG_DUR = BIT(25), TX_CMD_FLG_FW_DROP = BIT(26), TX_CMD_FLG_EXEC_PAPD = BIT(27), TX_CMD_FLG_PAPD_TYPE = BIT(28), TX_CMD_FLG_HCCA_CHUNK = BIT(31) }; /* TX_FLAGS_BITS_API_S_VER_1 */ /** * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000 * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs * to a secured STA * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate * selection, retry limits and BT kill */ enum iwl_tx_cmd_flags { IWL_TX_FLAGS_CMD_RATE = BIT(0), IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), IWL_TX_FLAGS_HIGH_PRI = BIT(2), /* Use these flags only from * TX_FLAGS_BITS_API_S_VER_4 and above */ IWL_TX_FLAGS_RTS = BIT(3), IWL_TX_FLAGS_CTS = BIT(4), }; /* TX_FLAGS_BITS_API_S_VER_3 */ /** * enum iwl_tx_pm_timeouts - pm timeout values in TX command * @PM_FRAME_NONE: no need to suspend sleep mode * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec */ enum iwl_tx_pm_timeouts { PM_FRAME_NONE = 0, PM_FRAME_MGMT = 2, PM_FRAME_ASSOC = 3, }; #define TX_CMD_SEC_MSK 0x07 #define TX_CMD_SEC_WEP_KEY_IDX_POS 6 #define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 /** * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command * @TX_CMD_SEC_WEP: WEP encryption algorithm. * @TX_CMD_SEC_CCM: CCM encryption algorithm. * @TX_CMD_SEC_TKIP: TKIP encryption algorithm. * @TX_CMD_SEC_EXT: extended cipher algorithm. * @TX_CMD_SEC_GCMP: GCMP encryption algorithm. * @TX_CMD_SEC_KEY128: set for 104 bits WEP key. * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken * from the table instead of from the TX command. * If the key is taken from the key table its index should be given by the * first byte of the TX command key field. 
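 *
 * For example, a CCMP-protected frame whose key material sits in the
 * firmware key table could set the security fields of the TX command as
 * sketched below; hw_key_idx is a hypothetical driver-side variable
 * holding the key-table index the driver chose when installing the key:
 *
 *	tx_cmd->sec_ctl = TX_CMD_SEC_CCM | TX_CMD_SEC_KEY_FROM_TABLE;
 *	tx_cmd->key[0] = hw_key_idx;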
*/ enum iwl_tx_cmd_sec_ctrl { TX_CMD_SEC_WEP = 0x01, TX_CMD_SEC_CCM = 0x02, TX_CMD_SEC_TKIP = 0x03, TX_CMD_SEC_EXT = 0x04, TX_CMD_SEC_GCMP = 0x05, TX_CMD_SEC_KEY128 = 0x08, TX_CMD_SEC_KEY_FROM_TABLE = 0x10, }; /* * TX command Frame life time in us - to be written in pm_frame_timeout */ #define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF #define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/ #define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ #define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 /* * TID for non QoS frames - to be written in tid_tspec */ #define IWL_TID_NON_QOS 0 /* * Limits on the retransmissions - to be written in {data,rts}_retry_limit */ #define IWL_DEFAULT_TX_RETRY 15 #define IWL_MGMT_DFAULT_RETRY_LIMIT 3 #define IWL_RTS_DFAULT_RETRY_LIMIT 60 #define IWL_BAR_DFAULT_RETRY_LIMIT 60 #define IWL_LOW_RETRY_LIMIT 7 /** * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words) * from mac header end. For normal case it is 4 words for SNAP. * note: tx_cmd, mac header and pad are not counted in the offset. * This is used to help the offload in case there is tunneling such as * IPv6 in IPv4, in such case the ip header offset should point to the * inner ip header and IPv4 checksum of the external header should be * calculated by driver. * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum * @TX_CMD_OFFLD_L3_EN: enable IP header checksum * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV * field. Doesn't include the pad. * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for * alignment * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU */ enum iwl_tx_offload_assist_flags_pos { TX_CMD_OFFLD_IP_HDR = 0, TX_CMD_OFFLD_L4_EN = 6, TX_CMD_OFFLD_L3_EN = 7, TX_CMD_OFFLD_MH_SIZE = 8, TX_CMD_OFFLD_PAD = 13, TX_CMD_OFFLD_AMSDU = 14, }; #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f #define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f -enum iwl_tx_offload_assist_bz { - IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff, - IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800, - IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000, - IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000, - IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000, - IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000, - IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000, - IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000, -}; - /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration * @tx_flags: combination of TX_CMD_FLG_*, see &enum iwl_tx_flags * @scratch: scratch buffer used by the device * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @sta_id: index of destination station in FW station table * @sec_ctl: security control, TX_CMD_SEC_* * @initial_rate_index: index into the the rate table for initial TX attempt. * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. * @reserved2: reserved * @key: security key * @reserved3: reserved * @life_time: frame life time (usecs??) * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + * btkill_cnd + reserved), first 32 bits. "0" disables usage. 
* @dram_msb_ptr: upper bits of the scratch physical address * @rts_retry_limit: max attempts for RTS * @data_retry_limit: max attempts to send the data packet * @tid_tspec: TID/tspec * @pm_frame_timeout: PM TX frame timeout * @reserved4: reserved * @payload: payload (same as @hdr) * @hdr: 802.11 header (same as @payload) * * The byte count (both len and next_frame_len) includes MAC header * (24/26/30/32 bytes) * + 2 bytes pad if 26/30 header size * + 8 byte IV for CCM or TKIP (not used for WEP) * + Data payload * + 8-byte MIC (not used for CCM/WEP) * It does not include post-MAC padding, i.e., * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes. * Range of len: 14-2342 bytes. * * After the struct fields the MAC header is placed, plus any padding, * and then the actial payload. */ struct iwl_tx_cmd { __le16 len; __le16 offload_assist; __le32 tx_flags; struct { u8 try_cnt; u8 btkill_cnt; __le16 reserved; } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ __le32 rate_n_flags; u8 sta_id; u8 sec_ctl; u8 initial_rate_index; u8 reserved2; u8 key[16]; __le32 reserved3; __le32 life_time; __le32 dram_lsb_ptr; u8 dram_msb_ptr; u8 rts_retry_limit; u8 data_retry_limit; u8 tid_tspec; __le16 pm_frame_timeout; __le16 reserved4; union { DECLARE_FLEX_ARRAY(u8, payload); DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr); }; } __packed; /* TX_CMD_API_S_VER_6 */ struct iwl_dram_sec_info { __le32 pn_low; __le16 pn_high; __le16 aux_info; } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ /** * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @offload_assist: TX offload configuration * @flags: combination of &enum iwl_tx_cmd_flags * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* * @hdr: 802.11 header */ struct iwl_tx_cmd_gen2 { __le16 len; __le16 offload_assist; __le32 flags; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_7, TX_CMD_API_S_VER_9 */ /** * struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices * ( TX_CMD = 0x1c ) * @len: in bytes of the payload, see below for details * @flags: combination of &enum iwl_tx_cmd_flags * @offload_assist: TX offload configuration * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. 
Combination of RATE_MCS_* * @reserved: reserved * @hdr: 802.11 header */ struct iwl_tx_cmd_gen3 { __le16 len; __le16 flags; __le32 offload_assist; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; u8 reserved[8]; struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */ /* * TX response related data */ /* * enum iwl_tx_status - status that is returned by the fw after attempts to Tx * @TX_STATUS_SUCCESS: * @TX_STATUS_DIRECT_DONE: * @TX_STATUS_POSTPONE_DELAY: * @TX_STATUS_POSTPONE_FEW_BYTES: * @TX_STATUS_POSTPONE_BT_PRIO: * @TX_STATUS_POSTPONE_QUIET_PERIOD: * @TX_STATUS_POSTPONE_CALC_TTAK: * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: * @TX_STATUS_FAIL_SHORT_LIMIT: * @TX_STATUS_FAIL_LONG_LIMIT: * @TX_STATUS_FAIL_UNDERRUN: * @TX_STATUS_FAIL_DRAIN_FLOW: * @TX_STATUS_FAIL_RFKILL_FLUSH: * @TX_STATUS_FAIL_LIFE_EXPIRE: * @TX_STATUS_FAIL_DEST_PS: * @TX_STATUS_FAIL_HOST_ABORTED: * @TX_STATUS_FAIL_BT_RETRY: * @TX_STATUS_FAIL_STA_INVALID: * @TX_TATUS_FAIL_FRAG_DROPPED: * @TX_STATUS_FAIL_TID_DISABLE: * @TX_STATUS_FAIL_FIFO_FLUSHED: * @TX_STATUS_FAIL_SMALL_CF_POLL: * @TX_STATUS_FAIL_FW_DROP: * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and * STA table * @TX_FRAME_STATUS_INTERNAL_ABORT: * @TX_MODE_MSK: * @TX_MODE_NO_BURST: * @TX_MODE_IN_BURST_SEQ: * @TX_MODE_FIRST_IN_BURST: * @TX_QUEUE_NUM_MSK: * * Valid only if frame_count =1 * TODO: complete documentation */ enum iwl_tx_status { TX_STATUS_MSK = 0x000000ff, TX_STATUS_SUCCESS = 0x01, TX_STATUS_DIRECT_DONE = 0x02, /* postpone TX */ TX_STATUS_POSTPONE_DELAY = 0x40, TX_STATUS_POSTPONE_FEW_BYTES = 0x41, TX_STATUS_POSTPONE_BT_PRIO = 0x42, TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, TX_STATUS_POSTPONE_CALC_TTAK = 0x44, /* abort TX */ TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, TX_STATUS_FAIL_SHORT_LIMIT = 0x82, TX_STATUS_FAIL_LONG_LIMIT = 0x83, TX_STATUS_FAIL_UNDERRUN = 0x84, TX_STATUS_FAIL_DRAIN_FLOW = 0x85, TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, TX_STATUS_FAIL_DEST_PS = 0x88, TX_STATUS_FAIL_HOST_ABORTED = 0x89, TX_STATUS_FAIL_BT_RETRY = 0x8a, TX_STATUS_FAIL_STA_INVALID = 0x8b, TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, TX_STATUS_FAIL_TID_DISABLE = 0x8d, TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, TX_STATUS_FAIL_FW_DROP = 0x90, TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, TX_STATUS_INTERNAL_ABORT = 0x92, TX_MODE_MSK = 0x00000f00, TX_MODE_NO_BURST = 0x00000000, TX_MODE_IN_BURST_SEQ = 0x00000100, TX_MODE_FIRST_IN_BURST = 0x00000200, TX_QUEUE_NUM_MSK = 0x0001f000, TX_NARROW_BW_MSK = 0x00060000, TX_NARROW_BW_1DIV2 = 0x00020000, TX_NARROW_BW_1DIV4 = 0x00040000, TX_NARROW_BW_1DIV8 = 0x00060000, }; /* * enum iwl_tx_agg_status - TX aggregation status * @AGG_TX_STATE_STATUS_MSK: * @AGG_TX_STATE_TRANSMITTED: * @AGG_TX_STATE_UNDERRUN: * @AGG_TX_STATE_BT_PRIO: * @AGG_TX_STATE_FEW_BYTES: * @AGG_TX_STATE_ABORT: * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or * BT detection * @AGG_TX_STATE_LAST_SENT_TRY_CNT: * @AGG_TX_STATE_LAST_SENT_BT_KILL: * @AGG_TX_STATE_SCD_QUERY: * @AGG_TX_STATE_TEST_BAD_CRC32: * @AGG_TX_STATE_RESPONSE: * @AGG_TX_STATE_DUMP_TX: * @AGG_TX_STATE_DELAY_TX: * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries * occur if tx failed for this frame when it was a member of a previous * aggregation block). If rate scaling is used, retry count indicates the * rate table entry used for all frames in the new agg. 
* @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for * this frame * * TODO: complete documentation */ enum iwl_tx_agg_status { AGG_TX_STATE_STATUS_MSK = 0x00fff, AGG_TX_STATE_TRANSMITTED = 0x000, AGG_TX_STATE_UNDERRUN = 0x001, AGG_TX_STATE_BT_PRIO = 0x002, AGG_TX_STATE_FEW_BYTES = 0x004, AGG_TX_STATE_ABORT = 0x008, AGG_TX_STATE_TX_ON_AIR_DROP = 0x010, AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, AGG_TX_STATE_SCD_QUERY = 0x080, AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, AGG_TX_STATE_RESPONSE = 0x1ff, AGG_TX_STATE_DUMP_TX = 0x200, AGG_TX_STATE_DELAY_TX = 0x400, AGG_TX_STATE_TRY_CNT_POS = 12, AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, }; /* * The mask below describes a status where we are absolutely sure that the MPDU * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've * written the bytes to the TXE, but we know nothing about what the DSP did. */ #define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ AGG_TX_STATE_ABORT | \ AGG_TX_STATE_SCD_QUERY) /* * REPLY_TX = 0x1c (response) * * This response may be in one of two slightly different formats, indicated * by the frame_count field: * * 1) No aggregation (frame_count == 1). This reports Tx results for a single * frame. Multiple attempts, at various bit rates, may have been made for * this frame. * * 2) Aggregation (frame_count > 1). This reports Tx results for two or more * frames that used block-acknowledge. All frames were transmitted at * same rate. Rate scaling may have been used if first frame in this new * agg block failed in previous agg block(s). * * Note that, for aggregation, ACK (block-ack) status is not delivered * here; block-ack has not been received by the time the device records * this status. * This status relates to reasons the tx might have been blocked or aborted * within the device, rather than whether it was received successfully by * the destination station. */ /** * struct agg_tx_status - per packet TX aggregation status * @status: See &enum iwl_tx_agg_status * @sequence: Sequence # for this frame's Tx cmd (not SSN!) */ struct agg_tx_status { __le16 status; __le16 sequence; } __packed; /* * definitions for initial rate index field * bits [3:0] initial rate index * bits [6:4] rate table color, used for the initial rate * bit-7 invalid rate indication */ #define TX_RES_INIT_RATE_INDEX_MSK 0x0f #define TX_RES_RATE_TABLE_COLOR_POS 4 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 #define TX_RES_INV_RATE_INDEX_MSK 0x80 #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ TX_RES_RATE_TABLE_COLOR_POS) #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) /** * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) * @failure_rts: num of failures due to unsuccessful RTS * @failure_frame: num failures due to no ACK (unused for agg) * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the * Tx of all the batch. RATE_MCS_* * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. * for agg: RTS + CTS + aggregation tx time + block-ack time. * in usec. 
* @pa_status: tx power info * @pa_integ_res_a: tx power info * @pa_integ_res_b: tx power info * @pa_integ_res_c: tx power info * @measurement_req_id: tx power info * @reduced_tpc: transmit power reduction used * @reserved: reserved * @tfd_info: TFD information set by the FH * @seq_ctl: sequence control from the Tx cmd * @byte_cnt: byte count from the Tx cmd * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control * @status: for non-agg: frame status TX_STATUS_* * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields * follow this one, up to frame_count. Length in @frame_count. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. */ struct iwl_mvm_tx_resp_v3 { u8 frame_count; u8 bt_kill_count; u8 failure_rts; u8 failure_frame; __le32 initial_rate; __le16 wireless_media_time; u8 pa_status; u8 pa_integ_res_a[3]; u8 pa_integ_res_b[3]; u8 pa_integ_res_c[3]; __le16 measurement_req_id; u8 reduced_tpc; u8 reserved; __le32 tfd_info; __le16 seq_ctl; __le16 byte_cnt; u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; struct agg_tx_status status[]; } __packed; /* TX_RSP_API_S_VER_3 */ /** * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet * ( REPLY_TX = 0x1c ) * @frame_count: 1 no aggregation, >1 aggregation * @bt_kill_count: num of times blocked by bluetooth (unused for agg) * @failure_rts: num of failures due to unsuccessful RTS * @failure_frame: num failures due to no ACK (unused for agg) * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the * Tx of all the batch. RATE_MCS_* * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. * for agg: RTS + CTS + aggregation tx time + block-ack time. * in usec. * @pa_status: tx power info * @pa_integ_res_a: tx power info * @pa_integ_res_b: tx power info * @pa_integ_res_c: tx power info * @measurement_req_id: tx power info * @reduced_tpc: transmit power reduction used * @reserved: reserved * @tfd_info: TFD information set by the FH * @seq_ctl: sequence control from the Tx cmd * @byte_cnt: byte count from the Tx cmd * @tlc_info: TLC rate info * @ra_tid: bits [3:0] = ra, bits [7:4] = tid * @frame_ctrl: frame control * @tx_queue: TX queue for this response * @reserved2: reserved for padding/alignment * @status: for non-agg: frame status TX_STATUS_* * For version 6 TX response isn't received for aggregation at all. * * After the array of statuses comes the SSN of the SCD. Look at * %iwl_mvm_get_scd_ssn for more details. 
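 *
 * For example, a response handler could extract the single-frame status
 * along these lines; tx_resp here is assumed to point at this structure
 * inside the received notification payload:
 *
 *	u32 status = le16_to_cpu(tx_resp->status.status) & TX_STATUS_MSK;
 *	bool acked = (status == TX_STATUS_SUCCESS ||
 *		      status == TX_STATUS_DIRECT_DONE);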
*/ struct iwl_mvm_tx_resp { u8 frame_count; u8 bt_kill_count; u8 failure_rts; u8 failure_frame; __le32 initial_rate; __le16 wireless_media_time; u8 pa_status; u8 pa_integ_res_a[3]; u8 pa_integ_res_b[3]; u8 pa_integ_res_c[3]; __le16 measurement_req_id; u8 reduced_tpc; u8 reserved; __le32 tfd_info; __le16 seq_ctl; __le16 byte_cnt; u8 tlc_info; u8 ra_tid; __le16 frame_ctrl; __le16 tx_queue; __le16 reserved2; struct agg_tx_status status; } __packed; /* TX_RSP_API_S_VER_6, TX_RSP_API_S_VER_7 */ /** * struct iwl_mvm_ba_notif - notifies about reception of BA * ( BA_NOTIF = 0xc5 ) * @sta_addr: MAC address * @reserved: reserved * @sta_id: Index of recipient (BA-sending) station in fw's station table * @tid: tid of the session * @seq_ctl: sequence control field * @bitmap: the bitmap of the BA notification as seen in the air * @scd_flow: the tx queue this BA relates to * @scd_ssn: the index of the last contiguously sent packet * @txed: number of Txed frames in this batch * @txed_2_done: number of Acked frames in this batch * @reduced_txp: power reduced according to TPC. This is the actual value and * not a copy from the LQ command. Thus, if not the first rate was used * for Tx-ing then this value will be set to 0 by FW. * @reserved1: reserved */ struct iwl_mvm_ba_notif { u8 sta_addr[ETH_ALEN]; __le16 reserved; u8 sta_id; u8 tid; __le16 seq_ctl; __le64 bitmap; __le16 scd_flow; __le16 scd_ssn; u8 txed; u8 txed_2_done; u8 reduced_txp; u8 reserved1; } __packed; /** * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue * @q_num: TFD queue number * @tfd_index: Index of first un-acked frame in the TFD queue * @scd_queue: For debug only - the physical queue the TFD queue is bound to * @tid: TID of the queue (0-7) * @reserved: reserved for alignment */ struct iwl_mvm_compressed_ba_tfd { __le16 q_num; __le16 tfd_index; u8 scd_queue; u8 tid; u8 reserved[2]; } __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ /** * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue * @q_num: RA TID queue number * @tid: TID of the queue * @ssn: BA window current SSN */ struct iwl_mvm_compressed_ba_ratid { u8 q_num; u8 tid; __le16 ssn; } __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */ /* * enum iwl_mvm_ba_resp_flags - TX aggregation status * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the * expected time */ enum iwl_mvm_ba_resp_flags { IWL_MVM_BA_RESP_TX_AGG, IWL_MVM_BA_RESP_TX_BAR, IWL_MVM_BA_RESP_TX_AGG_FAIL, IWL_MVM_BA_RESP_TX_UNDERRUN, IWL_MVM_BA_RESP_TX_BT_KILL, IWL_MVM_BA_RESP_TX_DSP_TIMEOUT }; /** * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA * ( BA_NOTIF = 0xc5 ) * @flags: status flag, see the &iwl_mvm_ba_resp_flags * @sta_id: Index of recipient (BA-sending) station in fw's station table * @reduced_txp: power reduced according to TPC. This is the actual value and * not a copy from the LQ command. Thus, if not the first rate was used * for Tx-ing then this value will be set to 0 by FW. 
* @tlc_rate_info: TLC rate info, initial rate index, TLC table color * @retry_cnt: retry count * @query_byte_cnt: SCD query byte count * @query_frame_cnt: SCD query frame count * @txed: number of frames sent in the aggregation (all-TIDs) * @done: number of frames that were Acked by the BA (all-TIDs) * @reserved: reserved (for alignment) * @wireless_time: Wireless-media time * @tx_rate: the rate the aggregation was sent at * @tfd_cnt: number of TFD-Q elements * @ra_tid_cnt: number of RATID-Q elements * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd * for details. Length in @tfd_cnt. * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See * &iwl_mvm_compressed_ba_ratid for more details. Length in @ra_tid_cnt. */ struct iwl_mvm_compressed_ba_notif { __le32 flags; u8 sta_id; u8 reduced_txp; u8 tlc_rate_info; u8 retry_cnt; __le32 query_byte_cnt; __le16 query_frame_cnt; __le16 txed; __le16 done; __le16 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; union { DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid); DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd); }; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4, COMPRESSED_BA_RES_API_S_VER_5 */ /** * struct iwl_mac_beacon_cmd_v6 - beacon template command * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the coresponding * mac. * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v6 { struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the coresponding * mac. * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd_v7 { struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ /* Bit flags for BEACON_TEMPLATE_CMD_API until version 10 */ enum iwl_mac_beacon_flags_v1 { IWL_MAC_BEACON_CCK_V1 = BIT(8), IWL_MAC_BEACON_ANT_A_V1 = BIT(9), IWL_MAC_BEACON_ANT_B_V1 = BIT(10), IWL_MAC_BEACON_FILS_V1 = BIT(12), }; /* Bit flags for BEACON_TEMPLATE_CMD_API version 11 and above */ enum iwl_mac_beacon_flags { IWL_MAC_BEACON_CCK = BIT(5), IWL_MAC_BEACON_ANT_A = BIT(6), IWL_MAC_BEACON_ANT_B = BIT(7), IWL_MAC_BEACON_FILS = BIT(8), }; /** * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA * @byte_cnt: byte count of the beacon frame. * @flags: least significant byte for rate code. The most significant byte * is &enum iwl_mac_beacon_flags. * @short_ssid: Short SSID * @reserved: reserved - * @template_id: currently equal to the mac context id of the coresponding mac. 
+ * @link_id: the firmware id of the link that will use this beacon * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd { __le16 byte_cnt; __le16 flags; __le32 short_ssid; __le32 reserved; - __le32 template_id; + __le32 link_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10, - BEACON_TEMPLATE_CMD_API_S_VER_11, - BEACON_TEMPLATE_CMD_API_S_VER_12 */ + * BEACON_TEMPLATE_CMD_API_S_VER_11, + * BEACON_TEMPLATE_CMD_API_S_VER_12, + * BEACON_TEMPLATE_CMD_API_S_VER_13 + */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; } __packed; /** * struct iwl_extended_beacon_notif_v5 - notifies about beacon transmission * @beacon_notify_hdr: tx response command associated with the beacon * @tsf: last beacon tsf * @ibss_mgr_status: whether IBSS is manager * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif_v5 { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; } __packed; /* BEACON_NTFY_API_S_VER_5 */ /** * struct iwl_extended_beacon_notif - notifies about beacon transmission * @status: the status of the Tx response of the beacon * @tsf: last beacon tsf * @ibss_mgr_status: whether IBSS is manager * @gp2: last beacon time in gp2 */ struct iwl_extended_beacon_notif { __le32 status; __le64 tsf; __le32 ibss_mgr_status; __le32 gp2; } __packed; /* BEACON_NTFY_API_S_VER_6_ */ /** * enum iwl_dump_control - dump (flush) control flags * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty * and the TFD queues are empty.
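 *
 * For example, the v1 flush command defined further below would typically
 * carry this flag; queue_bitmap is a hypothetical driver-chosen mask of
 * hardware queues:
 *
 *	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
 *		.queues_ctl = cpu_to_le32(queue_bitmap),
 *		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
 *	};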
*/ enum iwl_dump_control { DUMP_TX_FIFO_FLUSH = BIT(1), }; /** * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command * @queues_ctl: bitmap of queues to flush * @flush_ctl: control flags * @reserved: reserved */ struct iwl_tx_path_flush_cmd_v1 { __le32 queues_ctl; __le16 flush_ctl; __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ /** * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command * @sta_id: station ID to flush * @tid_mask: TID mask to flush * @reserved: reserved */ struct iwl_tx_path_flush_cmd { __le32 sta_id; __le16 tid_mask; __le16 reserved; } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */ #define IWL_TX_FLUSH_QUEUE_RSP 16 /** * struct iwl_flush_queue_info - virtual flush queue info * @queue_num: virtual queue id * @read_before_flush: read pointer before flush * @read_after_flush: read pointer after flush */ struct iwl_flush_queue_info { __le16 tid; __le16 queue_num; __le16 read_before_flush; __le16 read_after_flush; } __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */ /** * struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response * @num_flushed_queues: number of queues in queues array * @queues: all flushed queues */ struct iwl_tx_path_flush_cmd_rsp { __le16 sta_id; __le16 num_flushed_queues; struct iwl_flush_queue_info queues[IWL_TX_FLUSH_QUEUE_RSP]; } __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */ /* Available options for the SCD_QUEUE_CFG HCMD */ enum iwl_scd_cfg_actions { SCD_CFG_DISABLE_QUEUE = 0x0, SCD_CFG_ENABLE_QUEUE = 0x1, SCD_CFG_UPDATE_QUEUE_TID = 0x2, }; /** * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command * @token: unused * @sta_id: station id * @tid: TID * @scd_queue: scheduler queue to confiug * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner * Value is one of &enum iwl_scd_cfg_actions options * @aggregate: 1 aggregated queue, 0 otherwise * @tx_fifo: &enum iwl_mvm_tx_fifo * @window: BA window size * @ssn: SSN for the BA agreement * @reserved: reserved */ struct iwl_scd_txq_cfg_cmd { u8 token; u8 sta_id; u8 tid; u8 scd_queue; u8 action; u8 aggregate; u8 tx_fifo; u8 window; __le16 ssn; __le16 reserved; } __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ /** * struct iwl_scd_txq_cfg_rsp * @token: taken from the command * @sta_id: station id from the command * @tid: tid from the command * @scd_queue: scd_queue from the command */ struct iwl_scd_txq_cfg_rsp { u8 token; u8 sta_id; u8 tid; u8 scd_queue; } __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ #endif /* __iwl_fw_api_tx_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/dbg.c b/sys/contrib/dev/iwlwifi/fw/dbg.c index fd3483fe50b2..ba73701781f5 100644 --- a/sys/contrib/dev/iwlwifi/fw/dbg.c +++ b/sys/contrib/dev/iwlwifi/fw/dbg.c @@ -1,3172 +1,3253 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" #include "debugfs.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "iwl-fh.h" /** * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump * * @fwrt_ptr: pointer to the buffer coming from fwrt * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the * transport's data. 
* @trans_len: length of the valid data in trans_ptr * @fwrt_len: length of the valid data in fwrt_ptr */ struct iwl_fw_dump_ptrs { struct iwl_trans_dump_data *trans_ptr; void *fwrt_ptr; u32 fwrt_len; }; #define RADIO_REG_MAX_READ 0x2ad static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { u8 *pos = (void *)(*dump_data)->data; int i; IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); for (i = 0; i < RADIO_REG_MAX_READ; i++) { u32 rd_cmd = RADIO_RSP_RD_CMD; rd_cmd |= i << RADIO_RSP_ADDR_POS; iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); pos++; } *dump_data = iwl_fw_error_next_data(*dump_data); iwl_trans_release_nic_access(fwrt->trans); } static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the RXF */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_D_SPACE + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_RD_FENCE_PTR + offset)); fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset)); /* Lock fence */ iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); /* Set fence pointer to the same place like WR pointer */ iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); /* Set fence offset */ iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (i = 0; i < fifo_len; i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, RXF_FIFO_RD_FENCE_INC + offset); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, int size, u32 offset, int fifo_num) { struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the FIFO */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = 
cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ for (i = 0; i < fifo_len / sizeof(u32); i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, fifo_data, fifo_len); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + fwrt->trans->trans_cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2); } iwl_trans_release_nic_access(fwrt->trans); } static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; int i, j; IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i); } /* Pull TXF data from LMAC2 */ if (fwrt->smem_cfg.num_lmacs > 1) { for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET, i + cfg->num_txfifo_entries); } } } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) continue; /* Add a TLV for the internal FIFOs */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); 
fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } iwl_trans_release_nic_access(fwrt->trans); } struct iwl_prph_range { u32 start, end; }; static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a004c0, .end = 0x00a004cc }, { .start = 0x00a004d8, .end = 0x00a004d8 }, { .start = 0x00a004e0, .end = 0x00a004f0 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01094 }, { .start = 0x00a01c00, .end = 0x00a01c20 }, { .start = 0x00a01c58, .end = 0x00a01c58 }, { .start = 0x00a01c7c, .end = 0x00a01c7c }, { .start = 0x00a01c28, .end = 0x00a01c54 }, { .start = 0x00a01c5c, .end = 0x00a01c5c }, { .start = 0x00a01c60, .end = 0x00a01cdc }, { .start = 0x00a01ce0, .end = 0x00a01d0c }, { .start = 0x00a01d18, .end = 0x00a01d20 }, { .start = 0x00a01d2c, .end = 0x00a01d30 }, { .start = 0x00a01d40, .end = 0x00a01d5c }, { .start = 0x00a01d80, .end = 0x00a01d80 }, { .start = 0x00a01d98, .end = 0x00a01d9c }, { .start = 0x00a01da8, .end = 0x00a01da8 }, { .start = 0x00a01db8, .end = 0x00a01df4 }, { .start = 0x00a01dc0, .end = 0x00a01dfc }, { .start = 0x00a01e00, .end = 0x00a01e2c }, { .start = 0x00a01e40, .end = 0x00a01e60 }, { .start = 0x00a01e68, .end = 0x00a01e6c }, { .start = 0x00a01e74, .end = 0x00a01e74 }, { .start = 0x00a01e84, .end = 0x00a01e90 }, { .start = 0x00a01e9c, .end = 0x00a01ec4 }, { .start = 0x00a01ed0, .end = 0x00a01ee0 }, { .start = 0x00a01f00, .end = 0x00a01f1c }, { .start = 0x00a01f44, .end = 0x00a01ffc }, { .start = 0x00a02000, .end = 0x00a02048 }, { .start = 0x00a02068, .end = 0x00a020f0 }, { .start = 0x00a02100, .end = 0x00a02118 }, { .start = 0x00a02140, .end = 0x00a0214c }, { .start = 0x00a02168, .end = 0x00a0218c }, { .start = 0x00a021c0, .end = 0x00a021c0 }, { .start = 0x00a02400, .end = 0x00a02410 }, { .start = 0x00a02418, .end = 0x00a02420 }, { .start = 0x00a02428, .end = 0x00a0242c }, { .start = 0x00a02434, .end = 0x00a02434 }, { .start = 0x00a02440, .end = 0x00a02460 }, { .start = 0x00a02468, .end = 0x00a024b0 }, { .start = 0x00a024c8, .end = 0x00a024cc }, { .start = 0x00a02500, .end = 0x00a02504 }, { .start = 0x00a0250c, .end = 0x00a02510 }, { .start = 0x00a02540, .end = 0x00a02554 }, { .start = 0x00a02580, .end = 0x00a025f4 }, { .start = 0x00a02600, .end = 0x00a0260c }, { .start = 0x00a02648, .end = 0x00a02650 }, { .start = 0x00a02680, .end = 0x00a02680 }, { .start = 0x00a026c0, 
.end = 0x00a026d0 }, { .start = 0x00a02700, .end = 0x00a0270c }, { .start = 0x00a02804, .end = 0x00a02804 }, { .start = 0x00a02818, .end = 0x00a0281c }, { .start = 0x00a02c00, .end = 0x00a02db4 }, { .start = 0x00a02df4, .end = 0x00a02fb0 }, { .start = 0x00a03000, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03048 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03074 }, { .start = 0x00a0307c, .end = 0x00a0307c }, { .start = 0x00a03080, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030bc }, { .start = 0x00a030c0, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05c00, .end = 0x00a05c18 }, { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, { .start = 0x00a02400, .end = 0x00a02758 }, { .start = 0x00a04764, .end = 0x00a0476c }, { .start = 0x00a04770, .end = 0x00a04774 }, { .start = 0x00a04620, .end = 0x00a04624 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a00034 }, { .start = 0x00a0003c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01050 }, { .start = 0x00a01058, .end = 0x00a01058 }, { .start = 0x00a01060, .end = 0x00a01070 }, { .start = 0x00a0108c, .end = 0x00a0108c }, { .start = 0x00a01c20, .end = 0x00a01c28 }, { .start = 0x00a01d10, .end = 0x00a01d10 }, { .start = 0x00a01e28, .end = 0x00a01e2c }, { .start = 0x00a01e60, .end = 0x00a01e60 }, { .start = 0x00a01e80, .end = 0x00a01e80 }, { .start = 0x00a01ea0, .end = 0x00a01ea0 }, { .start = 0x00a02000, .end = 0x00a0201c }, { .start = 0x00a02024, .end = 0x00a02024 }, { .start = 0x00a02040, .end = 0x00a02048 }, { .start = 0x00a020c0, .end = 0x00a020e0 }, { .start = 0x00a02400, .end = 0x00a02404 }, { .start = 0x00a0240c, .end = 0x00a02414 }, { .start = 0x00a0241c, .end = 0x00a0243c }, { .start = 0x00a02448, .end = 0x00a024bc }, { .start = 0x00a024c4, .end = 0x00a024cc }, { .start = 0x00a02508, .end = 0x00a02508 }, { .start = 0x00a02510, .end = 0x00a02514 }, { .start = 0x00a0251c, .end = 0x00a0251c }, { .start = 0x00a0252c, .end = 0x00a0255c }, { .start = 0x00a02564, .end = 0x00a025a0 }, { .start 
= 0x00a025a8, .end = 0x00a025b4 }, { .start = 0x00a025c0, .end = 0x00a025c0 }, { .start = 0x00a025e8, .end = 0x00a025f4 }, { .start = 0x00a02c08, .end = 0x00a02c18 }, { .start = 0x00a02c2c, .end = 0x00a02c38 }, { .start = 0x00a02c68, .end = 0x00a02c78 }, { .start = 0x00a03000, .end = 0x00a03000 }, { .start = 0x00a03010, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03044 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03070 }, { .start = 0x00a0307c, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030c0 }, { .start = 0x00a030c8, .end = 0x00a030f4 }, { .start = 0x00a03100, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a04560 }, { .start = 0x00a04570, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04590 }, { .start = 0x00a04598, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, { .start = 0x00a05c18, .end = 0x00a05c1c }, { .start = 0x00a0c000, .end = 0x00a0c018 }, { .start = 0x00a0c020, .end = 0x00a0c028 }, { .start = 0x00a0c038, .end = 0x00a0c094 }, { .start = 0x00a0c0c0, .end = 0x00a0c104 }, { .start = 0x00a0c10c, .end = 0x00a0c118 }, { .start = 0x00a0c150, .end = 0x00a0c174 }, { .start = 0x00a0c17c, .end = 0x00a0c188 }, { .start = 0x00a0c190, .end = 0x00a0c198 }, { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = { { .start = 0x00d03c00, .end = 0x00d03c64 }, { .start = 0x00d05c18, .end = 0x00d05c1c }, { .start = 0x00d0c000, .end = 0x00d0c174 }, }; static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { u32 i; for (i = 0; i < len_bytes; i += 4) *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; struct iwl_trans *trans = fwrt->trans; struct iwl_fw_error_dump_data **data = (struct iwl_fw_error_dump_data **)ptr; u32 i; if (!data) return; IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if (!iwl_trans_grab_nic_access(trans)) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, /* our range is inclusive, hence + 4 */ iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4, (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans); } /* * alloc_sgtable - allocates scallerlist table in the given size, * fills it with pages and returns it * @size: the size (in bytes) of the table */ static struct scatterlist 
*alloc_sgtable(int size) { int alloc_size, nents, i; struct page *new_page; struct scatterlist *iter; struct scatterlist *table; nents = DIV_ROUND_UP(size, PAGE_SIZE); table = kcalloc(nents, sizeof(*table), GFP_KERNEL); if (!table) return NULL; sg_init_table(table, nents); iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = alloc_page(GFP_KERNEL); if (!new_page) { /* release all previous allocated pages in the table */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = sg_page(iter); if (new_page) __free_page(new_page); } kfree(table); return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); size -= PAGE_SIZE; sg_set_page(iter, new_page, alloc_size, 0); } return table; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { u32 *prph_len = (u32 *)ptr; int i, num_bytes_in_chunk; if (!prph_len) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } } static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, void (*handler)(struct iwl_fw_runtime *, const struct iwl_prph_range *, u32, void *)) { u32 range_len; if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); } else if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); } else { range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); if (fwrt->trans->trans_cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, u32 len, u32 ofs, u32 type) { struct iwl_fw_error_dump_mem *dump_mem; if (!len) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)(*dump_data)->data; dump_mem->type = cpu_to_le32(type); dump_mem->offset = cpu_to_le32(ofs); iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); *dump_data = iwl_fw_error_next_data(*dump_data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs, dump_mem->data, len); IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
Type=%u\n", dump_mem->type); } #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) return 0; /* Count RXF2 size */ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); return fifo_len; } static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) goto dump_internal_txf; /* Count TXF sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; for (j = 0; j < mem_cfg->num_txfifo_entries; j++) ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len); } dump_internal_txf: if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) goto out; for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); out: return fifo_len; } static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **data) { int i; IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = fwrt->fw_paging_db[i].fw_paging_block; dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)(*data)->data; paging->index = cpu_to_le32(i); dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dma_sync_single_for_device(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, fwrt->fw_paging_db[i].fw_offs, paging->data, PAGING_BLOCK_SIZE); } } static struct iwl_fw_error_dump_file * iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump, struct iwl_fwrt_dump_data *data) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { sram_ofs = fwrt->trans->cfg->dccm_offset; sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); /* Dump SRAM only if no mem_tlvs */ if (!fwrt->fw->dbg.n_mem_tlv) ADD_LEN(file_len, sram_len, hdr_len); /* Make room for all mem types that exist */ ADD_LEN(file_len, smem_len, hdr_len); ADD_LEN(file_len, sram2_len, hdr_len); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ if (data->monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + data->desc->len; dump_file = vzalloc(file_len); if (!dump_file) return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->name, sizeof(dump_info->dev_human_readable) - 1); #if defined(__linux__) strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); #elif defined(__FreeBSD__) /* XXX TODO */ strncpy(dump_info->bus_human_readable, "", sizeof(dump_info->bus_human_readable) - 1); #endif dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; dump_info->lmac_err_id[0] = 
cpu_to_le32(fwrt->dump.lmac_err_id[0]); if (fwrt->smem_cfg.num_lmacs > 1) dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]); dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); dump_smem_cfg = (void *)dump_data->data; dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries); for (i = 0; i < MAX_NUM_LMAC; i++) { int j; u32 *txf_size = mem_cfg->lmac[i].txfifo_size; for (j = 0; j < TX_FIFO_MAX_NUM; j++) dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]); dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); } dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr); for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } dump_data = iwl_fw_error_next_data(dump_data); } /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); } if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + data->desc->len); dump_trig = (void *)dump_data->data; memcpy(dump_trig, &data->desc->trig_desc, sizeof(*dump_trig) + data->desc->len); dump_data = iwl_fw_error_next_data(dump_data); } /* In case we only want monitor dump, skip to dump trasport data */ if (data->monitor_only) goto out; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; if (!fwrt->fw->dbg.n_mem_tlv) iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type)); } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset, IWL_FW_ERROR_DUMP_MEM_SRAM); } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; size_t data_size = fwrt->trans->cfg->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr, dump_data->data + data_size, data_size); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); if (prph_len) iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; } /** 
* struct iwl_dump_ini_region_data - region data * @reg_tlv: region TLV * @dump_data: dump data */ struct iwl_dump_ini_region_data { struct iwl_ucode_tlv *reg_tlv; struct iwl_fwrt_dump_data *dump_data; }; static int iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); - if (prph_val == 0x5a5a5a5a) + if (iwl_trans_is_hw_error_value(prph_val)) return -EBUSY; *val++ = cpu_to_le32(prph_val); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; u32 indirect_rd_addr = WMAL_MRSPF_1; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]); u32 dphy_state; u32 dphy_addr; int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; dphy_addr = (reg->dev_addr.offset) ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { if (dphy_state == HBUS_TIMEOUT || (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != WFPM_PHYRF_STATE_ON) { *val++ = cpu_to_le32(WFPM_DPHY_OFF); continue; } iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr, WMAL_INDRCT_CMD(addr + i)); prph_val = iwl_read_prph_no_grab(fwrt->trans, indirect_rd_addr); *val++ = cpu_to_le32(prph_val); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; /* we shouldn't get here if the trans doesn't have read_config32 */ if (WARN_ON_ONCE(!trans->ops->read_config32)) return -EOPNOTSUPP; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { int ret; u32 tmp; ret = trans->ops->read_config32(trans, addr + i, &tmp); if (ret < 0) return ret; *val++ = cpu_to_le32(tmp); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM && fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, range->data, le32_to_cpu(reg->dev_addr.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, void *range_ptr, u32 range_len, int idx) { struct page *page = fwrt->fw_paging_db[idx].fw_paging_block; struct iwl_fw_ini_error_dump_range *range = range_ptr; dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); memcpy(range->data, page_address(page), page_size); 
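	/*
	 * Editorial note, not part of the original driver code: the memcpy
	 * above is deliberately bracketed by dma_sync_single_for_cpu() and
	 * dma_sync_single_for_device(), so the CPU sees a coherent snapshot
	 * of the DMA_BIDIRECTIONAL paging block and ownership is handed back
	 * to the device afterwards.  The streaming-DMA read pattern used here
	 * is, roughly (sketch only, reusing the names from the code above):
	 *
	 *	dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
	 *				DMA_BIDIRECTIONAL);
	 *	memcpy(range->data, page_address(page), page_size);
	 *	dma_sync_single_for_device(fwrt->trans->dev, addr, page_size,
	 *				   DMA_BIDIRECTIONAL);
	 */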
dma_sync_single_for_device(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range; u32 page_size; /* all paged index start from 1 to skip CSS section */ idx++; if (!fwrt->trans->trans_cfg->gen2) return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx); range = range_ptr; page_size = fwrt->trans->init_dram.paging[idx].size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, page_size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_dram_data *frag; u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx]; range->dram_base_addr = cpu_to_le64(frag->physical); range->range_data_size = cpu_to_le32(frag->size); memcpy(range->data, frag->block, frag->size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->internal_buffer.base_addr); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal_buffer.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->internal_buffer.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; int txf_num = cfg->num_txfifo_entries; int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]); if (!idx) { if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) { IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n", le32_to_cpu(reg->fifos.offset)); return false; } iter->internal_txf = 0; iter->fifo_size = 0; iter->fifo = -1; if (le32_to_cpu(reg->fifos.offset)) iter->lmac = 1; else iter->lmac = 0; } if (!iter->internal_txf) { for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) { iter->fifo_size = cfg->lmac[iter->lmac].txfifo_size[iter->fifo]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } iter->fifo--; } iter->internal_txf = 1; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) return false; for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) { iter->fifo_size = cfg->internal_txfifo_size[iter->fifo - txf_num]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } return false; } static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void 
*)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->fifos.offset), addr; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 registers_size = registers_num * sizeof(*reg_dump); __le32 *data; int i; if (!iwl_ini_txf_iter(fwrt, reg_data, idx)) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo); range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num); range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); /* * read txf registers. for each register, write to the dump the * register address and its value */ for (i = 0; i < registers_num; i++) { addr = le32_to_cpu(reg->addrs[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.hdr_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs, TXF_WR_PTR + offs); /* Dummy-read to advance the read pointer to the head */ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs); /* Read FIFO */ addr = TXF_READ_MODIFY_DATA + offs; data = (void *)reg_dump; for (i = 0; i < iter->fifo_size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, reg_dump, iter->fifo_size); out: iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } struct iwl_ini_rxf_data { u32 fifo_num; u32 size; u32 offset; }; static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, struct iwl_ini_rxf_data *data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 fid1 = le32_to_cpu(reg->fifos.fid[0]); u32 fid2 = le32_to_cpu(reg->fifos.fid[1]); u8 fifo_idx; if (!data) return; + memset(data, 0, sizeof(*data)); + /* make sure only one bit is set in only one fid */ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1, "fid1=%x, fid2=%x\n", fid1, fid2)) return; - memset(data, 0, sizeof(*data)); - if (fid1) { fifo_idx = ffs(fid1) - 1; if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n", fifo_idx)) return; data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; data->fifo_num = fifo_idx; } else { u8 max_idx; fifo_idx = ffs(fid2) - 1; if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP, SHARED_MEM_CFG_CMD, 0) <= 3) max_idx = 0; else max_idx = 1; if (WARN_ONCE(fifo_idx > max_idx, "invalid umac fifo idx %d", fifo_idx)) return; /* use bit 31 to distinguish between umac and lmac rxf while * parsing the dump */ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; switch (fifo_idx) { case 0: data->size = fwrt->smem_cfg.rxfifo2_size; data->offset = iwl_umac_prph(fwrt->trans, RXF_DIFF_FROM_PREV); break; case 1: data->size = fwrt->smem_cfg.rxfifo2_control_size; data->offset = iwl_umac_prph(fwrt->trans, RXF2C_DIFF_FROM_PREV); break; } } } static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; 
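	/*
	 * Editorial note (illustration, not from the original source): the
	 * fifo_num filled in by iwl_ini_get_rxf_data() (defined above, called
	 * just below) keeps the plain index for an lmac RXF1, while umac FIFOs
	 * get IWL_RXF_UMAC_BIT (bit 31) set so the dump parser can tell the
	 * two apart, e.g.:
	 *
	 *	fid1 = BIT(1)  ->  fifo_num = 1                     (lmac 1, RXF1)
	 *	fid2 = BIT(0)  ->  fifo_num = 0 | IWL_RXF_UMAC_BIT  (umac RXF2)
	 */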
struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_ini_rxf_data rxf_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->fifos.offset), addr; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 registers_size = registers_num * sizeof(*reg_dump); __le32 *data; int i; #if defined(__FreeBSD__) rxf_data.size = 0; #endif iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data); if (!rxf_data.size) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num); range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num); range->range_data_size = cpu_to_le32(rxf_data.size + registers_size); /* * read rxf registers. for each register, write to the dump the * register address and its value */ for (i = 0; i < registers_num; i++) { addr = le32_to_cpu(reg->addrs[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.hdr_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } offs = rxf_data.offset; /* Lock fence */ iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); /* Set fence pointer to the same place like WR pointer */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1); /* Set fence offset */ iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs, 0x0); /* Read FIFO */ addr = RXF_FIFO_RD_FENCE_INC + offs; data = (void *)reg_dump; for (i = 0; i < rxf_data.size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); out: iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_err_table *err_table = ®->err_table; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(err_table->base_addr) + le32_to_cpu(err_table->offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = err_table->size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(err_table->size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_special_device_memory *special_mem = ®->special_mem; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(special_mem->base_addr) + le32_to_cpu(special_mem->offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = special_mem->size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(special_mem->size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_data; int i; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->range_data_size = reg->dev_addr.size; for 
(i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); - if (prph_data == 0x5a5a5a5a) { + if (iwl_trans_is_hw_error_value(prph_data)) { iwl_trans_release_nic_access(fwrt->trans); return -EBUSY; } *val++ = cpu_to_le32(prph_data); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt; u32 pkt_len; if (!pkt) return -EIO; pkt_len = iwl_rx_packet_payload_len(pkt); memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr)); range->range_data_size = cpu_to_le32(pkt_len); memcpy(range->data, pkt->data, pkt_len); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { /* read the IMR memory and DMA it to SRAM */ struct iwl_fw_ini_error_dump_range *range = range_ptr; u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr; u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte; u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes; range->range_data_size = cpu_to_le32(size_to_dump); if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr, imr_curr_addr, size_to_dump)) { IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n"); return -1; } fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump; fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump; iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data, size_to_dump); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static void * iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } /** * mask_apply_and_normalize - applies mask on val and normalize the result * * The normalization is based on the first set bit in the mask * * @val: value * @mask: mask to apply and to normalize with */ static u32 mask_apply_and_normalize(u32 val, u32 mask) { return (val & mask) >> (ffs(mask) - 1); } static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, const struct iwl_fw_mon_reg *reg_info) { u32 val, offs; /* The header addresses of DBGCi is calculate as follows: * DBGC1 address + (0x100 * i) */ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100; if (!reg_info || !reg_info->addr || !reg_info->mask) return 0; val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs); return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask)); } static void * -iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, +iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; - u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - if (!iwl_trans_grab_nic_access(fwrt->trans)) { 
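		/*
		 * Editorial note (not in the original source): returning NULL
		 * from this error path propagates back through
		 * ops->fill_mem_hdr() in iwl_dump_ini_mem(), which then drops
		 * the whole region.  The monitor registers read below go
		 * through iwl_get_mon_reg(), which offsets each DBGC register
		 * from the DBGC1 base, e.g. (sketch mirroring the helper
		 * above):
		 *
		 *	offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100;
		 *	val  = iwl_read_prph_no_grab(fwrt->trans,
		 *				     reg_info->addr + offs);
		 */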
IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; } data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id, &addrs->write_ptr); if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { u32 wrt_ptr = le32_to_cpu(data->write_ptr); data->write_ptr = cpu_to_le32(wrt_ptr >> 2); } data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cycle_cnt); data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cur_frag); iwl_trans_release_nic_access(fwrt->trans); data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return data->data; } static void * iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } static void * iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } static void * iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, + /* no offset calculation later */ + IWL_FW_INI_ALLOCATION_ID_DBGC1, + mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } static void * iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_err_table_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->version = reg->err_table.version; return dump->data; } static void * iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_special_device_memory *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->type = reg->special_mem.type; dump->version = reg->special_mem.version; return dump->data; } static void * iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); } static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { if (fwrt->trans->trans_cfg->gen2) { if (fwrt->trans->init_dram.paging_cnt) return fwrt->trans->init_dram.paging_cnt - 1; else return 0; } return 
fwrt->num_of_paging_blk; } static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { if (!fw_mon->frags[i].size) break; ranges++; } return ranges; } static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 num_of_fifos = 0; while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos)) num_of_fifos++; return num_of_fifos; } static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { return 1; } static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { /* range is total number of pages need to copied from *IMR memory to SRAM and later from SRAM to DRAM */ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); return 0; } return((imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size)); } static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_error_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { int i; u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); /* start from 1 to skip CSS section */ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) { size += range_header_len; if (fwrt->trans->trans_cfg->gen2) size += fwrt->trans->init_dram.paging[i].size; else size += fwrt->fw_paging_db[i].fw_paging_size; } return size; } static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { struct iwl_dram_data *frag = &fw_mon->frags[i]; if (!frag->size) break; size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size; } if (size) size += sizeof(struct iwl_fw_ini_monitor_dump); return size; } static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size; size = le32_to_cpu(reg->internal_buffer.size); if (!size) return 0; size += sizeof(struct iwl_fw_ini_monitor_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = 
le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_monitor_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct iwl_fw_ini_error_dump_register); while (iwl_ini_txf_iter(fwrt, reg_data, size)) { size += fifo_hdr; if (!reg->fifos.hdr_only) size += iter->fifo_size; } if (!size) return 0; return size + sizeof(struct iwl_fw_ini_error_dump); } static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_ini_rxf_data rx_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct iwl_fw_ini_error_dump_register); if (reg->fifos.hdr_only) return size; #if defined(__FreeBSD__) rx_data.size = 0; #endif iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data); size += rx_data.size; return size; } static u32 iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->err_table.size); if (size) size += sizeof(struct iwl_fw_ini_err_table_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->special_mem.size); if (size) size += sizeof(struct iwl_fw_ini_special_device_memory) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 size = 0; if (!reg_data->dump_data->fw_pkt) return 0; size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt); if (size) size += sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { - u32 size = 0; u32 ranges = 0; u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); - return size; + return 0; } - size = imr_size; ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data); - if (!size && !ranges) { - IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges); + if (!ranges) { + IWL_ERR(fwrt, "WRT: ranges :=%d\n", ranges); return 0; } - size += sizeof(struct iwl_fw_ini_error_dump) + + imr_size += sizeof(struct iwl_fw_ini_error_dump) + ranges * sizeof(struct iwl_fw_ini_error_dump_range); - return size; + return imr_size; } /** * struct iwl_dump_ini_mem_ops - ini memory dump 
operations * @get_num_of_ranges: returns the number of memory ranges in the region. * @get_size: returns the total size of the region. * @fill_mem_hdr: fills region type specific headers and returns pointer to * the first range or NULL if failed to fill headers. * @fill_range: copies a given memory range into the dump. * Returns the size of the range or negative error value otherwise. */ struct iwl_dump_ini_mem_ops { u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); u32 (*get_size)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len); int (*fill_range)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range, u32 range_len, int idx); }; /** * iwl_dump_ini_mem * * Creates a dump tlv and copy a memory region into it. * Returns the size of the current dump tlv or 0 if failed * * @fwrt: fw runtime struct * @list: list to add the dump tlv to * @reg_data: memory region * @ops: memory dump operations */ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, struct iwl_dump_ini_region_data *reg_data, const struct iwl_dump_ini_mem_ops *ops) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_error_dump_data *tlv; struct iwl_fw_ini_error_dump_header *header; u32 type = reg->type; u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK); u32 num_of_ranges, i, size; u8 *range; u32 free_size; u64 header_size; u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE; IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n", dump_policy, id, type); if (le32_to_cpu(reg->hdr.version) >= 2) { u32 dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK); if (dump_policy == IWL_FW_INI_DUMP_VERBOSE && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM && !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } } if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || !ops->fill_range) { IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n"); return 0; } size = ops->get_size(fwrt, reg_data); if (size < sizeof(*header)) { IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n"); return 0; } entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size); if (!entry) return 0; entry->size = sizeof(*tlv) + size; tlv = (void *)entry->data; tlv->type = reg->type; tlv->sub_type = reg->sub_type; tlv->sub_type_ver = reg->sub_type_ver; tlv->reserved = reg->reserved; tlv->len = cpu_to_le32(size); num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data); header = (void *)tlv->data; header->region_id = cpu_to_le32(id); header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME); memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME); free_size = size; range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size); if (!range) { IWL_ERR(fwrt, "WRT: Failed to fill region header: id=%d, type=%d\n", id, type); goto 
out_err; } header_size = range - (u8 *)header; if (WARN(header_size > free_size, #if defined(__linux__) "header size %llu > free_size %d", header_size, free_size)) { #elif defined(__FreeBSD__) "header size %ju > free_size %d", (uintmax_t)header_size, free_size)) { #endif IWL_ERR(fwrt, "WRT: fill_mem_hdr used more than given free_size\n"); goto out_err; } free_size -= header_size; for (i = 0; i < num_of_ranges; i++) { int range_size = ops->fill_range(fwrt, reg_data, range, free_size, i); if (range_size < 0) { IWL_ERR(fwrt, "WRT: Failed to dump region: id=%d, type=%d\n", id, type); goto out_err; } if (WARN(range_size > free_size, "range_size %d > free_size %d", range_size, free_size)) { IWL_ERR(fwrt, "WRT: fill_raged used more than given free_size\n"); goto out_err; } free_size -= range_size; range = range + range_size; } list_add_tail(&entry->list, list); return entry->size; out_err: vfree(entry); return 0; } static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trigger, struct list_head *list) { struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_error_dump_data *tlv; struct iwl_fw_ini_dump_info *dump; struct iwl_dbg_tlv_node *node; struct iwl_fw_ini_dump_cfg_name *cfg_name; u32 size = sizeof(*tlv) + sizeof(*dump); u32 num_of_cfg_names = 0; u32 hw_type; list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { size += sizeof(*cfg_name); num_of_cfg_names++; } entry = vzalloc(sizeof(*entry) + size); if (!entry) return 0; entry->size = size; tlv = (void *)entry->data; tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); tlv->len = cpu_to_le32(size - sizeof(*tlv)); dump = (void *)tlv->data; dump->version = cpu_to_le32(IWL_INI_DUMP_VER); dump->time_point = trigger->time_point; dump->trigger_reason = trigger->trigger_reason; dump->external_cfg_state = cpu_to_le32(fwrt->trans->dbg.external_ini_cfg); dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); /* * Several HWs all have type == 0x42, so we'll override this value * according to the detected HW */ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); if (hw_type == IWL_AX210_HW_TYPE) { u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); u32 masked_bits = is_jacket | (is_cdb << 1); /* * The HW type depends on certain bits in this case, so add * these bits to the HW type. We won't have collisions since we * add these bits after the highest possible bit in the mask. 
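	 *
	 * Illustrative example (editorial note, not part of the original
	 * comment): with is_jacket = 0 and is_cdb = 1, masked_bits is 0b10,
	 * so the statement below or's 0b10 << IWL_AX210_HW_TYPE_ADDITION_SHIFT
	 * into hw_type.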
*/ hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT; } dump->hw_type = cpu_to_le32(hw_type); dump->rf_id_flavor = cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id)); dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id)); dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)); dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major); dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor); dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major); dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor); dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest); dump->regions_mask = trigger->regions_mask & ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk); dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag)); memcpy(dump->build_tag, fwrt->fw->human_readable, sizeof(dump->build_tag)); cfg_name = dump->cfg_names; dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names); list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)node->tlv.data; cfg_name->image_type = debug_info->image_type; cfg_name->cfg_name_len = cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME); memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name, sizeof(cfg_name->cfg_name)); cfg_name++; } /* add dump info TLV to the beginning of the list since it needs to be * the first TLV in the dump */ list_add(&entry->list, list); return entry->size; } +static u32 iwl_dump_ini_file_name_info(struct iwl_fw_runtime *fwrt, + struct list_head *list) +{ + struct iwl_fw_ini_dump_entry *entry; + struct iwl_dump_file_name_info *tlv; + u32 len = strnlen(fwrt->trans->dbg.dump_file_name_ext, + IWL_FW_INI_MAX_NAME); + + if (!fwrt->trans->dbg.dump_file_name_ext_valid) + return 0; + + entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + len); + if (!entry) + return 0; + + entry->size = sizeof(*tlv) + len; + + tlv = (void *)entry->data; + tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE); + tlv->len = cpu_to_le32(len); + memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len); + + /* add the dump file name extension tlv to the list */ + list_add_tail(&entry->list, list); + + fwrt->trans->dbg.dump_file_name_ext_valid = false; + + return entry->size; +} + static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { [IWL_FW_INI_REGION_INVALID] = {}, [IWL_FW_INI_REGION_INTERNAL_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_mon_smem_get_size, .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header, .fill_range = iwl_dump_ini_mon_smem_iter, }, [IWL_FW_INI_REGION_DRAM_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_mon_dram_ranges, .get_size = iwl_dump_ini_mon_dram_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header, .fill_range = iwl_dump_ini_mon_dram_iter, }, [IWL_FW_INI_REGION_TXF] = { .get_num_of_ranges = iwl_dump_ini_txf_ranges, .get_size = iwl_dump_ini_txf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_txf_iter, }, [IWL_FW_INI_REGION_RXF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_rxf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_rxf_iter, }, [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, 
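	/*
	 * Editorial note (not part of the original table): each entry wires
	 * one region type to the four callbacks consumed by
	 * iwl_dump_ini_mem(): get_num_of_ranges(), get_size(), fill_mem_hdr()
	 * and fill_range().  A hypothetical new region type would be hooked
	 * up the same way, e.g. (the EXAMPLE name is made up):
	 *
	 *	[IWL_FW_INI_REGION_EXAMPLE] = {
	 *		.get_num_of_ranges	= iwl_dump_ini_single_range,
	 *		.get_size		= iwl_dump_ini_mem_get_size,
	 *		.fill_mem_hdr		= iwl_dump_ini_mem_fill_header,
	 *		.fill_range		= iwl_dump_ini_dev_mem_iter,
	 *	},
	 */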
[IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, [IWL_FW_INI_REGION_RSP_OR_NOTIF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_fw_pkt_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_fw_pkt_iter, }, [IWL_FW_INI_REGION_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_dev_mem_iter, }, [IWL_FW_INI_REGION_PERIPHERY_MAC] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_mac_iter, }, [IWL_FW_INI_REGION_PERIPHERY_PHY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_phy_iter, }, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .get_num_of_ranges = iwl_dump_ini_paging_ranges, .get_size = iwl_dump_ini_paging_get_size, .fill_range = iwl_dump_ini_paging_iter, }, [IWL_FW_INI_REGION_CSR] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_csr_iter, }, [IWL_FW_INI_REGION_DRAM_IMR] = { .get_num_of_ranges = iwl_dump_ini_imr_ranges, .get_size = iwl_dump_ini_imr_get_size, .fill_mem_hdr = iwl_dump_ini_imr_fill_header, .fill_range = iwl_dump_ini_imr_iter, }, [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_config_iter, }, [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_special_mem_get_size, .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, .fill_range = iwl_dump_ini_special_mem_iter, }, [IWL_FW_INI_REGION_DBGI_SRAM] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mon_dbgi_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header, .fill_range = iwl_dump_ini_dbgi_sram_iter, }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point); struct iwl_dump_ini_region_data reg_data = { .dump_data = dump_data, }; struct iwl_dump_ini_region_data imr_reg_data = { .dump_data = dump_data, }; int i; u32 size = 0; u64 regions_mask = le64_to_cpu(trigger->regions_mask) & ~(fwrt->trans->dbg.unsupported_region_msk); BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < ARRAY_SIZE(fwrt->trans->dbg.active_regions)); for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { u32 reg_type; struct iwl_fw_ini_region_tlv *reg; if (!(BIT_ULL(i) & regions_mask)) continue; reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; if (!reg_data.reg_tlv) { IWL_WARN(fwrt, "WRT: Unassigned region id %d, skipping\n", i); continue; } reg = (void *)reg_data.reg_tlv->data; reg_type = reg->type; if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) 
continue; if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { IWL_WARN(fwrt, "WRT: trying to collect phy prph at time point: %d, skipping\n", tp_id); continue; } /* * DRAM_IMR can be collected only for FW/HW error timepoint * when fw is not alive. In addition, it must be collected * lastly as it overwrites SRAM that can possibly contain * debug data which also need to be collected. */ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) { if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT || tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR) imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; else IWL_INFO(fwrt, "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n", tp_id); /* continue to next region */ continue; } size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[reg_type]); } /* collect DRAM_IMR region in the last */ if (imr_reg_data.reg_tlv) size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]); - if (size) + if (size) { + size += iwl_dump_ini_file_name_info(fwrt, list); size += iwl_dump_ini_info(fwrt, trigger, list); + } return size; } static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trig) { enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 usec = le32_to_cpu(trig->ignore_consec); if (!iwl_trans_dbg_ini_valid(fwrt->trans) || tp_id == IWL_FW_INI_TIME_POINT_INVALID || tp_id >= IWL_FW_INI_TIME_POINT_NUM || iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec)) return false; return true; } static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_dump_file_hdr *hdr; u32 size; if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) || !le64_to_cpu(trigger->regions_mask)) return 0; entry = vzalloc(sizeof(*entry) + sizeof(*hdr)); if (!entry) return 0; entry->size = sizeof(*hdr); size = iwl_dump_ini_trigger(fwrt, dump_data, list); if (!size) { vfree(entry); return 0; } hdr = (void *)entry->data; hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); hdr->file_len = cpu_to_le32(size + entry->size); list_add(&entry->list, list); return le32_to_cpu(hdr->file_len); } static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc) { if (desc && desc != &iwl_dump_desc_assert) kfree(desc); fwrt->dump.lmac_err_id[0] = 0; if (fwrt->smem_cfg.num_lmacs > 1) fwrt->dump.lmac_err_id[1] = 0; fwrt->dump.umac_err_id = 0; } static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { struct iwl_fw_dump_ptrs fw_error_dump = {}; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; u32 dump_mask = fwrt->fw->dbg.dump_mask; dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data); if (!dump_file) return; if (dump_data->monitor_only) dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask, fwrt->sanitize_ops, fwrt->sanitize_ctx); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump.fwrt_len = file_len; if (fw_error_dump.trans_ptr) { file_len += fw_error_dump.trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.fwrt_ptr, fw_error_dump.fwrt_len, 0); if 
(fw_error_dump.trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.trans_ptr->data, fw_error_dump.trans_ptr->len, fw_error_dump.fwrt_len); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } vfree(fw_error_dump.fwrt_ptr); vfree(fw_error_dump.trans_ptr); } static void iwl_dump_ini_list_free(struct list_head *list) { while (!list_empty(list)) { struct iwl_fw_ini_dump_entry *entry = list_entry(list->next, typeof(*entry), list); list_del(&entry->list); vfree(entry); } } static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data) { dump_data->trig = NULL; kfree(dump_data->fw_pkt); dump_data->fw_pkt = NULL; } static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { #if defined(__linux__) LIST_HEAD(dump_list); #elif defined(__FreeBSD__) LINUX_LIST_HEAD(dump_list); #endif struct scatterlist *sg_dump_data; u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list); if (!file_len) return; sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { struct iwl_fw_ini_dump_entry *entry; int sg_entries = sg_nents(sg_dump_data); u32 offs = 0; list_for_each_entry(entry, &dump_list, list) { sg_pcopy_from_buffer(sg_dump_data, sg_entries, entry->data, entry->size, offs); offs += entry->size; } dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } iwl_dump_ini_list_free(&dump_list); } const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay) { struct iwl_fwrt_wk_data *wk_data; unsigned long idx; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_free_dump_desc(fwrt, desc); return 0; } /* * Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. 
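* * Illustrative worked example (assuming IWL_FW_RUNTIME_DUMP_WK_NUM is 5, as in runtime.h): with active_wks == 0x7 (workers 0-2 busy), ffz() returns 3 and slot 3 is claimed below via test_and_set_bit(); with all five slots busy (0x1f), ffz() returns 5 and the idx >= IWL_FW_RUNTIME_DUMP_WK_NUM check bails out with -EBUSY. The comparison against ~0UL below only guarantees that ffz() is never evaluated on a mask with no zero bit at all, where its result would be undefined.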
*/ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; wk_data = &fwrt->dump.wks[idx]; if (WARN_ON(wk_data->dump_data.desc)) iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc); wk_data->dump_data.desc = desc; wk_data->dump_data.monitor_only = monitor_only; IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type) { if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) return -EIO; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT && trig_type != FW_DBG_TRIGGER_DRIVER) return -EIO; iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, NULL); } else { struct iwl_fw_dump_desc *iwl_dump_error_desc; int ret; iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL); if (!iwl_dump_error_desc) return -ENOMEM; iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type); iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); if (ret) { kfree(iwl_dump_error_desc); return ret; } } iwl_trans_sync_nmi(fwrt->trans); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; if (!le16_to_cpu(trigger->occurrences)) return 0; if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); iwl_force_nmi(fwrt->trans); return 0; } trigger->occurrences = cpu_to_le16(occurrences); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; /* convert msec to usec */ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC); if (!desc) return -ENOMEM; desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
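/* * Illustrative usage sketch for iwl_fw_dbg_collect_trig() (not part of this change; wdev and missed are placeholder variables): callers typically fetch the trigger with iwl_fw_dbg_trigger_on() and pass a printf-style reason, e.g. * * trig = iwl_fw_dbg_trigger_on(fwrt, wdev, FW_DBG_TRIGGER_MISSED_BEACONS); * if (trig) * iwl_fw_dbg_collect_trig(fwrt, trig, "missed %d beacons", missed); * * The formatted reason is bounded by the 64-byte buffer below and ends up in the dump's trigger descriptor. */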
{ int ret, len = 0; char buf[64]; if (iwl_trans_dbg_ini_valid(fwrt->trans)) return 0; if (fmt) { va_list ap; buf[sizeof(buf) - 1] = '\0'; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); /* check for truncation */ if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) buf[sizeof(buf) - 1] = '\0'; len = strlen(buf) + 1; } ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger); if (ret) return ret; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) { u8 *ptr; int ret; int i; if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, .len = { le16_to_cpu(cmd->len), }, .data = { cmd->data, }, }; ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; ptr += sizeof(*cmd); ptr += le16_to_cpu(cmd->len); } fwrt->dump.conf = conf_id; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data) { struct iwl_dbg_dump_complete_cmd hcmd_data; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD), .data[0] = &hcmd_data, .len[0] = sizeof(hcmd_data), }; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) { hcmd_data.tp = cpu_to_le32(timepoint); hcmd_data.tp_data = cpu_to_le32(timepoint_data); iwl_trans_send_cmd(fwrt->trans, &hcmd); } } /* this function assumes dump_start was called beforehand and dump_end will be * called afterwards */ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) { struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; u32 policy; u32 time_point; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; if (!dump_data->trig) { IWL_ERR(fwrt, "dump trigger data is not set\n"); goto out; } if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n"); goto out; } /* there's no point in fw dump if the bus is dead */ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } iwl_fw_dbg_stop_restart_recording(fwrt, &params, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); else iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n"); iwl_fw_dbg_stop_restart_recording(fwrt, &params, false); policy = le32_to_cpu(dump_data->trig->apply_policy); time_point = le32_to_cpu(dump_data->trig->time_point); if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); } if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); out: if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_error_dump_data_free(dump_data); } else { iwl_fw_free_dump_desc(fwrt, dump_data->desc); dump_data->desc = NULL; } clear_bit(wk_idx, &fwrt->dump.active_wks); } int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, bool sync) { struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 occur, delay; unsigned long idx; if (!iwl_fw_ini_trigger_on(fwrt, trig)) { IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", tp_id); return -EINVAL; } delay = le32_to_cpu(trig->dump_delay); occur = le32_to_cpu(trig->occurrences); if (!occur) return 0; trig->occurrences = cpu_to_le32(--occur); /* Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. */ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; fwrt->dump.wks[idx].dump_data = *dump_data; if (sync) delay = 0; IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", tp_id, (u32)(delay / USEC_PER_MSEC)); if (sync) iwl_fw_dbg_collect_sync(fwrt, idx); else schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); return 0; } void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fwrt_wk_data *wks = container_of(work, typeof(*wks), wk.work); struct iwl_fw_runtime *fwrt = container_of(wks, typeof(*fwrt), dump.wks[wks->idx]); /* assumes the op mode mutex is locked in dump_start since * iwl_fw_dbg_collect_sync can't run in parallel */ if (fwrt->ops && fwrt->ops->dump_start) fwrt->ops->dump_start(fwrt->ops_ctx); iwl_fw_dbg_collect_sync(fwrt, wks->idx); if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) { const struct iwl_cfg *cfg = fwrt->trans->cfg; if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) return; if (!fwrt->dump.d3_debug_data) { fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL); if (!fwrt->dump.d3_debug_data) { IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n"); return; } } /* if the buffer holds previous debug data it is overwritten */ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt) { int i; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) iwl_fw_dbg_collect_sync(fwrt, i); iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync); static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) { struct iwl_dbg_suspend_resume_cmd cmd = { .operation = suspend ? 
cpu_to_le32(DBGC_SUSPEND_CMD) : cpu_to_le32(DBGC_RESUME_CMD), }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME), .data[0] = &cmd, .len[0] = sizeof(cmd), }; return iwl_trans_send_cmd(trans, &hcmd); } static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); return; } if (params) { params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); } iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); /* wait for the DBGC to finish writing the internal buffer to DRAM to * avoid halting the HW while writing */ usleep_range(700, 1000); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); } static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (!params) return -EIO; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } return 0; } +int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) +{ + struct iwl_mvm_marker marker = { + .dw_len = sizeof(struct iwl_mvm_marker) / 4, + .marker_id = MARKER_ID_SYNC_CLOCK, + }; + struct iwl_host_cmd hcmd = { + .flags = CMD_ASYNC, + .id = WIDE_ID(LONG_GROUP, MARKER_CMD), + .dataflags = {}, + }; + struct iwl_mvm_marker_rsp *resp; + int cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(LONG_GROUP, MARKER_CMD), + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + if (cmd_ver == 1) { + /* the real timestamp is taken from the ftrace clock + * this is for finding the match between fw and kernel logs + */ + marker.timestamp = cpu_to_le64(fwrt->timestamp.seq++); + } else if (cmd_ver == 2) { + marker.timestamp = cpu_to_le64(ktime_get_boottime_ns()); + } else { + IWL_DEBUG_INFO(fwrt, + "Invalid version of Marker CMD. 
Ver = %d\n", + cmd_ver); + return -EINVAL; + } + + hcmd.data[0] = &marker; + hcmd.len[0] = sizeof(marker); + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + + if (cmd_ver > 1 && hcmd.resp_pkt) { + resp = (void *)hcmd.resp_pkt->data; + IWL_DEBUG_INFO(fwrt, "FW GP2 time: %u\n", + le32_to_cpu(resp->gp2)); + } + + return ret; +} + void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) { int ret __maybe_unused = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) + IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) { + if (stop) + iwl_fw_send_timestamp_marker_cmd(fwrt); ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); - else if (stop) + } else if (stop) { iwl_fw_dbg_stop_recording(fwrt->trans, params); - else + } else { ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); + } #ifdef CONFIG_IWLWIFI_DEBUGFS if (!ret) { if (stop) fwrt->trans->dbg.rec_on = false; else iwl_fw_set_dbg_rec_on(fwrt); } #endif } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/sys/contrib/dev/iwlwifi/fw/dbg.h b/sys/contrib/dev/iwlwifi/fw/dbg.h index e240ef2e1c96..8340bb991135 100644 --- a/sys/contrib/dev/iwlwifi/fw/dbg.h +++ b/sys/contrib/dev/iwlwifi/fw/dbg.h @@ -1,334 +1,350 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_dbg_h__ #define __iwl_fw_dbg_h__ #include #include #include "runtime.h" #include "iwl-prph.h" #include "iwl-io.h" #include "file.h" #include "error-dump.h" #include "api/commands.h" #include "api/dbg-tlv.h" #include "api/alive.h" /** * struct iwl_fw_dump_desc - describes the dump * @len: length of trig_desc->data * @trig_desc: the description of the dump */ struct iwl_fw_dump_desc { size_t len; /* must be last */ struct iwl_fw_error_dump_trigger_desc trig_desc; }; /** * struct iwl_fw_dbg_params - register values to restore * @in_sample: DBGC_IN_SAMPLE value * @out_ctrl: DBGC_OUT_CTRL value */ struct iwl_fw_dbg_params { u32 in_sample; u32 out_ctrl; }; extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, bool sync); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...)
__printf(3, 4); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \ unlikely(__dbg_trigger); \ }) static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) { return fw->dbg.trigger_tlv[id]; } #define iwl_fw_dbg_get_trigger(fw, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_get_trigger((fw), (id)); \ }) static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, struct wireless_dev *wdev) { u32 trig_vif = le32_to_cpu(trig->vif_type); return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || wdev->iftype == trig_vif; } static inline bool iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && (fwrt->dump.conf == FW_DBG_INVALID || (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); } static inline bool iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec) { unsigned long wind_jiff = usecs_to_jiffies(dis_usec); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, jiffies))) return true; fwrt->dump.non_collect_ts_start[id] = jiffies; return false; } static inline bool iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC; if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; } return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, const enum iwl_fw_dbg_trigger id) { struct iwl_fw_dbg_trigger_tlv *trig; if (iwl_trans_dbg_ini_valid(fwrt->trans)) return NULL; if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) return NULL; trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id); if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig)) return NULL; return trig; } #define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ }) static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trigger) return; if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) return; #if defined(__linux__) iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); #elif defined(__FreeBSD__) iwl_fw_dbg_collect_trig(fwrt, trigger, ""); #endif } #define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) { if (fwrt->cur_fw_img == IWL_UCODE_REGULAR && (fwrt->fw->dbg.dest_tlv || fwrt->trans->dbg.ini_dest != IWL_FW_INI_LOCATION_INVALID)) fwrt->trans->dbg.rec_on = true; } #endif static inline void iwl_fw_dump_conf_clear(struct 
iwl_fw_runtime *fwrt) { fwrt->dump.conf = FW_DBG_INVALID; } void iwl_fw_error_dump_wk(struct work_struct *work); static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type) { return (fwrt->fw->dbg.dump_mask & BIT(type)); } static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && fwrt->trans->cfg->d3_debug_data_length && fwrt->ops && fwrt->ops->d3_debug_enable && fwrt->ops->d3_debug_enable(fwrt->ops_ctx) && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); } static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->trans_cfg->gen2 && fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt) { int i; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) flush_delayed_work(&fwrt->dump.wks[i].wk); } +int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt); + #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) { fwrt->timestamp.delay = 0; cancel_delayed_work_sync(&fwrt->timestamp.wk); } void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) { cancel_delayed_work_sync(&fwrt->timestamp.wk); } static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) { if (!fwrt->timestamp.delay) return; schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay)); } #else static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) {} static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans, u32 lmac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_LMAC1) || WARN_ON(trans->dbg.lmac_error_event_table[0] != lmac_error_event_table)) trans->dbg.lmac_error_event_table[0] = lmac_error_event_table; } static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, u32 umac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC) || WARN_ON(trans->dbg.umac_error_event_table != umac_error_event_table)) trans->dbg.umac_error_event_table = umac_error_event_table; } static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) { enum iwl_fw_ini_time_point tp_id; if (!iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0); return; } if (fwrt->trans->dbg.hw_error) { tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR; fwrt->trans->dbg.hw_error = false; } else { tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; } _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, struct iwl_lmac_alive *lmac, struct iwl_umac_alive *umac) { if (lmac) { fwrt->dump.fw_ver.type = 
lmac->ver_type; fwrt->dump.fw_ver.subtype = lmac->ver_subtype; fwrt->dump.fw_ver.lmac_major = le32_to_cpu(lmac->ucode_major); fwrt->dump.fw_ver.lmac_minor = le32_to_cpu(lmac->ucode_minor); } if (umac) { fwrt->dump.fw_ver.umac_major = le32_to_cpu(umac->umac_major); fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor); } } void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); + +#define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \ + IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__) + +#define IWL_FW_CHECK(_obj, _cond, _fmt, ...) \ + ({ \ + bool __cond = (_cond); \ + \ + if (unlikely(__cond)) \ + IWL_FW_CHECK_FAILED(_obj, _fmt, __VA_ARGS__); \ + \ + unlikely(__cond); \ + }) + #endif /* __iwl_fw_dbg_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/debugfs.c b/sys/contrib/dev/iwlwifi/fw/debugfs.c index 43e997283db0..3cdbc6ac7ae5 100644 --- a/sys/contrib/dev/iwlwifi/fw/debugfs.c +++ b/sys/contrib/dev/iwlwifi/fw/debugfs.c @@ -1,409 +1,398 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2020 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include "api/commands.h" #include "debugfs.h" #include "dbg.h" #include #define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ struct dbgfs_##name##_data { \ argtype *arg; \ bool read_done; \ ssize_t rlen; \ char rbuf[buflen]; \ }; \ static int _iwl_dbgfs_##name##_open(struct inode *inode, \ struct file *file) \ { \ struct dbgfs_##name##_data *data; \ \ data = kzalloc(sizeof(*data), GFP_KERNEL); \ if (!data) \ return -ENOMEM; \ \ data->read_done = false; \ data->arg = inode->i_private; \ file->private_data = data; \ \ return 0; \ } #define FWRT_DEBUGFS_READ_WRAPPER(name) \ static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \ char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ struct dbgfs_##name##_data *data = file->private_data; \ \ if (!data->read_done) { \ data->read_done = true; \ data->rlen = iwl_dbgfs_##name##_read(data->arg, \ sizeof(data->rbuf),\ data->rbuf); \ } \ \ if (data->rlen < 0) \ return data->rlen; \ return simple_read_from_buffer(user_buf, count, ppos, \ data->rbuf, data->rlen); \ } static int _iwl_dbgfs_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } #define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = _iwl_dbgfs_##name##_read, \ .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos) \ { \ argtype *arg = \ ((struct dbgfs_##name##_data *)file->private_data)->arg;\ char buf[buflen] = {}; \ size_t buf_size = min(count, sizeof(buf) - 1); \ \ if (copy_from_user(buf, user_buf, buf_size)) \ return -EFAULT; \ \ return iwl_dbgfs_##name##_write(arg, buf, buf_size); \ } #define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_READ_WRAPPER(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .read = 
_iwl_dbgfs_##name##_read, \ .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \ FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = _iwl_dbgfs_##name##_write, \ .open = _iwl_dbgfs_##name##_open, \ .llseek = generic_file_llseek, \ .release = _iwl_dbgfs_release, \ } #define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, fwrt, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) -static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) -{ - struct iwl_mvm_marker marker = { - .dw_len = sizeof(struct iwl_mvm_marker) / 4, - .marker_id = MARKER_ID_SYNC_CLOCK, - - /* the real timestamp is taken from the ftrace clock - * this is for finding the match between fw and kernel logs - */ - .timestamp = cpu_to_le64(fwrt->timestamp.seq++), - }; - - struct iwl_host_cmd hcmd = { - .id = MARKER_CMD, - .flags = CMD_ASYNC, - .data[0] = &marker, - .len[0] = sizeof(marker), - }; - - return iwl_trans_send_cmd(fwrt->trans, &hcmd); -} - static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { struct iwl_dbg_host_event_cfg_cmd event_cfg; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG), .flags = CMD_ASYNC, .data[0] = &event_cfg, .len[0] = sizeof(event_cfg), }; u32 enabled_severities; int ret = kstrtou32(buf, 10, &enabled_severities); if (ret < 0) return ret; event_cfg.enabled_severities = cpu_to_le32(enabled_severities); ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); IWL_INFO(fwrt, "sent host event cfg with enabled_severities: %u, ret: %d\n", enabled_severities, ret); return ret ?: count; } FWRT_DEBUGFS_WRITE_FILE_OPS(enabled_severities, 16); static void iwl_fw_timestamp_marker_wk(struct work_struct *work) { int ret; struct iwl_fw_runtime *fwrt = container_of(work, struct iwl_fw_runtime, timestamp.wk.work); unsigned long delay = fwrt->timestamp.delay; ret = iwl_fw_send_timestamp_marker_cmd(fwrt); if (!ret && delay) schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(delay)); else IWL_INFO(fwrt, "stopping timestamp_marker, ret: %d, delay: %u\n", ret, jiffies_to_msecs(delay) / 1000); } void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) { IWL_INFO(fwrt, "starting timestamp_marker trigger with delay: %us\n", delay); iwl_fw_cancel_timestamp(fwrt); fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000); schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay)); } static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { int ret; u32 delay; ret = kstrtou32(buf, 10, &delay); if (ret < 0) return ret; iwl_fw_trigger_timestamp(fwrt, delay); return count; } static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt, size_t size, char *buf) { u32 delay_secs = 
jiffies_to_msecs(fwrt->timestamp.delay) / 1000; return scnprintf(buf, size, "%d\n", delay_secs); } FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16); struct hcmd_write_data { __be32 cmd_id; __be32 flags; __be16 length; u8 data[]; } __packed; static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2; size_t data_size = (count - 1) / 2; int ret; struct hcmd_write_data *data; struct iwl_host_cmd hcmd = { .len = { 0, }, .data = { NULL, }, }; if (fwrt->ops && fwrt->ops->fw_running && !fwrt->ops->fw_running(fwrt->ops_ctx)) return -EIO; if (count < header_size + 1 || count > 1024 * 4) return -EINVAL; data = kmalloc(data_size, GFP_KERNEL); if (!data) return -ENOMEM; ret = hex2bin((u8 *)data, buf, data_size); if (ret) goto out; hcmd.id = be32_to_cpu(data->cmd_id); hcmd.flags = be32_to_cpu(data->flags); hcmd.len[0] = be16_to_cpu(data->length); hcmd.data[0] = data->data; if (count != header_size + hcmd.len[0] * 2 + 1) { IWL_ERR(fwrt, "host command data size does not match header length\n"); ret = -EINVAL; goto out; } if (fwrt->ops && fwrt->ops->send_hcmd) ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd); else ret = -EPERM; if (ret < 0) goto out; if (hcmd.flags & CMD_WANT_SKB) iwl_free_resp(&hcmd); out: kfree(data); return ret ?: count; } FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512); static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt, size_t size, char *buf) { return scnprintf(buf, size, "0x%08x\n", fwrt->trans->dbg.domains_bitmap); } FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20); struct iwl_dbgfs_fw_info_priv { struct iwl_fw_runtime *fwrt; }; struct iwl_dbgfs_fw_info_state { loff_t pos; }; static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct iwl_dbgfs_fw_info_state *state = v; struct iwl_dbgfs_fw_info_priv *priv = seq->private; const struct iwl_fw *fw = priv->fwrt->fw; *pos = ++state->pos; - if (*pos >= fw->ucode_capa.n_cmd_versions) + if (*pos >= fw->ucode_capa.n_cmd_versions) { + kfree(state); return NULL; + } return state; } static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq, void *v) { kfree(v); } static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos) { struct iwl_dbgfs_fw_info_priv *priv = seq->private; const struct iwl_fw *fw = priv->fwrt->fw; struct iwl_dbgfs_fw_info_state *state; if (*pos >= fw->ucode_capa.n_cmd_versions) return NULL; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; state->pos = *pos; return state; }; static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v) { struct iwl_dbgfs_fw_info_state *state = v; struct iwl_dbgfs_fw_info_priv *priv = seq->private; const struct iwl_fw *fw = priv->fwrt->fw; const struct iwl_fw_cmd_version *ver; u32 cmd_id; - - if (!state->pos) + int has_capa; + + if (!state->pos) { + seq_puts(seq, "fw_capa:\n"); + has_capa = fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT) ? 
1 : 0; + seq_printf(seq, + " %d: %d\n", + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT, + has_capa); seq_puts(seq, "fw_api_ver:\n"); + } ver = &fw->ucode_capa.cmd_versions[state->pos]; cmd_id = WIDE_ID(ver->group, ver->cmd); seq_printf(seq, " 0x%04x:\n", cmd_id); seq_printf(seq, " name: %s\n", iwl_get_cmd_string(priv->fwrt->trans, cmd_id)); seq_printf(seq, " cmd_ver: %d\n", ver->cmd_ver); seq_printf(seq, " notif_ver: %d\n", ver->notif_ver); return 0; } static const struct seq_operations iwl_dbgfs_info_seq_ops = { .start = iwl_dbgfs_fw_info_seq_start, .next = iwl_dbgfs_fw_info_seq_next, .stop = iwl_dbgfs_fw_info_seq_stop, .show = iwl_dbgfs_fw_info_seq_show, }; static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp) { struct iwl_dbgfs_fw_info_priv *priv; priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops, sizeof(*priv)); if (!priv) return -ENOMEM; priv->fwrt = inode->i_private; return 0; } static const struct file_operations iwl_dbgfs_fw_info_ops = { .owner = THIS_MODULE, .open = iwl_dbgfs_fw_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400); } diff --git a/sys/contrib/dev/iwlwifi/fw/dump.c b/sys/contrib/dev/iwlwifi/fw/dump.c index b90f1e9ce691..5876f917e536 100644 --- a/sys/contrib/dev/iwlwifi/fw/dump.c +++ b/sys/contrib/dev/iwlwifi/fw/dump.c @@ -1,458 +1,533 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" #include "debugfs.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "pnvm.h" +#define FW_ASSERT_LMAC_FATAL 0x70 +#define FW_ASSERT_LMAC2_FATAL 0x72 +#define FW_ASSERT_UMAC_FATAL 0x71 +#define UMAC_RT_NMI_LMAC2_FATAL 0x72 +#define RT_NMI_INTERRUPT_OTHER_LMAC_FATAL 0x73 +#define FW_ASSERT_NMI_UNKNOWN 0x84 + /* * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
*/ struct iwl_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 trm_hw_status0; /* TRM HW status */ u32 trm_hw_status1; /* TRM HW status */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 bcon_time; /* beacon timer */ u32 tsf_low; /* network timestamp function timer */ u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ u32 fw_rev_type; /* firmware revision type */ u32 major; /* uCode version major */ u32 minor; /* uCode version minor */ u32 hw_ver; /* HW Silicon version */ u32 brd_ver; /* HW board version */ u32 log_pc; /* log program counter */ u32 frame_ptr; /* frame pointer */ u32 stack_ptr; /* stack pointer */ u32 hcmd; /* last host command header */ u32 isr0; /* isr status register LMPM_NIC_ISR0: * rxtx_flag */ u32 isr1; /* isr status register LMPM_NIC_ISR1: * host_flag */ u32 isr2; /* isr status register LMPM_NIC_ISR2: * enc_flag */ u32 isr3; /* isr status register LMPM_NIC_ISR3: * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ u32 last_cmd_id; /* last HCMD id handled by the firmware */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ u32 l2p_mhvalid; /* L2pMhValidBits */ u32 l2p_addr_match; /* L2pAddrMatchStat */ u32 lmpm_pmg_sel; /* indicate which clocks are turned on * (LMPM_PMG_SEL) */ u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; /* * UMAC error struct - relevant starting from family 8000 chip. * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
*/ struct iwl_umac_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ u32 data3; /* error-specific data */ u32 umac_major; u32 umac_minor; u32 frame_pointer; /* core register 27*/ u32 stack_pointer; /* core register 28 */ u32 cmd_header; /* latest host cmd sent to UMAC */ u32 nic_isr_pref; /* ISR status register */ } __packed; #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) +static bool iwl_fwrt_if_errorid_other_cpu(u32 err_id) +{ + err_id &= 0xFF; + + if ((err_id >= FW_ASSERT_LMAC_FATAL && + err_id <= RT_NMI_INTERRUPT_OTHER_LMAC_FATAL) || + err_id == FW_ASSERT_NMI_UNKNOWN) + return true; + return false; +} + static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt) { struct iwl_trans *trans = fwrt->trans; struct iwl_umac_error_event_table table = {}; u32 base = fwrt->trans->dbg.umac_error_event_table; char pnvm_name[MAX_PNVM_NAME]; if (!base && !(fwrt->trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC)) return; iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) fwrt->dump.umac_err_id = table.error_id; + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.umac_err_id) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.umac_err_id); + } + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n", fwrt->trans->status, table.valid); } if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) == FW_SYSASSERT_PNVM_MISSING) { iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); IWL_ERR(fwrt, "PNVM data is missing, please install %s\n", pnvm_name); } IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id, iwl_fw_lookup_assert_desc(table.error_id)); IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1); IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2); IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1); IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2); IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1); IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2); IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3); IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major); IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor); IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer); IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer); IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header); IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref); } static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num) { struct iwl_trans *trans = fwrt->trans; struct iwl_error_event_table table = {}; u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num]; if (fwrt->cur_fw_img == IWL_UCODE_INIT) { if (!base) base = fwrt->fw->init_errlog_ptr; } else { if (!base) base = fwrt->fw->inst_errlog_ptr; } - if (base < 0x400000) { + if ((fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && !base) || + (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && base < 0x400000)) { IWL_ERR(fwrt, "Not valid error log pointer 0x%08X for %s 
uCode\n", base, (fwrt->cur_fw_img == IWL_UCODE_INIT) ? "Init" : "RT"); return; } /* check if there is a HW error */ val = iwl_trans_read_mem32(trans, base); - if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) { + if (iwl_trans_is_hw_error_value(val)) { int err; IWL_ERR(trans, "HW error, resetting before reading\n"); /* reset the device */ err = iwl_trans_sw_reset(trans, true); if (err) return; err = iwl_finish_nic_init(trans); if (err) return; } iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) fwrt->dump.lmac_err_id[lmac_num] = table.error_id; + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.lmac_err_id[lmac_num]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.lmac_err_id[lmac_num]); + } + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n", fwrt->trans->status, table.valid); } /* Do not change this output - scripts rely on it */ IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version); IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id, iwl_fw_lookup_assert_desc(table.error_id)); IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2); IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1); IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2); IWL_ERR(fwrt, "0x%08X | data1\n", table.data1); IWL_ERR(fwrt, "0x%08X | data2\n", table.data2); IWL_ERR(fwrt, "0x%08X | data3\n", table.data3); IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time); IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low); IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi); IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1); IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2); IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type); IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major); IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor); IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver); IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver); IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd); IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0); IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1); IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2); IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3); IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4); IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id); IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event); IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control); IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration); IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid); IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match); IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp); IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler); } /* * TCM error struct. * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! 
*/ struct iwl_tcm_error_event_table { u32 valid; u32 error_id; u32 blink2; u32 ilink1; u32 ilink2; u32 data1, data2, data3; u32 logpc; u32 frame_pointer; u32 stack_pointer; u32 msgid; u32 isr; u32 hw_status[5]; u32 sw_status[1]; u32 reserved[4]; } __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_tcm_error_event_table table = {}; u32 base = fwrt->trans->dbg.tcm_error_event_table[idx]; int i; u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 : IWL_ERROR_EVENT_TABLE_TCM1; if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag)) return; iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + if (table.valid) + fwrt->dump.tcm_err_id[idx] = table.error_id; + + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.tcm_err_id[idx]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.tcm_err_id[idx]); + } + IWL_ERR(fwrt, "TCM%d status:\n", idx + 1); IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2); IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1); IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2); IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1); IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2); IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3); IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc); IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer); IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer); IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid); IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr); for (i = 0; i < ARRAY_SIZE(table.hw_status); i++) IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n", table.hw_status[i], i); for (i = 0; i < ARRAY_SIZE(table.sw_status); i++) IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n", table.sw_status[i], i); } /* * RCM error struct. * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. As it is * read with u32-sized accesses, any members with a different size * need to be ordered correctly though! */ struct iwl_rcm_error_event_table { u32 valid; u32 error_id; u32 blink2; u32 ilink1; u32 ilink2; u32 data1, data2, data3; u32 logpc; u32 frame_pointer; u32 stack_pointer; u32 msgid; u32 isr; u32 frame_hw_status; u32 mbx_lmac_to_rcm_req; u32 mbx_rcm_to_lmac_req; u32 mh_ctl; u32 mh_addr1_lo; u32 mh_info; u32 mh_err; u32 reserved[3]; } __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_rcm_error_event_table table = {}; u32 base = fwrt->trans->dbg.rcm_error_event_table[idx]; u32 flag = idx ? 
IWL_ERROR_EVENT_TABLE_RCM2 : IWL_ERROR_EVENT_TABLE_RCM1; if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag)) return; iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + if (table.valid) + fwrt->dump.rcm_err_id[idx] = table.error_id; + + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.rcm_err_id[idx]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.rcm_err_id[idx]); + } + IWL_ERR(fwrt, "RCM%d status:\n", idx + 1); IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2); IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1); IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2); IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1); IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2); IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3); IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc); IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer); IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer); IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid); IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr); IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status); IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n", table.mbx_lmac_to_rcm_req); IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n", table.mbx_rcm_to_lmac_req); IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl); IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo); IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info); IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err); } static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) { struct iwl_trans *trans = fwrt->trans; u32 error, data1; if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { error = UMAG_SB_CPU_2_STATUS; data1 = UMAG_SB_CPU_1_STATUS; } else if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { error = SB_CPU_2_STATUS; data1 = SB_CPU_1_STATUS; } else { return; } - error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS); + error = iwl_read_umac_prph(trans, error); IWL_ERR(trans, "IML/ROM dump:\n"); if (error & 0xFFFF0000) IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16); IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error); IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n", iwl_read_umac_prph(trans, data1)); if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n", iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG)); } #define FSEQ_REG(x) { .addr = (x), .str = #x, } static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt) { struct iwl_trans *trans = fwrt->trans; int i; struct { u32 addr; const char *str; } fseq_regs[] = { FSEQ_REG(FSEQ_ERROR_CODE), FSEQ_REG(FSEQ_TOP_INIT_VERSION), FSEQ_REG(FSEQ_CNVIO_INIT_VERSION), FSEQ_REG(FSEQ_OTP_VERSION), FSEQ_REG(FSEQ_TOP_CONTENT_VERSION), FSEQ_REG(FSEQ_ALIVE_TOKEN), FSEQ_REG(FSEQ_CNVI_ID), FSEQ_REG(FSEQ_CNVR_ID), FSEQ_REG(CNVI_AUX_MISC_CHIP), FSEQ_REG(CNVR_AUX_MISC_CHIP), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR), + FSEQ_REG(FSEQ_PREV_CNVIO_INIT_VERSION), + FSEQ_REG(FSEQ_WIFI_FSEQ_VERSION), + FSEQ_REG(FSEQ_BT_FSEQ_VERSION), + FSEQ_REG(FSEQ_CLASS_TP_VERSION), }; if (!iwl_trans_grab_nic_access(trans)) return; IWL_ERR(fwrt, "Fseq Registers:\n"); for (i = 
0; i < ARRAY_SIZE(fseq_regs); i++) IWL_ERR(fwrt, "0x%08X | %s\n", iwl_read_prph_no_grab(trans, fseq_regs[i].addr), fseq_regs[i].str); iwl_trans_release_nic_access(trans); } void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) { + struct iwl_pc_data *pc_data; + u8 count; + if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "DEVICE_ENABLED bit is not set. Aborting dump.\n"); return; } iwl_fwrt_dump_lmac_error_log(fwrt, 0); if (fwrt->trans->dbg.lmac_error_event_table[1]) iwl_fwrt_dump_lmac_error_log(fwrt, 1); iwl_fwrt_dump_umac_error_log(fwrt); iwl_fwrt_dump_tcm_error_log(fwrt, 0); iwl_fwrt_dump_rcm_error_log(fwrt, 0); - iwl_fwrt_dump_tcm_error_log(fwrt, 1); - iwl_fwrt_dump_rcm_error_log(fwrt, 1); + if (fwrt->trans->dbg.tcm_error_event_table[1]) + iwl_fwrt_dump_tcm_error_log(fwrt, 1); + if (fwrt->trans->dbg.rcm_error_event_table[1]) + iwl_fwrt_dump_rcm_error_log(fwrt, 1); iwl_fwrt_dump_iml_error_log(fwrt); iwl_fwrt_dump_fseq_regs(fwrt); + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + pc_data = fwrt->trans->dbg.pc_data; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return; + for (count = 0; count < fwrt->trans->dbg.num_pc; + count++, pc_data++) + IWL_ERR(fwrt, "%s: 0x%x\n", + pc_data->pc_name, + iwl_read_prph_no_grab(fwrt->trans, + pc_data->pc_address)); + iwl_trans_release_nic_access(fwrt->trans); + } if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH); IWL_ERR(fwrt, "Function Scratch status:\n"); IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch); } } IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs); diff --git a/sys/contrib/dev/iwlwifi/fw/error-dump.h b/sys/contrib/dev/iwlwifi/fw/error-dump.h index 079fa0023bd8..f5e08988dc7b 100644 --- a/sys/contrib/dev/iwlwifi/fw/error-dump.h +++ b/sys/contrib/dev/iwlwifi/fw/error-dump.h @@ -1,560 +1,575 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2021 Intel Corporation + * Copyright (C) 2014, 2018-2022 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __fw_error_dump_h__ #define __fw_error_dump_h__ #include #include "fw/api/cmdhdr.h" #define IWL_FW_ERROR_DUMP_BARKER 0x14789632 #define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633 /** * enum iwl_fw_error_dump_type - types of data in the dump file * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0 * @IWL_FW_ERROR_DUMP_RXF: * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as * &struct iwl_fw_error_dump_txcmd packets * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info * info on the device / firmware. * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several * sections like this in a single file. * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers * @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. * Structured as &struct iwl_fw_error_dump_trigger_desc. * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as * &struct iwl_fw_error_dump_rb * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were * paged to the DRAM. * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers. * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and * for that reason is not in use in any other place in the Linux Wi-Fi * stack. 
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem, * which we get from the fw after ALIVE. The content is structured as * &struct iwl_fw_error_dump_smem_cfg. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ IWL_FW_ERROR_DUMP_CSR = 1, IWL_FW_ERROR_DUMP_RXF = 2, IWL_FW_ERROR_DUMP_TXCMD = 3, IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4, IWL_FW_ERROR_DUMP_FW_MONITOR = 5, IWL_FW_ERROR_DUMP_PRPH = 6, IWL_FW_ERROR_DUMP_TXF = 7, IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MEM = 9, IWL_FW_ERROR_DUMP_ERROR_INFO = 10, IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_PAGING = 12, IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ IWL_FW_ERROR_DUMP_MEM_CFG = 16, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17, IWL_FW_ERROR_DUMP_MAX, }; /** * struct iwl_fw_error_dump_data - data for one type * @type: &enum iwl_fw_error_dump_type * @len: the length starting from %data * @data: the data itself */ struct iwl_fw_error_dump_data { __le32 type; __le32 len; __u8 data[]; } __packed; +/** + * struct iwl_dump_file_name_info - data for dump file name addition + * @type: region type with reserved bits + * @len: the length of file name string to be added to dump file + * @data: the string need to be added to dump file + */ +struct iwl_dump_file_name_info { + __le32 type; + __le32 len; + __u8 data[]; +} __packed; + /** * struct iwl_fw_error_dump_file - the layout of the header of the file * @barker: must be %IWL_FW_ERROR_DUMP_BARKER * @file_len: the length of all the file starting from %barker * @data: array of &struct iwl_fw_error_dump_data */ struct iwl_fw_error_dump_file { __le32 barker; __le32 file_len; - u8 data[0]; + u8 data[]; } __packed; /** * struct iwl_fw_error_dump_txcmd - TX command data * @cmdlen: original length of command * @caplen: captured length of command (may be less) * @data: captured command data, @caplen bytes */ struct iwl_fw_error_dump_txcmd { __le32 cmdlen; __le32 caplen; u8 data[]; } __packed; /** * struct iwl_fw_error_dump_fifo - RX/TX FIFO data * @fifo_num: number of FIFO (starting from 0) * @available_bytes: num of bytes available in FIFO (may be less than FIFO size) * @wr_ptr: position of write pointer * @rd_ptr: position of read pointer * @fence_ptr: position of fence pointer * @fence_mode: the current mode of the fence (before locking) - * 0=follow RD pointer ; 1 = freeze * @data: all of the FIFO's data */ struct iwl_fw_error_dump_fifo { __le32 fifo_num; __le32 available_bytes; __le32 wr_ptr; __le32 rd_ptr; __le32 fence_ptr; __le32 fence_mode; u8 data[]; } __packed; enum iwl_fw_error_dump_family { IWL_FW_ERROR_DUMP_FAMILY_7 = 7, IWL_FW_ERROR_DUMP_FAMILY_8 = 8, }; #define MAX_NUM_LMAC 2 /** * struct iwl_fw_error_dump_info - info on the device / firmware * @hw_type: the type of the device * @hw_step: the step of the device * @fw_human_readable: human readable FW version * @dev_human_readable: name of the device * @bus_human_readable: name of the bus used * @num_of_lmacs: the number of lmacs * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 * @umac_err_id: the umac error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 */ struct iwl_fw_error_dump_info { __le32 hw_type; __le32 hw_step; u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 dev_human_readable[64]; u8 bus_human_readable[8]; u8 num_of_lmacs; __le32 umac_err_id; __le32 
lmac_err_id[MAX_NUM_LMAC]; } __packed; /** * struct iwl_fw_error_dump_fw_mon - FW monitor data * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer * @fw_mon_base_ptr: base pointer of the data * @fw_mon_cycle_cnt: number of wraparounds * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit * so fw_mon_base_ptr holds LSB 32 bits and fw_mon_base_high_ptr holds * MSB 32 bits * @reserved: for future use * @data: captured data */ struct iwl_fw_error_dump_fw_mon { __le32 fw_mon_wr_ptr; __le32 fw_mon_base_ptr; __le32 fw_mon_cycle_cnt; __le32 fw_mon_base_high_ptr; __le32 reserved[2]; u8 data[]; } __packed; #define MAX_NUM_LMAC 2 #define TX_FIFO_INTERNAL_MAX_NUM 6 #define TX_FIFO_MAX_NUM 15 /** * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration * This must follow &struct iwl_fwrt_shared_mem_cfg. * @num_lmacs: number of lmacs * @num_txfifo_entries: number of tx fifos * @lmac: sizes of lmacs txfifos and rxfifo1 * @rxfifo2_size: size of rxfifo2 * @internal_txfifo_addr: address of internal tx fifo * @internal_txfifo_size: size of internal tx fifo */ struct iwl_fw_error_dump_smem_cfg { __le32 num_lmacs; __le32 num_txfifo_entries; struct { __le32 txfifo_size[TX_FIFO_MAX_NUM]; __le32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; __le32 rxfifo2_size; __le32 internal_txfifo_addr; __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; } __packed; /** * struct iwl_fw_error_dump_prph - periphery registers data * @prph_start: address of the first register in this chunk * @data: the content of the registers */ struct iwl_fw_error_dump_prph { __le32 prph_start; __le32 data[]; }; enum iwl_fw_error_dump_mem_type { IWL_FW_ERROR_DUMP_MEM_SRAM, IWL_FW_ERROR_DUMP_MEM_SMEM, IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10, }; /** * struct iwl_fw_error_dump_mem - chunk of memory * @type: &enum iwl_fw_error_dump_mem_type * @offset: the offset from which the memory was read * @data: the content of the memory */ struct iwl_fw_error_dump_mem { __le32 type; __le32 offset; u8 data[]; }; /* Dump version, used by the dump parser to differentiate between * different dump formats */ #define IWL_INI_DUMP_VER 1 /* Use bit 31 as dump info type to avoid colliding with region types */ #define IWL_INI_DUMP_INFO_TYPE BIT(31) +/* Use bit 31 and bit 24 as dump name type to avoid colliding with region types */ +#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24)) + /** * struct iwl_fw_ini_error_dump_data - data for one type * @type: &enum iwl_fw_ini_region_type * @sub_type: sub type id * @sub_type_ver: sub type version * @reserved: not in use * @len: the length starting from %data * @data: the data itself */ struct iwl_fw_ini_error_dump_data { u8 type; u8 sub_type; u8 sub_type_ver; u8 reserved; __le32 len; __u8 data[]; } __packed; /** * struct iwl_fw_ini_dump_entry * @list: list of dump entries * @size: size of the data * @data: entry data */ struct iwl_fw_ini_dump_entry { struct list_head list; u32 size; u8 data[]; } __packed; /** * struct iwl_fw_ini_dump_file_hdr - header of dump file * @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER * @file_len: the length of all the file including the header */ struct iwl_fw_ini_dump_file_hdr { __le32 barker; __le32 file_len; } __packed; /** * struct iwl_fw_ini_fifo_hdr - fifo range header * @fifo_num: the fifo number.
In case of umac rx fifo, set BIT(31) to * distinguish between lmac and umac rx fifos * @num_of_registers: num of registers to dump, dword size each */ struct iwl_fw_ini_fifo_hdr { __le32 fifo_num; __le32 num_of_registers; } __packed; /** * struct iwl_fw_ini_error_dump_range - range of memory * @range_data_size: the size of this range, in bytes * @internal_base_addr: base address of internal memory range * @dram_base_addr: base address of dram monitor range * @page_num: page number of memory range * @fifo_hdr: fifo header of memory range * @fw_pkt_hdr: FW packet header of memory range * @data: the actual memory */ struct iwl_fw_ini_error_dump_range { __le32 range_data_size; union { __le32 internal_base_addr; __le64 dram_base_addr; __le32 page_num; struct iwl_fw_ini_fifo_hdr fifo_hdr; struct iwl_cmd_header fw_pkt_hdr; }; __le32 data[]; } __packed; /** * struct iwl_fw_ini_error_dump_header - ini region dump header * @version: dump version * @region_id: id of the region * @num_of_ranges: number of ranges in this region * @name_len: number of bytes allocated to the name string of this region * @name: name of the region */ struct iwl_fw_ini_error_dump_header { __le32 version; __le32 region_id; __le32 num_of_ranges; __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; }; /** * struct iwl_fw_ini_error_dump - ini region dump * @header: the header of this region * @data: data of memory ranges in this region, * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_error_dump { struct iwl_fw_ini_error_dump_header header; u8 data[]; } __packed; /* This bit is used to differentiate between lmac and umac rxf */ #define IWL_RXF_UMAC_BIT BIT(31) /** * struct iwl_fw_ini_error_dump_register - ini register dump * @addr: address of the register * @data: data of the register */ struct iwl_fw_ini_error_dump_register { __le32 addr; __le32 data; } __packed; /** * struct iwl_fw_ini_dump_cfg_name - configuration name * @image_type: image type the configuration is related to * @cfg_name_len: length of the configuration name * @cfg_name: name of the configuration */ struct iwl_fw_ini_dump_cfg_name { __le32 image_type; __le32 cfg_name_len; u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME]; } __packed; /* AX210's HW type */ #define IWL_AX210_HW_TYPE 0x42 /* How many bits to roll when adding to the HW type of AX210 HW */ #define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12 /* struct iwl_fw_ini_dump_info - ini dump information * @version: dump version * @time_point: time point that caused the dump collection * @trigger_reason: reason of the trigger * @external_cfg_state: &enum iwl_ini_cfg_state * @ver_type: FW version type * @ver_subtype: FW version subtype * @hw_step: HW step * @hw_type: HW type * @rf_id_flavor: HW RF id flavor * @rf_id_dash: HW RF id dash * @rf_id_step: HW RF id step * @rf_id_type: HW RF id type * @lmac_major: lmac major version * @lmac_minor: lmac minor version * @umac_major: umac major version * @umac_minor: umac minor version * @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location * @regions_mask: bitmap mask of regions ids in the dump * @build_tag_len: length of the build tag * @build_tag: build tag string * @num_of_cfg_names: number of configuration name structs * @cfg_names: configuration names */ struct iwl_fw_ini_dump_info { __le32 version; __le32 time_point; __le32 trigger_reason; __le32 external_cfg_state; __le32 ver_type; __le32 ver_subtype; __le32 hw_step; __le32 hw_type; __le32 rf_id_flavor; __le32 rf_id_dash; __le32 rf_id_step; __le32 rf_id_type; __le32 lmac_major; __le32 lmac_minor; __le32 umac_major;
__le32 umac_minor; __le32 fw_mon_mode; __le64 regions_mask; __le32 build_tag_len; u8 build_tag[FW_VER_HUMAN_READABLE_SZ]; __le32 num_of_cfg_names; struct iwl_fw_ini_dump_cfg_name cfg_names[]; } __packed; /** * struct iwl_fw_ini_err_table_dump - ini error table dump * @header: header of the region * @version: error table version * @data: data of memory ranges in this region, * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_err_table_dump { struct iwl_fw_ini_error_dump_header header; __le32 version; u8 data[]; } __packed; /** * struct iwl_fw_error_dump_rb - content of a Receive Buffer * @index: the index of the Receive Buffer in the Rx queue * @rxq: the RB's Rx queue * @reserved: * @data: the content of the Receive Buffer */ struct iwl_fw_error_dump_rb { __le32 index; __le32 rxq; __le32 reserved; u8 data[]; }; /** * struct iwl_fw_ini_monitor_dump - ini monitor dump * @header: header of the region * @write_ptr: write pointer position in the buffer * @cycle_cnt: cycles count * @cur_frag: current fragment in use * @data: data of memory ranges in this region, * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_monitor_dump { struct iwl_fw_ini_error_dump_header header; __le32 write_ptr; __le32 cycle_cnt; __le32 cur_frag; u8 data[]; } __packed; /** * struct iwl_fw_ini_special_device_memory - special device memory * @header: header of the region * @type: type of special memory * @version: struct special memory version * @data: data of memory ranges in this region, * see &struct iwl_fw_ini_error_dump_range */ struct iwl_fw_ini_special_device_memory { struct iwl_fw_ini_error_dump_header header; __le16 type; __le16 version; u8 data[]; } __packed; /** * struct iwl_fw_error_dump_paging - content of the UMAC's image page * block on DRAM * @index: the index of the page block * @reserved: * @data: the content of the page block */ struct iwl_fw_error_dump_paging { __le32 index; __le32 reserved; u8 data[]; }; /** * iwl_fw_error_next_data - advance fw error dump data pointer * @data: previous data block * Returns: next data block */ static inline struct iwl_fw_error_dump_data * iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) { return (void *)(data->data + le32_to_cpu(data->len)); } /** * enum iwl_fw_dbg_trigger - triggers available * * @FW_DBG_TRIGGER_USER: trigger log collection by user * This should not be defined as a trigger to the driver, but a value the * driver should set to indicate that the trigger was initiated by the * user. * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are * missed. * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch. * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a * command response or a notification. * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event. * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold. * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon * goes below a threshold. * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang * detection. * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a * threshold. * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
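/*
 * Editor's note (illustrative sketch, not part of the patch): the legacy
 * dump file described above is a 0x14789632 barker and a total length,
 * followed by back-to-back type/length records that are advanced exactly
 * the way iwl_fw_error_next_data() does it.  The standalone walker below
 * mirrors those structs locally and assumes a little-endian host in place
 * of le32_to_cpu(); the buffer built in main() is fabricated for the demo.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_hdr { uint32_t barker; uint32_t file_len; uint8_t data[]; };
struct dump_rec { uint32_t type;   uint32_t len;      uint8_t data[]; };

static void walk_dump(const uint8_t *buf, size_t size)
{
        const struct dump_hdr *hdr = (const void *)buf;
        const uint8_t *pos, *end;

        if (size < sizeof(*hdr) || hdr->barker != 0x14789632u) {
                fprintf(stderr, "not a legacy iwlwifi dump\n");
                return;
        }

        pos = hdr->data;
        end = buf + (hdr->file_len < size ? hdr->file_len : size);

        while (pos + sizeof(struct dump_rec) <= end) {
                const struct dump_rec *rec = (const void *)pos;

                printf("record type %u, %u bytes of payload\n",
                       rec->type, rec->len);
                pos = rec->data + rec->len; /* same step as iwl_fw_error_next_data() */
        }
}

int main(void)
{
        _Alignas(uint32_t) uint8_t buf[sizeof(struct dump_hdr) +
                                       sizeof(struct dump_rec) + 4] = { 0 };
        struct dump_hdr hdr = { 0x14789632u, sizeof(buf) };
        struct dump_rec rec = { 1 /* CSR */, 4 };

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), &rec, sizeof(rec));
        walk_dump(buf, sizeof(buf));
        return 0;
}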
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure * in the driver. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, FW_DBG_TRIGGER_USER, FW_DBG_TRIGGER_FW_ASSERT, FW_DBG_TRIGGER_MISSED_BEACONS, FW_DBG_TRIGGER_CHANNEL_SWITCH, FW_DBG_TRIGGER_FW_NOTIF, FW_DBG_TRIGGER_MLME, FW_DBG_TRIGGER_STATS, FW_DBG_TRIGGER_RSSI, FW_DBG_TRIGGER_TXQ_TIMERS, FW_DBG_TRIGGER_TIME_EVENT, FW_DBG_TRIGGER_BA, FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, FW_DBG_TRIGGER_TX_STATUS, FW_DBG_TRIGGER_ALIVE_TIMEOUT, FW_DBG_TRIGGER_DRIVER, /* must be last */ FW_DBG_TRIGGER_MAX, }; /** * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition * @type: &enum iwl_fw_dbg_trigger * @data: raw data about what happened */ struct iwl_fw_error_dump_trigger_desc { __le32 type; u8 data[]; }; #endif /* __fw_error_dump_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/file.h b/sys/contrib/dev/iwlwifi/fw/file.h index 5679a78758be..b36e9613a52c 100644 --- a/sys/contrib/dev/iwlwifi/fw/file.h +++ b/sys/contrib/dev/iwlwifi/fw/file.h @@ -1,955 +1,968 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2008-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_file_h__ #define __iwl_fw_file_h__ #include #include /* v1/v2 uCode file layout */ struct iwl_ucode_header { __le32 ver; /* major/minor/API/serial */ union { struct { __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v1; struct { __le32 build; /* build number */ __le32 inst_size; /* bytes of runtime code */ __le32 data_size; /* bytes of runtime data */ __le32 init_size; /* bytes of init code */ __le32 init_data_size; /* bytes of init data */ __le32 boot_size; /* bytes of bootstrap code */ u8 data[0]; /* in same order as sizes */ } v2; } u; }; #define IWL_UCODE_TLV_DEBUG_BASE 0x1000005 #define IWL_UCODE_TLV_CONST_BASE 0x100 /* * new TLV uCode file layout * * The new TLV file format contains TLVs, that each specify * some piece of data. 
*/ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_INVALID = 0, /* unused */ IWL_UCODE_TLV_INST = 1, IWL_UCODE_TLV_DATA = 2, IWL_UCODE_TLV_INIT = 3, IWL_UCODE_TLV_INIT_DATA = 4, IWL_UCODE_TLV_BOOT = 5, IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */ IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */ IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, IWL_UCODE_TLV_INIT_EVTLOG_PTR = 11, IWL_UCODE_TLV_INIT_EVTLOG_SIZE = 12, IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13, IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14, IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15, IWL_UCODE_TLV_WOWLAN_INST = 16, IWL_UCODE_TLV_WOWLAN_DATA = 17, IWL_UCODE_TLV_FLAGS = 18, IWL_UCODE_TLV_SEC_RT = 19, IWL_UCODE_TLV_SEC_INIT = 20, IWL_UCODE_TLV_SEC_WOWLAN = 21, IWL_UCODE_TLV_DEF_CALIB = 22, IWL_UCODE_TLV_PHY_SKU = 23, IWL_UCODE_TLV_SECURE_SEC_RT = 24, IWL_UCODE_TLV_SECURE_SEC_INIT = 25, IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26, IWL_UCODE_TLV_NUM_OF_CPU = 27, IWL_UCODE_TLV_CSCHEME = 28, IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, IWL_UCODE_TLV_PAGING = 32, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, /* 35 is unused */ IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_CMD_VERSIONS = 48, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, IWL_UCODE_TLV_HW_TYPE = 58, IWL_UCODE_TLV_FW_FSEQ_VERSION = 60, IWL_UCODE_TLV_PHY_INTEGRATION_VERSION = 61, IWL_UCODE_TLV_PNVM_VERSION = 62, IWL_UCODE_TLV_PNVM_SKU = 64, IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, + IWL_UCODE_TLV_CURRENT_PC = 68, IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0, + IWL_UCODE_TLV_FW_NUM_BEACONS = IWL_UCODE_TLV_CONST_BASE + 2, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1, IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3, IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4, IWL_UCODE_TLV_TYPE_CONF_SET = IWL_UCODE_TLV_DEBUG_BASE + 5, IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, }; struct iwl_ucode_tlv { __le32 type; /* see above */ __le32 length; /* not including type/length fields */ u8 data[]; }; #define IWL_TLV_UCODE_MAGIC 0x0a4c5749 #define FW_VER_HUMAN_READABLE_SZ 64 struct iwl_tlv_ucode_header { /* * The TLV style ucode header is distinguished from * the v1/v2 style header by first four bytes being * zero, as such is an invalid combination of * major/minor/API/serial versions. */ __le32 zero; __le32 magic; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; /* major/minor/API/serial or major in new format */ __le32 ver; __le32 build; __le64 ignore; /* * The data contained herein has a TLV layout, * see above for the TLV header and types. * Note that each TLV is padded to a length * that is a multiple of 4 for alignment. 
*/ - u8 data[0]; + u8 data[]; }; /* * ucode TLVs * * ability to get extension for: flags & capabilities from ucode binary files */ struct iwl_ucode_api { __le32 api_index; __le32 api_flags; } __packed; struct iwl_ucode_capa { __le32 api_index; __le32 api_capa; } __packed; /** * enum iwl_ucode_tlv_flag - ucode API flags * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously * was a separate TLV but moved here to save space. * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID, * treats good CRC threshold as a boolean * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of block list instead of 64 in scan * offload profile config command. * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element * from the probe request template. * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS. */ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_PAN = BIT(0), IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), IWL_UCODE_TLV_FLAGS_MFP = BIT(2), IWL_UCODE_TLV_FLAGS_SHORT_BL = BIT(7), IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10), IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID = BIT(12), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25), IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26), }; typedef unsigned int __bitwise iwl_ucode_tlv_api_t; /** * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan * iteration complete notification, and the timestamp reported for RX * received during scan, are reported in TSF of the mac specified in the * scan request. * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of * ADD_MODIFY_STA_KEY_API_S_VER_2. * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field * indicating low latency direction. * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is * deprecated. * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8 * of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8 * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of * the REDUCE_TX_POWER_CMD. * @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short * version of the beacon notification.
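/*
 * Editor's note (illustrative sketch, not part of the patch): the TLV
 * layout of the .ucode file described above -- after the fixed header,
 * every record is a type/length pair followed by `length` payload bytes,
 * and the next record starts at the payload rounded up to a multiple of
 * four.  Standalone walker; a little-endian host is assumed instead of
 * le32_to_cpu(), and the buffer built in main() is fabricated.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv { uint32_t type; uint32_t length; uint8_t data[]; };

#define TLV_ALIGN(x) (((x) + 3u) & ~3u)

static void walk_ucode_tlvs(const uint8_t *data, size_t len)
{
        while (len >= sizeof(struct tlv)) {
                const struct tlv *tlv = (const void *)data;
                uint32_t payload = tlv->length;
                uint32_t step = TLV_ALIGN(payload);

                len -= sizeof(*tlv);
                data += sizeof(*tlv);
                if (payload > len) {
                        fprintf(stderr, "truncated TLV\n");
                        return;
                }
                printf("TLV type 0x%x, %u payload bytes\n", tlv->type, payload);

                if (step >= len)        /* last (possibly unpadded) record */
                        return;
                data += step;           /* each TLV is padded to 4 bytes */
                len -= step;
        }
}

int main(void)
{
        /* one fake TLV: type 6 (probe max len in the enum above), 4-byte value */
        _Alignas(uint32_t) uint8_t buf[sizeof(struct tlv) + 4] = { 0 };
        struct tlv t = { 6, 4 };

        memcpy(buf, &t, sizeof(t));
        walk_ucode_tlvs(buf, sizeof(buf));
        return 0;
}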
* @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of * BEACON_FILTER_CONFIG_API_S_VER_4. * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of * REGULATORY_NVM_GET_INFO_RSP_API_S. * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S. * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar * version tables. * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of * SCAN_CONFIG_DB_CMD_API_S. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ enum iwl_ucode_tlv_api { /* API Set 0 */ IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28, IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29, IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_OCE = (__force iwl_ucode_tlv_api_t)33, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL = (__force iwl_ucode_tlv_api_t)36, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38, IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44, IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46, IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50, IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54, IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG = (__force iwl_ucode_tlv_api_t)56, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58, IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ #define NUM_IWL_UCODE_TLV_API 128 #else NUM_IWL_UCODE_TLV_API #endif }; typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. 
* @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report * action frame * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current * channel in DS parameter set element in probe requests. * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in * probe requests. * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it * is standalone or with a BSS station interface in the same binding. * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) + * @IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG: supports fragmented PNVM image * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting * stabilization latency for SoCs. * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band * (6 GHz). * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA * countdown offloading. Beacon notifications are not sent to the host. * The fw also offloads TBTT alignment. * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in * regular image. * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared * memory addresses from the firmware. 
* @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger * command size (command version 4) that supports toggling ACK TX * power reduction. * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured * to report the CSI information with (certain) RX frames * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both * initiator and responder * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in * reset flow * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC * channels even when these are not enabled. * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection * complete to FW. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ enum iwl_ucode_tlv_capa { /* set 0 */ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, /* set 1 */ + IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG = (__force iwl_ucode_tlv_capa_t)32, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force 
iwl_ucode_tlv_capa_t)50, IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52, IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58, IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59, IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT = (__force iwl_ucode_tlv_capa_t)63, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)92, IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP = (__force iwl_ucode_tlv_capa_t)93, /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, /* * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels */ IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105, + IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM = (__force iwl_ucode_tlv_capa_t)108, + IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT = (__force iwl_ucode_tlv_capa_t)109, + IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT = (__force iwl_ucode_tlv_capa_t)110, + IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT = (__force iwl_ucode_tlv_capa_t)111, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT = (__force iwl_ucode_tlv_capa_t)112, + IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = (__force iwl_ucode_tlv_capa_t)113, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114, + IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member 
*/ #define NUM_IWL_UCODE_TLV_CAPA 128 #else NUM_IWL_UCODE_TLV_CAPA #endif }; /* The default calibrate table size if not specified by firmware file */ #define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253 /* The default max probe length if not specified by the firmware file */ #define IWL_DEFAULT_MAX_PROBE_LENGTH 200 /* * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC #define PAGING_SEPARATOR_SECTION 0xAAAABBBB /* uCode version contains 4 values: Major/Minor/API/Serial */ #define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) #define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) #define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) #define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) /** * struct iwl_tlv_calib_ctrl - Calibration control struct. * Sent as part of the phy configuration command. * @flow_trigger: bitmap for which calibrations to perform according to * flow triggers. * @event_trigger: bitmap for which calibrations to perform according to * event triggers. */ struct iwl_tlv_calib_ctrl { __le32 flow_trigger; __le32 event_trigger; } __packed; enum iwl_fw_phy_cfg { FW_PHY_CFG_RADIO_TYPE_POS = 0, FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS, FW_PHY_CFG_RADIO_STEP_POS = 2, FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS, FW_PHY_CFG_RADIO_DASH_POS = 4, FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS, FW_PHY_CFG_TX_CHAIN_POS = 16, FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS, FW_PHY_CFG_RX_CHAIN_POS = 20, FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS, FW_PHY_CFG_CHAIN_SAD_POS = 23, FW_PHY_CFG_CHAIN_SAD_ENABLED = 0x1 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_CHAIN_SAD_ANT_A = 0x2 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_CHAIN_SAD_ANT_B = 0x4 << FW_PHY_CFG_CHAIN_SAD_POS, FW_PHY_CFG_SHARED_CLK = BIT(31), }; enum iwl_fw_dbg_reg_operator { CSR_ASSIGN, CSR_SETBIT, CSR_CLEARBIT, PRPH_ASSIGN, PRPH_SETBIT, PRPH_CLEARBIT, INDIRECT_ASSIGN, INDIRECT_SETBIT, INDIRECT_CLEARBIT, PRPH_BLOCKBIT, }; /** * struct iwl_fw_dbg_reg_op - an operation on a register * * @op: &enum iwl_fw_dbg_reg_operator * @addr: offset of the register * @val: value */ struct iwl_fw_dbg_reg_op { u8 op; u8 reserved[3]; __le32 addr; __le32 val; } __packed; /** * enum iwl_fw_dbg_monitor_mode - available monitor recording modes * * @SMEM_MODE: monitor stores the data in SMEM * @EXTERNAL_MODE: monitor stores the data in allocated DRAM * @MARBH_MODE: monitor stores the data in MARBH buffer * @MIPI_MODE: monitor outputs the data through the MIPI interface */ enum iwl_fw_dbg_monitor_mode { SMEM_MODE = 0, EXTERNAL_MODE = 1, MARBH_MODE = 2, MIPI_MODE = 3, }; /** * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments * * @data_type: the memory segment type to record * @ofs: the memory segment offset * @len: the memory segment length, in bytes * * This parses IWL_UCODE_TLV_FW_MEM_SEG */ struct iwl_fw_dbg_mem_seg_tlv { __le32 data_type; __le32 ofs; __le32 len; } __packed; /** * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data * * @version: version of the TLV - currently 0 * @monitor_mode: &enum iwl_fw_dbg_monitor_mode * @size_power: buffer size will be 2^(size_power + 11) * @base_reg: addr of the base addr register (PRPH) * @end_reg: addr of the end addr register (PRPH) * @write_ptr_reg: the addr of the reg of the write pointer * @wrap_count: 
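/*
 * Editor's note (illustrative sketch, not part of the patch): the ucode
 * version word packs Major/Minor/API/Serial into one 32-bit value, one
 * byte each, exactly as the IWL_UCODE_MAJOR/MINOR/API/SERIAL macros above
 * extract them.  The value used below is made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define UCODE_MAJOR(ver)  (((ver) & 0xFF000000u) >> 24)
#define UCODE_MINOR(ver)  (((ver) & 0x00FF0000u) >> 16)
#define UCODE_API(ver)    (((ver) & 0x0000FF00u) >> 8)
#define UCODE_SERIAL(ver) ((ver) & 0x000000FFu)

int main(void)
{
        uint32_t ver = 0x31014D02u;     /* hypothetical example value */

        printf("ucode version %u.%u.%u.%u\n",
               UCODE_MAJOR(ver), UCODE_MINOR(ver),
               UCODE_API(ver), UCODE_SERIAL(ver));      /* prints 49.1.77.2 */
        return 0;
}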
the addr of the reg of the wrap_count * @base_shift: shift right of the base addr reg * @end_shift: shift right of the end addr reg * @reg_ops: array of registers operations * * This parses IWL_UCODE_TLV_FW_DBG_DEST */ struct iwl_fw_dbg_dest_tlv_v1 { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 base_reg; __le32 end_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 end_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ #define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000 /* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */ #define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff /* The smem buffer chunks are in units of 256 bits */ #define IWL_M2S_UNIT_SIZE 0x100 struct iwl_fw_dbg_dest_tlv { u8 version; u8 monitor_mode; u8 size_power; u8 reserved; __le32 cfg_reg; __le32 write_ptr_reg; __le32 wrap_count; u8 base_shift; u8 size_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; __le16 len; - u8 data[0]; + u8 data[]; } __packed; /** * enum iwl_fw_dbg_trigger_mode - triggers functionalities * * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to * collect only monitor data */ enum iwl_fw_dbg_trigger_mode { IWL_FW_DBG_TRIGGER_START = BIT(0), IWL_FW_DBG_TRIGGER_STOP = BIT(1), IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), }; /** * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart */ enum iwl_fw_dbg_trigger_flags { IWL_FW_DBG_FORCE_RESTART = BIT(0), }; /** * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger * @IWL_FW_DBG_CONF_VIF_ANY: any vif type * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode * @IWL_FW_DBG_CONF_VIF_AP: AP mode * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device */ enum iwl_fw_dbg_trigger_vif_type { IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED, IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC, IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION, IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP, IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT, IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO, IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE, }; /** * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger * @id: &enum iwl_fw_dbg_trigger * @vif_type: &enum iwl_fw_dbg_trigger_vif_type * @stop_conf_ids: bitmap of configurations this trigger relates to. * if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding * to the currently running configuration is set, the data should be * collected. * @stop_delay: how many milliseconds to wait before collecting the data * after the STOP trigger fires. * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what * configuration should be applied when the triggers kicks in. * @occurrences: number of occurrences. 0 means the trigger will never fire. * @trig_dis_ms: the time, in milliseconds, after an occurrence of this * trigger in which another occurrence should be ignored. 
* @flags: &enum iwl_fw_dbg_trigger_flags */ struct iwl_fw_dbg_trigger_tlv { __le32 id; __le32 vif_type; __le32 stop_conf_ids; __le32 stop_delay; u8 mode; u8 start_conf_id; __le16 occurrences; __le16 trig_dis_ms; u8 flags; u8 reserved[5]; - u8 data[0]; + u8 data[]; } __packed; #define FW_DBG_START_FROM_ALIVE 0 #define FW_DBG_CONF_MAX 32 #define FW_DBG_INVALID 0xff /** * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons * @stop_consec_missed_bcon: stop recording if threshold is crossed. * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed. * @start_consec_missed_bcon: start recording if threshold is crossed. * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed. * @reserved1: reserved * @reserved2: reserved */ struct iwl_fw_dbg_trigger_missed_bcon { __le32 stop_consec_missed_bcon; __le32 stop_consec_missed_bcon_since_rx; __le32 reserved2[2]; __le32 start_consec_missed_bcon; __le32 start_consec_missed_bcon_since_rx; __le32 reserved1[2]; } __packed; /** * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW. * cmds: the list of commands to trigger the collection on */ struct iwl_fw_dbg_trigger_cmd { struct cmd { u8 cmd_id; u8 group_id; } __packed cmds[16]; } __packed; /** * iwl_fw_dbg_trigger_stats - configures trigger for statistics * @stop_offset: the offset of the value to be monitored * @stop_threshold: the threshold above which to collect * @start_offset: the offset of the value to be monitored * @start_threshold: the threshold above which to start recording */ struct iwl_fw_dbg_trigger_stats { __le32 stop_offset; __le32 stop_threshold; __le32 start_offset; __le32 start_threshold; } __packed; /** * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI * @rssi: RSSI value to trigger at */ struct iwl_fw_dbg_trigger_low_rssi { __le32 rssi; } __packed; /** * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events * @stop_auth_denied: number of denied authentication to collect * @stop_auth_timeout: number of authentication timeout to collect * @stop_rx_deauth: number of Rx deauth before to collect * @stop_tx_deauth: number of Tx deauth before to collect * @stop_assoc_denied: number of denied association to collect * @stop_assoc_timeout: number of association timeout to collect * @stop_connection_loss: number of connection loss to collect * @start_auth_denied: number of denied authentication to start recording * @start_auth_timeout: number of authentication timeout to start recording * @start_rx_deauth: number of Rx deauth to start recording * @start_tx_deauth: number of Tx deauth to start recording * @start_assoc_denied: number of denied association to start recording * @start_assoc_timeout: number of association timeout to start recording * @start_connection_loss: number of connection loss to start recording */ struct iwl_fw_dbg_trigger_mlme { u8 stop_auth_denied; u8 stop_auth_timeout; u8 stop_rx_deauth; u8 stop_tx_deauth; u8 stop_assoc_denied; u8 stop_assoc_timeout; u8 stop_connection_loss; u8 reserved; u8 start_auth_denied; u8 start_auth_timeout; u8 start_rx_deauth; u8 start_tx_deauth; u8 start_assoc_denied; u8 start_assoc_timeout; u8 start_connection_loss; u8 reserved2; } __packed; /** * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer * @command_queue: timeout for the command queue in ms * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms * @softap: timeout for the queues of a softAP in ms * @p2p_go: timeout for the queues of a P2P GO 
in ms * @p2p_client: timeout for the queues of a P2P client in ms * @p2p_device: timeout for the queues of a P2P device in ms * @ibss: timeout for the queues of an IBSS in ms * @tdls: timeout for the queues of a TDLS station in ms */ struct iwl_fw_dbg_trigger_txq_timer { __le32 command_queue; __le32 bss; __le32 softap; __le32 p2p_go; __le32 p2p_client; __le32 p2p_device; __le32 ibss; __le32 tdls; __le32 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger * time_events: a list of tuples (id, action_bitmap, status_bitmap). The driver will issue a * trigger each time a time event notification that relates to time event * id with one of the actions in the bitmap is received and * BIT(notif->status) is set in status_bitmap. * */ struct iwl_fw_dbg_trigger_time_event { struct { __le32 id; __le32 action_bitmap; __le32 status_bitmap; } __packed time_events[16]; } __packed; /** * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger * rx_ba_start: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is started. * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when an Rx BlockAck session is stopped. * tx_ba_start: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is started. * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur * when a Tx BlockAck session is stopped. * rx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is received (for a Tx BlockAck session). * tx_bar: tid bitmap to configure on what tid the trigger should occur * when a BAR is sent (for an Rx BlockAck session). * frame_timeout: tid bitmap to configure on what tid the trigger should occur * when a frame times out in the reordering buffer. */ struct iwl_fw_dbg_trigger_ba { __le16 rx_ba_start; __le16 rx_ba_stop; __le16 tx_ba_start; __le16 tx_ba_stop; __le16 rx_bar; __le16 tx_bar; __le16 frame_timeout; } __packed; /** * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events. * @action_bitmap: the TDLS action to trigger the collection upon * @peer_mode: trigger on specific peer or all * @peer: the TDLS peer to trigger the collection on */ struct iwl_fw_dbg_trigger_tdls { u8 action_bitmap; u8 peer_mode; u8 peer[ETH_ALEN]; u8 reserved[4]; } __packed; /** * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response * status. * @statuses: the list of statuses to trigger the collection on */ struct iwl_fw_dbg_trigger_tx_status { struct tx_status { u8 status; u8 reserved[3]; } __packed statuses[16]; __le32 reserved[2]; } __packed; /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id * @usniffer: should the uSniffer image be used * @num_of_hcmds: how many HCMDs to send are present here * @hcmd: a variable length host command to be sent to apply the configuration. * If there is more than one HCMD to send, they will appear one after the * other and be sent in the order that they appear in. * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to * %FW_DBG_CONF_MAX configurations per run.
*/ struct iwl_fw_dbg_conf_tlv { u8 id; u8 usniffer; u8 reserved; u8 num_of_hcmds; struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; #define IWL_FW_CMD_VER_UNKNOWN 99 /** * struct iwl_fw_cmd_version - firmware command version entry * @cmd: command ID * @group: group ID * @cmd_ver: command version * @notif_ver: notification version */ struct iwl_fw_cmd_version { u8 cmd; u8 group; u8 cmd_ver; u8 notif_ver; } __packed; struct iwl_fw_tcm_error_addr { __le32 addr; }; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */ struct iwl_fw_dump_exclude { __le32 addr, size; }; static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv, size_t fixed_size, size_t var_size) { size_t var_len = le32_to_cpu(tlv->length) - fixed_size; if (WARN_ON(var_len % var_size)) return 0; return var_len / var_size; } #define iwl_tlv_array_len(_tlv_ptr, _struct_ptr, _memb) \ _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \ sizeof(_struct_ptr->_memb[0])) #endif /* __iwl_fw_file_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/img.h b/sys/contrib/dev/iwlwifi/fw/img.h index f878ac508801..8d0d58d61892 100644 --- a/sys/contrib/dev/iwlwifi/fw/img.h +++ b/sys/contrib/dev/iwlwifi/fw/img.h @@ -1,274 +1,275 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #ifndef __iwl_fw_img_h__ #define __iwl_fw_img_h__ #include #include "api/dbg-tlv.h" #include "file.h" #include "error-dump.h" /** * enum iwl_ucode_type * * The type of ucode. * * @IWL_UCODE_REGULAR: Normal runtime ucode * @IWL_UCODE_INIT: Initial ucode * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image */ enum iwl_ucode_type { IWL_UCODE_REGULAR, IWL_UCODE_INIT, IWL_UCODE_WOWLAN, IWL_UCODE_REGULAR_USNIFFER, IWL_UCODE_TYPE_MAX, }; /* * enumeration of ucode section. * This enumeration is used directly for older firmware (before 16.0). * For new firmware, there can be up to 4 sections (see below) but the * first one packaged into the firmware file is the DATA section and * some debugging code accesses that. 
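/*
 * Editor's note (illustrative sketch, not part of the patch): what the
 * iwl_tlv_array_len() helper above computes -- the number of trailing
 * fixed-size entries in a TLV whose payload is a fixed header followed
 * by an array -- rewritten standalone with plain sizes.
 */
#include <stddef.h>
#include <stdio.h>

static size_t tlv_array_len(size_t tlv_len, size_t fixed_size,
                            size_t entry_size)
{
        if (tlv_len < fixed_size || (tlv_len - fixed_size) % entry_size)
                return 0;       /* malformed TLV; the driver WARNs and bails out the same way */
        return (tlv_len - fixed_size) / entry_size;
}

int main(void)
{
        /* e.g. a 28-byte TLV with a 4-byte fixed part and 8-byte entries */
        printf("%zu entries\n", tlv_array_len(28, 4, 8));       /* prints 3 */
        return 0;
}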
*/ enum iwl_ucode_sec { IWL_UCODE_SECTION_DATA, IWL_UCODE_SECTION_INST, }; struct iwl_ucode_capabilities { u32 max_probe_length; u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; u32 error_log_addr; u32 error_log_size; u32 num_stations; + u32 num_beacons; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; const struct iwl_fw_cmd_version *cmd_versions; u32 n_cmd_versions; }; static inline bool fw_has_api(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_api_t api) { return test_bit((__force long)api, capabilities->_api); } static inline bool fw_has_capa(const struct iwl_ucode_capabilities *capabilities, iwl_ucode_tlv_capa_t capa) { return test_bit((__force long)capa, capabilities->_capa); } /* one for each uCode image (inst/data, init/runtime/wowlan) */ struct fw_desc { const void *data; /* vmalloc'ed data */ u32 len; /* size in bytes */ u32 offset; /* offset in the device */ }; struct fw_img { struct fw_desc *sec; int num_sec; bool is_dual_cpus; u32 paging_mem_size; }; /* * Block paging calculations */ #define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ #define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ #define PAGE_PER_GROUP_2_EXP_SIZE 3 /* 8 pages per group */ #define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) /* don't change, support only 32KB size */ #define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) /* 32K == 2^15 */ #define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) /* * Image paging calculations */ #define BLOCK_PER_IMAGE_2_EXP_SIZE 5 /* 2^5 == 32 blocks per image */ #define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) /* maximum image size 1024KB */ #define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) /* Virtual address signature */ #define PAGING_ADDR_SIG 0xAA000000 #define PAGING_CMD_IS_SECURED BIT(9) #define PAGING_CMD_IS_ENABLED BIT(8) #define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 #define PAGING_TLV_SECURE_MASK 1 /* FW MSB Mask for regions/cache_control */ #define FW_ADDR_CACHE_CONTROL 0xC0000000UL /** * struct iwl_fw_paging * @fw_paging_phys: page phy pointer * @fw_paging_block: pointer to the allocated block * @fw_paging_size: page size * @fw_offs: offset in the device */ struct iwl_fw_paging { dma_addr_t fw_paging_phys; struct page *fw_paging_block; u32 fw_paging_size; u32 fw_offs; }; /** * enum iwl_fw_type - iwlwifi firmware type * @IWL_FW_DVM: DVM firmware * @IWL_FW_MVM: MVM firmware */ enum iwl_fw_type { IWL_FW_DVM, IWL_FW_MVM, }; /** * struct iwl_fw_dbg - debug data * * @dest_tlv: points to debug destination TLV (typically SRAM or DRAM) * @n_dest_reg: num of reg_ops in dest_tlv * @conf_tlv: array of pointers to configuration HCMDs * @trigger_tlv: array of pointers to triggers TLVs * @trigger_tlv_len: lengths of the @dbg_trigger_tlv entries * @mem_tlv: Runtime addresses to dump * @n_mem_tlv: number of runtime addresses * @dump_mask: bitmask of dump regions */ struct iwl_fw_dbg { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; u8 n_dest_reg; struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *mem_tlv; size_t n_mem_tlv; u32 dump_mask; }; struct iwl_dump_exclude { u32 addr, size; }; /** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file * @fw_version: firmware version string * @img: ucode image like 
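/*
 * Editor's note (illustrative sketch, not part of the patch): the paging
 * arithmetic from the defines above, spelled out -- 4 KiB pages, 8 pages
 * per block, 32 blocks per image, which gives the 32 KiB block size and
 * the 1024 KiB maximum paging image noted in the comments.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_size  = 1ul << 12;           /* FW_PAGING_SIZE: 4096 */
        unsigned long block_size = page_size << 3;      /* PAGING_BLOCK_SIZE: 32768 */
        unsigned long image_max  = block_size << 5;     /* MAX_PAGING_IMAGE_SIZE: 1048576 */

        printf("page %lu, block %lu, max paging image %lu bytes\n",
               page_size, block_size, image_max);
        return 0;
}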
ucode_rt, ucode_init, ucode_wowlan. * @iml_len: length of the image loader image * @iml: image loader fw image * @ucode_capa: capabilities parsed from the ucode file. * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. * @init_evtlog_size: event log size for init ucode. - * @init_errlog_ptr: error log offfset for init ucode. + * @init_errlog_ptr: error log offset for init ucode. * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. - * @inst_errlog_ptr: error log offfset for runtime ucode. + * @inst_errlog_ptr: error log offset for runtime ucode. * @type: firmware type (&enum iwl_fw_type) * @human_readable: human readable version * we get the ALIVE from the uCode * @phy_integration_ver: PHY integration version string * @phy_integration_ver_len: length of @phy_integration_ver * @dump_excl: image dump exclusion areas for RT image * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image */ struct iwl_fw { u32 ucode_ver; char fw_version[64]; /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; size_t iml_len; u8 *iml; struct iwl_ucode_capabilities ucode_capa; bool enhance_sensitivity_table; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX]; u32 phy_config; u8 valid_tx_ant; u8 valid_rx_ant; enum iwl_fw_type type; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; struct iwl_fw_dbg dbg; u8 *phy_integration_ver; u32 phy_integration_ver_len; struct iwl_dump_exclude dump_excl[2], dump_excl_wowlan[2]; }; static inline const char *get_fw_dbg_mode_string(int mode) { switch (mode) { case SMEM_MODE: return "SMEM"; case EXTERNAL_MODE: return "EXTERNAL_DRAM"; case MARBH_MODE: return "MARBH"; case MIPI_MODE: return "MIPI"; default: return "UNKNOWN"; } } static inline bool iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id) { const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id]; if (!conf_tlv) return false; return conf_tlv->usniffer; } static inline const struct fw_img * iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type) { if (ucode_type >= IWL_UCODE_TYPE_MAX) return NULL; return &fw->img[ucode_type]; } u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def); u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def); const char *iwl_fw_lookup_assert_desc(u32 num); #define FW_SYSASSERT_CPU_MASK 0xf0000000 #define FW_SYSASSERT_PNVM_MISSING 0x0010070d #endif /* __iwl_fw_img_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/pnvm.c b/sys/contrib/dev/iwlwifi/fw/pnvm.c index 1ced9cb2836b..650e4bde9c17 100644 --- a/sys/contrib/dev/iwlwifi/fw/pnvm.c +++ b/sys/contrib/dev/iwlwifi/fw/pnvm.c @@ -1,359 +1,394 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2020-2021 Intel Corporation + * Copyright(c) 2020-2023 Intel Corporation */ #include "iwl-drv.h" #include "pnvm.h" #include "iwl-prph.h" #include "iwl-io.h" #include "fw/api/commands.h" #include "fw/api/nvm-reg.h" #include "fw/api/alive.h" #include "fw/uefi.h" struct iwl_pnvm_section { __le32 offset; const u8 data[]; } __packed; static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_trans *trans = (struct iwl_trans *)data; struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data; IWL_DEBUG_FW(trans, "PNVM complete notification received with 
status 0x%0x\n", le32_to_cpu(pnvm_ntf->status)); return true; } static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, - size_t len) + size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; u32 sha1 = 0; u16 mac_type = 0, rf_id = 0; - u8 *pnvm_data = NULL, *tmp; bool hw_match = false; - u32 size = 0; - int ret; IWL_DEBUG_FW(trans, "Handling PNVM section\n"); + memset(pnvm_data, 0, sizeof(*pnvm_data)); + while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - ret = -EINVAL; - goto out; + return -EINVAL; } data += sizeof(*tlv); switch (tlv_type) { case IWL_UCODE_TLV_PNVM_VERSION: if (tlv_len < sizeof(__le32)) { IWL_DEBUG_FW(trans, "Invalid size for IWL_UCODE_TLV_PNVM_VERSION (expected %zd, got %d)\n", sizeof(__le32), tlv_len); break; } sha1 = le32_to_cpup((const __le32 *)data); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n", sha1); + pnvm_data->version = sha1; break; case IWL_UCODE_TLV_HW_TYPE: if (tlv_len < 2 * sizeof(__le16)) { IWL_DEBUG_FW(trans, "Invalid size for IWL_UCODE_TLV_HW_TYPE (expected %zd, got %d)\n", 2 * sizeof(__le16), tlv_len); break; } if (hw_match) break; mac_type = le16_to_cpup((const __le16 *)data); rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16))); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", mac_type, rf_id); if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) && rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id)) hw_match = true; break; case IWL_UCODE_TLV_SEC_RT: { const struct iwl_pnvm_section *section = (const void *)data; u32 data_len = tlv_len - sizeof(*section); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_SEC_RT len %d\n", tlv_len); /* TODO: remove, this is a deprecated separator */ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) { IWL_DEBUG_FW(trans, "Ignoring separator.\n"); break; } - IWL_DEBUG_FW(trans, "Adding data (size %d)\n", - data_len); - - tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL); - if (!tmp) { + if (pnvm_data->n_chunks == IPC_DRAM_MAP_ENTRY_NUM_MAX) { IWL_DEBUG_FW(trans, - "Couldn't allocate (more) pnvm_data\n"); - - ret = -ENOMEM; - goto out; + "too many payloads to allocate in DRAM.\n"); + return -EINVAL; } - pnvm_data = tmp; - - memcpy(pnvm_data + size, section->data, data_len); + IWL_DEBUG_FW(trans, "Adding data (size %d)\n", + data_len); - size += data_len; + pnvm_data->chunks[pnvm_data->n_chunks].data = section->data; + pnvm_data->chunks[pnvm_data->n_chunks].len = data_len; + pnvm_data->n_chunks++; break; } + case IWL_UCODE_TLV_MEM_DESC: + if (iwl_uefi_handle_tlv_mem_desc(trans, data, tlv_len, + pnvm_data)) + return -EINVAL; + break; case IWL_UCODE_TLV_PNVM_SKU: IWL_DEBUG_FW(trans, "New PNVM section started, stop parsing.\n"); goto done; default: IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n", tlv_type, tlv_len); break; } len -= ALIGN(tlv_len, 4); data += ALIGN(tlv_len, 4); } done: if (!hw_match) { IWL_DEBUG_FW(trans, "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n", CSR_HW_REV_TYPE(trans->hw_rev), CSR_HW_RFID_TYPE(trans->hw_rf_id)); - ret = -ENOENT; - goto out; + return -ENOENT; } - if (!size) { + if (!pnvm_data->n_chunks) { IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); - ret = -ENOENT; - goto out; + return -ENOENT; } - IWL_INFO(trans, "loaded PNVM version %08x\n", sha1); - - ret = iwl_trans_set_pnvm(trans, pnvm_data, 
size); -out: - kfree(pnvm_data); - return ret; + return 0; } static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, - size_t len) + size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; IWL_DEBUG_FW(trans, "Parsing PNVM file\n"); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { const struct iwl_sku_id *sku_id = (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", tlv_len); IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n", le32_to_cpu(sku_id->data[0]), le32_to_cpu(sku_id->data[1]), le32_to_cpu(sku_id->data[2])); data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { int ret; - ret = iwl_pnvm_handle_section(trans, data, len); + ret = iwl_pnvm_handle_section(trans, data, len, + pnvm_data); if (!ret) return 0; } else { IWL_DEBUG_FW(trans, "SKU ID didn't match!\n"); } } else { data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); } } return -ENOENT; } static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) { const struct firmware *pnvm; char pnvm_name[MAX_PNVM_NAME]; size_t new_len; int ret; iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); if (ret) { IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n", pnvm_name, ret); return ret; } new_len = pnvm->size; *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL); release_firmware(pnvm); if (!*data) return -ENOMEM; *len = new_len; return 0; } -int iwl_pnvm_load(struct iwl_trans *trans, - struct iwl_notif_wait_data *notif_wait) +static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len) { - u8 *data; - size_t len; struct pnvm_sku_package *package; - struct iwl_notification_wait pnvm_wait; - static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, - PNVM_INIT_COMPLETE_NTFY) }; - int ret; - - /* if the SKU_ID is empty, there's nothing to do */ - if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) - return 0; - - /* - * If we already loaded (or tried to load) it before, we just - * need to set it again. 
- */ - if (trans->pnvm_loaded) { - ret = iwl_trans_set_pnvm(trans, NULL, 0); - if (ret) - return ret; - goto skip_parse; - } + u8 *image = NULL; /* First attempt to get the PNVM from BIOS */ - package = iwl_uefi_get_pnvm(trans, &len); + package = iwl_uefi_get_pnvm(trans_p, len); if (!IS_ERR_OR_NULL(package)) { - if (len >= sizeof(*package)) { + if (*len >= sizeof(*package)) { /* we need only the data */ - len -= sizeof(*package); - data = kmemdup(package->data, len, GFP_KERNEL); - } else { - data = NULL; + *len -= sizeof(*package); + image = kmemdup(package->data, *len, GFP_KERNEL); } - /* free package regardless of whether kmemdup succeeded */ kfree(package); - - if (data) - goto parse; + if (image) + return image; } /* If it's not available, try from the filesystem */ - ret = iwl_pnvm_get_from_fs(trans, &data, &len); + if (iwl_pnvm_get_from_fs(trans_p, &image, len)) + return NULL; + return image; +} + +static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_pnvm_image *pnvm_data = NULL; + u8 *data = NULL; + size_t length; + int ret; + + /* failed to get/parse the image in the past, no use trying again */ + if (trans->fail_to_parse_pnvm_image) + return; + + if (trans->pnvm_loaded) + goto set; + + data = iwl_get_pnvm_image(trans, &length); + if (!data) { + trans->fail_to_parse_pnvm_image = true; + return; + } + + pnvm_data = kzalloc(sizeof(*pnvm_data), GFP_KERNEL); + if (!pnvm_data) + goto free; + + ret = iwl_pnvm_parse(trans, data, length, pnvm_data); if (ret) { - /* - * Pretend we've loaded it - at least we've tried and - * couldn't load it at all, so there's no point in - * trying again over and over. - */ - trans->pnvm_loaded = true; - - goto skip_parse; + trans->fail_to_parse_pnvm_image = true; + goto free; } -parse: - iwl_pnvm_parse(trans, data, len); + ret = iwl_trans_load_pnvm(trans, pnvm_data, capa); + if (ret) + goto free; + IWL_INFO(trans, "loaded PNVM version %08x\n", pnvm_data->version); +set: + iwl_trans_set_pnvm(trans, capa); +free: kfree(data); + kfree(pnvm_data); +} -skip_parse: - data = NULL; -#if defined(__FreeBSD__) - len = 0; -#endif - /* now try to get the reduce power table, if not loaded yet */ - if (!trans->reduce_power_loaded) { - data = iwl_uefi_get_reduced_power(trans, &len); - if (IS_ERR_OR_NULL(data)) { - /* - * Pretend we've loaded it - at least we've tried and - * couldn't load it at all, so there's no point in - * trying again over and over. 
- */ - trans->reduce_power_loaded = true; - - goto skip_reduce_power; - } +static void +iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_pnvm_image *pnvm_data = NULL; + u8 *data = NULL; + size_t length; + int ret; + + if (trans->failed_to_load_reduce_power_image) + return; + + if (trans->reduce_power_loaded) + goto set; + + data = iwl_uefi_get_reduced_power(trans, &length); + if (IS_ERR(data)) { + trans->failed_to_load_reduce_power_image = true; + return; } - ret = iwl_trans_set_reduce_power(trans, data, len); - if (ret) + pnvm_data = kzalloc(sizeof(*pnvm_data), GFP_KERNEL); + if (!pnvm_data) + goto free; + + ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data); + if (ret) { + trans->failed_to_load_reduce_power_image = true; + goto free; + } + + ret = iwl_trans_load_reduce_power(trans, pnvm_data, capa); + if (ret) { IWL_DEBUG_FW(trans, - "Failed to set reduce power table %d\n", + "Failed to load reduce power table %d\n", ret); + trans->failed_to_load_reduce_power_image = true; + goto free; + } + +set: + iwl_trans_set_reduce_power(trans, capa); +free: kfree(data); + kfree(pnvm_data); +} + +int iwl_pnvm_load(struct iwl_trans *trans, + struct iwl_notif_wait_data *notif_wait, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_notification_wait pnvm_wait; + static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, + PNVM_INIT_COMPLETE_NTFY) }; + + /* if the SKU_ID is empty, there's nothing to do */ + if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) + return 0; + + iwl_pnvm_load_pnvm_to_trans(trans, capa); + iwl_pnvm_load_reduce_power_to_trans(trans, capa); -skip_reduce_power: iwl_init_notification_wait(notif_wait, &pnvm_wait, ntf_cmds, ARRAY_SIZE(ntf_cmds), iwl_pnvm_complete_fn, trans); /* kick the doorbell */ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_PNVM); return iwl_wait_notification(notif_wait, &pnvm_wait, MVM_UCODE_PNVM_TIMEOUT); } IWL_EXPORT_SYMBOL(iwl_pnvm_load); diff --git a/sys/contrib/dev/iwlwifi/fw/pnvm.h b/sys/contrib/dev/iwlwifi/fw/pnvm.h index 203c367dd4de..1bac3466154c 100644 --- a/sys/contrib/dev/iwlwifi/fw/pnvm.h +++ b/sys/contrib/dev/iwlwifi/fw/pnvm.h @@ -1,38 +1,29 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/****************************************************************************** - * - * Copyright(c) 2020-2021 Intel Corporation - * - *****************************************************************************/ - +/* + * Copyright(c) 2020-2023 Intel Corporation + */ #ifndef __IWL_PNVM_H__ #define __IWL_PNVM_H__ +#include "iwl-drv.h" #include "fw/notif-wait.h" #define MVM_UCODE_PNVM_TIMEOUT (HZ / 4) #define MAX_PNVM_NAME 64 int iwl_pnvm_load(struct iwl_trans *trans, - struct iwl_notif_wait_data *notif_wait); + struct iwl_notif_wait_data *notif_wait, + const struct iwl_ucode_capabilities *capa); static inline void iwl_pnvm_get_fs_name(struct iwl_trans *trans, u8 *pnvm_name, size_t max_len) { - int pre_len; - - /* - * The prefix unfortunately includes a hyphen at the end, so - * don't add the dot here... - */ - snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre); + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; - /* ...but replace the hyphen with the dot here. 
*/ - pre_len = strlen(trans->cfg->fw_name_pre); - if (pre_len < max_len && pre_len > 0) - pnvm_name[pre_len - 1] = '.'; + snprintf(pnvm_name, max_len, "%s.pnvm", + iwl_drv_get_fwname_pre(trans, _fw_name_pre)); } #endif /* __IWL_PNVM_H__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/rs.c b/sys/contrib/dev/iwlwifi/fw/rs.c index a835214611ce..b09e68dbf5a9 100644 --- a/sys/contrib/dev/iwlwifi/fw/rs.c +++ b/sys/contrib/dev/iwlwifi/fw/rs.c @@ -1,255 +1,257 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #include #include "fw/api/rs.h" #include "iwl-drv.h" #include "iwl-config.h" #define IWL_DECLARE_RATE_INFO(r) \ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP /* * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP * */ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = { IWL_DECLARE_RATE_INFO(1), IWL_DECLARE_RATE_INFO(2), IWL_DECLARE_RATE_INFO(5), IWL_DECLARE_RATE_INFO(11), IWL_DECLARE_RATE_INFO(6), IWL_DECLARE_RATE_INFO(9), IWL_DECLARE_RATE_INFO(12), IWL_DECLARE_RATE_INFO(18), IWL_DECLARE_RATE_INFO(24), IWL_DECLARE_RATE_INFO(36), IWL_DECLARE_RATE_INFO(48), IWL_DECLARE_RATE_INFO(54), }; /* mbps, mcs */ static const struct iwl_rate_mcs_info rate_mcs[IWL_RATE_COUNT] = { { "1", "BPSK DSSS"}, { "2", "QPSK DSSS"}, {"5.5", "BPSK CCK"}, { "11", "QPSK CCK"}, { "6", "BPSK 1/2"}, { "9", "BPSK 1/2"}, { "12", "QPSK 1/2"}, { "18", "QPSK 3/4"}, { "24", "16QAM 1/2"}, { "36", "16QAM 3/4"}, { "48", "64QAM 2/3"}, { "54", "64QAM 3/4"}, { "60", "64QAM 5/6"}, }; static const char * const ant_name[] = { [ANT_NONE] = "None", [ANT_A] = "A", [ANT_B] = "B", [ANT_AB] = "AB", }; static const char * const pretty_bw[] = { "20Mhz", "40Mhz", "80Mhz", "160 Mhz", "320Mhz", }; u8 iwl_fw_rate_idx_to_plcp(int idx) { return fw_rate_idx_to_plcp[idx]; } IWL_EXPORT_SYMBOL(iwl_fw_rate_idx_to_plcp); const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx) { return &rate_mcs[idx]; } IWL_EXPORT_SYMBOL(iwl_rate_mcs); const char *iwl_rs_pretty_ant(u8 ant) { if (ant >= ARRAY_SIZE(ant_name)) return "UNKNOWN"; return ant_name[ant]; } IWL_EXPORT_SYMBOL(iwl_rs_pretty_ant); const char *iwl_rs_pretty_bw(int bw) { if (bw >= ARRAY_SIZE(pretty_bw)) return "unknown bw"; return pretty_bw[bw]; } IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw); static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags) { int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; int idx; bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1); int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0; int last = ofdm ? 
IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE; for (idx = offset; idx < last; idx++) if (iwl_fw_rate_idx_to_plcp(idx) == rate) return idx - offset; return IWL_RATE_INVALID; } u32 iwl_new_rate_from_v1(u32 rate_v1) { u32 rate_v2 = 0; u32 dup = 0; if (rate_v1 == 0) return rate_v1; /* convert rate */ if (rate_v1 & RATE_MCS_HT_MSK_V1) { u32 nss = 0; rate_v2 |= RATE_MCS_HT_MSK; rate_v2 |= rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1; nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >> RATE_HT_MCS_NSS_POS_V1; rate_v2 |= nss << RATE_MCS_NSS_POS; } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 || rate_v1 & RATE_MCS_HE_MSK_V1) { rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; - rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK; + rate_v2 |= rate_v1 & RATE_MCS_NSS_MSK; if (rate_v1 & RATE_MCS_HE_MSK_V1) { u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1; u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >> RATE_MCS_HE_106T_POS_V1; u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >> RATE_MCS_HE_GI_LTF_POS; if ((he_type_bits == RATE_MCS_HE_TYPE_SU || he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) && he_gi_ltf == RATE_MCS_HE_SU_4_LTF) /* the new rate have an additional bit to * represent the value 4 rather then using SGI * bit for this purpose - as it was done in the old * rate */ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >> RATE_MCS_SGI_POS_V1; rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS; rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS; rate_v2 |= he_106t << RATE_MCS_HE_106T_POS; rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK; rate_v2 |= RATE_MCS_HE_MSK; } else { rate_v2 |= RATE_MCS_VHT_MSK; } /* if legacy format */ } else { u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1); if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID)) legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ? IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE; rate_v2 |= legacy_rate; if (!(rate_v1 & RATE_MCS_CCK_MSK_V1)) rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK; } /* convert flags */ if (rate_v1 & RATE_MCS_LDPC_MSK_V1) rate_v2 |= RATE_MCS_LDPC_MSK; rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) | (rate_v1 & RATE_MCS_ANT_AB_MSK) | (rate_v1 & RATE_MCS_STBC_MSK) | (rate_v1 & RATE_MCS_BF_MSK); dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1; if (dup) { rate_v2 |= RATE_MCS_DUP_MSK; rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS; } if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) && (rate_v1 & RATE_MCS_SGI_MSK_V1)) rate_v2 |= RATE_MCS_SGI_MSK; return rate_v2; } IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1); int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) { char *type; u8 mcs = 0, nss = 0; u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >> RATE_MCS_CHAN_WIDTH_POS; u32 format = rate & RATE_MCS_MOD_TYPE_MSK; bool sgi; if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { int legacy_rate = rate & RATE_LEGACY_RATE_MSK; int index = format == RATE_MCS_CCK_MSK ? legacy_rate : legacy_rate + IWL_FIRST_OFDM_RATE; return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps", iwl_rs_pretty_ant(ant), index == IWL_RATE_INVALID ? "BAD" : iwl_rate_mcs(index)->mbps); } if (format == RATE_MCS_VHT_MSK) type = "VHT"; else if (format == RATE_MCS_HT_MSK) type = "HT"; else if (format == RATE_MCS_HE_MSK) type = "HE"; + else if (format == RATE_MCS_EHT_MSK) + type = "EHT"; else type = "Unknown"; /* shouldn't happen */ mcs = format == RATE_MCS_HT_MSK ? 
RATE_HT_MCS_INDEX(rate) : rate & RATE_MCS_CODE_MSK; nss = ((rate & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; sgi = format == RATE_MCS_HE_MSK ? iwl_he_is_sgi(rate) : rate & RATE_MCS_SGI_MSK; return scnprintf(buf, bufsz, "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s", rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss, (sgi) ? "SGI " : "NGI ", (rate & RATE_MCS_STBC_MSK) ? "STBC " : "", (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "", (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "", (rate & RATE_MCS_BF_MSK) ? "BF " : ""); } IWL_EXPORT_SYMBOL(rs_pretty_print_rate); bool iwl_he_is_sgi(u32 rate_n_flags) { u32 type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u32 ltf_gi = rate_n_flags & RATE_MCS_HE_GI_LTF_MSK; if (type == RATE_MCS_HE_TYPE_SU || type == RATE_MCS_HE_TYPE_EXT_SU) return ltf_gi == RATE_MCS_HE_SU_4_LTF_08_GI; return false; } IWL_EXPORT_SYMBOL(iwl_he_is_sgi); diff --git a/sys/contrib/dev/iwlwifi/fw/runtime.h b/sys/contrib/dev/iwlwifi/fw/runtime.h index d3cb1ae68a96..702586945533 100644 --- a/sys/contrib/dev/iwlwifi/fw/runtime.h +++ b/sys/contrib/dev/iwlwifi/fw/runtime.h @@ -1,206 +1,213 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ #include "iwl-config.h" #include "iwl-trans.h" #include "img.h" #include "fw/api/debug.h" #include "fw/api/paging.h" #include "fw/api/power.h" #include "iwl-eeprom-parse.h" #include "fw/acpi.h" struct iwl_fw_runtime_ops { void (*dump_start)(void *ctx); void (*dump_end)(void *ctx); bool (*fw_running)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); bool (*d3_debug_enable)(void *ctx); }; #define MAX_NUM_LMAC 2 +#define MAX_NUM_TCM 2 +#define MAX_NUM_RCM 2 struct iwl_fwrt_shared_mem_cfg { int num_lmacs; int num_txfifo_entries; struct { u32 txfifo_size[TX_FIFO_MAX_NUM]; u32 rxfifo1_size; } lmac[MAX_NUM_LMAC]; u32 rxfifo2_size; u32 rxfifo2_control_size; u32 internal_txfifo_addr; u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; #define IWL_FW_RUNTIME_DUMP_WK_NUM 5 /** * struct iwl_fwrt_dump_data - dump data * @trig: trigger the worker was scheduled upon * @fw_pkt: packet received from FW */ struct iwl_fwrt_dump_data { union { struct { struct iwl_fw_ini_trigger_tlv *trig; struct iwl_rx_packet *fw_pkt; }; struct { const struct iwl_fw_dump_desc *desc; bool monitor_only; }; }; }; /** * struct iwl_fwrt_wk_data - dump worker data struct * @idx: index of the worker * @wk: worker */ struct iwl_fwrt_wk_data { u8 idx; struct delayed_work wk; struct iwl_fwrt_dump_data dump_data; }; /** * struct iwl_txf_iter_data - Tx fifo iterator data struct * @fifo: fifo number * @lmac: lmac number * @fifo_size: fifo size * @internal_txf: non zero if fifo is internal Tx fifo */ struct iwl_txf_iter_data { int fifo; int lmac; u32 fifo_size; u8 internal_txf; }; /** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image * @cfg: NIC configuration * @dev: device pointer * @ops: user ops * @ops_ctx: user ops context * @fw_paging_db: paging database * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block * @smem_cfg: saved firmware SMEM configuration * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data */ struct iwl_fw_runtime { struct iwl_trans *trans; const struct iwl_fw *fw; struct 
device *dev; const struct iwl_fw_runtime_ops *ops; void *ops_ctx; const struct iwl_dump_sanitize_ops *sanitize_ops; void *sanitize_ctx; /* Paging */ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; u16 num_of_pages_in_last_blk; enum iwl_ucode_type cur_fw_img; /* memory configuration */ struct iwl_fwrt_shared_mem_cfg smem_cfg; /* debug */ struct { struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM]; unsigned long active_wks; u8 conf; /* ts of the beginning of a non-collect fw dbg data period */ unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM]; u32 *d3_debug_data; u32 lmac_err_id[MAX_NUM_LMAC]; + u32 tcm_err_id[MAX_NUM_TCM]; + u32 rcm_err_id[MAX_NUM_RCM]; u32 umac_err_id; struct iwl_txf_iter_data txf_iter_data; struct { u8 type; u8 subtype; u32 lmac_major; u32 lmac_minor; u32 umac_major; u32 umac_minor; } fw_ver; } dump; -#ifdef CONFIG_IWLWIFI_DEBUGFS struct { +#ifdef CONFIG_IWLWIFI_DEBUGFS struct delayed_work wk; u32 delay; +#endif u64 seq; } timestamp; +#ifdef CONFIG_IWLWIFI_DEBUGFS bool tpc_enabled; #endif /* CONFIG_IWLWIFI_DEBUGFS */ #ifdef CONFIG_ACPI struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; u8 sar_chain_a_profile; u8 sar_chain_b_profile; struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3]; u32 geo_rev; u32 geo_num_profiles; bool geo_enabled; struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; u32 ppag_flags; u32 ppag_ver; + bool ppag_table_valid; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; u8 reduced_power_flags; #endif }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, const struct iwl_fw *fw, const struct iwl_fw_runtime_ops *ops, void *ops_ctx, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx, struct dentry *dbgfs_dir); static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) { int i; kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) cancel_delayed_work_sync(&fwrt->dump.wks[i].wk); } void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) { fwrt->cur_fw_img = cur_fw_img; } int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt); int iwl_configure_rxq(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/sys/contrib/dev/iwlwifi/fw/uefi.c b/sys/contrib/dev/iwlwifi/fw/uefi.c index 23b1d689ba7b..9877988db0d2 100644 --- a/sys/contrib/dev/iwlwifi/fw/uefi.c +++ b/sys/contrib/dev/iwlwifi/fw/uefi.c @@ -1,356 +1,391 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2021 Intel Corporation + * Copyright(c) 2021-2023 Intel Corporation */ #include "iwl-drv.h" #include "pnvm.h" #include "iwl-prph.h" #include "iwl-io.h" #include "fw/uefi.h" #include "fw/api/alive.h" #include #include "fw/runtime.h" #define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \ 0xb2, 0xec, 0xf5, 0xa3, \ 0x59, 0x4f, 0x4a, 0xea) -void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +struct iwl_uefi_pnvm_mem_desc { + __le32 addr; + __le32 size; + const u8 data[]; +} __packed; + +static void *iwl_uefi_get_variable(efi_char16_t *name, efi_guid_t *guid, + 
unsigned long *data_size) { - struct efivar_entry *pnvm_efivar; + efi_status_t status; void *data; - unsigned long package_size; - int err; - *len = 0; + if (!data_size) + return ERR_PTR(-EINVAL); - pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL); - if (!pnvm_efivar) - return ERR_PTR(-ENOMEM); + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return ERR_PTR(-ENODEV); - memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME, - sizeof(IWL_UEFI_OEM_PNVM_NAME)); - pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; + /* first call with NULL data to get the exact entry size */ + *data_size = 0; + status = efi.get_variable(name, guid, NULL, data_size, NULL); + if (status != EFI_BUFFER_TOO_SMALL || !*data_size) + return ERR_PTR(-EIO); - /* - * TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. To implement this properly, - * we have to call efivar_entry_size(). - */ - package_size = IWL_HARDCODED_PNVM_SIZE; + data = kmalloc(*data_size, GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); - data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; + status = efi.get_variable(name, guid, NULL, data_size, data); + if (status != EFI_SUCCESS) { + kfree(data); + return ERR_PTR(-ENOENT); } - err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data); - if (err) { + return data; +} + +void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +{ + unsigned long package_size; + void *data; + + *len = 0; + + data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { IWL_DEBUG_FW(trans, - "PNVM UEFI variable not found %d (len %lu)\n", - err, package_size); - kfree(data); - data = ERR_PTR(err); - goto out; + "PNVM UEFI variable not found 0x%lx (len %lu)\n", + PTR_ERR(data), package_size); + return data; } IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size); *len = package_size; -out: - kfree(pnvm_efivar); - return data; } -static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, - const u8 *data, size_t len) +int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data) +{ + const struct iwl_uefi_pnvm_mem_desc *desc = (const void *)data; + u32 data_len; + + if (tlv_len < sizeof(*desc)) { + IWL_DEBUG_FW(trans, "TLV len (%d) is too small\n", tlv_len); + return -EINVAL; + } + + data_len = tlv_len - sizeof(*desc); + + IWL_DEBUG_FW(trans, + "Handle IWL_UCODE_TLV_MEM_DESC, len %d data_len %d\n", + tlv_len, data_len); + + if (le32_to_cpu(desc->size) != data_len) { + IWL_DEBUG_FW(trans, "invalid mem desc size %d\n", desc->size); + return -EINVAL; + } + + if (pnvm_data->n_chunks == IPC_DRAM_MAP_ENTRY_NUM_MAX) { + IWL_DEBUG_FW(trans, "too many payloads to allocate in DRAM.\n"); + return -EINVAL; + } + + IWL_DEBUG_FW(trans, "Adding data (size %d)\n", data_len); + + pnvm_data->chunks[pnvm_data->n_chunks].data = desc->data; + pnvm_data->chunks[pnvm_data->n_chunks].len = data_len; + pnvm_data->n_chunks++; + + return 0; +} + +static int iwl_uefi_reduce_power_section(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; - u8 *reduce_power_data = NULL, *tmp; - u32 size = 0; IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n"); + memset(pnvm_data, 0, sizeof(*pnvm_data)); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = 
le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-EINVAL); - goto out; + return -EINVAL; } data += sizeof(*tlv); switch (tlv_type) { - case IWL_UCODE_TLV_MEM_DESC: { - IWL_DEBUG_FW(trans, - "Got IWL_UCODE_TLV_MEM_DESC len %d\n", - tlv_len); - - IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len); - - tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL); - if (!tmp) { - IWL_DEBUG_FW(trans, - "Couldn't allocate (more) reduce_power_data\n"); - - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-ENOMEM); - goto out; - } - - reduce_power_data = tmp; - - memcpy(reduce_power_data + size, data, tlv_len); - - size += tlv_len; - + case IWL_UCODE_TLV_MEM_DESC: + if (iwl_uefi_handle_tlv_mem_desc(trans, data, tlv_len, + pnvm_data)) + return -EINVAL; break; - } case IWL_UCODE_TLV_PNVM_SKU: IWL_DEBUG_FW(trans, "New REDUCE_POWER section started, stop parsing.\n"); goto done; default: IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n", tlv_type, tlv_len); break; } len -= ALIGN(tlv_len, 4); data += ALIGN(tlv_len, 4); } done: - if (!size) { + if (!pnvm_data->n_chunks) { IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n"); - /* Better safe than sorry, but 'reduce_power_data' should - * always be NULL if !size. - */ - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-ENOENT); - goto out; + return -ENOENT; } - - IWL_INFO(trans, "loaded REDUCE_POWER\n"); - -out: - return reduce_power_data; + return 0; } -static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, - const u8 *data, size_t len) +int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; - void *sec_data; IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n"); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { const struct iwl_sku_id *sku_id = (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", tlv_len); IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n", le32_to_cpu(sku_id->data[0]), le32_to_cpu(sku_id->data[1]), le32_to_cpu(sku_id->data[2])); data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { - sec_data = iwl_uefi_reduce_power_section(trans, - data, - len); - if (!IS_ERR(sec_data)) - return sec_data; + int ret = iwl_uefi_reduce_power_section(trans, + data, len, + pnvm_data); + if (!ret) + return 0; } else { IWL_DEBUG_FW(trans, "SKU ID didn't match!\n"); } } else { data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); } } - return ERR_PTR(-ENOENT); + return -ENOENT; } -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) +u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) { - struct efivar_entry *reduce_power_efivar; struct pnvm_sku_package *package; - void *data = NULL; unsigned long package_size; - int err; + u8 *data; - *len = 0; + package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME, + &IWL_EFI_VAR_GUID, 
&package_size); - reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL); - if (!reduce_power_efivar) - return ERR_PTR(-ENOMEM); - - memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME, - sizeof(IWL_UEFI_REDUCED_POWER_NAME)); - reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; - - /* - * TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. To implement this properly, - * we have to call efivar_entry_size(). - */ - package_size = IWL_HARDCODED_REDUCE_POWER_SIZE; - - package = kmalloc(package_size, GFP_KERNEL); - if (!package) { - package = ERR_PTR(-ENOMEM); - goto out; + if (IS_ERR(package)) { + IWL_DEBUG_FW(trans, + "Reduced Power UEFI variable not found 0x%lx (len %lu)\n", + PTR_ERR(package), package_size); + return ERR_CAST(package); } - err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package); - if (err) { + if (package_size < sizeof(*package)) { IWL_DEBUG_FW(trans, - "Reduced Power UEFI variable not found %d (len %lu)\n", - err, package_size); + "Invalid Reduced Power UEFI variable len (%lu)\n", + package_size); kfree(package); - data = ERR_PTR(err); - goto out; + return ERR_PTR(-EINVAL); } IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n", package_size); - *len = package_size; IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n", package->rev, package->total_size, package->n_skus); - data = iwl_uefi_reduce_power_parse(trans, package->data, - *len - sizeof(*package)); + *len = package_size - sizeof(*package); + data = kmemdup(package->data, *len, GFP_KERNEL); + if (!data) { + kfree(package); + return ERR_PTR(-ENOMEM); + } kfree(package); -out: - kfree(reduce_power_efivar); - return data; } +static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_data, + struct iwl_trans *trans) +{ + if (common_step_data->revision != 1) + return -EINVAL; + + trans->mbx_addr_0_step = (u32)common_step_data->revision | + (u32)common_step_data->cnvi_eq_channel << 8 | + (u32)common_step_data->cnvr_eq_channel << 16 | + (u32)common_step_data->radio1 << 24; + trans->mbx_addr_1_step = (u32)common_step_data->radio2; + return 0; +} + +void iwl_uefi_get_step_table(struct iwl_trans *trans) +{ + struct uefi_cnv_common_step_data *data; + unsigned long package_size; + int ret; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID, + &package_size); + + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "STEP UEFI variable not found 0x%lx\n", + PTR_ERR(data)); + return; + } + + if (package_size < sizeof(*data)) { + IWL_DEBUG_FW(trans, + "Invalid STEP table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return; + } + + IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n", + package_size); + + ret = iwl_uefi_step_parse(data, trans); + if (ret < 0) + IWL_DEBUG_FW(trans, "Cannot read STEP tables. 
rev is invalid\n"); + + kfree(data); +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table); + #ifdef CONFIG_ACPI static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data, struct iwl_fw_runtime *fwrt) { int i, j; if (sgom_data->revision != 1) return -EINVAL; memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map, sizeof(fwrt->sgom_table.offset_map)); for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) { for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) { /* since each byte is composed of two values, */ /* one for each letter, */ /* extract and check each of them separately */ u8 value = fwrt->sgom_table.offset_map[i][j]; u8 low = value & 0xF; u8 high = (value & 0xF0) >> 4; if (high > fwrt->geo_num_profiles) high = 0; if (low > fwrt->geo_num_profiles) low = 0; fwrt->sgom_table.offset_map[i][j] = (high << 4) | low; } } fwrt->sgom_enabled = true; return 0; } void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { - struct efivar_entry *sgom_efivar; struct uefi_cnv_wlan_sgom_data *data; unsigned long package_size; - int err, ret; + int ret; if (!fwrt->geo_enabled) return; - sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL); - if (!sgom_efivar) + data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "SGOM UEFI variable not found 0x%lx\n", + PTR_ERR(data)); return; - - memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME, - sizeof(IWL_UEFI_SGOM_NAME)); - sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; - - /* TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. To implement this properly, - * we have to call efivar_entry_size(). - */ - package_size = IWL_HARDCODED_SGOM_SIZE; - - data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; + } - err = efivar_entry_get(sgom_efivar, NULL, &package_size, data); - if (err) { + if (package_size < sizeof(*data)) { IWL_DEBUG_FW(trans, - "SGOM UEFI variable not found %d\n", err); - goto out_free; + "Invalid SGOM table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return; } IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n", package_size); ret = iwl_uefi_sgom_parse(data, fwrt); if (ret < 0) IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n"); -out_free: kfree(data); - -out: - kfree(sgom_efivar); } IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); #endif /* CONFIG_ACPI */ diff --git a/sys/contrib/dev/iwlwifi/fw/uefi.h b/sys/contrib/dev/iwlwifi/fw/uefi.h index 09d2a971b3a0..1369cc4855c3 100644 --- a/sys/contrib/dev/iwlwifi/fw/uefi.h +++ b/sys/contrib/dev/iwlwifi/fw/uefi.h @@ -1,65 +1,91 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021 Intel Corporation + * Copyright(c) 2021-2023 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ #define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm" #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" #define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" +#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP" -/* - * TODO: we have these hardcoded values that the caller must pass, - * because reading from the UEFI is not working. To implement this - * properly, we have to change iwl_pnvm_get_from_uefi() to call - * efivar_entry_size() and return the value to the caller instead. 
- */ -#define IWL_HARDCODED_PNVM_SIZE 4096 -#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768 -#define IWL_HARDCODED_SGOM_SIZE 339 +#define IWL_SGOM_MAP_SIZE 339 struct pnvm_sku_package { u8 rev; u32 total_size; u8 n_skus; u32 reserved[2]; u8 data[]; } __packed; struct uefi_cnv_wlan_sgom_data { u8 revision; - u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1]; + u8 offset_map[IWL_SGOM_MAP_SIZE - 1]; +} __packed; + +struct uefi_cnv_common_step_data { + u8 revision; + u8 step_mode; + u8 cnvi_eq_channel; + u8 cnvr_eq_channel; + u8 radio1; + u8 radio2; } __packed; /* * This is known to be broken on v4.19 and to work on v5.4. Until we * figure out why this is the case and how to make it work, simply * disable the feature in old kernels. */ #ifdef CONFIG_EFI void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len); -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); +u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); +int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data); +void iwl_uefi_get_step_table(struct iwl_trans *trans); +int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data); #else /* CONFIG_EFI */ -static inline -void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { return ERR_PTR(-EOPNOTSUPP); } -static inline -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) +static inline int +iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) +{ + return -EOPNOTSUPP; +} + +static inline u8 * +iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) { return ERR_PTR(-EOPNOTSUPP); } + +static inline void iwl_uefi_get_step_table(struct iwl_trans *trans) +{ +} + +static inline int +iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data) +{ + return 0; +} #endif /* CONFIG_EFI */ #if defined(CONFIG_EFI) && defined(CONFIG_ACPI) void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt); #else static inline void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { } #endif #endif /* __iwl_fw_uefi__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-config.h b/sys/contrib/dev/iwlwifi/iwl-config.h index f5b556a103e8..241a9e3f2a1a 100644 --- a/sys/contrib/dev/iwlwifi/iwl-config.h +++ b/sys/contrib/dev/iwlwifi/iwl-config.h @@ -1,661 +1,646 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ #include #include #include #include #include "iwl-csr.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, IWL_DEVICE_FAMILY_1000, IWL_DEVICE_FAMILY_100, IWL_DEVICE_FAMILY_2000, IWL_DEVICE_FAMILY_2030, IWL_DEVICE_FAMILY_105, IWL_DEVICE_FAMILY_135, IWL_DEVICE_FAMILY_5000, IWL_DEVICE_FAMILY_5150, IWL_DEVICE_FAMILY_6000, IWL_DEVICE_FAMILY_6000i, IWL_DEVICE_FAMILY_6005, IWL_DEVICE_FAMILY_6030, IWL_DEVICE_FAMILY_6050, IWL_DEVICE_FAMILY_6150, IWL_DEVICE_FAMILY_7000, IWL_DEVICE_FAMILY_8000, IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_AX210, IWL_DEVICE_FAMILY_BZ, + IWL_DEVICE_FAMILY_SC, }; /* * LED mode * IWL_LED_DEFAULT: use device default * 
IWL_LED_RF_STATE: turn LED on/off based on RF state * LED ON = RF ON * LED OFF = RF OFF * IWL_LED_BLINK: adjust led blink rate based on blink table * IWL_LED_DISABLE: led disabled */ enum iwl_led_mode { IWL_LED_DEFAULT, IWL_LED_RF_STATE, IWL_LED_BLINK, IWL_LED_DISABLE, }; /** * enum iwl_nvm_type - nvm formats * @IWL_NVM: the regular format * @IWL_NVM_EXT: extended NVM format * @IWL_NVM_SDP: NVM format used by 3168 series */ enum iwl_nvm_type { IWL_NVM, IWL_NVM_EXT, IWL_NVM_SDP, }; /* * This is the threshold value of plcp error rate per 100mSecs. It is * used to set and check for the validity of plcp_delta. */ #define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1 #define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50 #define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100 #define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200 #define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255 #define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0 /* TX queue watchdog timeouts in mSecs */ #define IWL_WATCHDOG_DISABLED 0 #define IWL_DEF_WD_TIMEOUT 2500 #define IWL_LONG_WD_TIMEOUT 10000 #define IWL_MAX_WD_TIMEOUT 120000 #define IWL_DEFAULT_MAX_TX_POWER 22 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\ NETIF_F_TSO | NETIF_F_TSO6) #define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6) #define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \ IWL_TX_CSUM_NETIF_FLAGS_BZ | \ NETIF_F_RXCSUM) /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_INVALID 0xff #define ANT_A BIT(0) #define ANT_B BIT(1) #define ANT_C BIT(2) #define ANT_AB (ANT_A | ANT_B) #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) static inline u8 num_of_ant(u8 mask) { return !!((mask) & ANT_A) + !!((mask) & ANT_B) + !!((mask) & ANT_C); } /** * struct iwl_base_params - params not likely to change within a device family * @max_ll_items: max number of OTP blocks * @shadow_ram_support: shadow support for OTP memory * @led_compensation: compensate on the led on/off time per HW according * to the deviation to achieve the desired led frequency. * The detail algorithm is described in iwl-led.c * @wd_timeout: TX queues watchdog timeout * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadow register support * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. * @max_tfd_queue_size: max number of entries in tfd queue. 
*/ struct iwl_base_params { unsigned int wd_timeout; u16 eeprom_size; u16 max_event_log_size; u8 pll_cfg:1, /* for iwl_pcie_apm_init() */ shadow_ram_support:1, shadow_reg_enable:1, pcie_l1_allowed:1, apmg_wake_up_wa:1, scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ u32 max_tfd_queue_size; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; }; /* * @stbc: support Tx STBC and 1*SS Rx STBC * @ldpc: support Tx/Rx with LDPC * @use_rts_for_aggregation: use rts/cts protection for HT traffic * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { u8 ht_greenfield_support:1, stbc:1, ldpc:1, use_rts_for_aggregation:1; u8 ht40_bands; }; /* * Tx-backoff threshold * @temperature: The threshold in Celsius * @backoff: The tx-backoff in uSec */ struct iwl_tt_tx_backoff { s32 temperature; u32 backoff; }; #define TT_TX_BACKOFF_SIZE 6 /** * struct iwl_tt_params - thermal throttling parameters * @ct_kill_entry: CT Kill entry threshold * @ct_kill_exit: CT Kill exit threshold * @ct_kill_duration: The time intervals (in uSec) in which the driver needs * to check whether to exit CT Kill. * @dynamic_smps_entry: Dynamic SMPS entry threshold * @dynamic_smps_exit: Dynamic SMPS exit threshold * @tx_protection_entry: TX protection entry threshold * @tx_protection_exit: TX protection exit threshold * @tx_backoff: Array of thresholds for tx-backoff, in ascending order. * @support_ct_kill: Support CT Kill? * @support_dynamic_smps: Support dynamic SMPS? * @support_tx_protection: Support tx protection? * @support_tx_backoff: Support tx-backoff? */ struct iwl_tt_params { u32 ct_kill_entry; u32 ct_kill_exit; u32 ct_kill_duration; u32 dynamic_smps_entry; u32 dynamic_smps_exit; u32 tx_protection_entry; u32 tx_protection_exit; struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE]; u8 support_ct_kill:1, support_dynamic_smps:1, support_tx_protection:1, support_tx_backoff:1; }; /* * information on how to parse the EEPROM */ #define EEPROM_REG_BAND_1_CHANNELS 0x08 #define EEPROM_REG_BAND_2_CHANNELS 0x26 #define EEPROM_REG_BAND_3_CHANNELS 0x42 #define EEPROM_REG_BAND_4_CHANNELS 0x5C #define EEPROM_REG_BAND_5_CHANNELS 0x74 #define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82 #define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92 #define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80 #define EEPROM_REGULATORY_BAND_NO_HT40 0 /* lower blocks contain EEPROM image and calibration data */ #define OTP_LOW_IMAGE_SIZE_2K (2 * 512 * sizeof(u16)) /* 2 KB */ #define OTP_LOW_IMAGE_SIZE_16K (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(u16)) /* 32 KB */ struct iwl_eeprom_params { const u8 regulatory_bands[7]; bool enhanced_txpower; }; /* Tx-backoff power threshold * @pwr: The power limit in mW * @backoff: The tx-backoff in uSec */ struct iwl_pwr_tx_backoff { u32 pwr; u32 backoff; }; enum iwl_cfg_trans_ltr_delay { IWL_CFG_TRANS_LTR_DELAY_NONE = 0, IWL_CFG_TRANS_LTR_DELAY_200US = 1, IWL_CFG_TRANS_LTR_DELAY_2500US = 2, IWL_CFG_TRANS_LTR_DELAY_1820US = 3, }; /** * struct iwl_cfg_trans - information needed to start the trans * * These values are specific to the device ID and do not change when * multiple configs are used for a single device ID. The values are * used, among other things, to boot the NIC so that the HW REV or * RFID can be read before deciding the remaining parameters to use. 
* * @base_params: pointer to basic parameters * @csr: csr flags and addresses that are different across devices * @device_family: the device family * @umac_prph_offset: offset to add to UMAC periphery address * @xtal_latency: power up latency to get the xtal stabilized * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY * @rf_id: need to read rf_id to determine the firmware image - * @use_tfh: use TFH * @gen2: 22000 and on transport operation * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated * @low_latency_xtal: use the low latency xtal if supported * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay. * @imr_enabled: use the IMR if supported. */ struct iwl_cfg_trans_params { const struct iwl_base_params *base_params; enum iwl_device_family device_family; u32 umac_prph_offset; u32 xtal_latency; u32 extra_phy_cfg_flags; u32 rf_id:1, - use_tfh:1, gen2:1, mq_rx_supported:1, integrated:1, low_latency_xtal:1, bisr_workaround:1, ltr_delay:2, imr_enabled:1; }; /** * struct iwl_fw_mon_reg - FW monitor register info * @addr: register address * @mask: register mask */ struct iwl_fw_mon_reg { u32 addr; u32 mask; }; /** * struct iwl_fw_mon_regs - FW monitor registers * @write_ptr: write pointer register * @cycle_cnt: cycle count register * @cur_frag: current fragment in use */ struct iwl_fw_mon_regs { struct iwl_fw_mon_reg write_ptr; struct iwl_fw_mon_reg cycle_cnt; struct iwl_fw_mon_reg cur_frag; }; /** * struct iwl_cfg * @trans: the trans-specific configuration part * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre.ucode. + * filename is constructed as <fw_name_pre>-<api>.ucode. + * @fw_name_mac: MAC name for this config, the remaining pieces of the + * name will be generated dynamically * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section (only DVM) * @max_data_size: The maximal length of the fw data section (only DVM) * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna * @non_shared_ant: the antenna that is for WiFi only * @nvm_ver: NVM version * @nvm_calib_ver: NVM calibration version * @lib: pointer to the lib ops * @ht_params: pointer to ht parameters * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @tx_with_siso_diversity: 1x1 device with tx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device * @high_temp: Is this NIC designated to be in high temperature. 
* @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section * @mac_addr_from_csr: read HW address from CSR registers at this offset * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @dccm_offset: offset from which DCCM begins * @dccm_len: length of DCCM (including runtime stack CCM) * @dccm2_offset: offset from which the second DCCM begins * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM * @vht_mu_mimo_supported: VHT MU-MIMO support * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @min_txq_size: minimum number of slots required in a TX queue * @uhb_supported: ultra high band channels supported * @min_ba_txq_size: minimum number of slots required in a TX queue which * based on hardware support (HE - 256, EHT - 1K). * @num_rbds: number of receive buffer descriptors to use * (only used for multi-queue capable devices) * @mac_addr_csr_base: CSR base register for MAC address access, if not set * assume 0x380 * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs * and/or the uCode API version instead. */ struct iwl_cfg { struct iwl_cfg_trans_params trans; /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; + const char *fw_name_mac; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; u32 max_data_size; u32 max_inst_size; netdev_features_t features; u32 dccm_offset; u32 dccm_len; u32 dccm2_offset; u32 dccm2_len; u32 smem_offset; u32 smem_len; u16 nvm_ver; u16 nvm_calib_ver; u32 rx_with_siso_diversity:1, tx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, high_temp:1, mac_addr_from_csr:10, lp_xtal_workaround:1, - disable_dummy_notification:1, apmg_not_supported:1, vht_mu_mimo_supported:1, cdb:1, dbgc_supported:1, uhb_supported:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; u8 nvm_hw_section_num; u8 max_tx_agg_size; u8 ucode_api_max; u8 ucode_api_min; u16 num_rbds; u32 min_umac_error_event_table; u32 d3_debug_data_base_addr; u32 d3_debug_data_length; u32 min_txq_size; u32 gp2_reg_addr; u32 min_ba_txq_size; const struct iwl_fw_mon_regs mon_dram_regs; const struct iwl_fw_mon_regs mon_smem_regs; const struct iwl_fw_mon_regs mon_dbgi_regs; }; #define IWL_CFG_ANY (~0) #define IWL_CFG_MAC_TYPE_PU 0x31 -#define IWL_CFG_MAC_TYPE_PNJ 0x32 #define IWL_CFG_MAC_TYPE_TH 0x32 #define IWL_CFG_MAC_TYPE_QU 0x33 #define IWL_CFG_MAC_TYPE_QUZ 0x35 -#define IWL_CFG_MAC_TYPE_QNJ 0x36 #define IWL_CFG_MAC_TYPE_SO 0x37 -#define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_MAC_TYPE_GL 0x47 +#define IWL_CFG_MAC_TYPE_SC 0x48 #define 
IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 #define IWL_CFG_RF_TYPE_JF2 0x105 #define IWL_CFG_RF_TYPE_JF1 0x108 #define IWL_CFG_RF_TYPE_HR2 0x10A #define IWL_CFG_RF_TYPE_HR1 0x10C #define IWL_CFG_RF_TYPE_GF 0x10D #define IWL_CFG_RF_TYPE_MR 0x110 #define IWL_CFG_RF_TYPE_MS 0x111 #define IWL_CFG_RF_TYPE_FM 0x112 +#define IWL_CFG_RF_TYPE_WH 0x113 #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 #define IWL_CFG_RF_ID_JF 0x3 #define IWL_CFG_RF_ID_JF1 0x6 #define IWL_CFG_RF_ID_JF1_DIV 0xA #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 #define IWL_CFG_NO_160 0x1 #define IWL_CFG_160 0x0 #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 #define IWL_CFG_NO_CDB 0x0 #define IWL_CFG_CDB 0x1 #define IWL_CFG_NO_JACKET 0x0 #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) #define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { u16 device; u16 subdevice; u16 mac_type; u16 rf_type; u8 mac_step; + u8 rf_step; u8 rf_id; u8 no_160; u8 cores; u8 cdb; u8 jacket; const struct iwl_cfg *cfg; const char *name; }; /* * This list declares the config structures for all devices. */ extern const struct iwl_cfg_trans_params iwl9000_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; extern const char iwl9270_name[]; extern const char iwl9461_name[]; extern const char iwl9462_name[]; extern const char iwl9560_name[]; extern const char iwl9162_160_name[]; extern const char iwl9260_160_name[]; extern const char iwl9270_160_name[]; extern const char iwl9461_160_name[]; extern const char iwl9462_160_name[]; extern const char iwl9560_160_name[]; extern const char iwl9260_killer_1550_name[]; extern const char iwl9560_killer_1550i_name[]; extern const char iwl9560_killer_1550s_name[]; extern const char iwl_ax200_name[]; extern const char iwl_ax203_name[]; extern const char iwl_ax204_name[]; extern const char iwl_ax201_name[]; extern const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; extern const char iwl_ax200_killer_1650x_name[]; extern const char iwl_ax201_killer_1650s_name[]; extern const char iwl_ax201_killer_1650i_name[]; extern const char iwl_ax210_killer_1675w_name[]; extern const char iwl_ax210_killer_1675x_name[]; extern const char iwl9560_killer_1550i_160_name[]; extern const char iwl9560_killer_1550s_160_name[]; extern const char 
iwl_ax211_killer_1675s_name[]; extern const char iwl_ax211_killer_1675i_name[]; extern const char iwl_ax411_killer_1690s_name[]; extern const char iwl_ax411_killer_1690i_name[]; extern const char iwl_ax211_name[]; extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_bz_name[]; +extern const char iwl_sc_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; extern const struct iwl_cfg iwl5350_agn_cfg; extern const struct iwl_cfg iwl5100_bgn_cfg; extern const struct iwl_cfg iwl5100_abg_cfg; extern const struct iwl_cfg iwl5150_agn_cfg; extern const struct iwl_cfg iwl5150_abg_cfg; extern const struct iwl_cfg iwl6005_2agn_cfg; extern const struct iwl_cfg iwl6005_2abg_cfg; extern const struct iwl_cfg iwl6005_2bg_cfg; extern const struct iwl_cfg iwl6005_2agn_sff_cfg; extern const struct iwl_cfg iwl6005_2agn_d_cfg; extern const struct iwl_cfg iwl6005_2agn_mow1_cfg; extern const struct iwl_cfg iwl6005_2agn_mow2_cfg; extern const struct iwl_cfg iwl1030_bgn_cfg; extern const struct iwl_cfg iwl1030_bg_cfg; extern const struct iwl_cfg iwl6030_2agn_cfg; extern const struct iwl_cfg iwl6030_2abg_cfg; extern const struct iwl_cfg iwl6030_2bgn_cfg; extern const struct iwl_cfg iwl6030_2bg_cfg; extern const struct iwl_cfg iwl6000i_2agn_cfg; extern const struct iwl_cfg iwl6000i_2abg_cfg; extern const struct iwl_cfg iwl6000i_2bg_cfg; extern const struct iwl_cfg iwl6000_3agn_cfg; extern const struct iwl_cfg iwl6050_2agn_cfg; extern const struct iwl_cfg iwl6050_2abg_cfg; extern const struct iwl_cfg iwl6150_bgn_cfg; extern const struct iwl_cfg iwl6150_bg_cfg; extern const struct iwl_cfg iwl1000_bgn_cfg; extern const struct iwl_cfg iwl1000_bg_cfg; extern const struct iwl_cfg iwl100_bgn_cfg; extern const struct iwl_cfg iwl100_bg_cfg; extern const struct iwl_cfg iwl130_bgn_cfg; extern const struct iwl_cfg iwl130_bg_cfg; extern const struct iwl_cfg iwl2000_2bgn_cfg; extern const struct iwl_cfg iwl2000_2bgn_d_cfg; extern const struct iwl_cfg iwl2030_2bgn_cfg; extern const struct iwl_cfg iwl6035_2agn_cfg; extern const struct iwl_cfg iwl6035_2agn_sff_cfg; extern const struct iwl_cfg iwl105_bgn_cfg; extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) +extern const struct iwl_ht_params iwl_22000_ht_params; extern const struct iwl_cfg iwl7260_2ac_cfg; extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; extern const struct iwl_cfg iwl7260_2n_cfg; extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl3168_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; extern const struct iwl_cfg iwl7265d_2ac_cfg; extern const struct iwl_cfg iwl7265d_2n_cfg; extern const struct iwl_cfg iwl7265d_n_cfg; extern const struct iwl_cfg iwl8260_2n_cfg; extern const struct iwl_cfg iwl8260_2ac_cfg; extern const struct iwl_cfg iwl8265_2ac_cfg; extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg; extern const struct iwl_cfg 
iwl9560_quz_a0_jf_b0_cfg; -extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl_qu_b0_hr1_b0; extern const struct iwl_cfg iwl_qu_c0_hr1_b0; extern const struct iwl_cfg iwl_quz_a0_hr1_b0; extern const struct iwl_cfg iwl_qu_b0_hr_b0; extern const struct iwl_cfg iwl_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax201_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr; extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; -extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; -extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0; -extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0; -extern const struct iwl_cfg iwl_cfg_snj_hr_b0; -extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0; -extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0; + +extern const struct iwl_cfg iwl_cfg_ma; + extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; -extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0; + +extern const struct iwl_cfg iwl_cfg_bz; +extern const struct iwl_cfg iwl_cfg_gl; + +extern const struct iwl_cfg iwl_cfg_sc; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h b/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h index b84884034c74..96bf353469b8 100644 --- a/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h +++ b/sys/contrib/dev/iwlwifi/iwl-context-info-gen3.h @@ -1,269 +1,304 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2018, 2020-2022 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ #include "iwl-context-info.h" #define CSR_CTXT_INFO_BOOT_CTRL 0x0 #define 
CSR_CTXT_INFO_ADDR 0x118 #define CSR_IML_DATA_ADDR 0x120 #define CSR_IML_SIZE_ADDR 0x128 #define CSR_IML_RESP_ADDR 0x12c +#define UNFRAGMENTED_PNVM_PAYLOADS_NUMBER 2 + /* Set bit for enabling automatic function boot */ #define CSR_AUTO_FUNC_BOOT_ENA BIT(1) /* Set bit for initiating function boot */ #define CSR_AUTO_FUNC_INIT BIT(7) /** * enum iwl_prph_scratch_mtr_format - tfd size configuration * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd */ enum iwl_prph_scratch_mtr_format { IWL_PRPH_MTR_FORMAT_16B = 0x0, IWL_PRPH_MTR_FORMAT_32B = 0x40000, IWL_PRPH_MTR_FORMAT_64B = 0x80000, IWL_PRPH_MTR_FORMAT_256B = 0xC0000, }; /** * enum iwl_prph_scratch_flags - PRPH scratch control flags * @IWL_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated * in hwm config. * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for * multicomm. * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for * completion descriptor, 1 for responses (legacy) * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, * 3: 256 bit. * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored * by older firmware versions, so set IWL_PRPH_SCRATCH_RB_SIZE_4K * appropriately; use the below values for this. * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size */ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1), IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8), IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9), IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10), IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11), IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, }; /* * struct iwl_prph_scratch_version - version structure * @mac_id: SKU and revision id * @version: prph scratch information version id * @size: the size of the context information in DWs * @reserved: reserved */ struct iwl_prph_scratch_version { __le16 mac_id; __le16 version; __le16 size; __le16 reserved; } __packed; /* PERIPH_SCRATCH_VERSION_S */ /* * struct iwl_prph_scratch_control - control structure * @control_flags: context information flags see &enum iwl_prph_scratch_flags * @reserved: reserved */ struct iwl_prph_scratch_control { __le32 control_flags; __le32 reserved; } __packed; /* PERIPH_SCRATCH_CONTROL_S */ /* - * struct iwl_prph_scratch_pnvm_cfg - ror config + * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch * @pnvm_base_addr: PNVM start address - * @pnvm_size: PNVM size in DWs + * @pnvm_size: the size of the PNVM image in bytes * @reserved: reserved */ struct iwl_prph_scratch_pnvm_cfg { __le64 pnvm_base_addr; __le32 pnvm_size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */ +/** + * struct iwl_prph_scrath_mem_desc_addr_array + * @mem_descs: 
array of dram addresses. + * Each address is the beggining of a pnvm payload. + */ +struct iwl_prph_scrath_mem_desc_addr_array { + __le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX]; +} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */ /* * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address * @hwm_size: hwm size in DWs * @debug_token_config: debug preset */ struct iwl_prph_scratch_hwm_cfg { __le64 hwm_base_addr; __le32 hwm_size; __le32 debug_token_config; } __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ /* * struct iwl_prph_scratch_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @reserved: reserved */ struct iwl_prph_scratch_rbd_cfg { __le64 free_rbd_addr; __le32 reserved; } __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ /* * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table * @base_addr: reduce power table address - * @size: table size in dwords + * @size: the size of the entire power table image */ struct iwl_prph_scratch_uefi_cfg { __le64 base_addr; __le32 size; __le32 reserved; } __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */ +/* + * struct iwl_prph_scratch_step_cfg - prph scratch step configuration + * @mbx_addr_0: [0:7] revision, + * [8:15] cnvi_to_cnvr length, + * [16:23] cnvr_to_cnvi channel length, + * [24:31] radio1 reserved + * @mbx_addr_1: [0:7] radio2 reserved + */ + +struct iwl_prph_scratch_step_cfg { + __le32 mbx_addr_0; + __le32 mbx_addr_1; +} __packed; + /* * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config * @version: version information of context info and HW * @control: control flags of FH configurations * @pnvm_cfg: ror configuration * @hwm_cfg: hwm configuration * @rbd_cfg: default RX queue configuration + * @step_cfg: step configuration */ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_version version; struct iwl_prph_scratch_control control; struct iwl_prph_scratch_pnvm_cfg pnvm_cfg; struct iwl_prph_scratch_hwm_cfg hwm_cfg; struct iwl_prph_scratch_rbd_cfg rbd_cfg; struct iwl_prph_scratch_uefi_cfg reduce_power_cfg; + struct iwl_prph_scratch_step_cfg step_cfg; } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ /* * struct iwl_prph_scratch - peripheral scratch mapping * @ctrl_cfg: control and configuration of prph scratch * @dram: firmware images addresses in DRAM * @reserved: reserved */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; - __le32 reserved[12]; + __le32 reserved[10]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ /* * struct iwl_prph_info - peripheral information * @boot_stage_mirror: reflects the value in the Boot Stage CSR register * @ipc_status_mirror: reflects the value in the IPC Status CSR register * @sleep_notif: indicates the peripheral sleep status * @reserved: reserved */ struct iwl_prph_info { __le32 boot_stage_mirror; __le32 ipc_status_mirror; __le32 sleep_notif; __le32 reserved; } __packed; /* PERIPH_INFO_S */ /* * struct iwl_context_info_gen3 - device INIT configuration * @version: version of the context information * @size: size of context information in DWs * @config: context in which the peripheral would execute - a subset of * capability csr register published by the peripheral * @prph_info_base_addr: the peripheral information structure start address * @cr_head_idx_arr_base_addr: the completion ring head index array * start address * @tr_tail_idx_arr_base_addr: the transfer ring tail index array * start address * @cr_tail_idx_arr_base_addr: the completion ring tail index array * start address * 
@tr_head_idx_arr_base_addr: the transfer ring head index array * start address * @cr_idx_arr_size: number of entries in the completion ring index array * @tr_idx_arr_size: number of entries in the transfer ring index array * @mtr_base_addr: the message transfer ring start address * @mcr_base_addr: the message completion ring start address * @mtr_size: number of entries which the message transfer ring can hold * @mcr_size: number of entries which the message completion ring can hold * @mtr_doorbell_vec: the doorbell vector associated with the message * transfer ring * @mcr_doorbell_vec: the doorbell vector associated with the message * completion ring * @mtr_msi_vec: the MSI which shall be generated by the peripheral after * completing a transfer descriptor in the message transfer ring * @mcr_msi_vec: the MSI which shall be generated by the peripheral after * completing a completion descriptor in the message completion ring * @mtr_opt_header_size: the size of the optional header in the transfer * descriptor associated with the message transfer ring in DWs * @mtr_opt_footer_size: the size of the optional footer in the transfer * descriptor associated with the message transfer ring in DWs * @mcr_opt_header_size: the size of the optional header in the completion * descriptor associated with the message completion ring in DWs * @mcr_opt_footer_size: the size of the optional footer in the completion * descriptor associated with the message completion ring in DWs * @msg_rings_ctrl_flags: message rings control flags * @prph_info_msi_vec: the MSI which shall be generated by the peripheral * after updating the Peripheral Information structure * @prph_scratch_base_addr: the peripheral scratch structure start address * @prph_scratch_size: the size of the peripheral scratch structure in DWs * @reserved: reserved */ struct iwl_context_info_gen3 { __le16 version; __le16 size; __le32 config; __le64 prph_info_base_addr; __le64 cr_head_idx_arr_base_addr; __le64 tr_tail_idx_arr_base_addr; __le64 cr_tail_idx_arr_base_addr; __le64 tr_head_idx_arr_base_addr; __le16 cr_idx_arr_size; __le16 tr_idx_arr_size; __le64 mtr_base_addr; __le64 mcr_base_addr; __le16 mtr_size; __le16 mcr_size; __le16 mtr_doorbell_vec; __le16 mcr_doorbell_vec; __le16 mtr_msi_vec; __le16 mcr_msi_vec; u8 mtr_opt_header_size; u8 mtr_opt_footer_size; u8 mcr_opt_header_size; u8 mcr_opt_footer_size; __le16 msg_rings_ctrl_flags; __le16 prph_info_msi_vec; __le64 prph_scratch_base_addr; __le32 prph_scratch_size; __le32 reserved; } __packed; /* IPC_CONTEXT_INFO_S */ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive); -int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len); -int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len); - +int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa); +void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); +int +iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); +void +iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); +int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans, + u32 
mbx_addr_0_step, u32 mbx_addr_1_step); #endif /* __iwl_context_info_file_gen3_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-context-info.h b/sys/contrib/dev/iwlwifi/iwl-context-info.h index 4354d5acac9f..1a1321db137c 100644 --- a/sys/contrib/dev/iwlwifi/iwl-context-info.h +++ b/sys/contrib/dev/iwlwifi/iwl-context-info.h @@ -1,184 +1,187 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2020, 2022 Intel Corporation */ #ifndef __iwl_context_info_file_h__ #define __iwl_context_info_file_h__ /* maximmum number of DRAM map entries supported by FW */ #define IWL_MAX_DRAM_ENTRY 64 #define CSR_CTXT_INFO_BA 0x40 /** * enum iwl_context_info_flags - Context information control flags * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting * the init done for driver command that configures several system modes * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump * @IWL_CTXT_INFO_RB_CB_SIZE: mask of the RBD Cyclic Buffer Size * exponent, the actual size is 2**value, valid sizes are 8-2048. * The value is four bits long. Maximum valid exponent is 12 * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the * default is short format - not supported by the driver) * @IWL_CTXT_INFO_RB_SIZE: RB size mask * (values are IWL_CTXT_INFO_RB_SIZE_*K) * @IWL_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size * @IWL_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size * @IWL_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size * @IWL_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size * @IWL_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size * @IWL_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size * @IWL_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size * @IWL_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size * @IWL_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size * @IWL_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size */ enum iwl_context_info_flags { IWL_CTXT_INFO_AUTO_FUNC_INIT = 0x0001, IWL_CTXT_INFO_EARLY_DEBUG = 0x0002, IWL_CTXT_INFO_ENABLE_CDMP = 0x0004, IWL_CTXT_INFO_RB_CB_SIZE = 0x00f0, IWL_CTXT_INFO_TFD_FORMAT_LONG = 0x0100, IWL_CTXT_INFO_RB_SIZE = 0x1e00, IWL_CTXT_INFO_RB_SIZE_1K = 0x1, IWL_CTXT_INFO_RB_SIZE_2K = 0x2, IWL_CTXT_INFO_RB_SIZE_4K = 0x4, IWL_CTXT_INFO_RB_SIZE_8K = 0x8, IWL_CTXT_INFO_RB_SIZE_12K = 0x9, IWL_CTXT_INFO_RB_SIZE_16K = 0xa, IWL_CTXT_INFO_RB_SIZE_20K = 0xb, IWL_CTXT_INFO_RB_SIZE_24K = 0xc, IWL_CTXT_INFO_RB_SIZE_28K = 0xd, IWL_CTXT_INFO_RB_SIZE_32K = 0xe, }; /* * struct iwl_context_info_version - version structure * @mac_id: SKU and revision id * @version: context information version id * @size: the size of the context information in DWs */ struct iwl_context_info_version { __le16 mac_id; __le16 version; __le16 size; __le16 reserved; } __packed; /* * struct iwl_context_info_control - version structure * @control_flags: context information flags see &enum iwl_context_info_flags */ struct iwl_context_info_control { __le32 control_flags; __le32 reserved; } __packed; /* * struct iwl_context_info_dram - images DRAM map * each entry in the map represents a DRAM chunk of up to 32 KB * @umac_img: UMAC image DRAM map * @lmac_img: LMAC image DRAM map * @virtual_img: paged image DRAM map */ struct iwl_context_info_dram { __le64 umac_img[IWL_MAX_DRAM_ENTRY]; __le64 lmac_img[IWL_MAX_DRAM_ENTRY]; __le64 virtual_img[IWL_MAX_DRAM_ENTRY]; } __packed; /* * struct iwl_context_info_rbd_cfg - RBDs configuration * @free_rbd_addr: default queue free RB CB base address * @used_rbd_addr: 
default queue used RB CB base address * @status_wr_ptr: default queue used RB status write pointer */ struct iwl_context_info_rbd_cfg { __le64 free_rbd_addr; __le64 used_rbd_addr; __le64 status_wr_ptr; } __packed; /* * struct iwl_context_info_hcmd_cfg - command queue configuration * @cmd_queue_addr: address of command queue * @cmd_queue_size: number of entries */ struct iwl_context_info_hcmd_cfg { __le64 cmd_queue_addr; u8 cmd_queue_size; u8 reserved[7]; } __packed; /* * struct iwl_context_info_dump_cfg - Core Dump configuration * @core_dump_addr: core dump (debug DRAM address) start address * @core_dump_size: size, in DWs */ struct iwl_context_info_dump_cfg { __le64 core_dump_addr; __le32 core_dump_size; __le32 reserved; } __packed; /* * struct iwl_context_info_pnvm_cfg - platform NVM data configuration * @platform_nvm_addr: Platform NVM data start address * @platform_nvm_size: size in DWs */ struct iwl_context_info_pnvm_cfg { __le64 platform_nvm_addr; __le32 platform_nvm_size; __le32 reserved; } __packed; /* * struct iwl_context_info_early_dbg_cfg - early debug configuration for * dumping DRAM addresses * @early_debug_addr: early debug start address * @early_debug_size: size in DWs */ struct iwl_context_info_early_dbg_cfg { __le64 early_debug_addr; __le32 early_debug_size; __le32 reserved; } __packed; /* * struct iwl_context_info - device INIT configuration * @version: version information of context info and HW * @control: control flags of FH configurations * @rbd_cfg: default RX queue configuration * @hcmd_cfg: command queue configuration * @dump_cfg: core dump data * @edbg_cfg: early debug configuration * @pnvm_cfg: platform nvm configuration * @dram: firmware image addresses in DRAM */ struct iwl_context_info { struct iwl_context_info_version version; struct iwl_context_info_control control; __le64 reserved0; struct iwl_context_info_rbd_cfg rbd_cfg; struct iwl_context_info_hcmd_cfg hcmd_cfg; __le32 reserved1[4]; struct iwl_context_info_dump_cfg dump_cfg; struct iwl_context_info_early_dbg_cfg edbg_cfg; struct iwl_context_info_pnvm_cfg pnvm_cfg; __le32 reserved2[16]; struct iwl_context_info_dram dram; __le32 reserved3[16]; } __packed; int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram); +void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys); int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, const void *data, u32 len, struct iwl_dram_data *dram); #endif /* __iwl_context_info_file_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-csr.h b/sys/contrib/dev/iwlwifi/iwl-csr.h index 3e1f011e93aa..587368a0ad4a 100644 --- a/sys/contrib/dev/iwlwifi/iwl-csr.h +++ b/sys/contrib/dev/iwlwifi/iwl-csr.h @@ -1,641 +1,646 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #ifndef __iwl_csr_h__ #define __iwl_csr_h__ /* * CSR (control and status registers) * * CSR registers are mapped directly into PCI bus space, and are accessible * whenever platform supplies power to device, even when device is in * low power states due to driver-invoked device resets * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. 
* * Use iwl_write32() and iwl_read32() family to access these registers; * these provide simple PCI bus access, without waking up the MAC. * Do not use iwl_write_direct32() family for these registers; * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. * The MAC (uCode processor, etc.) does not need to be powered up for accessing * the CSR registers. * * NOTE: Device does need to be awake in order to read this memory * via CSR_EEPROM and CSR_OTP registers */ #define CSR_BASE (0x000) #define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ #define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ #define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ #define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ #define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ #define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ #define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ #define CSR_GP_CNTRL (CSR_BASE+0x024) #define CSR_FUNC_SCRATCH (CSR_BASE+0x02c) /* Scratch register - used for FW dbg */ /* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ #define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) /* * Hardware revision info * Bit fields: * 31-16: Reserved * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D * 1-0: "Dash" (-) value, as in A-1, etc. */ #define CSR_HW_REV (CSR_BASE+0x028) /* * RF ID revision info * Bit fields: * 31:24: Reserved (set to 0x0) * 23:12: Type * 11:8: Step (A - 0x0, B - 0x1, etc) * 7:4: Dash * 3:0: Flavor */ #define CSR_HW_RF_ID (CSR_BASE+0x09c) /* * EEPROM and OTP (one-time-programmable) memory reads * * NOTE: Device must be awake, initialized via apm_ops.init(), * in order to read. */ #define CSR_EEPROM_REG (CSR_BASE+0x02c) #define CSR_EEPROM_GP (CSR_BASE+0x030) #define CSR_OTP_GP_REG (CSR_BASE+0x034) #define CSR_GIO_REG (CSR_BASE+0x03C) #define CSR_GP_UCODE_REG (CSR_BASE+0x048) #define CSR_GP_DRIVER_REG (CSR_BASE+0x050) /* * UCODE-DRIVER GP (general purpose) mailbox registers. * SET/CLR registers set/clear bit(s) if "1" is written. 
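 *
 * Editor's illustration (a sketch, not part of this change): because a "1"
 * written to a SET/CLR register affects only the bits set in the written
 * value, a single bit such as CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (defined
 * below) can be set or cleared without a read-modify-write, using the
 * plain CSR accessors described at the top of this file:
 *
 *	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
 *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
 * sets the bit, and the same write to CSR_UCODE_DRV_GP1_CLR clears it;
 * other bits in CSR_UCODE_DRV_GP1 are left untouched.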
*/ #define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) #define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) #define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) #define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) #define CSR_MBOX_SET_REG (CSR_BASE + 0x88) #define CSR_LED_REG (CSR_BASE+0x094) #define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) #define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE + 0x0A8) /* 6000 and up */ #define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE BIT(20) #define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC) #define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF /* LTR control (since IWL_DEVICE_FAMILY_22000) */ #define CSR_LTR_LONG_VAL_AD (CSR_BASE + 0x0D4) #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000 #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE 0x1c000000 #define CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL 0x03ff0000 #define CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000 #define CSR_LTR_LONG_VAL_AD_SNOOP_SCALE 0x00001c00 #define CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff #define CSR_LTR_LONG_VAL_AD_SCALE_USEC 2 +#define CSR_LTR_LAST_MSG (CSR_BASE + 0x0DC) + /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) #define CSR_IPC_SLEEP_CONTROL (CSR_BASE + 0x114) #define CSR_IPC_SLEEP_CONTROL_SUSPEND 0x3 #define CSR_IPC_SLEEP_CONTROL_RESUME 0 /* Doorbell - since Bz * connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only) */ #define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130) /* host chicken bits */ #define CSR_HOST_CHICKEN (CSR_BASE + 0x204) #define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19) /* Analog phase-lock-loop configuration */ #define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) /* * CSR HW resources monitor registers */ #define CSR_MONITOR_CFG_REG (CSR_BASE+0x214) #define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228) #define CSR_MONITOR_XTAL_RESOURCES (0x00000010) /* * CSR Hardware Revision Workaround Register. Indicates hardware rev; * "step" determines CCK backoff for txpower calculation. * See also CSR_HW_REV register. * Bit fields: * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step * 1-0: "Dash" (-) value, as in C-1, etc. */ #define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) #define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) #define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) /* * Scratch register initial configuration - this is set on init, and read * during a error FW error. 
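 *
 * Editor's illustration (a sketch, not part of this change): the pattern
 * written at init is CSR_FUNC_SCRATCH_INIT_VALUE, defined just below, so a
 * hypothetical init path would look roughly like
 *	iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
 * with CSR_FUNC_SCRATCH itself defined earlier in this file.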
*/ #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F) #define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080) #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) #define CSR_HW_IF_CONFIG_REG_D3_DEBUG (0x00000200) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000) #define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000) #define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0) #define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2) #define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6) #define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10) #define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12) #define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14) #define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) #define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) #define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ #define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ #define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ #define CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000) #define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */ #define CSR_MBOX_SET_REG_OS_ALIVE BIT(5) #define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ #define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ /* interrupt flags in INTA, set by uCode or hardware (e.g. dma), * acknowledged (reset) by host writing "1" to flagged bits. */ #define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ #define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ #define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ #define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ #define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ #define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ #define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ #define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ #define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ #define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ #define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ #define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ CSR_INT_BIT_HW_ERR | \ CSR_INT_BIT_FH_TX | \ CSR_INT_BIT_SW_ERR | \ CSR_INT_BIT_RF_KILL | \ CSR_INT_BIT_SW_RX | \ CSR_INT_BIT_WAKEUP | \ CSR_INT_BIT_ALIVE | \ CSR_INT_BIT_RX_PERIODIC) /* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ #define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ #define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ #define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ #define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ #define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ #define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ #define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ CSR_FH_INT_BIT_RX_CHNL1 | \ CSR_FH_INT_BIT_RX_CHNL0) #define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ CSR_FH_INT_BIT_TX_CHNL0) /* GPIO */ #define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) #define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) #define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) /* RESET */ #define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) #define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) #define 
CSR_RESET_REG_FLAG_SW_RESET (0x00000080) #define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) #define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) #define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) /* * GP (general purpose) CONTROL REGISTER * Bit fields: * 27: HW_RF_KILL_SW * Indicates state of (platform's) hardware RF-Kill switch * 26-24: POWER_SAVE_TYPE * Indicates current power-saving mode: * 000 -- No power saving * 001 -- MAC power-down * 010 -- PHY (radio) power-down * 011 -- Error * 10: XTAL ON request * 9-6: SYS_CONFIG * Indicates current system configuration, reflecting pins on chip * as forced high/low by device circuit board. * 4: GOING_TO_SLEEP * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. * 3: MAC_ACCESS_REQ * Host sets this to request and maintain MAC wakeup, to allow host * access to device-internal resources. Host must wait for * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR * device registers. * 2: INIT_DONE * Host sets this to put device into fully operational D0 power mode. * Host resets this after SW_RESET to put device into low power mode. * 0: MAC_CLOCK_READY * Indicates MAC (ucode processor, etc.) is powered up and can run. * Internal resources are accessible. * NOTE: This does not indicate that the processor is actually running. * NOTE: This does not indicate that device has completed * init or post-power-down restore of internal SRAM memory. * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that * SRAM is restored and uCode is in normal operation mode. * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and * do not need to save/restore it. * NOTE: After device reset, this bit remains "0" until host sets * INIT_DONE */ #define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) #define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) #define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) #define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) /* From Bz we use these instead during init/reset flow */ #define CSR_GP_CNTRL_REG_FLAG_MAC_INIT BIT(6) #define CSR_GP_CNTRL_REG_FLAG_ROM_START BIT(7) #define CSR_GP_CNTRL_REG_FLAG_MAC_STATUS BIT(20) #define CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ BIT(21) #define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS BIT(28) #define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ BIT(29) #define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31) /* HW REV */ #define CSR_HW_REV_STEP_DASH(_val) ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH) #define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4) /* HW RFID */ #define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0) #define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4) #define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8) #define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12) #define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28) #define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29) /** * hw_rev values */ enum { SILICON_A_STEP = 0, SILICON_B_STEP, SILICON_C_STEP, + SILICON_D_STEP, + SILICON_E_STEP, SILICON_Z_STEP = 0xf, }; #define CSR_HW_REV_TYPE_MSK (0x000FFF0) #define CSR_HW_REV_TYPE_5300 (0x0000020) #define CSR_HW_REV_TYPE_5350 (0x0000030) #define CSR_HW_REV_TYPE_5100 (0x0000050) 
#define CSR_HW_REV_TYPE_5150 (0x0000040) #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) #define CSR_HW_REV_TYPE_6150 (0x0000084) #define CSR_HW_REV_TYPE_6x05 (0x00000B0) #define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 #define CSR_HW_REV_TYPE_2x30 (0x00000C0) #define CSR_HW_REV_TYPE_2x00 (0x0000100) #define CSR_HW_REV_TYPE_105 (0x0000110) #define CSR_HW_REV_TYPE_135 (0x0000120) #define CSR_HW_REV_TYPE_3160 (0x0000164) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000361) #define CSR_HW_REV_TYPE_QU_B0 (0x0000331) #define CSR_HW_REV_TYPE_QU_C0 (0x0000332) #define CSR_HW_REV_TYPE_QUZ (0x0000351) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) #define CSR_HW_REV_TYPE_SO (0x0000370) #define CSR_HW_REV_TYPE_TY (0x0000420) /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) +#define CSR_HW_RF_ID_TYPE_MS (0x00111000) /* HW_RF CHIP STEP */ #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) #define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) #define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) /* EEPROM GP */ #define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ #define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) #define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000) #define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001) #define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) #define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) /* One-time-programmable memory general purpose reg */ #define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ #define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ #define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ #define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ /* GP REG */ #define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ #define CSR_GP_REG_NO_POWER_SAVE (0x00000000) #define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) #define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) #define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) /* CSR GIO */ #define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 * Host driver and uCode write and/or read this register to communicate with * each other. * Bit fields: * 4: UCODE_DISABLE * Host sets this to request permanent halt of uCode, same as * sending CARD_STATE command with "halt" bit set. * 3: CT_KILL_EXIT * Host sets this to request exit from CT_KILL state, i.e. host thinks * device temperature is low enough to continue normal operation. * 2: CMD_BLOCKED * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) * to release uCode to clear all Tx and command queues, enter * unassociated mode, and power down. * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. * 1: SW_BIT_RFKILL * Host sets this when issuing CARD_STATE command to request * device sleep. * 0: MAC_SLEEP * uCode sets this when preparing a power-saving power-down. * uCode resets this when power-up is complete and SRAM is sane. 
* NOTE: device saves internal SRAM data to host when powering down, * and must restore this data after powering back up. * MAC_SLEEP is the best indication that restore is complete. * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and * do not need to save/restore it. */ #define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) #define CSR_UCODE_SW_BIT_RFKILL (0x00000002) #define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) #define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) #define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020) /* GP Driver */ #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) #define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) #define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) #define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) /* LED */ #define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) #define CSR_LED_REG_TURN_ON (0x60) #define CSR_LED_REG_TURN_OFF (0x20) /* ANA_PLL */ #define CSR50_ANA_PLL_CFG_VAL (0x00880300) /* HPET MEM debug */ #define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) /* DRAM INT TABLE */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) #define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) /* * SHR target access (Shared block memory space) * * Shared internal registers can be accessed directly from PCI bus through SHR * arbiter without need for the MAC HW to be powered up. This is possible due to * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers. * * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW * need not be powered up so no "grab inc access" is required. */ /* * Registers for accessing shared registers (e.g. SHR_APMG_GP1, * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC), * first, write to the control register: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access) * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0]. * * To write the register, first, write to the data register * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then: * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register) * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access) */ #define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec) #define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4) /* * HBUS (Host-side Bus) * * HBUS registers are mapped directly into PCI bus space, but are used * to indirectly access device's internal memory or registers that * may be powered-down. * * Use iwl_write_direct32()/iwl_read_direct32() family for these registers; * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ * to make sure the MAC (uCode processor, etc.) is powered up for accessing * internal resources. * * Do not use iwl_write32()/iwl_read32() family to access these registers; * these provide only simple PCI bus access, without waking up the MAC. */ #define HBUS_BASE (0x400) /* * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM * structures, error log, event log, verifying uCode load). 
* First write to address register, then read from or write to data register * to complete the job. Once the address register is set up, accesses to * data registers auto-increment the address by one dword. * Bit usage for address registers (read or write): * 0-31: memory address within device */ #define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) #define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) #define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) #define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) /* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ #define HBUS_TARG_MBX_C (HBUS_BASE+0x030) #define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) /* * Registers for accessing device's internal peripheral registers * (e.g. SCD, BSM, etc.). First write to address register, * then read from or write to data register to complete the job. * Bit usage for address registers (read or write): * 0-15: register address (offset) within device * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) */ #define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) #define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) #define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) #define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) /* Used to enable DBGM */ #define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c) /* * Per-Tx-queue write pointer (index, really!) * Indicates index to next TFD that driver will fill (1 past latest filled). * Bit usage: * 0-7: queue write index * 11-8: queue selector */ #define HBUS_TARG_WRPTR (HBUS_BASE+0x060) /* This register is common for Tx and Rx, Rx queues start from 512 */ #define HBUS_TARG_WRPTR_Q_SHIFT (16) #define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT) /********************************************************** * CSR values **********************************************************/ /* * host interrupt timeout value * used with setting interrupt coalescing timer * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit * * default interrupt coalescing timer is 64 x 32 = 2048 usecs */ #define IWL_HOST_INT_TIMEOUT_MAX (0xFF) #define IWL_HOST_INT_TIMEOUT_DEF (0x40) #define IWL_HOST_INT_TIMEOUT_MIN (0x0) #define IWL_HOST_INT_OPER_MODE BIT(31) /***************************************************************************** * 7000/3000 series SHR DTS addresses * *****************************************************************************/ /* Diode Results Register Structure: */ enum dtd_diode_reg { DTS_DIODE_REG_DIG_VAL = 0x000000FF, /* bits [7:0] */ DTS_DIODE_REG_VREF_LOW = 0x0000FF00, /* bits [15:8] */ DTS_DIODE_REG_VREF_HIGH = 0x00FF0000, /* bits [23:16] */ DTS_DIODE_REG_VREF_ID = 0x03000000, /* bits [25:24] */ DTS_DIODE_REG_PASS_ONCE = 0x80000000, /* bits [31:31] */ DTS_DIODE_REG_FLAGS_MSK = 0xFF000000, /* bits [31:24] */ /* Those are the masks INSIDE the flags bit-field: */ DTS_DIODE_REG_FLAGS_VREFS_ID_POS = 0, DTS_DIODE_REG_FLAGS_VREFS_ID = 0x00000003, /* bits [1:0] */ DTS_DIODE_REG_FLAGS_PASS_ONCE_POS = 7, DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ }; /***************************************************************************** * MSIX related registers * *****************************************************************************/ #define CSR_MSIX_BASE (0x2000) #define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800) #define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804) #define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808) #define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C) #define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810) #define CSR_MSIX_RX_IVAR_AD_REG 
(CSR_MSIX_BASE + 0x880) #define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890) #define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000) #define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause)) #define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause)) #define MSIX_FH_INT_CAUSES_Q(q) (q) /* * Causes for the FH register interrupts */ enum msix_fh_int_causes { MSIX_FH_INT_CAUSES_Q0 = BIT(0), MSIX_FH_INT_CAUSES_Q1 = BIT(1), MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16), MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17), MSIX_FH_INT_CAUSES_S2D = BIT(19), MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), }; /* The low 16 bits are for rx data queue indication */ #define MSIX_FH_INT_CAUSES_DATA_QUEUE 0xffff /* * Causes for the HW register interrupts */ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IML = BIT(1), MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25), MSIX_HW_INT_CAUSES_REG_SCD = BIT(26), MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27), MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29), MSIX_HW_INT_CAUSES_REG_HAP = BIT(30), }; #define MSIX_MIN_INTERRUPT_VECTORS 2 #define MSIX_AUTO_CLEAR_CAUSE 0 #define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) /***************************************************************************** * HW address related registers * *****************************************************************************/ #define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr) #define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00) #define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04) #define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08) #define CSR_MAC_ADDR1_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x0c) #endif /* !__iwl_csr_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.c b/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.c index 3237d4b528b5..ef5baee6c9c5 100644 --- a/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.c +++ b/sys/contrib/dev/iwlwifi/iwl-dbg-tlv.c @@ -1,1383 +1,1408 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-dbg-tlv.h" #include "fw/dbg.h" #include "fw/runtime.h" /** * enum iwl_dbg_tlv_type - debug TLV types * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV * @IWL_DBG_TLV_TYPE_HCMD: host command TLV * @IWL_DBG_TLV_TYPE_REGION: region TLV * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs */ enum iwl_dbg_tlv_type { IWL_DBG_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE, IWL_DBG_TLV_TYPE_BUF_ALLOC, IWL_DBG_TLV_TYPE_HCMD, IWL_DBG_TLV_TYPE_REGION, IWL_DBG_TLV_TYPE_TRIGGER, IWL_DBG_TLV_TYPE_CONF_SET, IWL_DBG_TLV_TYPE_NUM, }; /** * struct iwl_dbg_tlv_ver_data - debug TLV version struct * @min_ver: min version supported * @max_ver: max version supported */ struct iwl_dbg_tlv_ver_data { int min_ver; int max_ver; }; /** * struct iwl_dbg_tlv_timer_node - timer node struct * @list: list of &struct iwl_dbg_tlv_timer_node * @timer: timer * @fwrt: &struct iwl_fw_runtime * @tlv: TLV attach to the timer node */ struct iwl_dbg_tlv_timer_node { struct list_head list; struct timer_list timer; struct 
iwl_fw_runtime *fwrt; struct iwl_ucode_tlv *tlv; }; static const struct iwl_dbg_tlv_ver_data dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = { [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,}, [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,}, [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,}, }; static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv, struct list_head *list) { u32 len = le32_to_cpu(tlv->length); struct iwl_dbg_tlv_node *node; node = kzalloc(sizeof(*node) + len, GFP_KERNEL); if (!node) return -ENOMEM; memcpy(&node->tlv, tlv, sizeof(node->tlv)); memcpy(node->tlv.data, tlv->data, len); list_add_tail(&node->list, list); return 0; } static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; u32 type = le32_to_cpu(tlv->type); u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; u32 ver = le32_to_cpu(hdr->version); if (ver < dbg_ver_table[tlv_idx].min_ver || ver > dbg_ver_table[tlv_idx].max_ver) return false; return true; } static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data; if (le32_to_cpu(tlv->length) != sizeof(*debug_info)) return -EINVAL; IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n", debug_info->debug_cfg_name); return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list); } static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data; u32 buf_location; u32 alloc_id; if (le32_to_cpu(tlv->length) != sizeof(*alloc)) return -EINVAL; buf_location = le32_to_cpu(alloc->buf_location); alloc_id = le32_to_cpu(alloc->alloc_id); if (buf_location == IWL_FW_INI_LOCATION_INVALID || buf_location >= IWL_FW_INI_LOCATION_NUM) goto err; if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID || alloc_id >= IWL_FW_INI_ALLOCATION_NUM) goto err; if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH && alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) goto err; if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH && alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) goto err; + if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH && + alloc->req_size == 0) { + IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n"); + return -EINVAL; + } + trans->dbg.fw_mon_cfg[alloc_id] = *alloc; return 0; err: IWL_ERR(trans, "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n", alloc_id, buf_location); return -EINVAL; } static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data; u32 tp = le32_to_cpu(hcmd->time_point); if (le32_to_cpu(tlv->length) <= sizeof(*hcmd)) return -EINVAL; /* Host commands can not be sent in early time point since the FW * is not ready */ if (tp == IWL_FW_INI_TIME_POINT_INVALID || tp >= IWL_FW_INI_TIME_POINT_NUM || tp == IWL_FW_INI_TIME_POINT_EARLY) { IWL_ERR(trans, "WRT: Invalid time point %u for host command TLV\n", tp); return -EINVAL; } return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list); } static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data; struct 
iwl_ucode_tlv **active_reg; u32 id = le32_to_cpu(reg->id); u8 type = reg->type; u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length); /* * The higher part of the ID from version 2 is debug policy. * The id will be only lsb 16 bits, so mask it out. */ if (le32_to_cpu(reg->hdr.version) >= 2) id &= IWL_FW_INI_REGION_ID_MASK; if (le32_to_cpu(tlv->length) < sizeof(*reg)) return -EINVAL; /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */ IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n", IWL_FW_INI_MAX_NAME, reg->name); if (id >= IWL_FW_INI_MAX_REGION_ID) { IWL_ERR(trans, "WRT: Invalid region id %u\n", id); return -EINVAL; } if (type <= IWL_FW_INI_REGION_INVALID || type >= IWL_FW_INI_REGION_NUM) { IWL_ERR(trans, "WRT: Invalid region type %u\n", type); return -EINVAL; } if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG && !trans->ops->read_config32) { IWL_ERR(trans, "WRT: Unsupported region type %u\n", type); return -EOPNOTSUPP; } if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) { trans->dbg.imr_data.sram_addr = le32_to_cpu(reg->internal_buffer.base_addr); trans->dbg.imr_data.sram_size = le32_to_cpu(reg->internal_buffer.size); } active_reg = &trans->dbg.active_regions[id]; if (*active_reg) { IWL_WARN(trans, "WRT: Overriding region id %u\n", id); kfree(*active_reg); } *active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL); if (!*active_reg) return -ENOMEM; IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type); return 0; } static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data; struct iwl_fw_ini_trigger_tlv *dup_trig; u32 tp = le32_to_cpu(trig->time_point); u32 rf = le32_to_cpu(trig->reset_fw); struct iwl_ucode_tlv *dup = NULL; int ret; if (le32_to_cpu(tlv->length) < sizeof(*trig)) return -EINVAL; if (tp <= IWL_FW_INI_TIME_POINT_INVALID || tp >= IWL_FW_INI_TIME_POINT_NUM) { IWL_ERR(trans, "WRT: Invalid time point %u for trigger TLV\n", tp); return -EINVAL; } IWL_DEBUG_FW(trans, "WRT: time point %u for trigger TLV with reset_fw %u\n", tp, rf); trans->dbg.last_tp_resetfw = 0xFF; if (!le32_to_cpu(trig->occurrences)) { dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), GFP_KERNEL); if (!dup) return -ENOMEM; dup_trig = (void *)dup->data; dup_trig->occurrences = cpu_to_le32(-1); tlv = dup; } ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); kfree(dup); return ret; } static int iwl_dbg_tlv_config_set(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data; u32 tp = le32_to_cpu(conf_set->time_point); u32 type = le32_to_cpu(conf_set->set_type); if (tp <= IWL_FW_INI_TIME_POINT_INVALID || tp >= IWL_FW_INI_TIME_POINT_NUM) { IWL_DEBUG_FW(trans, "WRT: Invalid time point %u for config set TLV\n", tp); return -EINVAL; } if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID || type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) { IWL_DEBUG_FW(trans, "WRT: Invalid config set type %u for config set TLV\n", type); return -EINVAL; } return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list); } static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv) = { [IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info, [IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc, [IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd, [IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region, [IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger, [IWL_DBG_TLV_TYPE_CONF_SET] = 
iwl_dbg_tlv_config_set, }; void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, bool ext) { enum iwl_ini_cfg_state *cfg_state = ext ? &trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg; const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0]; u32 type; u32 tlv_idx; u32 domain; int ret; if (le32_to_cpu(tlv->length) < sizeof(*hdr)) return; type = le32_to_cpu(tlv->type); tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE; domain = le32_to_cpu(hdr->domain); if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON && !(domain & trans->dbg.domains_bitmap)) { IWL_DEBUG_FW(trans, "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n", domain, trans->dbg.domains_bitmap); return; } if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) { IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type); goto out_err; } if (!iwl_dbg_tlv_ver_support(tlv)) { IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type, le32_to_cpu(hdr->version)); goto out_err; } ret = dbg_tlv_alloc[tlv_idx](trans, tlv); if (ret) { - IWL_ERR(trans, - "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n", - type, ret, ext); + IWL_WARN(trans, + "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n", + type, ret, ext); goto out_err; } if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED) *cfg_state = IWL_INI_CFG_STATE_LOADED; return; out_err: *cfg_state = IWL_INI_CFG_STATE_CORRUPTED; } void iwl_dbg_tlv_del_timers(struct iwl_trans *trans) { struct list_head *timer_list = &trans->dbg.periodic_trig_list; struct iwl_dbg_tlv_timer_node *node, *tmp; list_for_each_entry_safe(node, tmp, timer_list, list) { - del_timer_sync(&node->timer); + timer_shutdown_sync(&node->timer); list_del(&node->list); kfree(node); } } IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers); static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans, enum iwl_fw_ini_allocation_id alloc_id) { struct iwl_fw_mon *fw_mon; int i; if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID || alloc_id >= IWL_FW_INI_ALLOCATION_NUM) return; fw_mon = &trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { struct iwl_dram_data *frag = &fw_mon->frags[i]; dma_free_coherent(trans->dev, frag->size, frag->block, frag->physical); frag->physical = 0; frag->block = NULL; frag->size = 0; } kfree(fw_mon->frags); fw_mon->frags = NULL; fw_mon->num_frags = 0; } void iwl_dbg_tlv_free(struct iwl_trans *trans) { struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp; int i; iwl_dbg_tlv_del_timers(trans); for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) { struct iwl_ucode_tlv **active_reg = &trans->dbg.active_regions[i]; kfree(*active_reg); *active_reg = NULL; } list_for_each_entry_safe(tlv_node, tlv_node_tmp, &trans->dbg.debug_info_tlv_list, list) { list_del(&tlv_node->list); kfree(tlv_node); } for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) { struct iwl_dbg_tlv_time_point_data *tp = &trans->dbg.time_point[i]; list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list, list) { list_del(&tlv_node->list); kfree(tlv_node); } list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list, list) { list_del(&tlv_node->list); kfree(tlv_node); } list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->active_trig_list, list) { list_del(&tlv_node->list); kfree(tlv_node); } list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->config_list, list) { list_del(&tlv_node->list); kfree(tlv_node); } } for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++) iwl_dbg_tlv_fragments_free(trans, i); } static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const 
u8 *data, size_t len) { const struct iwl_ucode_tlv *tlv; u32 tlv_len; while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); iwl_dbg_tlv_alloc(trans, tlv, true); } return 0; } void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) { const struct firmware *fw; const char *yoyo_bin = "iwl-debug-yoyo.bin"; int res; if (!iwlwifi_mod_params.enable_ini || trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) return; res = firmware_request_nowarn(&fw, yoyo_bin, dev); IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin); if (res) return; iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size); release_firmware(fw); } void iwl_dbg_tlv_init(struct iwl_trans *trans) { int i; INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list); INIT_LIST_HEAD(&trans->dbg.periodic_trig_list); for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) { struct iwl_dbg_tlv_time_point_data *tp = &trans->dbg.time_point[i]; INIT_LIST_HEAD(&tp->trig_list); INIT_LIST_HEAD(&tp->hcmd_list); INIT_LIST_HEAD(&tp->active_trig_list); INIT_LIST_HEAD(&tp->config_list); } } static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt, struct iwl_dram_data *frag, u32 pages) { void *block = NULL; dma_addr_t physical; if (!frag || frag->size || !pages) return -EIO; /* * We try to allocate as many pages as we can, starting with * the requested amount and going down until we can allocate * something. Because of DIV_ROUND_UP(), pages will never go * down to 0 and stop the loop, so stop when pages reaches 1, * which is too small anyway. */ while (pages > 1) { block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE, &physical, GFP_KERNEL | __GFP_NOWARN); if (block) break; IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n", pages * PAGE_SIZE); pages = DIV_ROUND_UP(pages, 2); } if (!block) return -ENOMEM; frag->physical = physical; frag->block = block; frag->size = pages * PAGE_SIZE; return pages; } static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_allocation_id alloc_id) { struct iwl_fw_mon *fw_mon; struct iwl_fw_ini_allocation_tlv *fw_mon_cfg; u32 num_frags, remain_pages, frag_pages; int i; if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID || alloc_id >= IWL_FW_INI_ALLOCATION_NUM) return -EIO; fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id]; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; - if (fw_mon->num_frags || - fw_mon_cfg->buf_location != + if (fw_mon->num_frags) { + for (i = 0; i < fw_mon->num_frags; i++) + memset(fw_mon->frags[i].block, 0, + fw_mon->frags[i].size); + return 0; + } + + if (fw_mon_cfg->buf_location != cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH)) return 0; num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num); if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) return -EIO; num_frags = 1; + } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && + alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) { + return -EIO; } remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size), PAGE_SIZE); num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS); num_frags = min_t(u32, num_frags, remain_pages); frag_pages = DIV_ROUND_UP(remain_pages, num_frags); fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL); if (!fw_mon->frags) return -ENOMEM; 
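	/*
	 * Fill the fragments one by one.  iwl_dbg_tlv_alloc_fragment() may
	 * hand back fewer pages than requested (it halves the request until
	 * the DMA allocation succeeds), so track the remaining pages as we
	 * go.  If a fragment cannot be allocated at all, keep whatever was
	 * already allocated unless the total falls short of the configured
	 * minimum size, in which case all fragments are freed again.
	 */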
for (i = 0; i < num_frags; i++) { int pages = min_t(u32, frag_pages, remain_pages); IWL_DEBUG_FW(fwrt, "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n", alloc_id, i, pages * PAGE_SIZE); pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i], pages); if (pages < 0) { u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) - (remain_pages * PAGE_SIZE); if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) { iwl_dbg_tlv_fragments_free(fwrt->trans, alloc_id); return pages; } break; } remain_pages -= pages; fw_mon->num_frags++; } return 0; } static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_allocation_id alloc_id) { struct iwl_fw_mon *fw_mon; u32 remain_frags, num_commands; int i, fw_mon_idx = 0; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) return 0; if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID || alloc_id >= IWL_FW_INI_ALLOCATION_NUM) return -EIO; if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != IWL_FW_INI_LOCATION_DRAM_PATH) return 0; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; /* the first fragment of DBGC1 is given to the FW via register * or context info */ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1) fw_mon_idx++; remain_frags = fw_mon->num_frags - fw_mon_idx; if (!remain_frags) return 0; num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n", alloc_id); for (i = 0; i < num_commands; i++) { u32 num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); struct iwl_buf_alloc_cmd data = { .alloc_id = cpu_to_le32(alloc_id), .num_frags = cpu_to_le32(num_frags), .buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH), }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION), .data[0] = &data, .len[0] = sizeof(data), .flags = CMD_SEND_IN_RFKILL, }; int ret, j; for (j = 0; j < num_frags; j++) { struct iwl_buf_alloc_frag *frag = &data.frags[j]; struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++]; frag->addr = cpu_to_le64(fw_mon_frag->physical); frag->size = cpu_to_le32(fw_mon_frag->size); } ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; remain_frags -= num_frags; } return 0; } static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt) { int ret, i; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) return; for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) { ret = iwl_dbg_tlv_apply_buffer(fwrt, i); if (ret) IWL_WARN(fwrt, "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n", i, ret); } } static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_allocation_id alloc_id, struct iwl_dram_info *dram_info) { struct iwl_fw_mon *fw_mon; u32 remain_frags, num_frags; int j, fw_mon_idx = 0; struct iwl_buf_alloc_cmd *data; if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != IWL_FW_INI_LOCATION_DRAM_PATH) { - IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id); + IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n", + alloc_id); return -1; } fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; /* the first fragment of DBGC1 is given to the FW via register * or context info */ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1) fw_mon_idx++; remain_frags = fw_mon->num_frags - fw_mon_idx; if (!remain_frags) return -1; num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS); data = &dram_info->dram_frags[alloc_id - 1]; data->alloc_id = 
cpu_to_le32(alloc_id); data->num_frags = cpu_to_le32(num_frags); data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH); IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n", cpu_to_le32(alloc_id), cpu_to_le32(num_frags)); for (j = 0; j < num_frags; j++) { struct iwl_buf_alloc_frag *frag = &data->frags[j]; struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++]; frag->addr = cpu_to_le64(fw_mon_frag->physical); frag->size = cpu_to_le32(fw_mon_frag->size); IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n"); IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n", j, cpu_to_le64(fw_mon_frag->physical), cpu_to_le32(fw_mon_frag->size)); } return 0; } static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt) { int ret, i; bool dram_alloc = false; struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0]; struct iwl_dram_info *dram_info; if (!frags || !frags->block) return; dram_info = frags->block; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) return; - dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); - dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); + memset(dram_info, 0, sizeof(*dram_info)); for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1; - i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) { + i < IWL_FW_INI_ALLOCATION_NUM; i++) { + if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location == + IWL_FW_INI_LOCATION_INVALID) + continue; + ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info); if (!ret) dram_alloc = true; else - IWL_WARN(fwrt, + IWL_INFO(fwrt, "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n", i, ret); } - if (dram_alloc) - IWL_DEBUG_FW(fwrt, "block data after %08x\n", - dram_info->first_word); - else - memset(frags->block, 0, sizeof(*dram_info)); + if (dram_alloc) { + dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); + dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); + } } static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt, struct list_head *hcmd_list) { struct iwl_dbg_tlv_node *node; list_for_each_entry(node, hcmd_list, list) { struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data; struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd; u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd); struct iwl_host_cmd cmd = { .id = WIDE_ID(hcmd_data->group, hcmd_data->id), .len = { hcmd_len, }, .data = { hcmd_data->data, }, }; iwl_trans_send_cmd(fwrt->trans, &cmd); } } static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt, struct list_head *conf_list) { struct iwl_dbg_tlv_node *node; list_for_each_entry(node, conf_list, list) { struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data; u32 count, address, value; u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8; u32 type = le32_to_cpu(config_list->set_type); u32 offset = le32_to_cpu(config_list->addr_offset); switch (type) { case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: { if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n"); IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n"); continue; } IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len); for (count = 0; count < len; count++) { address = le32_to_cpu(config_list->addr_val[count].address); value = le32_to_cpu(config_list->addr_val[count].value); iwl_trans_write_prph(fwrt->trans, address + offset, value); } iwl_trans_release_nic_access(fwrt->trans); break; } case 
IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: { for (count = 0; count < len; count++) { address = le32_to_cpu(config_list->addr_val[count].address); value = le32_to_cpu(config_list->addr_val[count].value); iwl_trans_write_mem32(fwrt->trans, address + offset, value); IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n", count, address, value); } break; } case IWL_FW_INI_CONFIG_SET_TYPE_CSR: { for (count = 0; count < len; count++) { address = le32_to_cpu(config_list->addr_val[count].address); value = le32_to_cpu(config_list->addr_val[count].value); iwl_write32(fwrt->trans, address + offset, value); IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n", count, address, value); } break; } case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: { struct iwl_dbgc1_info dram_info = {}; struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0]; __le64 dram_base_addr; __le32 dram_size; u64 dram_addr; u32 ret; if (!frags) break; dram_base_addr = cpu_to_le64(frags->physical); dram_size = cpu_to_le32(frags->size); dram_addr = le64_to_cpu(dram_base_addr); IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n", dram_base_addr, dram_size); IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n", le32_to_cpu(config_list->addr_offset)); for (count = 0; count < len; count++) { address = le32_to_cpu(config_list->addr_val[count].address); dram_info.dbgc1_add_lsb = cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400); dram_info.dbgc1_add_msb = cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32); dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400); ret = iwl_trans_write_mem(fwrt->trans, address + offset, &dram_info, 4); if (ret) { IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n"); break; } } break; } case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: { u32 debug_token_config = le32_to_cpu(config_list->addr_val[0].value); IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n", debug_token_config); fwrt->trans->dbg.ucode_preset = debug_token_config; break; } default: break; } } } static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t) { struct iwl_dbg_tlv_timer_node *timer_node = from_timer(timer_node, t, timer); struct iwl_fwrt_dump_data dump_data = { .trig = (void *)timer_node->tlv->data, }; int ret; ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false); if (!ret || ret == -EBUSY) { u32 occur = le32_to_cpu(dump_data.trig->occurrences); u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]); if (!occur) return; mod_timer(t, jiffies + msecs_to_jiffies(collect_interval)); } } static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt) { struct iwl_dbg_tlv_node *node; struct list_head *trig_list = &fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list; list_for_each_entry(node, trig_list, list) { struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data; struct iwl_dbg_tlv_timer_node *timer_node; u32 occur = le32_to_cpu(trig->occurrences), collect_interval; u32 min_interval = 100; if (!occur) continue; /* make sure there is at least one dword of data for the * interval value */ if (le32_to_cpu(node->tlv.length) < sizeof(*trig) + sizeof(__le32)) { IWL_ERR(fwrt, "WRT: Invalid periodic trigger data was not given\n"); continue; } if (le32_to_cpu(trig->data[0]) < min_interval) { IWL_WARN(fwrt, "WRT: Override min interval from %u to %u msec\n", le32_to_cpu(trig->data[0]), min_interval); trig->data[0] = cpu_to_le32(min_interval); } collect_interval = le32_to_cpu(trig->data[0]); timer_node 
= kzalloc(sizeof(*timer_node), GFP_KERNEL); if (!timer_node) { IWL_ERR(fwrt, "WRT: Failed to allocate periodic trigger\n"); continue; } timer_node->fwrt = fwrt; timer_node->tlv = &node->tlv; timer_setup(&timer_node->timer, iwl_dbg_tlv_periodic_trig_handler, 0); list_add_tail(&timer_node->list, &fwrt->trans->dbg.periodic_trig_list); IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n"); mod_timer(&timer_node->timer, jiffies + msecs_to_jiffies(collect_interval)); } } static bool is_trig_data_contained(const struct iwl_ucode_tlv *new, const struct iwl_ucode_tlv *old) { const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data; const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data; const __le32 *new_data = new_trig->data, *old_data = old_trig->data; u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data); u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data); int i, j; for (i = 0; i < new_dwords_num; i++) { bool match = false; for (j = 0; j < old_dwords_num; j++) { if (new_data[i] == old_data[j]) { match = true; break; } } if (!match) return false; } return true; } static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt, struct iwl_ucode_tlv *trig_tlv, struct iwl_dbg_tlv_node *node) { struct iwl_ucode_tlv *node_tlv = &node->tlv; struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data; struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data; u32 policy = le32_to_cpu(trig->apply_policy); u32 size = le32_to_cpu(trig_tlv->length); u32 trig_data_len = size - sizeof(*trig); u32 offset = 0; if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) { u32 data_len = le32_to_cpu(node_tlv->length) - sizeof(*node_trig); IWL_DEBUG_FW(fwrt, "WRT: Appending trigger data (time point %u)\n", le32_to_cpu(trig->time_point)); offset += data_len; size += data_len; } else { IWL_DEBUG_FW(fwrt, "WRT: Overriding trigger data (time point %u)\n", le32_to_cpu(trig->time_point)); } if (size != le32_to_cpu(node_tlv->length)) { struct list_head *prev = node->list.prev; struct iwl_dbg_tlv_node *tmp; list_del(&node->list); tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL); if (!tmp) { IWL_WARN(fwrt, "WRT: No memory to override trigger (time point %u)\n", le32_to_cpu(trig->time_point)); list_add(&node->list, prev); return -ENOMEM; } list_add(&tmp->list, prev); node_tlv = &tmp->tlv; node_trig = (void *)node_tlv->data; } memcpy(node_trig->data + offset, trig->data, trig_data_len); node_tlv->length = cpu_to_le32(size); if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) { IWL_DEBUG_FW(fwrt, "WRT: Overriding trigger configuration (time point %u)\n", le32_to_cpu(trig->time_point)); /* the first 11 dwords are configuration related */ memcpy(node_trig, trig, sizeof(__le32) * 11); } if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) { IWL_DEBUG_FW(fwrt, "WRT: Overriding trigger regions (time point %u)\n", le32_to_cpu(trig->time_point)); node_trig->regions_mask = trig->regions_mask; } else { IWL_DEBUG_FW(fwrt, "WRT: Appending trigger regions (time point %u)\n", le32_to_cpu(trig->time_point)); node_trig->regions_mask |= trig->regions_mask; } return 0; } static int iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt, struct list_head *trig_list, struct iwl_ucode_tlv *trig_tlv) { struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data; struct iwl_dbg_tlv_node *node, *match = NULL; u32 policy = le32_to_cpu(trig->apply_policy); list_for_each_entry(node, trig_list, list) { if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT)) break; if (!(policy & 
IWL_FW_INI_APPLY_POLICY_MATCH_DATA) || is_trig_data_contained(trig_tlv, &node->tlv)) { match = node; break; } } if (!match) { IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n", le32_to_cpu(trig->time_point)); return iwl_dbg_tlv_add(trig_tlv, trig_list); } return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match); } static void iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt, struct iwl_dbg_tlv_time_point_data *tp) { struct iwl_dbg_tlv_node *node; struct list_head *trig_list = &tp->trig_list; struct list_head *active_trig_list = &tp->active_trig_list; list_for_each_entry(node, trig_list, list) { struct iwl_ucode_tlv *tlv = &node->tlv; iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv); } } static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, union iwl_dbg_tlv_tp_data *tp_data, u32 trig_data) { struct iwl_rx_packet *pkt = tp_data->fw_pkt; struct iwl_cmd_header *wanted_hdr = (void *)&trig_data; if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd && pkt->hdr.group_id == wanted_hdr->group_id)) { struct iwl_rx_packet *fw_pkt = kmemdup(pkt, sizeof(*pkt) + iwl_rx_packet_payload_len(pkt), GFP_ATOMIC); if (!fw_pkt) return false; dump_data->fw_pkt = fw_pkt; return true; } return false; } static int iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, struct list_head *active_trig_list, union iwl_dbg_tlv_tp_data *tp_data, bool (*data_check)(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, union iwl_dbg_tlv_tp_data *tp_data, u32 trig_data)) { struct iwl_dbg_tlv_node *node; list_for_each_entry(node, active_trig_list, list) { struct iwl_fwrt_dump_data dump_data = { .trig = (void *)node->tlv.data, }; u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig, data); int ret, i; u32 tp = le32_to_cpu(dump_data.trig->time_point); if (!num_data) { ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; } for (i = 0; i < num_data; i++) { if (!data_check || data_check(fwrt, &dump_data, tp_data, le32_to_cpu(dump_data.trig->data[i]))) { ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); if (ret) return ret; break; } } fwrt->trans->dbg.restart_required = FALSE; - IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n", - tp, dump_data.trig->reset_fw); - IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n", - fwrt->trans->dbg.restart_required, - fwrt->trans->dbg.last_tp_resetfw); + IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n", + tp, dump_data.trig->reset_fw); + IWL_DEBUG_FW(fwrt, + "WRT: restart_required %d, last_tp_resetfw %d\n", + fwrt->trans->dbg.restart_required, + fwrt->trans->dbg.last_tp_resetfw); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { fwrt->trans->dbg.restart_required = TRUE; } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { fwrt->trans->dbg.restart_required = FALSE; fwrt->trans->dbg.last_tp_resetfw = 0xFF; IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { - IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n"); + IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n"); fwrt->trans->dbg.restart_required = TRUE; } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n"); + IWL_DEBUG_FW(fwrt, + "WRT: stop only and no reload firmware\n"); 
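			/*
			 * The firmware will be stopped but not reloaded, so no
			 * restart is required here; remember the reset mode so
			 * that a following FW_ASSERT time point also skips the
			 * restart.
			 */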
fwrt->trans->dbg.restart_required = FALSE; fwrt->trans->dbg.last_tp_resetfw = le32_to_cpu(dump_data.trig->reset_fw); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_NOTHING) { - IWL_DEBUG_INFO(fwrt, - "WRT: nothing need to be done after debug collection\n"); + IWL_DEBUG_FW(fwrt, + "WRT: nothing need to be done after debug collection\n"); } else { IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", le32_to_cpu(dump_data.trig->reset_fw)); } } return 0; } static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) { enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest; int ret, i; u32 failed_alloc = 0; - if (*ini_dest != IWL_FW_INI_LOCATION_INVALID) - return; - - IWL_DEBUG_FW(fwrt, - "WRT: Generating active triggers list, domain 0x%x\n", - fwrt->trans->dbg.domains_bitmap); + if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) { + IWL_DEBUG_FW(fwrt, + "WRT: Generating active triggers list, domain 0x%x\n", + fwrt->trans->dbg.domains_bitmap); - for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) { - struct iwl_dbg_tlv_time_point_data *tp = - &fwrt->trans->dbg.time_point[i]; + for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) { + struct iwl_dbg_tlv_time_point_data *tp = + &fwrt->trans->dbg.time_point[i]; - iwl_dbg_tlv_gen_active_trig_list(fwrt, tp); + iwl_dbg_tlv_gen_active_trig_list(fwrt, tp); + } + } else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) { + /* For DRAM, go through the loop below to clear all the buffers + * properly on restart, otherwise garbage may be left there and + * leak into new debug dumps. + */ + return; } *ini_dest = IWL_FW_INI_LOCATION_INVALID; for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) { struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[i]; u32 dest = le32_to_cpu(fw_mon_cfg->buf_location); if (dest == IWL_FW_INI_LOCATION_INVALID) { failed_alloc |= BIT(i); continue; } if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) *ini_dest = dest; if (dest != *ini_dest) continue; ret = iwl_dbg_tlv_alloc_fragments(fwrt, i); if (ret) { IWL_WARN(fwrt, "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n", i, ret); failed_alloc |= BIT(i); } } if (!failed_alloc) return; for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) { struct iwl_fw_ini_region_tlv *reg; struct iwl_ucode_tlv **active_reg = &fwrt->trans->dbg.active_regions[i]; u32 reg_type; if (!*active_reg) { fwrt->trans->dbg.unsupported_region_msk |= BIT(i); continue; } reg = (void *)(*active_reg)->data; reg_type = reg->type; if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER || !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc)) continue; IWL_DEBUG_FW(fwrt, "WRT: removing allocation id %d from region id %d\n", le32_to_cpu(reg->dram_alloc_id), i); - failed_alloc &= ~le32_to_cpu(reg->dram_alloc_id); + failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id)); fwrt->trans->dbg.unsupported_region_msk |= BIT(i); kfree(*active_reg); *active_reg = NULL; } } void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data, bool sync) { struct list_head *hcmd_list, *trig_list, *conf_list; if (!iwl_trans_dbg_ini_valid(fwrt->trans) || tp_id == IWL_FW_INI_TIME_POINT_INVALID || tp_id >= IWL_FW_INI_TIME_POINT_NUM) return; hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list; trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list; conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list; switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: 
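		/*
		 * Unlike the later time points, no host commands are sent at
		 * EARLY: the debug destination, configuration writes and the
		 * DRAM fragment info are applied directly before the trigger
		 * list is evaluated.
		 */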
iwl_dbg_tlv_init_cfg(fwrt); iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_update_drams(fwrt); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; case IWL_FW_INI_TIME_POINT_PERIODIC: iwl_dbg_tlv_set_periodic_trigs(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); break; case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); break; } } IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point); diff --git a/sys/contrib/dev/iwlwifi/iwl-debug.c b/sys/contrib/dev/iwlwifi/iwl-debug.c index 010ac7b4294f..ac61b464e51d 100644 --- a/sys/contrib/dev/iwlwifi/iwl-debug.c +++ b/sys/contrib/dev/iwlwifi/iwl-debug.c @@ -1,169 +1,170 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2011, 2021 Intel Corporation + * Copyright (C) 2005-2011, 2021-2022 Intel Corporation */ #include #include #include #if defined(CONFIG_IWLWIFI_DEBUG) #include #endif #include "iwl-drv.h" #include "iwl-debug.h" #if defined(__FreeBSD__) #include "iwl-modparams.h" #endif #include "iwl-devtrace.h" #if defined(__FreeBSD__) #if defined(CONFIG_IWLWIFI_DEBUG) #include /* hexdump(9) */ #include #endif #endif #if defined(__linux__) #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf = { \ .fmt = fmt, \ }; \ va_list args; \ \ va_start(args, fmt); \ vaf.va = &args; \ dev_ ##fn(dev, "%pV", &vaf); \ trace_iwlwifi_ ##fn(&vaf); \ va_end(args); \ } #elif defined(__FreeBSD__) #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf = { \ .fmt = fmt, \ }; \ va_list args; \ char *str; \ \ va_start(args, fmt); \ vaf.va = &args; \ vasprintf(&str, M_KMALLOC, vaf.fmt, args); \ dev_ ##fn(dev, "%s", str); \ trace_iwlwifi_ ##fn(&vaf); \ free(str, M_KMALLOC); \ va_end(args); \ } #endif __iwl_fn(warn) IWL_EXPORT_SYMBOL(__iwl_warn); __iwl_fn(info) IWL_EXPORT_SYMBOL(__iwl_info); __iwl_fn(crit) IWL_EXPORT_SYMBOL(__iwl_crit); void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args, args2; va_start(args, fmt); switch (mode) { case IWL_ERR_MODE_RATELIMIT: if (net_ratelimit()) break; fallthrough; case IWL_ERR_MODE_REGULAR: case IWL_ERR_MODE_RFKILL: va_copy(args2, args); vaf.va = &args2; #if defined(__linux_) if (mode == IWL_ERR_MODE_RFKILL) dev_err(dev, "(RFKILL) %pV", &vaf); else dev_err(dev, "%pV", &vaf); #elif defined(__FreeBSD__) char *str; vasprintf(&str, M_KMALLOC, vaf.fmt, args2); dev_err(dev, "%s%s", (mode == IWL_ERR_MODE_RFKILL) ? 
"(RFKILL)" : "", str); free(str, M_KMALLOC); #endif va_end(args2); break; default: break; } + vaf.va = &args; trace_iwlwifi_err(&vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_err); #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) #ifdef CONFIG_IWLWIFI_DEBUG bool iwl_have_debug_level(enum iwl_dl level) { return (iwlwifi_mod_params.debug_level & level || level == IWL_DL_ANY); } /* Passing the iwl_drv * in seems pointless. */ void iwl_print_hex_dump(void *drv __unused, enum iwl_dl level, #if defined(__linux__) const char *prefix, uint8_t *data, size_t len) #elif defined(__FreeBSD__) const char *prefix, const uint8_t *data, size_t len) #endif { /* Given we have a level, check for it. */ if (!iwl_have_debug_level(level)) return; #if defined(__linux_) /* XXX I am cluseless in my editor. pcie/trans.c to the rescue. */ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, data, len, 0); #elif defined(__FreeBSD__) hexdump(data, len, prefix, 0); #endif } #endif void __iwl_dbg(struct device *dev, u32 level, bool limit, const char *function, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(level) && (!limit || net_ratelimit())) { #if defined(__linux_) dev_printk(KERN_DEBUG, dev, "%s %pV", function, &vaf); #elif defined(__FreeBSD__) char *str; vasprintf(&str, M_KMALLOC, vaf.fmt, args); dev_printk(KERN_DEBUG, dev, "%d %u %s %s", curthread->td_tid, (unsigned int)ticks, function, str); free(str, M_KMALLOC); #endif } #endif trace_iwlwifi_dbg(level, function, &vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_dbg); #endif diff --git a/sys/contrib/dev/iwlwifi/iwl-drv.c b/sys/contrib/dev/iwlwifi/iwl-drv.c index cecbacbe62c2..be89566b98ab 100644 --- a/sys/contrib/dev/iwlwifi/iwl-drv.c +++ b/sys/contrib/dev/iwlwifi/iwl-drv.c @@ -1,2027 +1,2104 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX iwlwifi_ #endif #include #include #include #include #include #include "iwl-drv.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "iwl-agn-hw.h" #include "fw/img.h" #include "iwl-dbg-tlv.h" #include "iwl-config.h" #include "iwl-modparams.h" #include "fw/api/alive.h" #include "fw/api/mac.h" /****************************************************************************** * * module boiler plate * ******************************************************************************/ #if defined(__linux__) #define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux" MODULE_LICENSE("GPL"); #elif defined(__FreeBSD__) #define DRV_DESCRIPTION "Intel(R) Wireless WiFi based driver for FreeBSD" MODULE_LICENSE("BSD"); MODULE_VERSION(if_iwlwifi, 1); MODULE_DEPEND(if_iwlwifi, linuxkpi, 1, 1, 1); MODULE_DEPEND(if_iwlwifi, linuxkpi_wlan, 1, 1, 1); #ifdef CONFIG_IWLWIFI_DEBUGFS MODULE_DEPEND(if_iwlwifi, lindebugfs, 1, 1, 1); #endif #endif MODULE_DESCRIPTION(DRV_DESCRIPTION); #ifdef CONFIG_IWLWIFI_DEBUGFS static struct dentry *iwl_dbgfs_root; #endif /** * struct iwl_drv - drv common data * @list: list of drv structures using this opmode * @fw: the iwl_fw structure * @op_mode: the running op_mode * @trans: transport layer * @dev: for debug prints only * @fw_index: firmware revision to try loading * @firmware_name: 
composite filename of ucode file to load * @request_firmware_complete: the firmware has been obtained from user space * @dbgfs_drv: debugfs root directory entry * @dbgfs_trans: debugfs transport directory entry * @dbgfs_op_mode: debugfs op_mode directory entry */ struct iwl_drv { struct list_head list; struct iwl_fw fw; struct iwl_op_mode *op_mode; struct iwl_trans *trans; struct device *dev; int fw_index; /* firmware we're trying to load */ char firmware_name[64]; /* name of firmware file to load */ struct completion request_firmware_complete; #if defined(__FreeBSD__) struct completion drv_start_complete; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS struct dentry *dbgfs_drv; struct dentry *dbgfs_trans; struct dentry *dbgfs_op_mode; #endif }; enum { DVM_OP_MODE, MVM_OP_MODE, }; /* Protects the table contents, i.e. the ops pointer & drv list */ static DEFINE_MUTEX(iwlwifi_opmode_table_mtx); static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ struct list_head drv; /* list of devices using this op_mode */ } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, }; #define IWL_DEFAULT_SCAN_CHANNELS 40 /* * struct fw_sec: Just for the image parsing process. * For the fw storage we are using struct fw_desc. */ struct fw_sec { const void *data; /* the sec data */ size_t size; /* section size */ u32 offset; /* offset of writing in the device */ }; static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) { vfree(desc->data); desc->data = NULL; desc->len = 0; } static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img) { int i; for (i = 0; i < img->num_sec; i++) iwl_free_fw_desc(drv, &img->sec[i]); kfree(img->sec); } static void iwl_dealloc_ucode(struct iwl_drv *drv) { int i; kfree(drv->fw.dbg.dest_tlv); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) kfree(drv->fw.dbg.conf_tlv[i]); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) kfree(drv->fw.dbg.trigger_tlv[i]); kfree(drv->fw.dbg.mem_tlv); kfree(drv->fw.iml); kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); + kfree(drv->trans->dbg.pc_data); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); /* clear the data for the aborted load case */ memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, struct fw_sec *sec) { void *data; desc->data = NULL; if (!sec || !sec->size) return -EINVAL; data = vmalloc(sec->size); if (!data) return -ENOMEM; desc->len = sec->size; desc->offset = sec->offset; memcpy(data, sec->data, desc->len); desc->data = data; return 0; } +static inline char iwl_drv_get_step(int step) +{ + if (step == SILICON_Z_STEP) + return 'z'; + return 'a' + step; +} + +const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) +{ + char mac_step, rf_step; + const char *rf, *cdb; + + if (trans->cfg->fw_name_pre) + return trans->cfg->fw_name_pre; + + if (WARN_ON(!trans->cfg->fw_name_mac)) + return "unconfigured"; + + mac_step = iwl_drv_get_step(trans->hw_rev_step); + + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_HR1: + case IWL_CFG_RF_TYPE_HR2: + rf = "hr"; + break; + case IWL_CFG_RF_TYPE_GF: + rf = "gf"; + break; + case IWL_CFG_RF_TYPE_MR: + rf = "mr"; + break; + case IWL_CFG_RF_TYPE_MS: + rf = "ms"; + break; + case IWL_CFG_RF_TYPE_FM: + rf = "fm"; + break; + 
case IWL_CFG_RF_TYPE_WH: + rf = "wh"; + break; + default: + return "unknown-rf"; + } + + cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : ""; + + rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); + + scnprintf(buf, FW_NAME_PRE_BUFSIZE, + "iwlwifi-%s-%c0-%s%s-%c0", + trans->cfg->fw_name_mac, mac_step, + rf, cdb, rf_step); + + return buf; +} +IWL_EXPORT_SYMBOL(iwl_drv_get_fwname_pre); + static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; - char tag[8]; + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; + const char *fw_name_pre; if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && (drv->trans->hw_rev_step != SILICON_B_STEP && drv->trans->hw_rev_step != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev); return -EINVAL; } - if (first) { + fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre); + + if (first) drv->fw_index = cfg->ucode_api_max; - sprintf(tag, "%d", drv->fw_index); - } else { + else drv->fw_index--; - sprintf(tag, "%d", drv->fw_index); - } if (drv->fw_index < cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { - IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, + IWL_ERR(drv, "%s-%d is required\n", fw_name_pre, cfg->ucode_api_max); } else { - IWL_ERR(drv, "minimum version required: %s%d\n", - cfg->fw_name_pre, cfg->ucode_api_min); - IWL_ERR(drv, "maximum version supported: %s%d\n", - cfg->fw_name_pre, cfg->ucode_api_max); + IWL_ERR(drv, "minimum version required: %s-%d\n", + fw_name_pre, cfg->ucode_api_min); + IWL_ERR(drv, "maximum version supported: %s-%d\n", + fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, "check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n"); return -ENOENT; } - snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - cfg->fw_name_pre, tag); + snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s-%d.ucode", + fw_name_pre, drv->fw_index); IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, drv->trans->dev, GFP_KERNEL, drv, iwl_req_fw_callback); } struct fw_img_parsing { struct fw_sec *sec; int sec_counter; }; /* * struct fw_sec_parsing: to extract fw section and it's offset from tlv */ struct fw_sec_parsing { __le32 offset; const u8 data[]; } __packed; /** * struct iwl_tlv_calib_data - parse the default calib data from TLV * * @ucode_type: the uCode to which the following default calib relates. * @calib: default calibrations. 
*/ struct iwl_tlv_calib_data { __le32 ucode_type; struct iwl_tlv_calib_ctrl calib; } __packed; struct iwl_firmware_pieces { struct fw_img_parsing img[IWL_UCODE_TYPE_MAX]; u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr; u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr; /* FW debug data parsed for driver usage */ bool dbg_dest_tlv_init; const u8 *dbg_dest_ver; union { const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; }; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; }; /* * These functions are just to extract uCode section data from the pieces * structure. */ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return &pieces->img[type].sec[sec]; } static void alloc_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { struct fw_img_parsing *img = &pieces->img[type]; struct fw_sec *sec_memory; int size = sec + 1; size_t alloc_size = sizeof(*img->sec) * size; if (img->sec && img->sec_counter >= size) return; sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec_memory) return; img->sec = sec_memory; img->sec_counter = size; } static void set_sec_data(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, const void *data) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].data = data; } static void set_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, size_t size) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].size = size; } static size_t get_sec_size(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec) { return pieces->img[type].sec[sec].size; } static void set_sec_offset(struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type, int sec, u32 offset) { alloc_sec_data(pieces, type, sec); pieces->img[type].sec[sec].offset = offset; } /* * Gets uCode section from tlv. 
*/ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, const void *data, enum iwl_ucode_type type, int size) { struct fw_img_parsing *img; struct fw_sec *sec; const struct fw_sec_parsing *sec_parse; size_t alloc_size; if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) return -1; sec_parse = (const struct fw_sec_parsing *)data; img = &pieces->img[type]; alloc_size = sizeof(*img->sec) * (img->sec_counter + 1); sec = krealloc(img->sec, alloc_size, GFP_KERNEL); if (!sec) return -ENOMEM; img->sec = sec; sec = &img->sec[img->sec_counter]; sec->offset = le32_to_cpu(sec_parse->offset); sec->data = sec_parse->data; sec->size = size - sizeof(sec_parse->offset); ++img->sec_counter; return 0; } static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) { const struct iwl_tlv_calib_data *def_calib = (const struct iwl_tlv_calib_data *)data; u32 ucode_type = le32_to_cpu(def_calib->ucode_type); if (ucode_type >= IWL_UCODE_TYPE_MAX) { IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", ucode_type); return -EINVAL; } drv->fw.default_calib[ucode_type].flow_trigger = def_calib->calib.flow_trigger; drv->fw.default_calib[ucode_type].event_trigger = def_calib->calib.event_trigger; return 0; } static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_api *ucode_api = (const void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); u32 api_flags = le32_to_cpu(ucode_api->api_flags); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { IWL_WARN(drv, "api flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_api); } } static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_capa *ucode_capa = (const void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); u32 api_flags = le32_to_cpu(ucode_capa->api_capa); int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { IWL_WARN(drv, "capa flags index %d larger than supported by driver\n", api_index); return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_capa); } } static const char *iwl_reduced_fw_name(struct iwl_drv *drv) { const char *name = drv->firmware_name; if (strncmp(name, "iwlwifi-", 8) == 0) name += 8; return name; } static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces) { const struct iwl_ucode_header *ucode = (const void *)ucode_raw->data; u32 api_ver, hdr_size, build; char buildstr[25]; const u8 *src; drv->fw.ucode_ver = le32_to_cpu(ucode->ver); api_ver = IWL_UCODE_API(drv->fw.ucode_ver); switch (api_ver) { default: hdr_size = 28; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return -EINVAL; } build = le32_to_cpu(ucode->u.v2.build); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v2.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v2.init_data_size)); src = ucode->u.v2.data; break; case 0: case 1: case 2: hdr_size = 24; if (ucode_raw->size < hdr_size) { IWL_ERR(drv, "File size too small!\n"); return 
-EINVAL; } build = 0; set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.inst_size)); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v1.data_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, le32_to_cpu(ucode->u.v1.init_size)); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, le32_to_cpu(ucode->u.v1.init_data_size)); src = ucode->u.v1.data; break; } if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s %s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr, iwl_reduced_fw_name(drv)); /* Verify size of file vs. image size info in file's header */ if (ucode_raw->size != hdr_size + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) + get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) { IWL_ERR(drv, "uCode file size %d does not match expected size\n", (int)ucode_raw->size); return -EINVAL; } set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src); src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); return 0; } static void iwl_drv_set_dump_exclude(struct iwl_drv *drv, enum iwl_ucode_tlv_type tlv_type, const void *tlv_data, u32 tlv_len) { const struct iwl_fw_dump_exclude *fw = tlv_data; struct iwl_dump_exclude *excl; if (tlv_len < sizeof(*fw)) return; if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) { excl = &drv->fw.dump_excl[0]; /* second time we find this, it's for WoWLAN */ if (excl->addr) excl = &drv->fw.dump_excl_wowlan[0]; } else if (fw_has_capa(&drv->fw.ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) { /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */ excl = &drv->fw.dump_excl[0]; } else { /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */ excl = &drv->fw.dump_excl_wowlan[0]; } if (excl->addr) excl++; if (excl->addr) { IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n"); return; } excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL; excl->size = le32_to_cpu(fw->size); } static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv, const struct iwl_ucode_tlv *tlv) { const struct iwl_fw_ini_region_tlv *region; u32 length = le32_to_cpu(tlv->length); u32 addr; if (length < offsetof(typeof(*region), special_mem) + sizeof(region->special_mem)) return; region = (const void *)tlv->data; addr = le32_to_cpu(region->special_mem.base_addr); addr += le32_to_cpu(region->special_mem.offset); addr &= ~FW_ADDR_CACHE_CONTROL; 
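	/*
	 * Only SPECIAL_DEVICE_MEMORY regions describe assert/error tables;
	 * the sub-type below selects which error-table pointer (UMAC, LMAC,
	 * TCM or RCM) the computed address is stored into, and the matching
	 * IWL_ERROR_EVENT_TABLE_* status bit is recorded.
	 */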
if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY) return; switch (region->sub_type) { case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE: drv->trans->dbg.umac_error_event_table = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_UMAC; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE: drv->trans->dbg.lmac_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE: drv->trans->dbg.lmac_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC2; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE: drv->trans->dbg.tcm_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_TCM1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE: drv->trans->dbg.tcm_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_TCM2; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE: drv->trans->dbg.rcm_error_event_table[0] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_RCM1; break; case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE: drv->trans->dbg.rcm_error_event_table[1] = addr; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_RCM2; break; default: break; } } static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, struct iwl_ucode_capabilities *capa, bool *usniffer_images) { const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data; const struct iwl_ucode_tlv *tlv; size_t len = ucode_raw->size; const u8 *data; u32 tlv_len; u32 usniffer_img; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; char buildstr[25]; u32 build, paging_mem_size; int num_of_cpus; bool usniffer_req = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); return -EINVAL; } if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) { IWL_ERR(drv, "invalid uCode magic: 0X%x\n", le32_to_cpu(ucode->magic)); return -EINVAL; } drv->fw.ucode_ver = le32_to_cpu(ucode->ver); memcpy(drv->fw.human_readable, ucode->human_readable, sizeof(drv->fw.human_readable)); build = le32_to_cpu(ucode->build); if (build) sprintf(buildstr, " build %u", build); else buildstr[0] = '\0'; snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u.%u%s %s", IWL_UCODE_MAJOR(drv->fw.ucode_ver), IWL_UCODE_MINOR(drv->fw.ucode_ver), IWL_UCODE_API(drv->fw.ucode_ver), IWL_UCODE_SERIAL(drv->fw.ucode_ver), buildstr, iwl_reduced_fw_name(drv)); data = ucode->data; len -= sizeof(*ucode); while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); tlv_data = tlv->data; if (len < tlv_len) { IWL_ERR(drv, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } len -= ALIGN(tlv_len, 4); data += sizeof(*tlv) + ALIGN(tlv_len, 4); switch (tlv_type) { case IWL_UCODE_TLV_INST: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_DATA: set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, 
IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_INIT_DATA: set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_BOOT: IWL_ERR(drv, "Found unexpected BOOT ucode\n"); break; case IWL_UCODE_TLV_PROBE_MAX_LEN: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->max_probe_length = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_PAN: if (tlv_len) goto invalid_tlv_len; capa->flags |= IWL_UCODE_TLV_FLAGS_PAN; break; case IWL_UCODE_TLV_FLAGS: /* must be at least one u32 */ if (tlv_len < sizeof(u32)) goto invalid_tlv_len; /* and a proper number of u32s */ if (tlv_len % sizeof(u32)) goto invalid_tlv_len; /* * This driver only reads the first u32 as * right now no more features are defined, * if that changes then either the driver * will not work with the new firmware, or * it'll not take advantage of new features. */ capa->flags = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) goto invalid_tlv_len; iwl_set_ucode_api_flags(drv, tlv_data, capa); break; case IWL_UCODE_TLV_ENABLED_CAPABILITIES: if (tlv_len != sizeof(struct iwl_ucode_capa)) goto invalid_tlv_len; iwl_set_ucode_capabilities(drv, tlv_data, capa); break; case IWL_UCODE_TLV_INIT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_errlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_errlog_ptr = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_ENHANCE_SENS_TBL: if (tlv_len) goto invalid_tlv_len; drv->fw.enhance_sensitivity_table = true; break; case IWL_UCODE_TLV_WOWLAN_INST: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_INST, IWLAGN_RTC_INST_LOWER_BOUND); break; case IWL_UCODE_TLV_WOWLAN_DATA: set_sec_data(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_data); set_sec_size(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, tlv_len); set_sec_offset(pieces, IWL_UCODE_WOWLAN, IWL_UCODE_SECTION_DATA, IWLAGN_RTC_DATA_LOWER_BOUND); break; case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE: if (tlv_len != sizeof(u32)) goto 
invalid_tlv_len; capa->standard_phy_calibration_size = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_DEF_CALIB: if (tlv_len != sizeof(struct iwl_tlv_calib_data)) goto invalid_tlv_len; if (iwl_set_default_calib(drv, tlv_data)) goto tlv_error; break; case IWL_UCODE_TLV_PHY_SKU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data); drv->fw.valid_tx_ant = (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS; drv->fw.valid_rx_ant = (drv->fw.phy_config & FW_PHY_CFG_RX_CHAIN) >> FW_PHY_CFG_RX_CHAIN_POS; break; case IWL_UCODE_TLV_SECURE_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_NUM_OF_CPU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; num_of_cpus = le32_to_cpup((const __le32 *)tlv_data); if (num_of_cpus == 2) { drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = true; drv->fw.img[IWL_UCODE_INIT].is_dual_cpus = true; drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = true; } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { - IWL_ERR(drv, "Driver support upto 2 CPUs\n"); + IWL_ERR(drv, "Driver support up to 2 CPUs\n"); return -EINVAL; } break; case IWL_UCODE_TLV_N_SCAN_CHANNELS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->n_scan_channels = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_VERSION: { const __le32 *ptr = (const void *)tlv_data; u32 major, minor; u8 local_comp; if (tlv_len != sizeof(u32) * 3) goto invalid_tlv_len; major = le32_to_cpup(ptr++); minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); if (major >= 35) snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%08x.%u %s", major, minor, local_comp, iwl_reduced_fw_name(drv)); else snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), "%u.%u.%u %s", major, minor, local_comp, iwl_reduced_fw_name(drv)); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { const struct iwl_fw_dbg_dest_tlv *dest = NULL; const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; u8 mon_mode; pieces->dbg_dest_ver = (const u8 *)tlv_data; if (*pieces->dbg_dest_ver == 1) { dest = (const void *)tlv_data; } else if (*pieces->dbg_dest_ver == 0) { dest_v1 = (const void *)tlv_data; } else { IWL_ERR(drv, "The version is %d, and it is invalid\n", *pieces->dbg_dest_ver); break; } if (pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "dbg destination ignored, already exists\n"); break; } pieces->dbg_dest_tlv_init = true; if (dest_v1) { pieces->dbg_dest_tlv_v1 = dest_v1; mon_mode = dest_v1->monitor_mode; } else { pieces->dbg_dest_tlv = dest; mon_mode = dest->monitor_mode; } IWL_INFO(drv, "Found debug destination: %s\n", get_fw_dbg_mode_string(mon_mode)); drv->fw.dbg.n_dest_reg = (dest_v1) ? 
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) : tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv, reg_ops); drv->fw.dbg.n_dest_reg /= sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]); break; } case IWL_UCODE_TLV_FW_DBG_CONF: { const struct iwl_fw_dbg_conf_tlv *conf = (const void *)tlv_data; if (!pieces->dbg_dest_tlv_init) { IWL_ERR(drv, "Ignore dbg config %d - no destination configured\n", conf->id); break; } if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) { IWL_ERR(drv, "Skip unknown configuration: %d\n", conf->id); break; } if (pieces->dbg_conf_tlv[conf->id]) { IWL_ERR(drv, "Ignore duplicate dbg config %d\n", conf->id); break; } if (conf->usniffer) usniffer_req = true; IWL_INFO(drv, "Found debug configuration: %d\n", conf->id); pieces->dbg_conf_tlv[conf->id] = conf; pieces->dbg_conf_tlv_len[conf->id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_TRIGGER: { const struct iwl_fw_dbg_trigger_tlv *trigger = (const void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { IWL_ERR(drv, "Skip unknown trigger: %u\n", trigger->id); break; } if (pieces->dbg_trigger_tlv[trigger_id]) { IWL_ERR(drv, "Ignore duplicate dbg trigger %u\n", trigger->id); break; } IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id); pieces->dbg_trigger_tlv[trigger_id] = trigger; pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; break; } case IWL_UCODE_TLV_FW_DBG_DUMP_LST: { if (tlv_len != sizeof(u32)) { IWL_ERR(drv, "dbg lst mask size incorrect, skip\n"); break; } drv->fw.dbg.dump_mask = le32_to_cpup((const __le32 *)tlv_data); break; } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR_USNIFFER, tlv_len); break; case IWL_UCODE_TLV_PAGING: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; paging_mem_size = le32_to_cpup((const __le32 *)tlv_data); IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n", paging_mem_size); if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { IWL_ERR(drv, "Paging: driver supports up to %lu bytes for paging image\n", MAX_PAGING_IMAGE_SIZE); return -EINVAL; } if (paging_mem_size & (FW_PAGING_SIZE - 1)) { IWL_ERR(drv, "Paging: image isn't multiple %lu\n", FW_PAGING_SIZE); return -EINVAL; } drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = paging_mem_size; usniffer_img = IWL_UCODE_REGULAR_USNIFFER; drv->fw.img[usniffer_img].paging_mem_size = paging_mem_size; break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = (const void *)tlv_data; size_t size; struct iwl_fw_dbg_mem_seg_tlv *n; if (tlv_len != (sizeof(*dbg_mem))) goto invalid_tlv_len; IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n", dbg_mem->data_type); size = sizeof(*pieces->dbg_mem_tlv) * (pieces->n_mem_tlv + 1); n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL); if (!n) return -ENOMEM; pieces->dbg_mem_tlv = n; pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem; pieces->n_mem_tlv++; break; } case IWL_UCODE_TLV_IML: { drv->fw.iml_len = tlv_len; drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!drv->fw.iml) return -ENOMEM; break; } case IWL_UCODE_TLV_FW_RECOVERY_INFO: { const struct { __le32 buf_addr; __le32 buf_size; } *recov_info = (const void *)tlv_data; if (tlv_len != sizeof(*recov_info)) goto invalid_tlv_len; capa->error_log_addr = le32_to_cpu(recov_info->buf_addr); capa->error_log_size = le32_to_cpu(recov_info->buf_size); } break; case IWL_UCODE_TLV_FW_FSEQ_VERSION: { const struct { u8 version[32]; u8 
sha1[20]; } *fseq_ver = (const void *)tlv_data; if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", fseq_ver->version); } break; case IWL_UCODE_TLV_FW_NUM_STATIONS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; if (le32_to_cpup((const __le32 *)tlv_data) > IWL_MVM_STATION_COUNT_MAX) { IWL_ERR(drv, "%d is an invalid number of station\n", le32_to_cpup((const __le32 *)tlv_data)); goto tlv_error; } capa->num_stations = le32_to_cpup((const __le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_NUM_BEACONS: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->num_beacons = + le32_to_cpup((const __le32 *)tlv_data); + break; case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { const struct iwl_umac_debug_addrs *dbg_ptrs = (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.umac_error_event_table = le32_to_cpu(dbg_ptrs->error_info_addr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_UMAC; break; } case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: { const struct iwl_lmac_debug_addrs *dbg_ptrs = (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; if (drv->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_22000) break; drv->trans->dbg.lmac_error_event_table[0] = le32_to_cpu(dbg_ptrs->error_event_table_ptr) & ~FW_ADDR_CACHE_CONTROL; drv->trans->dbg.error_event_table_tlv_status |= IWL_ERROR_EVENT_TABLE_LMAC1; break; } case IWL_UCODE_TLV_TYPE_REGIONS: iwl_parse_dbg_tlv_assert_tables(drv, tlv); fallthrough; case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_CONF_SET: if (iwlwifi_mod_params.enable_ini) iwl_dbg_tlv_alloc(drv->trans, tlv, false); break; case IWL_UCODE_TLV_CMD_VERSIONS: if (tlv_len % sizeof(struct iwl_fw_cmd_version)) { IWL_ERR(drv, "Invalid length for command versions: %u\n", tlv_len); tlv_len /= sizeof(struct iwl_fw_cmd_version); tlv_len *= sizeof(struct iwl_fw_cmd_version); } if (WARN_ON(capa->cmd_versions)) return -EINVAL; capa->cmd_versions = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!capa->cmd_versions) return -ENOMEM; capa->n_cmd_versions = tlv_len / sizeof(struct iwl_fw_cmd_version); break; case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION: if (drv->fw.phy_integration_ver) { IWL_ERR(drv, "phy integration str ignored, already exists\n"); break; } drv->fw.phy_integration_ver = kmemdup(tlv_data, tlv_len, GFP_KERNEL); if (!drv->fw.phy_integration_ver) return -ENOMEM; drv->fw.phy_integration_ver_len = tlv_len; break; case IWL_UCODE_TLV_SEC_TABLE_ADDR: case IWL_UCODE_TLV_D3_KEK_KCK_ADDR: iwl_drv_set_dump_exclude(drv, tlv_type, tlv_data, tlv_len); break; + case IWL_UCODE_TLV_CURRENT_PC: + if (tlv_len < sizeof(struct iwl_pc_data)) + goto invalid_tlv_len; + drv->trans->dbg.num_pc = + tlv_len / sizeof(struct iwl_pc_data); + drv->trans->dbg.pc_data = + kmemdup(tlv_data, tlv_len, GFP_KERNEL); + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; } } if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) && usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; } if (len) { IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); #if defined(__linux__) iwl_print_hex_dump(drv, IWL_DL_FW, data, len); #elif defined(__FreeBSD__) #ifdef 
CONFIG_IWLWIFI_DEBUGFS iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", data, len); #endif #endif return -EINVAL; } return 0; invalid_tlv_len: IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len); tlv_error: #if defined(__linux__) iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len); #elif defined(__FreeBSD__) #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_print_hex_dump(drv, IWL_DL_FW, "TLV ", tlv_data, tlv_len); #endif #endif return -EINVAL; } static int iwl_alloc_ucode(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, enum iwl_ucode_type type) { int i; struct fw_desc *sec; sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL); if (!sec) return -ENOMEM; drv->fw.img[type].sec = sec; drv->fw.img[type].num_sec = pieces->img[type].sec_counter; for (i = 0; i < pieces->img[type].sec_counter; i++) if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i))) return -ENOMEM; return 0; } static int validate_sec_sizes(struct iwl_drv *drv, struct iwl_firmware_pieces *pieces, const struct iwl_cfg *cfg) { IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)); /* Verify that uCode images will fit in card's SRAM. */ if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > cfg->max_inst_size) { IWL_ERR(drv, "uCode init instr len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST)); return -1; } if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > cfg->max_data_size) { IWL_ERR(drv, "uCode init data len %zd too large to fit in\n", get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA)); return -1; } return 0; } static struct iwl_op_mode * _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) { const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS drv->dbgfs_op_mode = debugfs_create_dir(op->name, drv->dbgfs_drv); dbgfs_dir = drv->dbgfs_op_mode; #endif op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); if (op_mode) return op_mode; IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; #endif } return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) { /* op_mode can be NULL if its start failed */ if (drv->op_mode) { iwl_op_mode_stop(drv->op_mode); drv->op_mode = NULL; #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; 
#endif } } /* * iwl_req_fw_callback - callback when firmware was loaded * * If loaded successfully, copies the firmware into buffers * for the card to fetch (via DMA). */ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; struct iwl_fw *fw = &drv->fw; const struct iwl_ucode_header *ucode; struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; const unsigned int api_max = drv->trans->cfg->ucode_api_max; const unsigned int api_min = drv->trans->cfg->ucode_api_min; size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX]; u32 api_ver; int i; bool load_module = false; bool usniffer_images = false; bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX; + fw->ucode_capa.num_beacons = 1; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) goto out_free_fw; if (!ucode_raw) goto try_again; IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n", drv->firmware_name, ucode_raw->size); /* Make sure that we got at least the API version number */ if (ucode_raw->size < 4) { IWL_ERR(drv, "File size way too small!\n"); goto try_again; } /* Data from ucode file: header followed by uCode images */ ucode = (const struct iwl_ucode_header *)ucode_raw->data; if (ucode->ver) err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); else err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces, &fw->ucode_capa, &usniffer_images); if (err) goto try_again; if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) api_ver = drv->fw.ucode_ver; else api_ver = IWL_UCODE_API(drv->fw.ucode_ver); /* * api_ver should match the api version forming part of the * firmware filename ... but we don't check for that and only rely * on the API version read from firmware header from here on forward */ if (api_ver < api_min || api_ver > api_max) { IWL_ERR(drv, "Driver unable to support your firmware API. " "Driver supports v%u, firmware is v%u.\n", api_max, api_ver); goto try_again; } /* * In mvm uCode there is no difference between data and instructions * sections. */ if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->trans->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... 
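 * (the loop below copies each parsed section into buffers the device can
 * fetch over DMA; see iwl_alloc_ucode())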
*/ /* Runtime instructions and 2 copies of data: * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) if (iwl_alloc_ucode(drv, pieces, i)) goto out_free_fw; if (pieces->dbg_dest_tlv_init) { size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) + sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg; drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL); if (!drv->fw.dbg.dest_tlv) goto out_free_fw; if (*pieces->dbg_dest_ver == 0) { memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1, dbg_dest_size); } else { struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv = drv->fw.dbg.dest_tlv; dest_tlv->version = pieces->dbg_dest_tlv->version; dest_tlv->monitor_mode = pieces->dbg_dest_tlv->monitor_mode; dest_tlv->size_power = pieces->dbg_dest_tlv->size_power; dest_tlv->wrap_count = pieces->dbg_dest_tlv->wrap_count; dest_tlv->write_ptr_reg = pieces->dbg_dest_tlv->write_ptr_reg; dest_tlv->base_shift = pieces->dbg_dest_tlv->base_shift; memcpy(dest_tlv->reg_ops, pieces->dbg_dest_tlv->reg_ops, sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) * drv->fw.dbg.n_dest_reg); /* In version 1 of the destination tlv, which is * relevant for internal buffer exclusively, * the base address is part of given with the length * of the buffer, and the size shift is give instead of * end shift. We now store these values in base_reg, * and end shift, and when dumping the data we'll * manipulate it for extracting both the length and * base address */ dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg; dest_tlv->end_shift = pieces->dbg_dest_tlv->size_shift; } } for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) { if (pieces->dbg_conf_tlv[i]) { drv->fw.dbg.conf_tlv[i] = kmemdup(pieces->dbg_conf_tlv[i], pieces->dbg_conf_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.conf_tlv[i]) goto out_free_fw; } } memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz)); trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] = sizeof(struct iwl_fw_dbg_trigger_missed_bcon); trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0; trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] = sizeof(struct iwl_fw_dbg_trigger_cmd); trigger_tlv_sz[FW_DBG_TRIGGER_MLME] = sizeof(struct iwl_fw_dbg_trigger_mlme); trigger_tlv_sz[FW_DBG_TRIGGER_STATS] = sizeof(struct iwl_fw_dbg_trigger_stats); trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] = sizeof(struct iwl_fw_dbg_trigger_low_rssi); trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] = sizeof(struct iwl_fw_dbg_trigger_txq_timer); trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event); trigger_tlv_sz[FW_DBG_TRIGGER_BA] = sizeof(struct iwl_fw_dbg_trigger_ba); trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] = sizeof(struct iwl_fw_dbg_trigger_tdls); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { /* * If the trigger isn't long enough, WARN and exit. * Someone is trying to debug something and he won't * be able to catch the bug he is trying to chase. * We'd better be noisy to be sure he knows what's * going on. 
*/ if (WARN_ON(pieces->dbg_trigger_tlv_len[i] < (trigger_tlv_sz[i] + sizeof(struct iwl_fw_dbg_trigger_tlv)))) goto out_free_fw; drv->fw.dbg.trigger_tlv_len[i] = pieces->dbg_trigger_tlv_len[i]; drv->fw.dbg.trigger_tlv[i] = kmemdup(pieces->dbg_trigger_tlv[i], drv->fw.dbg.trigger_tlv_len[i], GFP_KERNEL); if (!drv->fw.dbg.trigger_tlv[i]) goto out_free_fw; } } /* Now that we can no longer fail, copy information */ drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv; pieces->dbg_mem_tlv = NULL; drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv; /* * The (size - 16) / 12 formula is based on the information recorded * for each event, which is of mode 1 (including timestamp) for all * new microcodes that include this information. */ fw->init_evtlog_ptr = pieces->init_evtlog_ptr; if (pieces->init_evtlog_size) fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12; else fw->init_evtlog_size = drv->trans->trans_cfg->base_params->max_event_log_size; fw->init_errlog_ptr = pieces->init_errlog_ptr; fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr; if (pieces->inst_evtlog_size) fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12; else fw->inst_evtlog_size = drv->trans->trans_cfg->base_params->max_event_log_size; fw->inst_errlog_ptr = pieces->inst_errlog_ptr; /* * figure out the offset of chain noise reset and gain commands * base on the size of standard phy calibration commands table size */ if (fw->ucode_capa.standard_phy_calibration_size > IWL_MAX_PHY_CALIBRATE_TBL_SIZE) fw->ucode_capa.standard_phy_calibration_size = IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE; /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans); mutex_lock(&iwlwifi_opmode_table_mtx); switch (fw->type) { case IWL_FW_DVM: op = &iwlwifi_opmode_table[DVM_OP_MODE]; break; default: WARN(1, "Invalid fw type %d\n", fw->type); fallthrough; case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; } IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name); /* add this device to the list of devices using this op_mode */ list_add_tail(&drv->list, &op->drv); if (op->ops) { drv->op_mode = _iwl_op_mode_start(drv, op); if (!drv->op_mode) { mutex_unlock(&iwlwifi_opmode_table_mtx); goto out_unbind; } } else { load_module = true; } mutex_unlock(&iwlwifi_opmode_table_mtx); /* * Complete the firmware request last so that * a driver unbind (stop) doesn't run while we * are doing the start() above. */ complete(&drv->request_firmware_complete); /* * Load the module last so we don't block anything * else from proceeding if the module fails to load * or hangs loading. 
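	 * (note that the return value of request_module() is not checked
	 * here; if the module never loads, the op mode is simply never
	 * started for this device)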
*/ if (load_module) request_module("%s", op->name); failure = false; goto free; try_again: /* try next, if any */ release_firmware(ucode_raw); if (iwl_request_firmware(drv, false)) goto out_unbind; goto free; out_free_fw: release_firmware(ucode_raw); out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); /* drv has just been freed by the release */ failure = false; free: if (failure) iwl_dealloc_ucode(drv); if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); kfree(pieces->dbg_mem_tlv); kfree(pieces); } } struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) { struct iwl_drv *drv; int ret; drv = kzalloc(sizeof(*drv), GFP_KERNEL); if (!drv) { ret = -ENOMEM; goto err; } drv->trans = trans; drv->dev = trans->dev; init_completion(&drv->request_firmware_complete); #if defined(__FreeBSD__) init_completion(&drv->drv_start_complete); #endif INIT_LIST_HEAD(&drv->list); #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the device debugfs entries. */ drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), iwl_dbgfs_root); /* Create transport layer debugfs dir */ drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); #endif drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans); ret = iwl_request_firmware(drv, true); if (ret) { IWL_ERR(trans, "Couldn't request the fw\n"); goto err_fw; } #if defined(__FreeBSD__) /* * Wait until initilization is done before returning in order to * replicate FreeBSD's synchronous behaviour -- we cannot create * a vap before the com is fully created but if LinuxKPI "probe" * returned before it was all done that is what could happen. */ wait_for_completion(&drv->request_firmware_complete); complete(&drv->drv_start_complete); #endif return drv; err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); iwl_dbg_tlv_free(drv->trans); #endif kfree(drv); err: return ERR_PTR(ret); } void iwl_drv_stop(struct iwl_drv *drv) { #if defined(__linux__) wait_for_completion(&drv->request_firmware_complete); #elif defined(__FreeBSD__) wait_for_completion(&drv->drv_start_complete); #endif _iwl_op_mode_stop(drv); iwl_dealloc_ucode(drv); mutex_lock(&iwlwifi_opmode_table_mtx); /* * List is empty (this item wasn't added) * when firmware loading failed -- in that * case we can't remove it from any list. 
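	 * (e.g. iwl_req_fw_callback() bailed out before reaching its
	 * list_add_tail() call)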
*/ if (!list_empty(&drv->list)) list_del(&drv->list); mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CONFIG_IWLWIFI_DEBUGFS drv->trans->ops->debugfs_cleanup(drv->trans); debugfs_remove_recursive(drv->dbgfs_drv); #endif iwl_dbg_tlv_free(drv->trans); kfree(drv); } #define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1) /* shared module parameters */ struct iwl_mod_params iwlwifi_mod_params = { .fw_restart = true, .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT, .enable_ini = ENABLE_INI, #if defined(__FreeBSD__) .disable_11n = 1, .disable_11ac = true, .disable_11ax = true, + .disable_11be = true, #endif /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) { int i; struct iwl_drv *drv; struct iwlwifi_opmode_table *op; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { op = &iwlwifi_opmode_table[i]; if (strcmp(op->name, name)) continue; op->ops = ops; /* TODO: need to handle exceptional case */ list_for_each_entry(drv, &op->drv, list) drv->op_mode = _iwl_op_mode_start(drv, op); mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } mutex_unlock(&iwlwifi_opmode_table_mtx); return -EIO; } IWL_EXPORT_SYMBOL(iwl_opmode_register); void iwl_opmode_deregister(const char *name) { int i; struct iwl_drv *drv; mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; iwlwifi_opmode_table[i].ops = NULL; /* call the stop routine for all devices */ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) _iwl_op_mode_stop(drv); mutex_unlock(&iwlwifi_opmode_table_mtx); return; } mutex_unlock(&iwlwifi_opmode_table_mtx); } IWL_EXPORT_SYMBOL(iwl_opmode_deregister); static int __init iwl_drv_init(void) { int i, err; for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); pr_info(DRV_DESCRIPTION "\n"); #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the root of iwlwifi debugfs subsystem. 
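	 * (usually /sys/kernel/debug/iwlwifi; per-device directories are
	 * created under it when devices are probed, see iwl_drv_start())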
*/ iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); #endif err = iwl_pci_register_driver(); if (err) goto cleanup_debugfs; return 0; cleanup_debugfs: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif return err; } module_init(iwl_drv_init); static void __exit iwl_drv_exit(void) { iwl_pci_unregister_driver(); #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(iwl_dbgfs_root); #endif } module_exit(iwl_drv_exit); #ifdef CONFIG_IWLWIFI_DEBUG module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644); MODULE_PARM_DESC(debug, "debug output mask"); #endif module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444); MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444); MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, " "4K for other devices 1:4K 2:8K 3:12K (16K buffers) 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); #if defined(__linux__) static int enable_ini_set(const char *arg, const struct kernel_param *kp) { int ret = 0; bool res; __u32 new_enable_ini; /* in case the argument type is a number */ ret = kstrtou32(arg, 0, &new_enable_ini); if (!ret) { if (new_enable_ini > ENABLE_INI) { pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini); return -EINVAL; } goto out; } /* in case the argument type is boolean */ ret = kstrtobool(arg, &res); if (ret) return ret; new_enable_ini = (res ? ENABLE_INI : 0); out: iwlwifi_mod_params.enable_ini = new_enable_ini; return 0; } static const struct kernel_param_ops enable_ini_ops = { .set = enable_ini_set }; module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644); MODULE_PARM_DESC(enable_ini, "0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined," "Debug INI TLV FW debug infrastructure (default: 16)"); #endif /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the * priority line in the PCIx). * set bt_coex_active to false, uCode will ignore the BT activity and * perform the normal operation * * User might experience transmit issue on some platform due to WiFi/BT * co-exist problem. 
The possible behaviors are: * Able to scan and finding all the available AP * Not able to associate with any AP * On those platforms, WiFi communication can be restored by set * "bt_coex_active" module parameter to "false" * * default: bt_coex_active = true (BT_COEX_ENABLE) */ module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active, bool, 0444); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)"); module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)"); module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444); MODULE_PARM_DESC(power_save, "enable WiFi power management (default: disable)"); module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444); MODULE_PARM_DESC(power_level, "default power save level (range from 1 - 5, default: 1)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); module_param_named(remove_when_gone, iwlwifi_mod_params.remove_when_gone, bool, 0444); MODULE_PARM_DESC(remove_when_gone, "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, S_IRUGO); MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); + +module_param_named(disable_11be, iwlwifi_mod_params.disable_11be, bool, 0444); +MODULE_PARM_DESC(disable_11be, "Disable EHT capabilities (default: false)"); diff --git a/sys/contrib/dev/iwlwifi/iwl-drv.h b/sys/contrib/dev/iwlwifi/iwl-drv.h index 80073f973334..6c19989e4ab7 100644 --- a/sys/contrib/dev/iwlwifi/iwl-drv.h +++ b/sys/contrib/dev/iwlwifi/iwl-drv.h @@ -1,95 +1,99 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2020-2021 Intel Corporation + * Copyright (C) 2005-2014, 2020-2021, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ #include /* for all modules */ #define DRV_NAME "iwlwifi" /* radio config bits (actual values from NVM definition) */ #define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ #define NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ #define NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ #define NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ #define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ #define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ #define EXT_NVM_RF_CFG_FLAVOR_MSK(x) ((x) & 0xF) #define EXT_NVM_RF_CFG_DASH_MSK(x) (((x) >> 4) & 0xF) #define EXT_NVM_RF_CFG_STEP_MSK(x) (((x) >> 8) & 0xF) #define EXT_NVM_RF_CFG_TYPE_MSK(x) (((x) >> 12) & 0xFFF) #define EXT_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 24) & 0xF) #define EXT_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 28) & 0xF) /** * DOC: Driver system flows - drv component * * This component implements the system flows such as bus enumeration, bus * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in * bus specific files (transport files). This is the code that is common among * different buses. * * This component is also in charge of managing the several implementations of * the wifi flows: it will allow to have several fw API implementation. These * different implementations will differ in the way they implement mac80211's * handlers too. 
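 * Currently there are two such implementations, DVM and MVM; the one that
 * gets started is chosen in iwl_req_fw_callback() based on the firmware
 * type found while parsing the ucode file.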
* The init flow wrt to the drv component looks like this: * 1) The bus specific component is called from module_init * 2) The bus specific component registers the bus driver * 3) The bus driver calls the probe function * 4) The bus specific component configures the bus * 5) The bus specific component calls to the drv bus agnostic part * (iwl_drv_start) * 6) iwl_drv_start fetches the fw ASYNC, iwl_req_fw_callback * 7) iwl_req_fw_callback parses the fw file * 8) iwl_req_fw_callback starts the wifi implementation to matches the fw */ struct iwl_drv; struct iwl_trans; struct iwl_cfg; /** * iwl_drv_start - start the drv * * @trans_ops: the ops of the transport * * starts the driver: fetches the firmware. This should be called by bus * specific system flows implementations. For example, the bus specific probe * function should do bus related operations only, and then call to this * function. It returns the driver object or %NULL if an error occurred. */ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans); /** * iwl_drv_stop - stop the drv * * @drv: * * Stop the driver. This should be called by bus specific system flows * implementations. For example, the bus specific remove function should first * call this function and then do the bus related operations only. */ void iwl_drv_stop(struct iwl_drv *drv); /* * exported symbol management * * The driver can be split into multiple modules, in which case some symbols * must be exported for the sub-modules. However, if it's not split and * everything is built-in, then we can avoid that. */ #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR #define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI) #else #define IWL_EXPORT_SYMBOL(sym) #endif /* max retry for init flow */ #define IWL_MAX_INIT_RETRY 2 +#define FW_NAME_PRE_BUFSIZE 64 +struct iwl_trans; +const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf); + #endif /* __iwl_drv_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-eeprom-parse.h b/sys/contrib/dev/iwlwifi/iwl-eeprom-parse.h index 0efffb6eeb1e..0e8ca761d24b 100644 --- a/sys/contrib/dev/iwlwifi/iwl-eeprom-parse.h +++ b/sys/contrib/dev/iwlwifi/iwl-eeprom-parse.h @@ -1,88 +1,88 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018 Intel Corporation + * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #ifndef __iwl_eeprom_parse_h__ #define __iwl_eeprom_parse_h__ #include #include #include #include "iwl-trans.h" struct iwl_nvm_data { int n_hw_addrs; u8 hw_addr[ETH_ALEN]; u8 calib_version; __le16 calib_voltage; __le16 raw_temperature; __le16 kelvin_temperature; __le16 kelvin_voltage; __le16 xtal_calib[2]; bool sku_cap_band_24ghz_enable; bool sku_cap_band_52ghz_enable; bool sku_cap_11n_enable; bool sku_cap_11ac_enable; bool sku_cap_11ax_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; bool sku_cap_mimo_disabled; + bool sku_cap_11be_enable; u16 radio_cfg_type; u8 radio_cfg_step; u8 radio_cfg_dash; u8 radio_cfg_pnum; u8 valid_tx_ant, valid_rx_ant; u32 nvm_version; s8 max_tx_pwr_half_dbm; bool lar_enabled; bool vht160_supported; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; /* - * iftype data for low (2.4 GHz) and high (5 and 6 GHz) bands, - * we can use the same for 5 and 6 GHz bands because they have - * the same data + * iftype data for low (2.4 GHz) high (5 GHz) and uhb (6 GHz) bands */ struct { struct ieee80211_sband_iftype_data low[2]; struct ieee80211_sband_iftype_data high[2]; + struct 
ieee80211_sband_iftype_data uhb[2]; } iftd; struct ieee80211_channel channels[]; }; /** * iwl_parse_eeprom_data - parse EEPROM data and return values * * @dev: device pointer we're parsing for, for debug only * @cfg: device configuration for parsing and overrides * @eeprom: the EEPROM data * @eeprom_size: length of the EEPROM data * * This function parses all EEPROM values we need and then * returns a (newly allocated) struct containing all the * relevant values for driver use. The struct must be freed * later with iwl_free_nvm_data(). */ struct iwl_nvm_data * iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size); int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); void iwl_init_ht_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_sta_ht_cap *ht_info, enum nl80211_band band, u8 tx_chains, u8 rx_chains); #endif /* __iwl_eeprom_parse_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-fh.h b/sys/contrib/dev/iwlwifi/iwl-fh.h index bedd78a47f67..4e4a60ddf9b2 100644 --- a/sys/contrib/dev/iwlwifi/iwl-fh.h +++ b/sys/contrib/dev/iwlwifi/iwl-fh.h @@ -1,740 +1,740 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fh_h__ #define __iwl_fh_h__ #include #include #include "iwl-trans.h" /****************************/ /* Flow Handler Definitions */ /****************************/ /** * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) * Addresses are offsets from device's PCI hardware base address. */ #define FH_MEM_LOWER_BOUND (0x1000) #define FH_MEM_UPPER_BOUND (0x2000) #define FH_MEM_LOWER_BOUND_GEN2 (0xa06000) #define FH_MEM_UPPER_BOUND_GEN2 (0xa08000) /** * Keep-Warm (KW) buffer base address. * * Driver must allocate a 4KByte buffer that is for keeping the * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency * DRAM access when doing Txing or Rxing. The dummy accesses prevent host * from going into a power-savings mode that would cause higher DRAM latency, * and possible data over/under-runs, before all Tx/Rx is complete. * * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) * of the buffer, which must be 4K aligned. Once this is set up, the device * automatically invokes keep-warm accesses when normal accesses might not * be sufficient to maintain fast DRAM response. * * Bit fields: * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned */ #define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) /** * TFD Circular Buffers Base (CBBC) addresses * * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte * aligned (address bits 0-7 must be 0). * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers * for them are in different places. 
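 * For example, with the layout below queue 18 is controlled through
 * FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * 2 and queue 25 through
 * FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * 5, while gen2 devices use the flat
 * 64-bit table at TFH_TFDQ_CBB_TABLE + 8 * queue (see FH_MEM_CBBC_QUEUE()
 * below).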
* * Bit fields in each pointer register: * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned */ #define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) #define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) #define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) #define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) #define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) /* 22000 TFD table address, 64 bit */ #define TFH_TFDQ_CBB_TABLE (0x1C00) /* Find TFD CB base pointer for given queue */ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, unsigned int chnl) { - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { WARN_ON_ONCE(chnl >= 64); return TFH_TFDQ_CBB_TABLE + 8 * chnl; } if (chnl < 16) return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; if (chnl < 20) return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); WARN_ON_ONCE(chnl >= 32); return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); } /* 22000 configuration registers */ /* * TFH Configuration register. * * BIT fields: * * Bits 3:0: * Define the maximum number of pending read requests. * Maximum configuration value allowed is 0xC * Bits 9:8: * Define the maximum transfer size. (64 / 128 / 256) * Bit 10: * When bit is set and transfer size is set to 128B, the TFH will enable * reading chunks of more than 64B only if the read address is aligned to 128B. * In case of DRAM read address which is not aligned to 128B, the TFH will * enable transfer size which doesn't cross 64B DRAM address boundary. */ #define TFH_TRANSFER_MODE (0x1F40) #define TFH_TRANSFER_MAX_PENDING_REQ 0xc #define TFH_CHUNK_SIZE_128 BIT(8) #define TFH_CHUNK_SPLIT_MODE BIT(10) /* * Defines the offset address in dwords referring from the beginning of the * Tx CMD which will be updated in DRAM. * Note that the TFH offset address for Tx CMD update is always referring to * the start of the TFD first TB. * In case of a DRAM Tx CMD update the TFH will update PN and Key ID */ #define TFH_TXCMD_UPDATE_CFG (0x1F48) /* * Controls TX DMA operation * * BIT fields: * * Bits 31:30: Enable the SRAM DMA channel. * Turning on bit 31 will kick the SRAM2DRAM DMA. * Note that the sram2dram may be enabled only after configuring the DRAM and * SRAM addresses registers and the byte count register. * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When * set to 1 - interrupt is sent to the driver * Bit 0: Indicates the snoop configuration */ #define TFH_SRV_DMA_CHNL0_CTRL (0x1F60) #define TFH_SRV_DMA_SNOOP BIT(0) #define TFH_SRV_DMA_TO_DRIVER BIT(24) #define TFH_SRV_DMA_START BIT(31) /* Defines the DMA SRAM write start address to transfer a data block */ #define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64) /* Defines the 64bits DRAM start address to read the DMA data block from */ #define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68) /* * Defines the number of bytes to transfer from DRAM to SRAM. * Note that this register may be configured with non-dword aligned size. */ #define TFH_SRV_DMA_CHNL0_BC (0x1F70) /** * Rx SRAM Control and Status Registers (RSCSR) * * These registers provide handshake between driver and device for the Rx queue * (this queue handles *all* command responses, notifications, Rx data, etc. * sent from uCode to host driver). Unlike Tx, there is only one Rx * queue, and only one Rx DMA/FIFO channel. 
Also unlike Tx, which can * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 * mapping between RBDs and RBs. * * Driver must allocate host DRAM memory for the following, and set the * physical address of each into device registers: * * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 * entries (although any power of 2, up to 4096, is selectable by driver). * Each entry (1 dword) points to a receive buffer (RB) of consistent size * (typically 4K, although 8K or 16K are also selectable by driver). * Driver sets up RB size and number of RBDs in the CB via Rx config * register FH_MEM_RCSR_CHNL0_CONFIG_REG. * * Bit fields within one RBD: * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned * * Driver sets physical address [35:8] of base of RBD circular buffer * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. * * 2) Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers * (RBs) have been filled, via a "write pointer", actually the index of * the RB's corresponding RBD within the circular buffer. Driver sets * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. * * Bit fields in lower dword of Rx status buffer (upper dword not used * by driver: * 31-12: Not used by driver * 11- 0: Index of last filled Rx buffer descriptor * (device writes, driver reads this value) * * As the driver prepares Receive Buffers (RBs) for device to fill, driver must * enter pointers to these RBs into contiguous RBD circular buffer entries, * and update the device's "write" index register, * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. * * This "write" index corresponds to the *next* RBD that the driver will make * available, i.e. one RBD past the tail of the ready-to-fill RBDs within * the circular buffer. This value should initially be 0 (before preparing any * RBs), should be 8 after preparing the first 8 RBs (for example), and must * wrap back to 0 at the end of the circular buffer (but don't wrap before * "read" index has advanced past 1! See below). * NOTE: DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. * * As the device fills RBs (referenced from contiguous RBDs within the circular * buffer), it updates the Rx status buffer in host DRAM, 2) described above, * to tell the driver the index of the latest filled RBD. The driver must * read this "read" index from DRAM after receiving an Rx interrupt from device * * The driver must also internally keep track of a third index, which is the * next RBD to process. When receiving an Rx interrupt, driver should process * all filled but unprocessed RBs up to, but not including, the RB * corresponding to the "read" index. For example, if "read" index becomes "1", * driver may process the RB pointed to by RBD 0. Depending on volume of * traffic, there may be many RBs to process. * * If read index == write index, device thinks there is no room to put new data. * Due to this, the maximum number of filled RBs is 255, instead of 256. To * be safe, make sure that there is a gap of at least 2 RBDs between "write" * and "read" indexes; that is, make sure that there are no more than 254 * buffers waiting to be filled. */ #define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0) #define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) /** * Physical base address of 8-byte Rx Status buffer. 
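 * This is the 8-byte Rx status buffer described in the overview above; it
 * corresponds to the fields of struct iwl_rb_status further below
 * (closed/finished RB and frame indices written back by the device).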
* Bit fields: * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. */ #define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) /** * Physical base address of Rx Buffer Descriptor Circular Buffer. * Bit fields: * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. */ #define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) /** * Rx write pointer (index, really!). * Bit fields: * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. * NOTE: For 256-entry circular buffer, use only bits [7:0]. */ #define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008) #define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG) #define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) #define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG /** * Rx Config/Status Registers (RCSR) * Rx Config Reg for channel 0 (only channel used) * * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for * normal operation (see bit fields). * * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA. * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing. * * Bit fields: * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame, * '10' operate normally * 29-24: reserved * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal), * min "5" for 32 RBDs, max "12" for 4096 RBDs. * 19-18: reserved * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K, * '10' 12K, '11' 16K. * 15-14: reserved * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) * typical value 0x10 (about 1/2 msec) * 3- 0: reserved */ #define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) #define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) #define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) #define FH_MEM_RCSR_CHNL0_RBDCB_WPTR (FH_MEM_RCSR_CHNL0 + 0x8) #define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ (FH_MEM_RCSR_CHNL0 + 0x10) #define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ #define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ #define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ #define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ #define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ #define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) #define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) #define RX_RB_TIMEOUT (0x11) #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) #define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) #define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) #define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) #define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) /** * Rx Shared Status Registers (RSSR) * * After stopping Rx DMA channel (writing 0 to * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. 
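 * An illustrative (not normative) stop sequence, assuming the helpers
 * from iwl-io.c:
 *
 *	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 *	iwl_poll_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
 *		     FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
 *		     FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);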
* * Bit fields: * 24: 1 = Channel 0 is idle * * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV * contain default values that should not be altered by the driver. */ #define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) #define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) #define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) #define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) #define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ (FH_MEM_RSSR_LOWER_BOUND + 0x008) #define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 #define FH_MEM_TB_MAX_LENGTH (0x00020000) /* 9000 rx series registers */ #define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ #define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) /* Write index table */ #define RFH_Q0_FRBDCB_WIDX 0xA08080 #define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) /* Write index table - shadow registers */ #define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80 #define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4) /* Read index table */ #define RFH_Q0_FRBDCB_RIDX 0xA080C0 #define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) /* Used list table */ #define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ #define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) /* Write index table */ #define RFH_Q0_URBDCB_WIDX 0xA08180 #define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) #define RFH_Q0_URBDCB_VAID 0xA081C0 #define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) /* stts */ #define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ #define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) #define RFH_Q0_ORB_WPTR_LSB 0xA08280 #define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) #define RFH_RBDBUF_RBD0_LSB 0xA08300 #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) /** * RFH Status Register * * Bit fields: * * Bit 29: RBD_FETCH_IDLE * This status flag is set by the RFH when there is no active RBD fetch from * DRAM. * Once the RFH RBD controller starts fetching (or when there is a pending * RBD read response from DRAM), this flag is immediately turned off. * * Bit 30: SRAM_DMA_IDLE * This status flag is set by the RFH when there is no active transaction from * SRAM to DRAM. * Once the SRAM to DRAM DMA is active, this flag is immediately turned off. * * Bit 31: RXF_DMA_IDLE * This status flag is set by the RFH when there is no active transaction from * RXF to DRAM. * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. 
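 * A fully idle RFH therefore reads back with bits 29-31 all set; e.g. a
 * driver could (illustration only) check
 *	(iwl_read32(trans, RFH_GEN_STATUS) &
 *	 (RBD_FETCH_IDLE | SRAM_DMA_IDLE | RXF_DMA_IDLE)) ==
 *	(RBD_FETCH_IDLE | SRAM_DMA_IDLE | RXF_DMA_IDLE)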
*/ #define RFH_GEN_STATUS 0xA09808 #define RFH_GEN_STATUS_GEN3 0xA07824 #define RBD_FETCH_IDLE BIT(29) #define SRAM_DMA_IDLE BIT(30) #define RXF_DMA_IDLE BIT(31) /* DMA configuration */ #define RFH_RXF_DMA_CFG 0xA09820 #define RFH_RXF_DMA_CFG_GEN3 0xA07880 /* RB size */ #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ #define RFH_RXF_DMA_RB_SIZE_POS 16 #define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) #define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) /* RB Circular Buffer size:defines the table sizes in RBD units */ #define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ #define RFH_RXF_DMA_RBDCB_SIZE_POS 20 #define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ #define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 #define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) #define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ #define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ #define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ #define RFH_DMA_EN_ENABLE_VAL BIT(31) #define RFH_RXF_RXQ_ACTIVE 0xA0980C #define RFH_GEN_CFG 0xA09800 #define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) #define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) #define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) #define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1 #define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0 /* the driver assumes everywhere that the default RXQ is 0 */ #define RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00 #define RFH_GEN_CFG_VAL(_n, _v) FIELD_PREP(RFH_GEN_CFG_ ## _n, _v) /* end of 9000 rx series registers */ /* TFDB Area - TFDs buffer table */ #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) #define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) #define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) #define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) /** * Transmit DMA Channel Control/Status Registers (TCSR) * * Device has one configuration register for each of 8 Tx DMA/FIFO channels * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. 
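 * For example, the config register for channel 4 is
 * FH_TCSR_CHNL_TX_CONFIG_REG(4) = FH_TCSR_LOWER_BOUND + 0x20 * 4,
 * i.e. FH_MEM_LOWER_BOUND + 0xD80.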
* * To use a Tx DMA channel, driver must initialize its * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: * * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL * * All other bits should be 0. * * Bit fields: * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, * '10' operate normally * 29- 4: Reserved, set to "0" * 3: Enable internal DMA requests (1, normal operation), disable (0) * 2- 0: Reserved, set to "0" */ #define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) #define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) /* Find Control/Status reg for given Tx DMA/FIFO channel */ #define FH_TCSR_CHNL_NUM (8) /* TCSR: tx_config register values */ #define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) #define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) #define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) #define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) #define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) #define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) #define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) /** * Tx Shared Status Registers (TSSR) * * After stopping Tx DMA channel (writing 0 to * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle * (channel's buffers empty | no pending requests). * * Bit fields: * 31-24: 1 = Channel buffers empty (channel 7:0) * 23-16: 1 = No pending requests (channel 7:0) */ #define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) #define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) #define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) /** * Bit fields for TSSR(Tx Shared Status & Control) error status register: * 31: Indicates an address error when accessed to internal memory * uCode/driver must write "1" in order to clear this flag * 30: Indicates that Host did not send the expected number of dwords to FH * uCode/driver must write "1" in order to clear this flag * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA * command was received from the scheduler while the TRB was already full * with previous command * uCode/driver must write "1" in order to clear this flag * 7-0: Each status bit indicates a channel's TxCredit error. When an error * bit is set, it indicates that the FH has received a full indication * from the RTC TxFIFO and the current value of the TxCredit counter was * not equal to zero. 
This mean that the credit mechanism was not * synchronized to the TxFIFO status * uCode/driver must write "1" in order to clear this flag */ #define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018) #define FH_TSSR_TX_MSG_CONFIG_REG (FH_TSSR_LOWER_BOUND + 0x008) #define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16) /* Tx service channels */ #define FH_SRVC_CHNL (9) #define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8) #define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) #define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) #define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) #define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4) /* Instruct FH to increment the retry count of a packet when * it is brought from the memory to TX-FIFO */ #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) #define RX_POOL_SIZE(rbds) ((rbds) - 1 + \ IWL_MAX_RX_HW_QUEUES * \ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)) /* cb size is the exponent */ #define RX_QUEUE_CB_SIZE(x) ilog2(x) #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers * @closed_rb_num [0:11] - Indicates the index of the RB which was closed * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed * @finished_rb_num [0:11] - Indicates the index of the current RB * in which the last frame was written to * @finished_fr_num [0:11] - Indicates the index of the RX Frame * which was transferred */ struct iwl_rb_status { __le16 closed_rb_num; __le16 closed_fr_num; __le16 finished_rb_num; __le16 finished_fr_nam; __le32 __spare; } __packed; #define TFD_QUEUE_SIZE_MAX (256) #define TFD_QUEUE_SIZE_MAX_GEN3 (65536) /* cb size is the exponent - 3 */ #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) #define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024 #define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 /* IMR DMA registers */ #define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c #define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520 #define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524 #define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528 #define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c #define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530 /* RFH S2D DMA registers */ #define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c #define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002 /* TFH D2S DMA registers */ #define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000 #define IMR_UREG_CHICK 0x00d05c00 #define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000 #define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030 #define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000 static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) { return (sizeof(addr) > sizeof(u32) ? 
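	/*
	 * bits 35:32 of a 36-bit DMA address; 0 when dma_addr_t is only
	 * 32 bits wide
	 */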
upper_32_bits(addr) : 0) & 0xF; } /** * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address * @TB_HI_N_LEN_LEN_MSK: length of the TB */ enum iwl_tfd_tb_hi_n_len { TB_HI_N_LEN_ADDR_HI_MSK = 0xf, TB_HI_N_LEN_LEN_MSK = 0xfff0, }; /** * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * * @lo: low [31:0] portion of the dma address of TX buffer * every even is unaligned on 16 bit boundary * @hi_n_len: &enum iwl_tfd_tb_hi_n_len */ struct iwl_tfd_tb { __le32 lo; __le16 hi_n_len; } __packed; /** * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * * @tb_len length of the tx buffer * @addr 64 bits dma address */ struct iwl_tfh_tb { __le16 tb_len; __le64 addr; } __packed; /** * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM. * Both driver and device share these circular buffers, each of which must be * contiguous 256 TFDs. * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes * * Driver must indicate the physical address of the base of each * circular buffer via the FH_MEM_CBBC_QUEUE registers. * * Each TFD contains pointer/size information for up to 20 / 25 data buffers * in host DRAM. These buffers collectively contain the (one) frame described * by the TFD. Each buffer must be a single contiguous block of memory within * itself, but buffers may be scattered in host DRAM. Each buffer has max size * of (4K - 4). The concatenates all of a TFD's buffers into a single * Tx frame, up to 8 KBytes in size. * * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. 
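 *
 * Keeping one entry unused is what lets the driver tell a full queue from an
 * empty one: the read and write indexes are equal only when the queue is
 * empty, and each index wraps around the ring as, e.g.,
 * idx = (idx + 1) & (queue_size - 1).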
*/ /** * struct iwl_tfd - Transmit Frame Descriptor (TFD) * @ __reserved1[3] reserved * @ num_tbs 0-4 number of active tbs * 5 reserved * 6-7 padding (not used) * @ tbs[20] transmit frame buffer descriptors * @ __pad padding */ struct iwl_tfd { u8 __reserved1[3]; u8 num_tbs; struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; __le32 __pad; } __packed; /** * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD) * @ num_tbs 0-4 number of active tbs * 5 -15 reserved * @ tbs[25] transmit frame buffer descriptors * @ __pad padding */ struct iwl_tfh_tfd { __le16 num_tbs; struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS]; __le32 __pad; } __packed; /* Keep Warm Size */ #define IWL_KW_SIZE 0x1000 /* 4k */ /* Fixed (non-configurable) rx data from phy */ /** * struct iwlagn_schedq_bc_tbl scheduler byte count table * base physical address provided by SCD_DRAM_BASE_ADDR * For devices up to 22000: * @tfd_offset 0-12 - tx command byte count * 12-16 - station index * For 22000: * @tfd_offset 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ struct iwlagn_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; } __packed; /** * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3 * For AX210 and on: * @tfd_offset: 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ struct iwl_gen3_bc_tbl_entry { __le16 tfd_offset; } __packed; #endif /* !__iwl_fh_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-io.c b/sys/contrib/dev/iwlwifi/iwl-io.c index 396f2c997da6..c60f9466c5fd 100644 --- a/sys/contrib/dev/iwlwifi/iwl-io.c +++ b/sys/contrib/dev/iwlwifi/iwl-io.c @@ -1,528 +1,530 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include #include #include #include "iwl-drv.h" #include "iwl-io.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-prph.h" #include "iwl-fh.h" void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); iwl_trans_write8(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write8); void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val); iwl_trans_write32(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write32); void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val) { trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val); iwl_trans_write32(trans, ofs, lower_32_bits(val)); iwl_trans_write32(trans, ofs + 4, upper_32_bits(val)); } IWL_EXPORT_SYMBOL(iwl_write64); u32 iwl_read32(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read32(trans, ofs); trace_iwlwifi_dev_ioread32(trans->dev, ofs, val); return val; } IWL_EXPORT_SYMBOL(iwl_read32); #define IWL_POLL_INTERVAL 10 /* microseconds */ int iwl_poll_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { int t = 0; do { if ((iwl_read32(trans, addr) & mask) == (bits & mask)) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } IWL_EXPORT_SYMBOL(iwl_poll_bit); u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) { if (iwl_trans_grab_nic_access(trans)) { u32 value = iwl_read32(trans, reg); iwl_trans_release_nic_access(trans); return value; } + /* return as if we have a HW timeout/failure */ return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_direct32); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) { if (iwl_trans_grab_nic_access(trans)) 
{ iwl_write32(trans, reg, value); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_write_direct32); void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value) { if (iwl_trans_grab_nic_access(trans)) { iwl_write64(trans, reg, value); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_write_direct64); int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, int timeout) { int t = 0; do { if ((iwl_read_direct32(trans, addr) & mask) == mask) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } IWL_EXPORT_SYMBOL(iwl_poll_direct_bit); u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read_prph(trans, ofs); trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val); return val; } IWL_EXPORT_SYMBOL(iwl_read_prph_no_grab); void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) { trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val); iwl_trans_write_prph(trans, ofs, val); } IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab); void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val) { trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val); iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); } IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { if (iwl_trans_grab_nic_access(trans)) { u32 val = iwl_read_prph_no_grab(trans, ofs); iwl_trans_release_nic_access(trans); return val; } + /* return as if we have a HW timeout/failure */ return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_prph); void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms) { if (iwl_trans_grab_nic_access(trans)) { mdelay(delay_ms); iwl_write_prph_no_grab(trans, ofs, val); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_write_prph_delay); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) { int t = 0; do { if ((iwl_read_prph(trans, addr) & mask) == (bits & mask)) return t; udelay(IWL_POLL_INTERVAL); t += IWL_POLL_INTERVAL; } while (t < timeout); return -ETIMEDOUT; } void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { if (iwl_trans_grab_nic_access(trans)) { iwl_write_prph_no_grab(trans, ofs, iwl_read_prph_no_grab(trans, ofs) | mask); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_set_bits_prph); void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, u32 bits, u32 mask) { if (iwl_trans_grab_nic_access(trans)) { iwl_write_prph_no_grab(trans, ofs, (iwl_read_prph_no_grab(trans, ofs) & mask) | bits); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) { u32 val; if (iwl_trans_grab_nic_access(trans)) { val = iwl_read_prph_no_grab(trans, ofs); iwl_write_prph_no_grab(trans, ofs, (val & ~mask)); iwl_trans_release_nic_access(trans); } } IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV, 1); else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); else iwl_write32(trans, 
CSR_DOORBELL_VECTOR, UREG_DOORBELL_TO_ISR6_NMI_BIT); } IWL_EXPORT_SYMBOL(iwl_force_nmi); static const char *get_rfh_string(int cmd) { #define IWL_CMD(x) case x: return #x #define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; } int i; for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) { IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i); IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i); IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i); IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i); } switch (cmd) { IWL_CMD(RFH_RXF_DMA_CFG); IWL_CMD(RFH_GEN_CFG); IWL_CMD(RFH_GEN_STATUS); IWL_CMD(FH_TSSR_TX_STATUS_REG); IWL_CMD(FH_TSSR_TX_ERROR_REG); default: return "UNKNOWN"; } #undef IWL_CMD_MQ } struct reg { u32 addr; bool is64; }; static int iwl_dump_rfh(struct iwl_trans *trans, char **buf) { int i, q; int num_q = trans->num_rx_queues; static const u32 rfh_tbl[] = { RFH_RXF_DMA_CFG, RFH_GEN_CFG, RFH_GEN_STATUS, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG, }; static const struct reg rfh_mq_tbl[] = { { RFH_Q0_FRBDCB_BA_LSB, true }, { RFH_Q0_FRBDCB_WIDX, false }, { RFH_Q0_FRBDCB_RIDX, false }, { RFH_Q0_URBD_STTS_WPTR_LSB, true }, }; #ifdef CONFIG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; /* * Register (up to 34 for name + 8 blank/q for MQ): 40 chars * Colon + space: 2 characters * 0X%08x: 10 characters * New line: 1 character * Total of 53 characters */ size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 + ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; pos += scnprintf(*buf + pos, bufsz - pos, "RFH register values:\n"); for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, "%40s: 0X%08x\n", get_rfh_string(rfh_tbl[i]), iwl_read_prph(trans, rfh_tbl[i])); for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) for (q = 0; q < num_q; q++) { u32 addr = rfh_mq_tbl[i].addr; addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4); pos += scnprintf(*buf + pos, bufsz - pos, "%34s(q %2d): 0X%08x\n", get_rfh_string(addr), q, iwl_read_prph(trans, addr)); } return pos; } #endif IWL_ERR(trans, "RFH register values:\n"); for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_rfh_string(rfh_tbl[i]), iwl_read_prph(trans, rfh_tbl[i])); for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) for (q = 0; q < num_q; q++) { u32 addr = rfh_mq_tbl[i].addr; addr += q * (rfh_mq_tbl[i].is64 ? 
8 : 4); IWL_ERR(trans, " %34s(q %d): 0X%08x\n", get_rfh_string(addr), q, iwl_read_prph(trans, addr)); } return 0; } static const char *get_fh_string(int cmd) { switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); IWL_CMD(FH_RSCSR_CHNL0_WPTR); IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); IWL_CMD(FH_TSSR_TX_STATUS_REG); IWL_CMD(FH_TSSR_TX_ERROR_REG); default: return "UNKNOWN"; } #undef IWL_CMD } int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_WPTR, FH_MEM_RCSR_CHNL0_CONFIG_REG, FH_MEM_RSSR_SHARED_CTRL_REG, FH_MEM_RSSR_RX_STATUS_REG, FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; if (trans->trans_cfg->mq_rx_supported) return iwl_dump_rfh(trans, buf); #ifdef CONFIG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return pos; } #endif IWL_ERR(trans, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return 0; } #define IWL_HOST_MON_BLOCK_PEMON 0x00 #define IWL_HOST_MON_BLOCK_HIPM 0x22 #define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00 #define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01 #define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06 static void iwl_dump_host_monitor_block(struct iwl_trans *trans, u32 block, u32 vec, u32 iter) { int i; IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec); iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec); for (i = 0; i < iter; i++) IWL_ERR(trans, " value [iter %d]: 0x%08x\n", i, iwl_read32(trans, CSR_MONITOR_STATUS_REG)); } static void iwl_dump_host_monitor(struct iwl_trans *trans) { switch (trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_22000: case IWL_DEVICE_FAMILY_AX210: IWL_ERR(trans, "CSR_RESET = 0x%x\n", iwl_read32(trans, CSR_RESET)); iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, IWL_HOST_MON_BLOCK_PEMON_VEC0, 15); iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, IWL_HOST_MON_BLOCK_PEMON_VEC1, 15); iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON, IWL_HOST_MON_BLOCK_PEMON_WFPM, 15); iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM, IWL_HOST_MON_BLOCK_PEMON_VEC0, 1); break; default: /* not supported yet */ return; } } int iwl_finish_nic_init(struct iwl_trans *trans) { const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg; u32 poll_ready; int err; if (cfg_trans->bisr_workaround) { /* ensure the TOP FSM isn't still in previous reset */ mdelay(2); } /* * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. 
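 *
 * (D0U* is the un-initialized power state the device sits in after reset,
 * D0A* the powered-up active one; the CSR_GP_CNTRL poll further down waits
 * until the device reports that this transition has completed.)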
*/ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_MAC_INIT); poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; } else { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY; } if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); /* * Wait for clock stabilization; once stabilized, access to * device-internal resources is supported, e.g. iwl_write_prph() * and accesses to uCode SRAM. */ err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000); if (err < 0) { IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); iwl_dump_host_monitor(trans); } if (cfg_trans->bisr_workaround) { /* ensure BISR shift has finished */ udelay(200); } return err < 0 ? err : 0; } IWL_EXPORT_SYMBOL(iwl_finish_nic_init); void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, u32 sw_err_bit) { unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status); /* if the interrupts were already disabled, there is no point in * calling iwl_disable_interrupts */ if (interrupts_enabled) iwl_trans_interrupts(trans, false); iwl_force_nmi(trans); while (time_after(timeout, jiffies)) { u32 inta_hw = iwl_read32(trans, inta_addr); /* Error detected by uCode */ if (inta_hw & sw_err_bit) { /* Clear causes register */ iwl_write32(trans, inta_addr, inta_hw & sw_err_bit); break; } mdelay(1); } /* enable interrupts only if there were already enabled before this * function to avoid a case were the driver enable interrupts before * proper configurations were made */ if (interrupts_enabled) iwl_trans_interrupts(trans, true); iwl_trans_fw_error(trans, false); } diff --git a/sys/contrib/dev/iwlwifi/iwl-modparams.h b/sys/contrib/dev/iwlwifi/iwl-modparams.h index cb2b2e81b28c..9d8c018c725e 100644 --- a/sys/contrib/dev/iwlwifi/iwl-modparams.h +++ b/sys/contrib/dev/iwlwifi/iwl-modparams.h @@ -1,115 +1,116 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2022 Intel Corporation */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ #include #include #include #ifdef CONFIG_IWLWIFI_DEBUG #include "iwl-debug.h" #endif extern struct iwl_mod_params iwlwifi_mod_params; enum iwl_power_level { IWL_POWER_INDEX_1, IWL_POWER_INDEX_2, IWL_POWER_INDEX_3, IWL_POWER_INDEX_4, IWL_POWER_INDEX_5, IWL_POWER_NUM }; enum iwl_disable_11n { IWL_DISABLE_HT_ALL = BIT(0), IWL_DISABLE_HT_TXAGG = BIT(1), IWL_DISABLE_HT_RXAGG = BIT(2), IWL_ENABLE_HT_TXAGG = BIT(3), }; enum iwl_amsdu_size { IWL_AMSDU_DEF = 0, IWL_AMSDU_4K = 1, IWL_AMSDU_8K = 2, IWL_AMSDU_12K = 3, /* Add 2K at the end to avoid breaking current API */ IWL_AMSDU_2K = 4, }; enum iwl_uapsd_disable { IWL_DISABLE_UAPSD_BSS = BIT(0), IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1), }; /** * struct iwl_mod_params * * Holds the module parameters * * @swcrypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_[DIS,EN]ABLE_HT_* constants * @amsdu_size: See &enum iwl_amsdu_size. 
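 *	(IWL_AMSDU_DEF, the default, leaves the choice to the driver, which
 *	picks a size suited to the device's Rx buffer support)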
* @fw_restart: restart firmware, default = 1 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 * @power_save: enable power save, default = false * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. * @enable_ini: enable new FW debug infratructure (INI TLVs) + * @disable_11be: disable EHT capabilities, default = false. */ struct iwl_mod_params { int swcrypto; unsigned int disable_11n; int amsdu_size; bool fw_restart; bool bt_coex_active; int led_mode; bool power_save; int power_level; #ifdef CONFIG_IWLWIFI_DEBUG #if defined(__linux__) u32 debug_level; #elif defined(__FreeBSD__) enum iwl_dl debug_level; #endif #endif char *nvm_file; u32 uapsd_disable; bool disable_11ac; /** * @disable_11ax: disable HE capabilities, default = false */ bool disable_11ax; bool remove_when_gone; u32 enable_ini; bool disable_11be; }; static inline bool iwl_enable_rx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) return false; return true; } static inline bool iwl_enable_tx_ampdu(void) { if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) return false; if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) return true; /* enabled by default */ return true; } #endif /* #__iwl_modparams_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c index fdb823e55792..d20a6019dbca 100644 --- a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c +++ b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.c @@ -1,1826 +1,2083 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "iwl-io.h" #include "iwl-csr.h" #include "fw/acpi.h" #include "fw/api/nvm-reg.h" #include "fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" #include "mei/iwl-mei.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { /* NVM HW-Section offset (in words) definitions */ SUBSYSTEM_ID = 0x0A, HW_ADDR = 0x15, /* NVM SW-Section offset (in words) definitions */ NVM_SW_SECTION = 0x1C0, NVM_VERSION = 0, RADIO_CFG = 1, SKU = 2, N_HW_ADDRS = 3, NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, /* NVM calibration section offset (in words) definitions */ NVM_CALIB_SECTION = 0x2B8, XTAL_CALIB = 0x316 - NVM_CALIB_SECTION, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_SDP = 0, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, /* NVM SW-Section offset (in words) definitions */ NVM_VERSION_EXT_NVM = 0, N_HW_ADDRS_FAMILY_8000 = 3, /* NVM PHY_SKU-Section offset (in words) definitions */ RADIO_CFG_FAMILY_EXT_NVM = 0, SKU_FAMILY_8000 = 2, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_EXTENDED = 0, NVM_LAR_OFFSET_OLD = 0x4C7, NVM_LAR_OFFSET = 0x507, NVM_LAR_ENABLED = 0x7, }; /* SKU Capabilities (actual values from NVM definition) */ enum nvm_sku_bits { 
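	/* these bits come from the SKU word returned by iwl_get_sku() below
	 * and are reflected in the data->sku_cap_* flags checked when the
	 * bands are built */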
NVM_SKU_CAP_BAND_24GHZ = BIT(0), NVM_SKU_CAP_BAND_52GHZ = BIT(1), NVM_SKU_CAP_11N_ENABLE = BIT(2), NVM_SKU_CAP_11AC_ENABLE = BIT(3), NVM_SKU_CAP_MIMO_DISABLE = BIT(5), }; /* * These are the channel numbers in the order that they are stored in the NVM */ static const u16 iwl_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ - 36, 40, 44 , 48, 52, 56, 60, 64, + 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165 }; static const u16 iwl_ext_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181 }; static const u16 iwl_uhb_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165, 169, 173, 177, 181, /* 6-7 GHz */ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185, 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233 }; #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels) #define NUM_2GHZ_CHANNELS 14 #define NUM_5GHZ_CHANNELS 37 #define FIRST_2GHZ_HT_MINUS 5 #define LAST_2GHZ_HT_PLUS 9 #define N_HW_ADDR_MASK 0xF /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE, }, { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, }, { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, }, { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, }, { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, }, { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, }, { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, }, { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, }, { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, }, }; #define RATES_24_OFFS 0 #define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates) #define RATES_52_OFFS 4 #define N_RATES_52 (N_RATES_24 - RATES_52_OFFS) /** * enum iwl_nvm_channel_flags - channel flags in NVM * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo * @NVM_CHANNEL_IBSS: usable as an IBSS channel * @NVM_CHANNEL_ACTIVE: active scanning allowed * @NVM_CHANNEL_RADAR: radar detection required * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS * on same channel on 2.4 or same UNII band on 5.2 * @NVM_CHANNEL_UNIFORM: uniform spreading required * @NVM_CHANNEL_20MHZ: 20 MHz channel okay * @NVM_CHANNEL_40MHZ: 40 MHz channel okay * @NVM_CHANNEL_80MHZ: 80 MHz channel okay * @NVM_CHANNEL_160MHZ: 160 MHz channel okay * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) 
*/ enum iwl_nvm_channel_flags { NVM_CHANNEL_VALID = BIT(0), NVM_CHANNEL_IBSS = BIT(1), NVM_CHANNEL_ACTIVE = BIT(3), NVM_CHANNEL_RADAR = BIT(4), NVM_CHANNEL_INDOOR_ONLY = BIT(5), NVM_CHANNEL_GO_CONCURRENT = BIT(6), NVM_CHANNEL_UNIFORM = BIT(7), NVM_CHANNEL_20MHZ = BIT(8), NVM_CHANNEL_40MHZ = BIT(9), NVM_CHANNEL_80MHZ = BIT(10), NVM_CHANNEL_160MHZ = BIT(11), NVM_CHANNEL_DC_HIGH = BIT(12), }; /** - * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * enum iwl_reg_capa_flags_v1 - global flags applied for the whole regulatory * domain. - * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * @REG_CAPA_V1_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the * 2.4Ghz band is allowed. - * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * @REG_CAPA_V1_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the * 5Ghz band is allowed. - * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * @REG_CAPA_V1_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * @REG_CAPA_V1_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. - * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. - * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden + * @REG_CAPA_V1_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_V1_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_V1_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. - * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain. + * @REG_CAPA_V1_DC_HIGH_ENABLED: DC HIGH allowed. + * @REG_CAPA_V1_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ -enum iwl_reg_capa_flags { - REG_CAPA_BF_CCD_LOW_BAND = BIT(0), - REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), - REG_CAPA_160MHZ_ALLOWED = BIT(2), - REG_CAPA_80MHZ_ALLOWED = BIT(3), - REG_CAPA_MCS_8_ALLOWED = BIT(4), - REG_CAPA_MCS_9_ALLOWED = BIT(5), - REG_CAPA_40MHZ_FORBIDDEN = BIT(7), - REG_CAPA_DC_HIGH_ENABLED = BIT(9), - REG_CAPA_11AX_DISABLED = BIT(10), -}; +enum iwl_reg_capa_flags_v1 { + REG_CAPA_V1_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_V1_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_V1_160MHZ_ALLOWED = BIT(2), + REG_CAPA_V1_80MHZ_ALLOWED = BIT(3), + REG_CAPA_V1_MCS_8_ALLOWED = BIT(4), + REG_CAPA_V1_MCS_9_ALLOWED = BIT(5), + REG_CAPA_V1_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_V1_DC_HIGH_ENABLED = BIT(9), + REG_CAPA_V1_11AX_DISABLED = BIT(10), +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_1 */ /** * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory * domain (version 2). * @REG_CAPA_V2_STRADDLE_DISABLED: Straddle channels (144, 142, 138) are * disabled. * @REG_CAPA_V2_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the * 2.4Ghz band is allowed. * @REG_CAPA_V2_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the * 5Ghz band is allowed. * @REG_CAPA_V2_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_V2_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). * @REG_CAPA_V2_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. 
* @REG_CAPA_V2_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. * @REG_CAPA_V2_WEATHER_DISABLED: Weather radar channels (120, 124, 128, 118, * 126, 122) are disabled. * @REG_CAPA_V2_40MHZ_ALLOWED: 11n channel with a width of 40Mhz is allowed * for this regulatory domain (uvalid only in 5Ghz). * @REG_CAPA_V2_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ enum iwl_reg_capa_flags_v2 { REG_CAPA_V2_STRADDLE_DISABLED = BIT(0), REG_CAPA_V2_BF_CCD_LOW_BAND = BIT(1), REG_CAPA_V2_BF_CCD_HIGH_BAND = BIT(2), REG_CAPA_V2_160MHZ_ALLOWED = BIT(3), REG_CAPA_V2_80MHZ_ALLOWED = BIT(4), REG_CAPA_V2_MCS_8_ALLOWED = BIT(5), REG_CAPA_V2_MCS_9_ALLOWED = BIT(6), REG_CAPA_V2_WEATHER_DISABLED = BIT(7), REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), REG_CAPA_V2_11AX_DISABLED = BIT(10), -}; +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_2 */ + +/** + * enum iwl_reg_capa_flags_v4 - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_V4_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_V4_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_V4_MCS_12_ALLOWED: 11ac with MCS 12 is allowed. + * @REG_CAPA_V4_MCS_13_ALLOWED: 11ac with MCS 13 is allowed. + * @REG_CAPA_V4_11BE_DISABLED: 11be is forbidden for this regulatory domain. + * @REG_CAPA_V4_11AX_DISABLED: 11ax is forbidden for this regulatory domain. + * @REG_CAPA_V4_320MHZ_ALLOWED: 11be channel with a width of 320Mhz is allowed + * for this regulatory domain (valid only in 5GHz). + */ +enum iwl_reg_capa_flags_v4 { + REG_CAPA_V4_160MHZ_ALLOWED = BIT(3), + REG_CAPA_V4_80MHZ_ALLOWED = BIT(4), + REG_CAPA_V4_MCS_12_ALLOWED = BIT(5), + REG_CAPA_V4_MCS_13_ALLOWED = BIT(6), + REG_CAPA_V4_11BE_DISABLED = BIT(8), + REG_CAPA_V4_11AX_DISABLED = BIT(13), + REG_CAPA_V4_320MHZ_ALLOWED = BIT(16), +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_4 */ /* * API v2 for reg_capa_flags is relevant from version 6 and onwards of the * MCC update command response. */ #define REG_CAPA_V2_RESP_VER 6 +/* API v4 for reg_capa_flags is relevant from version 8 and onwards of the + * MCC update command response. + */ +#define REG_CAPA_V4_RESP_VER 8 + /** * struct iwl_reg_capa - struct for global regulatory capabilities, Used for * handling the different APIs of reg_capa_flags. * * @allow_40mhz: 11n channel with a width of 40Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). + * for this regulatory domain. * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). + * for this regulatory domain (valid only in 5 and 6 Ghz). * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). + * for this regulatory domain (valid only in 5 and 6 Ghz). + * @allow_320mhz: 11be channel with a width of 320Mhz is allowed + * for this regulatory domain (valid only in 6 Ghz). * @disable_11ax: 11ax is forbidden for this regulatory domain. + * @disable_11be: 11be is forbidden for this regulatory domain. */ struct iwl_reg_capa { - u16 allow_40mhz; - u16 allow_80mhz; - u16 allow_160mhz; - u16 disable_11ax; + bool allow_40mhz; + bool allow_80mhz; + bool allow_160mhz; + bool allow_320mhz; + bool disable_11ax; + bool disable_11be; }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { #define CHECK_AND_PRINT_I(x) \ ((flags & NVM_CHANNEL_##x) ? 
" " #x : "") if (!(flags & NVM_CHANNEL_VALID)) { IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n", chan, flags); return; } /* Note: already can print up to 101 characters, 110 is the limit! */ IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", chan, flags, CHECK_AND_PRINT_I(VALID), CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), CHECK_AND_PRINT_I(UNIFORM), CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), CHECK_AND_PRINT_I(DC_HIGH)); #undef CHECK_AND_PRINT_I } static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, u32 nvm_flags, const struct iwl_cfg *cfg) { u32 flags = IEEE80211_CHAN_NO_HT40; if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (ch_num <= LAST_2GHZ_HT_PLUS) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; if (ch_num >= FIRST_2GHZ_HT_MINUS) flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~IEEE80211_CHAN_NO_HT40PLUS; else flags &= ~IEEE80211_CHAN_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= IEEE80211_CHAN_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= IEEE80211_CHAN_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_IBSS)) flags |= IEEE80211_CHAN_NO_IR; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= IEEE80211_CHAN_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= IEEE80211_CHAN_RADAR; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= IEEE80211_CHAN_INDOOR_ONLY; /* Set the GO concurrent flag only in case that NO_IR is set. * Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & IEEE80211_CHAN_NO_IR)) flags |= IEEE80211_CHAN_IR_CONCURRENT; return flags; } static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx) { if (ch_idx >= NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS) { return NL80211_BAND_6GHZ; } if (ch_idx >= NUM_2GHZ_CHANNELS) return NL80211_BAND_5GHZ; return NL80211_BAND_2GHZ; } static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const void * const nvm_ch_flags, u32 sbands_flags, bool v4) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; u32 ch_flags; int num_of_ch; const u16 *nvm_chan; if (cfg->uhb_supported) { num_of_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { num_of_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { enum nl80211_band band = iwl_nl80211_band_from_channel_idx(ch_idx); if (v4) ch_flags = __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx); else ch_flags = __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx); if (band == NL80211_BAND_5GHZ && !data->sku_cap_band_52ghz_enable) continue; /* workaround to disable wide channels in 5GHz */ if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && band == NL80211_BAND_5GHZ) { ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ); } if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is * supported, hence we still want to add them to * the list of supported channels to cfg80211. 
*/ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, nvm_chan[ch_idx], ch_flags); continue; } channel = &data->channels[n_channels]; n_channels++; channel->hw_value = nvm_chan[ch_idx]; channel->band = band; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); /* Initialize regulatory-based run-time data */ /* * Default value - highest tx power value. max_power * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; /* don't put limitations in case we're using LAR */ if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR)) channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, band, ch_flags, cfg); else channel->flags = 0; /* TODO: Don't put limitations on UHB devices as we still don't * have NVM for them */ if (cfg->uhb_supported) channel->flags = 0; iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", channel->hw_value, channel->max_power); } return n_channels; } static void iwl_init_vht_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_sta_vht_cap *vht_cap, u8 tx_chains, u8 rx_chains) { const struct iwl_cfg *cfg = trans->cfg; int num_rx_ants = num_of_ant(rx_chains); int num_tx_ants = num_of_ant(tx_chains); vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | IEEE80211_VHT_MAX_AMPDU_1024K << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (!trans->cfg->ht_params->stbc) + vht_cap->cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK; + if (data->vht160_supported) vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160; if (cfg->vht_mu_mimo_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; if (data->sku_cap_mimo_disabled) { num_rx_ants = 1; num_tx_ants = 1; } - if (num_tx_ants > 1) + if (trans->cfg->ht_params->stbc && num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: if (trans->trans_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_2K: if (trans->trans_cfg->mq_rx_supported) vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; else WARN(1, "RB size of 2K is not supported by this device\n"); break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; case IWL_AMSDU_8K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; break; case IWL_AMSDU_12K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; break; default: break; } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) { vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN; /* this works because NOT_SUPPORTED == 3 */ vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2); } vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; 
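	/*
	 * The VHT MCS map packs 2 bits per spatial stream (value 3 means the
	 * stream is unsupported, which is why the SISO case above could just
	 * OR in NOT_SUPPORTED for the second stream), so the Tx map above can
	 * simply mirror the Rx map; EXT_NSS_BW_CAPABLE is then advertised via
	 * tx_highest below.
	 */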
vht_cap->vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } static const u8 iwl_vendor_caps[] = { 0xdd, /* vendor element */ 0x06, /* length */ 0x00, 0x17, 0x35, /* Intel OUI */ 0x08, /* type (Intel Capabilities) */ /* followed by 16 bits of capabilities */ #define IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE BIT(0) IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE, 0x00 }; -static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { +static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU | IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS), .phy_cap_info[10] = IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
* Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | + IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, + .phy_cap_info[1] = + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK, + .phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, + + .phy_cap_info[4] = + IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + .phy_cap_info[6] = + IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | + IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, + .phy_cap_info[8] = + IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA | + IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA, + }, + + /* For all MCS and bandwidth, set 2 NSS for both Tx and + * Rx - note we don't set the only_20mhz, but due to this + * being a union, it gets set correctly anyway. + */ + .eht_mcs_nss_supp = { + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._320 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + }, + + /* + * PPE thresholds for NSS = 2, and RU index bitmap set + * to 0xc. 
+ */ + .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } + }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS, }, /* * Set default Tx/Rx HE MCS NSS Support field. * Indicate support for up to 2 spatial streams and all * MCS, without any special cases */ .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, /* * Set default PPE thresholds, with PPET16 set to 0, * PPET8 set to 7 */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + }, + + /* For all MCS and bandwidth, set 2 NSS for both Tx and + * Rx - note we don't set the only_20mhz, but due to this + * being a union, it gets set correctly anyway. + */ + .eht_mcs_nss_supp = { + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._320 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + }, + + /* + * PPE thresholds for NSS = 2, and RU index bitmap set + * to 0xc. 
+ */ + .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } + }, }, }; static void iwl_init_he_6ghz_capa(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains) { struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap = {}; struct ieee80211_sband_iftype_data *iftype_data; u16 he_6ghz_capa = 0; u32 exp; int i; if (sband->band != NL80211_BAND_6GHZ) return; /* grab HT/VHT capabilities and calculate HE 6 GHz capabilities */ iwl_init_ht_hw_capab(trans, data, &ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); WARN_ON(!ht_cap.ht_supported); iwl_init_vht_hw_capab(trans, data, &vht_cap, tx_chains, rx_chains); WARN_ON(!vht_cap.vht_supported); he_6ghz_capa |= u16_encode_bits(ht_cap.ampdu_density, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); exp = u32_get_bits(vht_cap.cap, IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); he_6ghz_capa |= u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); exp = u32_get_bits(vht_cap.cap, IEEE80211_VHT_CAP_MAX_MPDU_MASK); he_6ghz_capa |= u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); /* we don't support extended_ht_cap_info anywhere, so no RD_RESPONDER */ if (vht_cap.cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; if (vht_cap.cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa); /* we know it's writable - we set it before ourselves */ iftype_data = (void *)(uintptr_t)sband->iftype_data; for (i = 0; i < sband->n_iftype_data; i++) iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa); } static void iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, + struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, struct ieee80211_sband_iftype_data *iftype_data, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP); + bool no_320; + + no_320 = !trans->trans_cfg->integrated && + trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB; + + if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be) + iftype_data->eht_cap.has_eht = false; /* Advertise an A-MPDU exponent extension based on * operating band */ - if (sband->band != NL80211_BAND_2GHZ) + if (sband->band == NL80211_BAND_6GHZ && iftype_data->eht_cap.has_eht) + iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2; + else if (sband->band != NL80211_BAND_2GHZ) iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1; else iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; - if (is_ap && iwlwifi_mod_params.nvm_file) + switch (sband->band) { + case NL80211_BAND_2GHZ: iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] |= + u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454, + IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); + break; + case NL80211_BAND_6GHZ: + if (!no_320) { + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK; + } + fallthrough; + case NL80211_BAND_5GHZ: + iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | 
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + break; + default: + WARN_ON(1); + break; + } if ((tx_chains & rx_chains) == ANT_AB) { iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ; iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2; - if (!is_ap) + if (!is_ap) { iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_2; - } else if (!is_ap) { - /* If not 2x2, we need to indicate 1x1 in the - * Midamble RX Max NSTS - but not for AP mode - */ - iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= - ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; - iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= - ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; - iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= - IEEE80211_HE_PHY_CAP7_MAX_NC_1; + + if (iftype_data->eht_cap.has_eht) { + /* + * Set the number of sounding dimensions for each + * bandwidth to 1 to indicate the maximal supported + * value of TXVECTOR parameter NUM_STS of 2 + */ + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] |= 0x49; + + /* + * Set the MAX NC to 1 to indicate sounding feedback of + * 2 supported by the beamfomee. + */ + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10; + } + } + } else { + if (iftype_data->eht_cap.has_eht) { + struct ieee80211_eht_mcs_nss_supp *mcs_nss = + &iftype_data->eht_cap.eht_mcs_nss_supp; + + memset(mcs_nss, 0x11, sizeof(*mcs_nss)); + } + + if (!is_ap) { + /* If not 2x2, we need to indicate 1x1 in the + * Midamble RX Max NSTS - but not for AP mode + */ + iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; + iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= + IEEE80211_HE_PHY_CAP7_MAX_NC_1; + } } + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: case IWL_CFG_RF_TYPE_MS: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; if (!is_ap) iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; } + if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + iftype_data->eht_cap.has_eht) { + iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= + ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &= + ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &= + ~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] &= + ~IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[6] &= + ~(IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | + IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP); + 
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] |= + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF; + } + if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT)) iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BCAST_TWT; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && !is_ap) { iftype_data->vendor_elems.data = iwl_vendor_caps; iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps); } + + if (!trans->cfg->ht_params->stbc) { + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &= + ~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; + } } static void iwl_init_he_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { struct ieee80211_sband_iftype_data *iftype_data; int i; /* should only initialize once */ if (WARN_ON(sband->iftype_data)) return; - BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa)); - BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa)); + BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa)); + BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa)); + BUILD_BUG_ON(sizeof(data->iftd.uhb) != sizeof(iwl_he_eht_capa)); switch (sband->band) { case NL80211_BAND_2GHZ: iftype_data = data->iftd.low; break; case NL80211_BAND_5GHZ: - case NL80211_BAND_6GHZ: iftype_data = data->iftd.high; break; + case NL80211_BAND_6GHZ: + iftype_data = data->iftd.uhb; + break; default: WARN_ON(1); return; } - memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa)); + memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa)); sband->iftype_data = iftype_data; - sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); + sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa); for (i = 0; i < sband->n_iftype_data; i++) - iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i], + iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i], tx_chains, rx_chains, fw); iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); } static void iwl_init_sbands(struct iwl_trans *trans, struct iwl_nvm_data *data, const void *nvm_ch_flags, u8 tx_chains, u8 rx_chains, u32 sbands_flags, bool v4, const struct iwl_fw *fw) { struct device *dev = trans->dev; const struct iwl_cfg *cfg = trans->cfg; int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, sbands_flags, v4); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; sband->n_bitrates = N_RATES_24; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_2GHZ); iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_5GHZ); iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ, tx_chains, rx_chains); if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(trans, data, &sband->vht_cap, tx_chains, rx_chains); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) 
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); /* 6GHz band. */ sband = &data->bands[NL80211_BAND_6GHZ]; sband->band = NL80211_BAND_6GHZ; /* use the same rates as 5GHz band */ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; sband->n_bitrates = N_RATES_52; n_used += iwl_init_sband_channels(data, sband, n_channels, NL80211_BAND_6GHZ); if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains, fw); else sband->n_channels = 0; if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + SKU); return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000)); } static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + NVM_VERSION); else return le32_to_cpup((const __le32 *)(nvm_sw + NVM_VERSION_EXT_NVM)); } static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) { if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + RADIO_CFG); return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); } static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) { int n_hw_addr; if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + N_HW_ADDRS); n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); return n_hw_addr & N_HW_ADDR_MASK; } static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, struct iwl_nvm_data *data, u32 radio_cfg) { if (cfg->nvm_type != IWL_NVM_EXT) { data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg); return; } /* set the radio configuration for family 8000 */ data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg); data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg); data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg); data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg); data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg); } static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) { const u8 *hw_addr; hw_addr = (const u8 *)&mac_addr0; dest[0] = hw_addr[3]; dest[1] = hw_addr[2]; dest[2] = hw_addr[1]; dest[3] = hw_addr[0]; hw_addr = (const u8 *)&mac_addr1; dest[4] = hw_addr[1]; dest[5] = hw_addr[0]; } static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, struct iwl_nvm_data *data) { __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP(trans))); __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* * If the OEM fused a valid address, use it instead of the one in the * OTP */ if (is_valid_ether_addr(data->hw_addr)) return; mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP(trans))); mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP(trans))); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, const __be16 *nvm_hw) { const u8 *hw_addr; if 
(mac_override) { static const u8 reserved_mac[] = { 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 }; hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_EXT_NVM); /* * Store the MAC address from MAO section. * No byte swapping is required in MAO section */ memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC * address in the NVM, or if address is given but invalid. */ if (is_valid_ether_addr(data->hw_addr) && memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; IWL_ERR(trans, "mac address from nvm override section is not valid\n"); } if (nvm_hw) { /* read the mac address from WFMP registers */ __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_0)); __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); return; } IWL_ERR(trans, "mac address is not found\n"); } static int iwl_set_hw_address(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { if (cfg->mac_addr_from_csr) { iwl_set_hw_address_from_csr(trans, data); } else if (cfg->nvm_type != IWL_NVM_EXT) { const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); /* The byte order is little endian 16 bit, meaning 214365 */ data->hw_addr[0] = hw_addr[1]; data->hw_addr[1] = hw_addr[0]; data->hw_addr[2] = hw_addr[3]; data->hw_addr[3] = hw_addr[2]; data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; } else { iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw); } if (!is_valid_ether_addr(data->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); return -EINVAL; } if (!trans->csme_own) #if defined(__linux__) IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n", data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR)); #elif defined(__FreeBSD__) IWL_INFO(trans, "base HW address: %6D, OTP minor version: 0x%x\n", data->hw_addr, ":", iwl_read_prph(trans, REG_OTP_MINOR)); #endif return 0; } static bool iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __be16 *nvm_hw) { /* * Workaround a bug in Indonesia SKUs where the regulatory in * some 7000-family OTPs erroneously allow wide channels in * 5GHz. To check for Indonesia, we take the SKU value from * bits 1-4 in the subsystem ID and check if it is either 5 or * 9. In those cases, we need to force-disable wide channels * in 5GHz otherwise the FW will throw a sysassert when we try * to use them. */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { /* * Unlike the other sections in the NVM, the hw * section uses big-endian. 
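* (hence the be16_to_cpup() read just below, rather than le16_to_cpup()). * As a worked example with a made-up value: a subsystem ID of 0x0112 gives * sku = (0x0112 & 0x1e) >> 1 = 9, i.e. one of the two Indonesia SKU values.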
*/ u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID); u8 sku = (subsystem_id & 0x1e) >> 1; if (sku == 5 || sku == 9) { IWL_DEBUG_EEPROM(trans->dev, "disabling wide channels in 5GHz (0x%0x %d)\n", subsystem_id, sku); return true; } } return false; } struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, const struct iwl_fw *fw) { struct iwl_nvm_data *data; u32 sbands_flags = 0; u8 rx_chains = fw->valid_rx_ant; u8 tx_chains = fw->valid_rx_ant; if (cfg->uhb_supported) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_UHB), GFP_KERNEL); else data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_EXT), GFP_KERNEL); if (!data) return NULL; BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) != IWL_NVM_NUM_CHANNELS_UHB); data->nvm_version = mei_nvm->nvm_version; iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg); if (data->valid_tx_ant) tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; data->sku_cap_mimo_disabled = false; data->sku_cap_band_24ghz_enable = true; data->sku_cap_band_52ghz_enable = true; data->sku_cap_11n_enable = !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL); data->sku_cap_11ac_enable = true; data->sku_cap_11ax_enable = mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT; data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT; data->n_hw_addrs = mei_nvm->n_hw_addrs; /* If no valid mac address was found - bail out */ if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) { kfree(data); return NULL; } if (data->lar_enabled && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains, sbands_flags, true, fw); return data; } IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; bool lar_enabled; u32 sku, radio_cfg; u32 sbands_flags = 0; u16 lar_config; const __le16 *ch_section; if (cfg->uhb_supported) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_UHB), GFP_KERNEL); else if (cfg->nvm_type != IWL_NVM_EXT) data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS), GFP_KERNEL); else data = kzalloc(struct_size(data, channels, IWL_NVM_NUM_CHANNELS_EXT), GFP_KERNEL); if (!data) return NULL; data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku); iwl_set_radio_cfg(cfg, data, radio_cfg); if (data->valid_tx_ant) tx_chains &= data->valid_tx_ant; if (data->valid_rx_ant) rx_chains &= data->valid_rx_ant; sku = iwl_get_sku(cfg, nvm_sw, phy_sku); data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE); data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); if (cfg->nvm_type != IWL_NVM_EXT) { /* Checking for required sections */ if (!nvm_calib) { IWL_ERR(trans, "Can't parse empty Calib NVM sections\n"); kfree(data); 
return NULL; } ch_section = cfg->nvm_type == IWL_NVM_SDP ? &regulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS]; /* in family 8000 Xtal calibration values moved to OTP */ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; } else { u16 lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_OLD : NVM_LAR_OFFSET; lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED); lar_enabled = data->lar_enabled; ch_section = &regulatory[NVM_CHANNELS_EXTENDED]; } /* If no valid mac address was found - bail out */ if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) { kfree(data); return NULL; } if (lar_enabled && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains, sbands_flags, false, fw); data->calib_version = 255; return data; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, struct iwl_reg_capa reg_capa, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; if (ch_idx < NUM_2GHZ_CHANNELS && (nvm_flags & NVM_CHANNEL_40MHZ)) { if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; } else if (nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else flags &= ~NL80211_RRF_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) flags |= NL80211_RRF_NO_80MHZ; if (!(nvm_flags & NVM_CHANNEL_160MHZ)) flags |= NL80211_RRF_NO_160MHZ; if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) flags |= NL80211_RRF_NO_IR; if (nvm_flags & NVM_CHANNEL_RADAR) flags |= NL80211_RRF_DFS; if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) flags |= NL80211_RRF_NO_OUTDOOR; /* Set the GO concurrent flag only in case that NO_IR is set.
* Otherwise it is meaningless */ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; /* * reg_capa is per regulatory domain so apply it for every channel */ if (ch_idx >= NUM_2GHZ_CHANNELS) { if (!reg_capa.allow_40mhz) flags |= NL80211_RRF_NO_HT40; if (!reg_capa.allow_80mhz) flags |= NL80211_RRF_NO_80MHZ; if (!reg_capa.allow_160mhz) flags |= NL80211_RRF_NO_160MHZ; + + if (!reg_capa.allow_320mhz) + flags |= NL80211_RRF_NO_320MHZ; } + if (reg_capa.disable_11ax) flags |= NL80211_RRF_NO_HE; + if (reg_capa.disable_11be) + flags |= NL80211_RRF_NO_EHT; + return flags; } -static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver) +static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver) { - struct iwl_reg_capa reg_capa; - - if (resp_ver >= REG_CAPA_V2_RESP_VER) { + struct iwl_reg_capa reg_capa = {}; + + if (resp_ver >= REG_CAPA_V4_RESP_VER) { + reg_capa.allow_40mhz = true; + reg_capa.allow_80mhz = flags & REG_CAPA_V4_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_V4_160MHZ_ALLOWED; + reg_capa.allow_320mhz = flags & REG_CAPA_V4_320MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_V4_11AX_DISABLED; + reg_capa.disable_11be = flags & REG_CAPA_V4_11BE_DISABLED; + } else if (resp_ver >= REG_CAPA_V2_RESP_VER) { reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED; reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED; reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED; reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED; } else { - reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN); - reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED; - reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED; - reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED; + reg_capa.allow_40mhz = !(flags & REG_CAPA_V1_40MHZ_FORBIDDEN); + reg_capa.allow_80mhz = flags & REG_CAPA_V1_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_V1_160MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_V1_11AX_DISABLED; } return reg_capa; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap, u8 resp_ver) + u16 geo_info, u32 cap, u8 resp_ver) { int ch_idx; u16 ch_flags; u32 reg_rule_flags, prev_reg_rule_flags = 0; const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; struct ieee80211_reg_rule *rule; enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; int max_num_ch; struct iwl_reg_capa reg_capa; if (cfg->uhb_supported) { max_num_ch = IWL_NVM_NUM_CHANNELS_UHB; nvm_chan = iwl_uhb_nvm_channels; } else if (cfg->nvm_type == IWL_NVM_EXT) { max_num_ch = IWL_NVM_NUM_CHANNELS_EXT; nvm_chan = iwl_ext_nvm_channels; } else { max_num_ch = IWL_NVM_NUM_CHANNELS; nvm_chan = iwl_nvm_channels; } if (num_of_ch > max_num_ch) { IWL_DEBUG_DEV(dev, IWL_DL_LAR, "Num of channels (%d) is greater than expected. Truncating to %d\n", num_of_ch, max_num_ch); num_of_ch = max_num_ch; } if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch); /* build a regdomain rule for every valid channel */ regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); /* set alpha2 from FW. 
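* The 16-bit MCC packs two ASCII characters, high byte first; with an * illustrative value of 0x5553 ("US"), the two assignments below yield * alpha2[0] = 'U' (0x5553 >> 8 == 0x55) and alpha2[1] = 'S' (0x5553 & 0xff == 0x53).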
*/ regd->alpha2[0] = fw_mcc >> 8; regd->alpha2[1] = fw_mcc & 0xff; /* parse regulatory capability flags */ reg_capa = iwl_get_reg_capa(cap, resp_ver); for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { ch_flags = (u16)__le32_to_cpup(channels + ch_idx); band = iwl_nl80211_band_from_channel_idx(ch_idx); center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; if (!(ch_flags & NVM_CHANNEL_VALID)) { iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); continue; } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, reg_capa, cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || center_freq - prev_center_freq > 20) { valid_rules++; new_rule = true; } rule = &regd->reg_rules[valid_rules - 1]; if (new_rule) rule->freq_range.start_freq_khz = MHZ_TO_KHZ(center_freq - 10); rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); /* this doesn't matter - not used by FW */ rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); rule->flags = reg_rule_flags; /* rely on auto-calculation to merge BW of contiguous chans */ rule->flags |= NL80211_RRF_AUTO_BW; rule->freq_range.max_bandwidth_khz = 0; prev_center_freq = center_freq; prev_reg_rule_flags = reg_rule_flags; iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, nvm_chan[ch_idx], ch_flags); if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || band == NL80211_BAND_2GHZ) continue; reg_query_regdb_wmm(regd->alpha2, center_freq, rule); } /* * Certain firmware versions might report no valid channels * if booted in RF-kill, i.e. not all calibrations etc. are * running. We'll get out of this situation later when the * rfkill is removed and we update the regdomain again, but * since cfg80211 doesn't accept an empty regdomain, add a * dummy (unusable) rule here in this case so we can init. */ if (!valid_rules) { valid_rules = 1; rule = &regd->reg_rules[valid_rules - 1]; rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412); rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413); rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1); rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); rule->power_rule.max_eirp = DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); } regd->n_reg_rules = valid_rules; /* * Narrow down regdom for unused regulatory rules to prevent hole * between reg rules to wmm rules. */ copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules), GFP_KERNEL); if (!copy_rd) copy_rd = ERR_PTR(-ENOMEM); kfree(regd); return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); #define IWL_MAX_NVM_SECTION_SIZE 0x1b58 #define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc #define MAX_NVM_FILE_LEN 16384 void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len) { #define IWL_4165_DEVICE_ID 0x5501 #define NVM_SKU_CAP_MIMO_DISABLE BIT(5) if (section == NVM_SECTION_TYPE_PHY_SKU && hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) /* OTP 0x52 bug work around: it's a 1x1 device */ data[3] = ANT_B | (ANT_B << 4); } IWL_EXPORT_SYMBOL(iwl_nvm_fixups); /* * Reads external NVM from a file into mvm->nvm_sections * * HOW TO CREATE THE NVM FILE FORMAT: * ------------------------------ * 1. create hex file, format: * 3800 -> header * 0000 -> header * 5a40 -> data * * rev - 6 bit (word1) * len - 10 bit (word1) * id - 4 bit (word2) * rsv - 12 bit (word2) * * 2. flip 8bits with 8 bits per line to get the right NVM file format * * 3. create binary file from the hex file * * 4.
save as "iNVM_xxx.bin" under /lib/firmware */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, struct iwl_nvm_section *nvm_sections) { int ret, section_size; u16 section_id; const struct firmware *fw_entry; const struct { __le16 word1; __le16 word2; u8 data[]; } *file_sec; const u8 *eof; u8 *temp; int max_section_size; const __le32 *dword_buff; #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) #define NVM_WORD2_ID(x) (x >> 12) #define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) #define EXT_NVM_WORD1_ID(x) ((x) >> 4) #define NVM_HEADER_0 (0x2A504C54) #define NVM_HEADER_1 (0x4E564D2A) #define NVM_HEADER_SIZE (4 * sizeof(u32)) IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n"); /* Maximal size depends on NVM version */ if (trans->cfg->nvm_type != IWL_NVM_EXT) max_section_size = IWL_MAX_NVM_SECTION_SIZE; else max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; /* * Obtain NVM image via request_firmware. Since we already used * request_firmware_nowait() for the firmware binary load and only * get here after that we assume the NVM request can be satisfied * synchronously. */ ret = request_firmware(&fw_entry, nvm_file_name, trans->dev); if (ret) { IWL_ERR(trans, "ERROR: %s isn't available %d\n", nvm_file_name, ret); return ret; } IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", nvm_file_name, fw_entry->size); if (fw_entry->size > MAX_NVM_FILE_LEN) { IWL_ERR(trans, "NVM file too large\n"); ret = -EINVAL; goto out; } eof = fw_entry->data + fw_entry->size; dword_buff = (const __le32 *)fw_entry->data; /* some NVM file will contain a header. * The header is identified by 2 dwords header as follow: * dword[0] = 0x2A504C54 * dword[1] = 0x4E564D2A * * This header must be skipped when providing the NVM data to the FW. */ if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE); IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3])); /* nvm file validation, dword_buff[2] holds the file version */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 && trans->hw_rev_step == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; } } else { file_sec = (const void *)fw_entry->data; } while (true) { if (file_sec->data > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section header\n"); ret = -EINVAL; break; } /* check for EOF marker */ if (!file_sec->word1 && !file_sec->word2) { ret = 0; break; } if (trans->cfg->nvm_type != IWL_NVM_EXT) { section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); } else { section_size = 2 * EXT_NVM_WORD2_LEN( le16_to_cpu(file_sec->word2)); section_id = EXT_NVM_WORD1_ID( le16_to_cpu(file_sec->word1)); } if (section_size > max_section_size) { IWL_ERR(trans, "ERROR - section too large (%d)\n", section_size); ret = -EINVAL; break; } if (!section_size) { IWL_ERR(trans, "ERROR - section empty\n"); ret = -EINVAL; break; } if (file_sec->data + section_size > eof) { IWL_ERR(trans, "ERROR - NVM file too short for section (%d bytes)\n", section_size); ret = -EINVAL; break; } if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, "Invalid NVM section ID %d\n", section_id)) { ret = -EINVAL; break; } temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } iwl_nvm_fixups(trans->hw_id, section_id, 
temp, section_size); kfree(nvm_sections[section_id].data); nvm_sections[section_id].data = temp; nvm_sections[section_id].length = section_size; /* advance to the next section */ file_sec = (const void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); return ret; } IWL_EXPORT_SYMBOL(iwl_read_external_nvm); struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw) { struct iwl_nvm_get_info cmd = {}; struct iwl_nvm_data *nvm; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; /* * All the values in iwl_nvm_get_info_rsp v4 are the same as * in v3, except for the channel profile part of the * regulatory. So we can just access the new struct, with the * exception of the latter. */ struct iwl_nvm_get_info_rsp *rsp; struct iwl_nvm_get_info_rsp_v3 *rsp_v3; bool v4 = fw_has_api(&fw->ucode_capa, IWL_UCODE_TLV_API_REGULATORY_NVM_INFO); size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3); void *channel_profile; ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) return ERR_PTR(ret); if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size, "Invalid payload len in NVM response from FW %d", iwl_rx_packet_payload_len(hcmd.resp_pkt))) { ret = -EINVAL; goto out; } rsp = (void *)hcmd.resp_pkt->data; empty_otp = !!(le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP); if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!nvm) { ret = -ENOMEM; goto out; } iwl_set_hw_address_from_csr(trans, nvm); /* TODO: if platform NVM has MAC address - override it here */ if (!is_valid_ether_addr(nvm->hw_addr)) { IWL_ERR(trans, "no valid mac address was found\n"); ret = -EINVAL; goto err_free; } #if defined(__linux__) IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); #elif defined(__FreeBSD__) IWL_INFO(trans, "base HW address: %6D\n", nvm->hw_addr, ":"); #endif /* Initialize general data */ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); nvm->n_hw_addrs = rsp->general.n_hw_addrs; if (nvm->n_hw_addrs == 0) IWL_WARN(trans, "Firmware declares no reserved mac addresses. OTP is empty: %d\n", empty_otp); /* Initialize MAC sku data */ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); nvm->sku_cap_11ac_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); nvm->sku_cap_11ax_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); + if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) + nvm->sku_cap_11be_enable = true; /* Initialize PHY sku data */ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); if (le32_to_cpu(rsp->regulatory.lar_enabled) && fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } rsp_v3 = (void *)rsp; channel_profile = v4 ? 
(void *)rsp->regulatory.channel_profile : (void *)rsp_v3->regulatory.channel_profile; iwl_init_sbands(trans, nvm, channel_profile, nvm->valid_tx_ant & fw->valid_tx_ant, nvm->valid_rx_ant & fw->valid_rx_ant, sbands_flags, v4, fw); iwl_free_resp(&hcmd); return nvm; err_free: kfree(nvm); out: iwl_free_resp(&hcmd); return ERR_PTR(ret); } IWL_EXPORT_SYMBOL(iwl_get_nvm); diff --git a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.h b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.h index e01f7751cf11..c79f72d54482 100644 --- a/sys/contrib/dev/iwlwifi/iwl-nvm-parse.h +++ b/sys/contrib/dev/iwlwifi/iwl-nvm-parse.h @@ -1,93 +1,93 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2021 Intel Corporation + * Copyright (C) 2005-2015, 2018-2022 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ #define __iwl_nvm_parse_h__ #include #include "iwl-eeprom-parse.h" #include "mei/iwl-mei.h" /** * enum iwl_nvm_sbands_flags - modification flags for the channel profiles * * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz */ enum iwl_nvm_sbands_flags { IWL_NVM_SBANDS_FLAGS_LAR = BIT(0), IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), }; /** * iwl_parse_nvm_data - parse NVM data and return values * * This function parses all NVM values we need and then * returns a (newly allocated) struct containing all the * relevant values for driver use. The struct must be freed * later with iwl_free_nvm_data(). */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, * to be fed into the regulatory core. In case the geo_info is set handle * accordingly. An ERR_PTR is returned on error. * If not given to the regulatory core, the user is responsible for freeing * the regdomain returned here with kfree. */ struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap, u8 resp_ver); + u16 geo_info, u32 cap, u8 resp_ver); /** * struct iwl_nvm_section - describes an NVM section in memory. * * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, * and saved for later use by the driver. Not all NVM sections are saved * this way, only the needed ones. */ struct iwl_nvm_section { u16 length; const u8 *data; }; /** * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections */ int iwl_read_external_nvm(struct iwl_trans *trans, const char *nvm_file_name, struct iwl_nvm_section *nvm_sections); void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, unsigned int len); /** * iwl_get_nvm - retrieve NVM data from firmware * * Allocates a new iwl_nvm_data structure, fills it with * NVM data, and returns it to caller. 
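 * On failure an ERR_PTR() is returned, so a caller sketch (illustrative,
 * not taken from this file) looks like:
 *
 *	nvm = iwl_get_nvm(trans, fw);
 *	if (IS_ERR(nvm))
 *		return PTR_ERR(nvm);
 *	...
 *	kfree(nvm);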
*/ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw); /** * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data */ struct iwl_nvm_data * iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_mei_nvm *mei_nvm, const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-prph.h b/sys/contrib/dev/iwlwifi/iwl-prph.h index a22788a68168..6dd381ff0f9e 100644 --- a/sys/contrib/dev/iwlwifi/iwl-prph.h +++ b/sys/contrib/dev/iwlwifi/iwl-prph.h @@ -1,501 +1,519 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ #ifndef __iwl_prph_h__ #define __iwl_prph_h__ #include /* * Registers in this file are internal, not PCI bus memory mapped. * Driver accesses these via HBUS_TARG_PRPH_* registers. */ #define PRPH_BASE (0x00000) #define PRPH_END (0xFFFFF) /* APMG (power management) constants */ #define APMG_BASE (PRPH_BASE + 0x3000) #define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) #define APMG_CLK_EN_REG (APMG_BASE + 0x0004) #define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) #define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) #define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) #define APMG_RFKILL_REG (APMG_BASE + 0x0014) #define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) #define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) #define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) #define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) #define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) #define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) #define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) #define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) #define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) #define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) #define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) #define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ #define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) #define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200) #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) #define APMG_PCIDEV_STT_VAL_WAKE_ME (0x00004000) #define APMG_RTC_INT_STT_RFKILL (0x10000000) /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C /* Device NMI register and value for 8000 family and lower hw's */ #define DEVICE_SET_NMI_REG 0x00a01c30 #define DEVICE_SET_NMI_VAL_DRV BIT(7) /* Device NMI register and value for 9000 family and above hw's */ #define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 #define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER BIT(24) #define UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE (BIT(24) | BIT(25)) /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 /* Shared GP1 register */ #define SHR_APMG_GP1_REG 0x01dc #define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG) #define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004 #define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000 /* Shared DL_CFG register */ #define SHR_APMG_DL_CFG_REG 0x01c4 #define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG) #define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0 #define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080 #define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100 /* Shared APMG_XTAL_CFG register */ #define SHR_APMG_XTAL_CFG_REG 0x1c0 #define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000 /* * Device 
reset for family 8000 * write to bit 24 in order to reset the CPU */ #define RELEASE_CPU_RESET (0x300C) #define RELEASE_CPU_RESET_BIT BIT(24) /***************************************************************************** * 7000/3000 series SHR DTS addresses * *****************************************************************************/ #define SHR_MISC_WFM_DTS_EN (0x00a10024) #define DTSC_CFG_MODE (0x00a10604) #define DTSC_VREF_AVG (0x00a10648) #define DTSC_VREF5_AVG (0x00a1064c) #define DTSC_CFG_MODE_PERIODIC (0x2) #define DTSC_PTAT_AVG (0x00a10650) /** * Tx Scheduler * * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in * host DRAM. It steers each frame's Tx command (which contains the frame * data) into one of up to 7 prioritized Tx DMA FIFO channels within the * device. A queue maps to only one (selectable by driver) Tx DMA channel, * but one DMA channel may take input from several queues. * * Tx DMA FIFOs have dedicated purposes. * * For 5000 series and up, they are used differently * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): * * 0 -- EDCA BK (background) frames, lowest priority * 1 -- EDCA BE (best effort) frames, normal priority * 2 -- EDCA VI (video) frames, higher priority * 3 -- EDCA VO (voice) and management frames, highest priority * 4 -- unused * 5 -- unused * 6 -- unused * 7 -- Commands * * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. * In addition, driver can map the remaining queues to Tx DMA/FIFO * channels 0-3 to support 11n aggregation via EDCA DMA channels. * * The driver sets up each queue to work in one of two modes: * * 1) Scheduler-Ack, in which the scheduler automatically supports a * block-ack (BA) window of up to 64 TFDs. In this mode, each queue * contains TFDs for a unique combination of Recipient Address (RA) * and Traffic Identifier (TID), that is, traffic of a given * Quality-Of-Service (QOS) priority, destined for a single station. * * In scheduler-ack mode, the scheduler keeps track of the Tx status of * each frame within the BA window, including whether it's been transmitted, * and whether it's been acknowledged by the receiving station. The device * automatically processes block-acks received from the receiving STA, * and reschedules un-acked frames to be retransmitted (successful * Tx completion may end up being out-of-order). * * The driver must maintain the queue's Byte Count table in host DRAM * for this mode. * This mode does not support fragmentation. * * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. * The device may automatically retry Tx, but will retry only one frame * at a time, until receiving ACK from receiving station, or reaching * retry limit and giving up. * * The command queue (#4/#9) must use this mode! * This mode does not require use of the Byte Count table in host DRAM. * * Driver controls scheduler operation via 3 means: * 1) Scheduler registers * 2) Shared scheduler data base in internal SRAM * 3) Shared data in host DRAM * * Initialization: * * When loading, driver should allocate memory for: * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory * (1024 bytes for each queue). 
* * After receiving "Alive" response from uCode, driver must initialize * the scheduler (especially for queue #4/#9, the command queue, otherwise * the driver can't issue commands!): */ #define SCD_MEM_LOWER_BOUND (0x0000) /** * Max Tx window size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. * Note that "64" matches the number of ack bits in a block-ack packet. */ #define SCD_WIN_SIZE 64 #define SCD_FRAME_LIMIT 64 #define SCD_TXFIFO_POS_TID (0) #define SCD_TXFIFO_POS_RA (4) #define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) /* agn SCD */ #define SCD_QUEUE_STTS_REG_POS_TXF (0) #define SCD_QUEUE_STTS_REG_POS_ACTIVE (3) #define SCD_QUEUE_STTS_REG_POS_WSL (4) #define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) #define SCD_QUEUE_STTS_REG_MSK (0x017F0000) #define SCD_QUEUE_CTX_REG1_CREDIT (0x00FFFF00) #define SCD_QUEUE_CTX_REG1_SUPER_CREDIT (0xFF000000) #define SCD_QUEUE_CTX_REG1_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG1_ ## _n, _v) #define SCD_QUEUE_CTX_REG2_WIN_SIZE (0x0000007F) #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT (0x007F0000) #define SCD_QUEUE_CTX_REG2_VAL(_n, _v) FIELD_PREP(SCD_QUEUE_CTX_REG2_ ## _n, _v) #define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) #define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) /* Context Data */ #define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) #define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) /* Tx status */ #define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0) #define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) /* Translation Data */ #define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0) #define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808) #define SCD_CONTEXT_QUEUE_OFFSET(x)\ (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) #define SCD_TX_STTS_QUEUE_OFFSET(x)\ (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16)) #define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) #define SCD_BASE (PRPH_BASE + 0xa02c00) #define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0) #define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8) #define SCD_AIT (SCD_BASE + 0x0c) #define SCD_TXFACT (SCD_BASE + 0x10) #define SCD_ACTIVE (SCD_BASE + 0x14) #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) #define SCD_AGGR_SEL (SCD_BASE + 0x248) #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) #define SCD_GP_CTRL (SCD_BASE + 0x1a8) #define SCD_EN_CTRL (SCD_BASE + 0x254) /*********************** END TX SCHEDULER *************************************/ /* Oscillator clock */ #define OSC_CLK (0xa04068) #define OSC_CLK_FORCE_CONTROL (0x8) #define FH_UCODE_LOAD_STATUS (0x1AF0) /* * Replacing FH_UCODE_LOAD_STATUS * This register is writen by driver and is read by uCode during boot flow. * Note this address is cleared after MAC reset. 
*/ #define UREG_UCODE_LOAD_STATUS (0xa05c40) #define UREG_CPU_INIT_RUN (0xa05c44) #define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78) #define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C) #define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000) #define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400) #define LMAC2_PRPH_OFFSET (0x100000) /* Rx FIFO */ #define RXF_SIZE_ADDR (0xa00c88) #define RXF_RD_D_SPACE (0xa00c40) #define RXF_RD_WR_PTR (0xa00c50) #define RXF_RD_RD_PTR (0xa00c54) #define RXF_RD_FENCE_PTR (0xa00c4c) #define RXF_SET_FENCE_MODE (0xa00c14) #define RXF_LD_WR2FENCE (0xa00c1c) #define RXF_FIFO_RD_FENCE_INC (0xa00c68) #define RXF_SIZE_BYTE_CND_POS (7) #define RXF_SIZE_BYTE_CNT_MSK (0x3ff << RXF_SIZE_BYTE_CND_POS) #define RXF_DIFF_FROM_PREV (0x200) #define RXF2C_DIFF_FROM_PREV (0x4e00) #define RXF_LD_FENCE_OFFSET_ADDR (0xa00c10) #define RXF_FIFO_RD_FENCE_ADDR (0xa00c0c) /* Tx FIFO */ #define TXF_FIFO_ITEM_CNT (0xa00438) #define TXF_WR_PTR (0xa00414) #define TXF_RD_PTR (0xa00410) #define TXF_FENCE_PTR (0xa00418) #define TXF_LOCK_FENCE (0xa00424) #define TXF_LARC_NUM (0xa0043c) #define TXF_READ_MODIFY_DATA (0xa00448) #define TXF_READ_MODIFY_ADDR (0xa0044c) /* UMAC Internal Tx Fifo */ #define TXF_CPU2_FIFO_ITEM_CNT (0xA00538) #define TXF_CPU2_WR_PTR (0xA00514) #define TXF_CPU2_RD_PTR (0xA00510) #define TXF_CPU2_FENCE_PTR (0xA00518) #define TXF_CPU2_LOCK_FENCE (0xA00524) #define TXF_CPU2_NUM (0xA0053C) #define TXF_CPU2_READ_MODIFY_DATA (0xA00548) #define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C) /* Radio registers access */ #define RSP_RADIO_CMD (0xa02804) #define RSP_RADIO_RDDAT (0xa02814) #define RADIO_RSP_ADDR_POS (6) #define RADIO_RSP_RD_CMD (3) /* LTR control (Qu only) */ #define HPM_MAC_LTR_CSR 0xa0348c #define HPM_MAC_LRT_ENABLE_ALL 0xf /* also uses CSR_LTR_* for values */ #define HPM_UMAC_LTR 0xa03480 /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) #define MON_BUFF_BASE_ADDR (0xa03c1c) #define MON_BUFF_END_ADDR (0xa03c40) #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) /* FW monitor family 8000 and on */ #define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c) #define MON_BUFF_END_ADDR_VER2 (0xa03c20) #define MON_BUFF_WRPTR_VER2 (0xa03c24) #define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) #define MON_BUFF_SHIFT_VER2 (0x8) /* FW monitor familiy AX210 and on */ #define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20) #define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24) #define DBGC_CUR_DBGBUF_STATUS (0xd03c1c) #define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c) #define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff) #define DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000) #define MON_DMARB_RD_CTL_ADDR (0xa03c60) #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) #define DBGC_IN_SAMPLE (0xa03c00) #define DBGC_OUT_CTRL (0xa03c0c) /* M2S registers */ #define LDBG_M2S_BUF_WPTR (0xa0476c) #define LDBG_M2S_BUF_WRAP_CNT (0xa04774) #define LDBG_M2S_BUF_WPTR_VAL_MSK (0x000fffff) #define LDBG_M2S_BUF_WRAP_CNT_VAL_MSK (0x000fffff) /* enable the ID buf for read */ #define WFPM_PS_CTL_CLR 0xA0300C #define WFMP_MAC_ADDR_0 0xA03080 #define WFMP_MAC_ADDR_1 0xA03084 #define LMPM_PMG_EN 0xA01CEC #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 #define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) #define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) +#define WFPM_OTP_BZ_BNJ_JACKET_BIT 5 +#define WFPM_OTP_BZ_BNJ_CDB_BIT 4 +#define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT) +#define WFPM_OTP_CFG1_IS_CDB(_val) (((_val) & 0x00000010) 
>> WFPM_OTP_BZ_BNJ_CDB_BIT) + #define WFPM_GP2 0xA030B4 /* DBGI SRAM Register details */ #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154 #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158 #define DBGI_SRAM_FIFO_POINTERS 0x00A2E148 #define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF enum { ENABLE_WFPM = BIT(31), WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, }; #define CNVI_AUX_MISC_CHIP 0xA200B0 #define CNVR_AUX_MISC_CHIP 0xA2B800 #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 #define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 +#define CNVI_SCU_SEQ_DATA_DW9 0xA27488 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 /* device family 9000 WPROT register */ #define PREG_PRPH_WPROT_9000 0xA04CE0 /* device family 22000 WPROT register */ #define PREG_PRPH_WPROT_22000 0xA04D00 #define SB_MODIFY_CFG_FLAG 0xA03088 +#define SB_CFG_RESIDES_IN_OTP_MASK 0x10 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 #define UMAG_SB_CPU_2_STATUS 0xA038C4 #define UMAG_GEN_HW_STATUS 0xA038C8 #define UREG_UMAC_CURRENT_PC 0xa05c18 #define UREG_LMAC1_CURRENT_PC 0xa05c1c #define UREG_LMAC2_CURRENT_PC 0xa05c20 #define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c #define WFPM_ARC1_PD_NOTIFICATION 0xa03044 #define HPM_SECONDARY_DEVICE_STATE 0xa03404 +#define WFPM_MAC_OTP_CFG7_ADDR 0xa03338 +#define WFPM_MAC_OTP_CFG7_DATA 0xa0333c /* For UMAG_GEN_HW_STATUS reg check */ enum { UMAG_GEN_HW_IS_FPGA = BIT(1), }; /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 enum { LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), }; /* FW chicken bits */ #define LMPM_PAGE_PASS_NOTIF 0xA03824 enum { LMPM_PAGE_PASS_NOTIF_POS = BIT(20), }; /* * CRF ID register * * type: bits 0-11 * reserved: bits 12-18 * slave_exist: bit 19 * dash: bits 20-23 * step: bits 24-26 * flavor: bits 27-31 */ #define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0) #define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19) #define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20) #define REG_CRF_ID_STEP(val) (((val) & 0x07000000) >> 24) #define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27) #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) #define SD_REG_VER 0xa29600 #define SD_REG_VER_GEN2 0x00a2b800 #define REG_CRF_ID_TYPE_JF_1 0x201 #define REG_CRF_ID_TYPE_JF_2 0x202 #define REG_CRF_ID_TYPE_HR_CDB 0x503 #define REG_CRF_ID_TYPE_HR_NONE_CDB 0x504 #define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501 #define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532 #define REG_CRF_ID_TYPE_GF 0x410 #define REG_CRF_ID_TYPE_GF_TC 0xF08 #define REG_CRF_ID_TYPE_MR 0x810 #define REG_CRF_ID_TYPE_FM 0x910 +#define REG_CRF_ID_TYPE_FMI 0x930 +#define REG_CRF_ID_TYPE_FMR 0x900 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) #define PREG_WFPM_ACCESS BIT(12) #define HPM_HIPM_GEN_CFG 0xA03458 #define HPM_HIPM_GEN_CFG_CR_PG_EN BIT(0) #define HPM_HIPM_GEN_CFG_CR_SLP_EN BIT(1) #define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE BIT(10) #define UREG_DOORBELL_TO_ISR6 0xA05C04 #define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) #define UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE (BIT(0) | BIT(1)) #define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18) #define UREG_DOORBELL_TO_ISR6_RESUME BIT(19) #define UREG_DOORBELL_TO_ISR6_PNVM BIT(20) /* * From BZ family driver triggers this bit for suspend and resume * The driver should update CSR_IPC_SLEEP_CONTROL before triggering * this interrupt with suspend/resume value */ #define UREG_DOORBELL_TO_ISR6_SLEEP_CTRL BIT(31) #define CNVI_MBOX_C 0xA3400C #define FSEQ_ERROR_CODE 0xA340C8 
#define FSEQ_TOP_INIT_VERSION 0xA34038 #define FSEQ_CNVIO_INIT_VERSION 0xA3403C #define FSEQ_OTP_VERSION 0xA340FC #define FSEQ_TOP_CONTENT_VERSION 0xA340F4 #define FSEQ_ALIVE_TOKEN 0xA340F0 #define FSEQ_CNVI_ID 0xA3408C #define FSEQ_CNVR_ID 0xA34090 +#define FSEQ_PREV_CNVIO_INIT_VERSION 0xA34084 +#define FSEQ_WIFI_FSEQ_VERSION 0xA34040 +#define FSEQ_BT_FSEQ_VERSION 0xA34044 +#define FSEQ_CLASS_TP_VERSION 0xA34078 #define IWL_D3_SLEEP_STATUS_SUSPEND 0xD3 #define IWL_D3_SLEEP_STATUS_RESUME 0xD0 #define WMAL_INDRCT_RD_CMD1_OPMOD_POS 28 #define WMAL_INDRCT_RD_CMD1_BYTE_ADDRESS_MSK 0xFFFFF #define WMAL_CMD_READ_BURST_ACCESS 2 #define WMAL_MRSPF_1 0xADFC20 #define WMAL_INDRCT_RD_CMD1 0xADFD44 #define WMAL_INDRCT_CMD1 0xADFC14 #define WMAL_INDRCT_CMD(addr) \ ((WMAL_CMD_READ_BURST_ACCESS << WMAL_INDRCT_RD_CMD1_OPMOD_POS) | \ ((addr) & WMAL_INDRCT_RD_CMD1_BYTE_ADDRESS_MSK)) #define WFPM_LMAC1_PS_CTL_RW 0xA03380 #define WFPM_LMAC2_PS_CTL_RW 0xA033C0 #define WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK 0x0000000F #define WFPM_PHYRF_STATE_ON 5 #define HBUS_TIMEOUT 0xA5A5A5A1 #define WFPM_DPHY_OFF 0xDF10FF #define REG_OTP_MINOR 0xA0333C +#define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC +#define WFPM_LMAC2_PD_RE_READ BIT(31) + #endif /* __iwl_prph_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-trans.c b/sys/contrib/dev/iwlwifi/iwl-trans.c index a9a475b66bf6..c49e1d8e0ca1 100644 --- a/sys/contrib/dev/iwlwifi/iwl-trans.c +++ b/sys/contrib/dev/iwlwifi/iwl-trans.c @@ -1,257 +1,257 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2021, 2023 Intel Corporation */ #include #include #include "fw/api/tx.h" #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-fh.h" #include "queue/tx.h" #include #include "fw/api/commands.h" struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP static struct lock_class_key __key; #endif trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL); if (!trans) return NULL; trans->trans_cfg = cfg_trans; #ifdef CONFIG_LOCKDEP lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", &__key, 0); #endif trans->dev = dev; trans->ops = ops; trans->num_rx_queues = 1; WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { trans->txqs.tfd.addr_size = 64; trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); } else { trans->txqs.tfd.addr_size = 36; trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; trans->txqs.tfd.size = sizeof(struct iwl_tfd); } trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); return trans; } int iwl_trans_init(struct iwl_trans *trans) { int txcmd_size, txcmd_align; if (!trans->trans_cfg->gen2) { txcmd_size = sizeof(struct iwl_tx_cmd); txcmd_align = sizeof(void *); } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { txcmd_size = sizeof(struct iwl_tx_cmd_gen2); txcmd_align = 64; } else { txcmd_size = sizeof(struct iwl_tx_cmd_gen3); txcmd_align = 128; } txcmd_size += sizeof(struct iwl_cmd_header); txcmd_size += 36; /* biggest possible 802.11 header */ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) return -EINVAL; if 
(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; else trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); /* * For gen2 devices, we use a single allocation for each byte-count * table, but they're pretty small (1k) so use a DMA pool that we * allocate here. */ if (trans->trans_cfg->gen2) { trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, trans->txqs.bc_tbl_size, 256, 0); if (!trans->txqs.bc_pool) return -ENOMEM; } /* Some things must not change even if the config does */ WARN_ON(trans->txqs.tfd.addr_size != - (trans->trans_cfg->use_tfh ? 64 : 36)); + (trans->trans_cfg->gen2 ? 64 : 36)); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, txcmd_size, txcmd_align, SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return -ENOMEM; #ifdef CONFIG_INET trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); if (!trans->txqs.tso_hdr_page) { kmem_cache_destroy(trans->dev_cmd_pool); return -ENOMEM; } #endif /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); return 0; } void iwl_trans_free(struct iwl_trans *trans) { #ifdef CONFIG_INET int i; if (trans->txqs.tso_hdr_page) { for_each_possible_cpu(i) { struct iwl_tso_hdr_page *p = per_cpu_ptr(trans->txqs.tso_hdr_page, i); if (p && p->page) __free_page(p->page); } free_percpu(trans->txqs.tso_hdr_page); } #endif kmem_cache_destroy(trans->dev_cmd_pool); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { int ret; if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status))) return -ERFKILL; /* * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this * bit is set early in the D3 flow, before we send all the commands * that configure the firmware for D3 operation (power, patterns, ...) * and we don't want to flag all those with CMD_SEND_IN_D3. * So use the system_pm_mode instead. The only command sent after * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with * CMD_SEND_IN_D3. */ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && !(cmd->flags & CMD_SEND_IN_D3))) return -EHOSTDOWN; if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) && !(cmd->flags & CMD_ASYNC))) return -EINVAL; if (!(cmd->flags & CMD_ASYNC)) lock_map_acquire_read(&trans->sync_cmd_lockdep_map); if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id)) { if (cmd->id != REPLY_ERROR) cmd->id = DEF_ID(cmd->id); } ret = iwl_trans_txq_send_hcmd(trans, cmd); if (!(cmd->flags & CMD_ASYNC)) lock_map_release(&trans->sync_cmd_lockdep_map); if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt)) return -EIO; return ret; } IWL_EXPORT_SYMBOL(iwl_trans_send_cmd); /* Comparator for struct iwl_hcmd_names. * Used in the binary search over a list of host commands. * * @key: command_id that we're looking for. * @elt: struct iwl_hcmd_names candidate for match. * * @return 0 iff equal. 
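* * Note that bsearch() requires each iwl_hcmd_arr to be sorted by cmd_id; * iwl_cmd_groups_verify_sorted() further down exists to check exactly that.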
*/ static int iwl_hcmd_names_cmp(const void *key, const void *elt) { const struct iwl_hcmd_names *name = elt; const u8 *cmd1 = key; u8 cmd2 = name->cmd_id; return (*cmd1 - cmd2); } const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id) { u8 grp, cmd; struct iwl_hcmd_names *ret; const struct iwl_hcmd_arr *arr; size_t size = sizeof(struct iwl_hcmd_names); grp = iwl_cmd_groupid(id); cmd = iwl_cmd_opcode(id); if (!trans->command_groups || grp >= trans->command_groups_size || !trans->command_groups[grp].arr) return "UNKNOWN"; arr = &trans->command_groups[grp]; ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp); if (!ret) return "UNKNOWN"; return ret->cmd_name; } IWL_EXPORT_SYMBOL(iwl_get_cmd_string); int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans) { int i, j; const struct iwl_hcmd_arr *arr; for (i = 0; i < trans->command_groups_size; i++) { arr = &trans->command_groups[i]; if (!arr->arr) continue; for (j = 0; j < arr->size - 1; j++) if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id) return -1; } return 0; } IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted); diff --git a/sys/contrib/dev/iwlwifi/iwl-trans.h b/sys/contrib/dev/iwlwifi/iwl-trans.h index 43033e5193f8..88d09041b19a 100644 --- a/sys/contrib/dev/iwlwifi/iwl-trans.h +++ b/sys/contrib/dev/iwlwifi/iwl-trans.h @@ -1,1559 +1,1645 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_trans_h__ #define __iwl_trans_h__ #include #include /* for page_address */ #include #include #include "iwl-debug.h" #include "iwl-config.h" #include "fw/img.h" #include "iwl-op-mode.h" #include #include "fw/api/cmdhdr.h" #include "fw/api/txq.h" #include "fw/api/dbg-tlv.h" #include "iwl-dbg-tlv.h" #if defined(__FreeBSD__) #include #include "iwl-modparams.h" #endif /** * DOC: Transport layer - what is it ? * * The transport layer is the layer that deals with the HW directly. It provides * an abstraction of the underlying HW to the upper layer. The transport layer * doesn't provide any policy, algorithm or anything of this kind, but only * mechanisms to make the HW do something. It is not completely stateless but * close to it. * We will have an implementation for each different supported bus. */ /** * DOC: Life cycle of the transport layer * * The transport layer has a very precise life cycle. * * 1) A helper function is called during the module initialization and * registers the bus driver's ops with the transport's alloc function. * 2) Bus's probe calls to the transport layer's allocation functions. * Of course this function is bus specific. * 3) This allocation functions will spawn the upper layer which will * register mac80211. * * 4) At some point (i.e. mac80211's start call), the op_mode will call * the following sequence: * start_hw * start_fw * * 5) Then when finished (or reset): * stop_device * * 6) Eventually, the free function will be called. */ #define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ #define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_ALIGN 0x40 #define FH_RSCSR_RPA_EN BIT(25) #define FH_RSCSR_RADA_EN BIT(26) #define FH_RSCSR_RXQ_POS 16 #define FH_RSCSR_RXQ_MASK 0x3F0000 struct iwl_rx_packet { /* * The first 4 bytes of the RX frame header contain both the RX frame * size and some flags. 
* Bit fields: * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: flag fast IRQ request * 28-27: Reserved * 26: RADA enabled * 25: Offload enabled * 24: RPF enabled * 23: RSS enabled * 22: Checksum enabled * 21-16: RX queue * 15-14: Reserved * 13-00: RX frame size */ __le32 len_n_flags; struct iwl_cmd_header hdr; u8 data[]; } __packed; static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt) { return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; } static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) { return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr); } /** * enum CMD_MODE - how to send the host commands ? * * @CMD_ASYNC: Return right away and don't wait for the response * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of * the response. The caller needs to call iwl_free_resp when done. * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be * called after this command completes. Valid only with CMD_ASYNC. * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to * SUSPEND and RESUME commands. We are in D3 mode when we set * trans->system_pm_mode to IWL_PLAT_PM_MODE_D3. */ enum CMD_MODE { CMD_ASYNC = BIT(0), CMD_WANT_SKB = BIT(1), CMD_SEND_IN_RFKILL = BIT(2), CMD_WANT_ASYNC_CALLBACK = BIT(3), CMD_SEND_IN_D3 = BIT(4), }; #define DEF_CMD_PAYLOAD_SIZE 320 /** * struct iwl_device_cmd * * For allocation of the command and tx queues, this establishes the overall * size of the largest command we send to uCode, except for commands that * aren't fully copied and use other TFD space. */ struct iwl_device_cmd { union { struct { struct iwl_cmd_header hdr; /* uCode API */ u8 payload[DEF_CMD_PAYLOAD_SIZE]; }; struct { struct iwl_cmd_header_wide hdr_wide; u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - sizeof(struct iwl_cmd_header_wide) + sizeof(struct iwl_cmd_header)]; }; }; } __packed; /** * struct iwl_device_tx_cmd - buffer for TX command * @hdr: the header * @payload: the payload placeholder * * The actual structure is sized dynamically according to need. */ struct iwl_device_tx_cmd { struct iwl_cmd_header hdr; u8 payload[]; } __packed; #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) /* * number of transfer buffers (fragments) per transmit frame descriptor; * this is just the driver's idea, the hardware supports 20 */ #define IWL_MAX_CMD_TBS_PER_TFD 2 /* We need 2 entries for the TX command and header, and another one might * be needed for potential data in the SKB's head. The remaining ones can * be used for frags. */ #define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3) /** * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command * * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's * ring. The transport layer doesn't map the command's buffer to DMA, but * rather copies it to a previously allocated DMA buffer. This flag tells * the transport layer not to copy the command, but to map the existing * buffer (that is passed in) instead. This saves the memcpy and allows * commands that are bigger than the fixed buffer to be submitted. * Note that a TFD entry after a NOCOPY one cannot be a normal copied one. * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this * chunk internally and free it again after the command completes. This * can (currently) be used only once per command. * Note that a TFD entry after a DUP one cannot be a normal copied one. 
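 *
 * A common usage sketch (illustrative, the names are made up) is to copy a
 * small command header and map a large payload in place:
 *
 *	cmd.data[0] = &hdr;	cmd.len[0] = sizeof(hdr);
 *	cmd.data[1] = payload;	cmd.len[1] = payload_len;
 *	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;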
*/ enum iwl_hcmd_dataflag { IWL_HCMD_DFL_NOCOPY = BIT(0), IWL_HCMD_DFL_DUP = BIT(1), }; enum iwl_error_event_table_status { IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0), IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1), IWL_ERROR_EVENT_TABLE_UMAC = BIT(2), IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3), IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4), IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5), IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6), }; /** * struct iwl_host_cmd - Host command to the uCode * * @data: array of chunks that composes the data of the host command * @resp_pkt: response packet, if %CMD_WANT_SKB was set * @_rx_page_order: (internally used to free response packet); * FreeBSD uses _page instead. * @_rx_page_addr: (internally used to free response packet) * @flags: can be CMD_* * @len: array of the lengths of the chunks in data * @dataflags: IWL_HCMD_DFL_* * @id: command id of the host command, for wide commands encoding the * version and group as well */ struct iwl_host_cmd { const void *data[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_rx_packet *resp_pkt; #if defined(__linux__) unsigned long _rx_page_addr; #elif defined(__FreeBSD__) struct page *_page; #endif u32 _rx_page_order; u32 flags; u32 id; u16 len[IWL_MAX_CMD_TBS_PER_TFD]; u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; }; static inline void iwl_free_resp(struct iwl_host_cmd *cmd) { #if defined(__linux__) free_pages(cmd->_rx_page_addr, cmd->_rx_page_order); #elif defined(__FreeBSD__) __free_pages(cmd->_page, cmd->_rx_page_order); #endif } struct iwl_rx_cmd_buffer { struct page *_page; int _offset; bool _page_stolen; u32 _rx_page_order; unsigned int truesize; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) { return (void *)((unsigned long)page_address(r->_page) + r->_offset); } static inline int rxb_offset(struct iwl_rx_cmd_buffer *r) { return r->_offset; } static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) { r->_page_stolen = true; get_page(r->_page); return r->_page; } static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) { __free_pages(r->_page, r->_rx_page_order); } #define MAX_NO_RECLAIM_CMDS 6 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) /* * Maximum number of HW queues the transport layer * currently supports */ #define IWL_MAX_HW_QUEUES 32 #define IWL_MAX_TVQM_QUEUES 512 #define IWL_MAX_TID_COUNT 8 #define IWL_MGMT_TID 15 #define IWL_FRAME_LIMIT 64 #define IWL_MAX_RX_HW_QUEUES 16 #define IWL_9000_MAX_RX_HW_QUEUES 6 /** * enum iwl_wowlan_status - WoWLAN image/device status * @IWL_D3_STATUS_ALIVE: firmware is still running after resume * @IWL_D3_STATUS_RESET: device was reset while suspended */ enum iwl_d3_status { IWL_D3_STATUS_ALIVE, IWL_D3_STATUS_RESET, }; /** * enum iwl_trans_status: transport status flags * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed * @STATUS_DEVICE_ENABLED: APM is enabled * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up) * @STATUS_INT_ENABLED: interrupts are enabled * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode * @STATUS_FW_ERROR: the fw is in error state * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once, * e.g. 
for testing */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, STATUS_DEVICE_ENABLED, STATUS_TPOWER_PMI, STATUS_INT_ENABLED, STATUS_RFKILL_HW, STATUS_RFKILL_OPMODE, STATUS_FW_ERROR, STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, STATUS_SUPPRESS_CMD_ERROR_ONCE, }; static inline int iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) { switch (rb_size) { case IWL_AMSDU_2K: return get_order(2 * 1024); case IWL_AMSDU_4K: return get_order(4 * 1024); case IWL_AMSDU_8K: return get_order(8 * 1024); case IWL_AMSDU_12K: return get_order(16 * 1024); default: WARN_ON(1); return -1; } } static inline int iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size) { switch (rb_size) { case IWL_AMSDU_2K: return 2 * 1024; case IWL_AMSDU_4K: return 4 * 1024; case IWL_AMSDU_8K: return 8 * 1024; case IWL_AMSDU_12K: return 16 * 1024; default: WARN_ON(1); return 0; } } struct iwl_hcmd_names { u8 cmd_id; const char *const cmd_name; }; #define HCMD_NAME(x) \ { .cmd_id = x, .cmd_name = #x } struct iwl_hcmd_arr { const struct iwl_hcmd_names *arr; int size; }; #define HCMD_ARR(x) \ { .arr = x, .size = ARRAY_SIZE(x) } /** * struct iwl_dump_sanitize_ops - dump sanitization operations * @frob_txf: Scrub the TX FIFO data * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header * but that might be short or long (&struct iwl_cmd_header or * &struct iwl_cmd_header_wide) * @frob_mem: Scrub memory data */ struct iwl_dump_sanitize_ops { void (*frob_txf)(void *ctx, void *buf, size_t buflen); void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen); void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen); }; /** * struct iwl_trans_config - transport configuration * * @op_mode: pointer to the upper layer. * @cmd_queue: the index of the command queue. * Must be set before start_fw. * @cmd_fifo: the fifo for host commands * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue. * @no_reclaim_cmds: Some devices erroneously don't set the * SEQ_RX_FRAME bit on some notifications, this is the * list of such notifications to filter. Max length is * %MAX_NO_RECLAIM_CMDS. * @n_no_reclaim_cmds: # of commands in list * @rx_buf_size: RX buffer size needed for A-MSDUs * if unset 4k will be the RX buffer size * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue * @command_groups: array of command groups, each member is an array of the * commands in the group; for debugging only * @command_groups_size: number of command groups, to avoid illegal access * @cb_data_offs: offset inside skb->cb to store transport data at, must have * space for at least two pointers * @fw_reset_handshake: firmware supports reset flow handshake * @queue_alloc_cmd_ver: queue allocation command version, set to 0 * for using the older SCD_QUEUE_CFG, set to the version of * SCD_QUEUE_CONFIG_CMD otherwise. 
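 *
 * A minimal, illustrative initialization (the queue/fifo numbers, no-reclaim
 * list and command group table name are assumptions for the sketch, not
 * values required by this header):
 *
 *	static const u8 no_reclaim_cmds[] = { TX_CMD };
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.no_reclaim_cmds = no_reclaim_cmds,
 *		.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = op_mode_groups,
 *		.command_groups_size = ARRAY_SIZE(op_mode_groups),
 *	};
 *	iwl_trans_configure(trans, &trans_cfg);
 *
 * iwl_trans_configure() also checks, via iwl_cmd_groups_verify_sorted(),
 * that each command group array is sorted by command id.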
*/ struct iwl_trans_config { struct iwl_op_mode *op_mode; u8 cmd_queue; u8 cmd_fifo; unsigned int cmd_q_wdg_timeout; const u8 *no_reclaim_cmds; unsigned int n_no_reclaim_cmds; enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; const struct iwl_hcmd_arr *command_groups; int command_groups_size; u8 cb_data_offs; bool fw_reset_handshake; u8 queue_alloc_cmd_ver; }; struct iwl_trans_dump_data { u32 len; u8 data[]; }; struct iwl_trans; struct iwl_trans_txq_scd_cfg { u8 fifo; u8 sta_id; u8 tid; bool aggregate; int frame_limit; }; /** * struct iwl_trans_rxq_dma_data - RX queue DMA data * @fr_bd_cb: DMA address of free BD cyclic buffer * @fr_bd_wid: Initial write index of the free BD cyclic buffer * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr * @ur_bd_cb: DMA address of used BD cyclic buffer */ struct iwl_trans_rxq_dma_data { u64 fr_bd_cb; u32 fr_bd_wid; u64 urbd_stts_wrptr; u64 ur_bd_cb; }; +/* maximal number of DRAM MAP entries supported by FW */ +#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64 + +/** + * struct iwl_pnvm_image - contains info about the parsed pnvm image + * @chunks: array of pointers to pnvm payloads and their sizes + * @n_chunks: the number of the pnvm payloads. + * @version: the version of the loaded PNVM image + */ +struct iwl_pnvm_image { + struct { + const void *data; + u32 len; + } chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX]; + u32 n_chunks; + u32 version; +}; + /** * struct iwl_trans_ops - transport specific operations * * All the handlers MUST be implemented * * @start_hw: starts the HW. From that point on, the HW can send interrupts. * May sleep. * @op_mode_leave: Turn off the HW RF kill indication if on * May sleep * @start_fw: allocates and inits all the resources for the transport * layer. Also kick a fw image. * May sleep * @fw_alive: called when the fw sends alive notification. If the fw provides * the SCD base address in SRAM, then provide it here, or 0 otherwise. * May sleep * @stop_device: stops the whole device (embedded CPU put to reset) and stops * the HW. From that point on, the HW will be stopped but will still issue * an interrupt if the HW RF kill switch is triggered. * This callback must do the right thing and not crash even if %start_hw() * was called but not &start_fw(). May sleep. * @d3_suspend: put the device into the correct mode for WoWLAN during * suspend. This is optional, if not implemented WoWLAN will not be * supported. This callback may sleep. * @d3_resume: resume the device after WoWLAN, enabling the opmode to * talk to the WoWLAN image to get its status. This is optional, if not * implemented WoWLAN will not be supported. This callback may sleep. * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. * If RFkill is asserted in the middle of a SYNC host command, it must * return -ERFKILL straight away. * May sleep only if CMD_ASYNC is not set * @tx: send an skb. The transport relies on the op_mode to zero the * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all * the CSUM will be taken care of (TCP CSUM and IP header in case of * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP * header if it is IPv4. * Must be atomic * @reclaim: free packet until ssn. Returns a list of freed packets. * Must be atomic * @txq_enable: setup a queue. To setup an AC queue, use the * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before * this one. The op_mode must not configure the HCMD queue. 
The scheduler * configuration may be %NULL, in which case the hardware will not be * configured. If true is returned, the operation mode needs to increment * the sequence number of the packets routed to this queue because of a * hardware scheduler bug. May sleep. * @txq_disable: de-configure a Tx queue to send AMPDUs * Must be atomic * @txq_set_shared_mode: change Tx queue shared/unshared marking * @wait_tx_queues_empty: wait until tx queues are empty. May sleep. * @wait_txq_empty: wait until specific tx queue is empty. May sleep. * @freeze_txq_timer: prevents the timer of the queue from firing until the * queue is set to awake. Must be atomic. * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note * that the transport needs to refcount the calls since this function * will be called several times with block = true, and then the queues * need to be unblocked only after the same number of calls with * block = false. * @write8: write a u8 to a register at offset ofs from the BAR * @write32: write a u32 to a register at offset ofs from the BAR * @read32: read a u32 register at offset ofs from the BAR * @read_prph: read a DWORD from a periphery register * @write_prph: write a DWORD to a periphery register * @read_mem: read device's SRAM in DWORD * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory * will be zeroed. * @read_config32: read a u32 value from the device's config space at * the given offset. * @configure: configure parameters required by the transport layer from * the op_mode. May be called several times before start_fw, can't be * called after that. * @set_pmi: set the power pmi state * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. * Sleeping is not allowed between grab_nic_access and * release_nic_access. * @release_nic_access: let the NIC go to sleep. The "flags" parameter * must be the same one that was sent before to the grab_nic_access. * @set_bits_mask - set SRAM register according to value and mask. * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last * TX'ed commands and similar. The buffer will be vfree'd by the caller. * Note that the transport must fill in the proper file headers. * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup * of the trans debugfs + * @load_pnvm: save the pnvm data in DRAM * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the * context info. 
+ * @load_reduce_power: copy reduce power table to the corresponding DRAM memory + * @set_reduce_power: set reduce power table addresses in the sratch buffer * @interrupts: disable/enable interrupts to transport */ struct iwl_trans_ops { int (*start_hw)(struct iwl_trans *iwl_trans); void (*op_mode_leave)(struct iwl_trans *iwl_trans); int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans); int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr); bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int queue_wdg_timeout); void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); /* 22000 functions */ int (*txq_alloc)(struct iwl_trans *trans, u32 flags, u32 sta_mask, u8 tid, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); int (*rxq_dma_data)(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data); void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, bool shared); int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm); int (*wait_txq_empty)(struct iwl_trans *trans, int queue); void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, bool freeze); void (*block_txq_ptrs)(struct iwl_trans *trans, bool block); void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); u32 (*read32)(struct iwl_trans *trans, u32 ofs); u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); int (*read_mem)(struct iwl_trans *trans, u32 addr, void *buf, int dwords); int (*write_mem)(struct iwl_trans *trans, u32 addr, const void *buf, int dwords); int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val); void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership); bool (*grab_nic_access)(struct iwl_trans *trans); void (*release_nic_access)(struct iwl_trans *trans); void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, u32 value); struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx); void (*debugfs_cleanup)(struct iwl_trans *trans); void (*sync_nmi)(struct iwl_trans *trans); - int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len); - int (*set_reduce_power)(struct iwl_trans *trans, - const void *data, u32 len); + int (*load_pnvm)(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa); + void (*set_pnvm)(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); + int (*load_reduce_power)(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); + void (*set_reduce_power)(struct 
iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); + void (*interrupts)(struct iwl_trans *trans, bool enable); int (*imr_dma_data)(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); }; /** * enum iwl_trans_state - state of the transport layer * * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet * @IWL_TRANS_FW_ALIVE: FW has sent an alive response */ enum iwl_trans_state { IWL_TRANS_NO_FW, IWL_TRANS_FW_STARTED, IWL_TRANS_FW_ALIVE, }; /** * DOC: Platform power management * * In system-wide power management the entire platform goes into a low * power state (e.g. idle or suspend to RAM) at the same time and the * device is configured as a wakeup source for the entire platform. * This is usually triggered by userspace activity (e.g. the user * presses the suspend button or a power management daemon decides to * put the platform in low power mode). The device's behavior in this * mode is dictated by the wake-on-WLAN configuration. * * The terms used for the device's behavior are as follows: * * - D0: the device is fully powered and the host is awake; * - D3: the device is in low power mode and only reacts to * specific events (e.g. magic-packet received or scan * results found); * * These terms reflect the power modes in the firmware and are not to * be confused with the physical device power state. */ /** * enum iwl_plat_pm_mode - platform power management mode * * This enumeration describes the device's platform power management * behavior when in system-wide suspend (i.e WoWLAN). * * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this * device. In system-wide suspend mode, it means that the all * connections will be closed automatically by mac80211 before * the platform is suspended. * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN). */ enum iwl_plat_pm_mode { IWL_PLAT_PM_MODE_DISABLED, IWL_PLAT_PM_MODE_D3, }; /** * enum iwl_ini_cfg_state * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs * are corrupted. The rest of the debug TLVs will still be used */ enum iwl_ini_cfg_state { IWL_INI_CFG_STATE_NOT_LOADED, IWL_INI_CFG_STATE_LOADED, IWL_INI_CFG_STATE_CORRUPTED, }; /* Max time to wait for nmi interrupt */ #define IWL_TRANS_NMI_TIMEOUT (HZ / 4) /** * struct iwl_dram_data * @physical: page phy pointer * @block: pointer to the allocated block/page * @size: size of the block/page */ struct iwl_dram_data { dma_addr_t physical; void *block; int size; }; +/** + * @drams: array of several DRAM areas that contains the pnvm and power + * reduction table payloads. + * @n_regions: number of DRAM regions that were allocated + * @prph_scratch_mem_desc: points to a structure allocated in dram, + * designed to show FW where all the payloads are. 
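+ *
+ * Illustrative sketch only (the owning pnvm_data field and the surrounding
+ * error handling are assumed; this is not the transport's actual load path):
+ * one DRAM block is allocated per parsed PNVM chunk and the region count is
+ * updated as each copy succeeds:
+ *
+ *	struct iwl_dram_regions *dr = &trans_data->pnvm_data;
+ *	for (i = 0; i < pnvm_image->n_chunks; i++) {
+ *		struct iwl_dram_data *d = &dr->drams[dr->n_regions];
+ *
+ *		d->block = dma_alloc_coherent(trans->dev,
+ *					      pnvm_image->chunks[i].len,
+ *					      &d->physical, GFP_KERNEL);
+ *		if (!d->block)
+ *			break;
+ *		d->size = pnvm_image->chunks[i].len;
+ *		memcpy(d->block, pnvm_image->chunks[i].data, d->size);
+ *		dr->n_regions++;
+ *	}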
+ */ +struct iwl_dram_regions { + struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX]; + struct iwl_dram_data prph_scratch_mem_desc; + u8 n_regions; +}; + /** * struct iwl_fw_mon - fw monitor per allocation id * @num_frags: number of fragments * @frags: an array of DRAM buffer fragments */ struct iwl_fw_mon { u32 num_frags; struct iwl_dram_data *frags; }; /** * struct iwl_self_init_dram - dram data used by self init process * @fw: lmac and umac dram data * @fw_cnt: total number of items in array * @paging: paging dram data * @paging_cnt: total number of items in array */ struct iwl_self_init_dram { struct iwl_dram_data *fw; int fw_cnt; struct iwl_dram_data *paging; int paging_cnt; }; /** * struct iwl_imr_data - imr dram data used during debug process * @imr_enable: imr enable status received from fw * @imr_size: imr dram size received from fw * @sram_addr: sram address from debug tlv * @sram_size: sram size from debug tlv * @imr2sram_remainbyte`: size remained after each dma transfer * @imr_curr_addr: current dst address used during dma transfer * @imr_base_addr: imr address received from fw */ struct iwl_imr_data { u32 imr_enable; u32 imr_size; u32 sram_addr; u32 sram_size; u32 imr2sram_remainbyte; u64 imr_curr_addr; __le64 imr_base_addr; }; +#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES 32 + +/** + * struct iwl_pc_data - program counter details + * @pc_name: cpu name + * @pc_address: cpu program counter + */ +struct iwl_pc_data { + u8 pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES]; + u32 pc_address; +}; + /** * struct iwl_trans_debug - transport debug related data * * @n_dest_reg: num of reg_ops in %dbg_dest_tlv * @rec_on: true iff there is a fw debug recording currently active * @dest_tlv: points to the destination TLV for debug * @conf_tlv: array of pointers to configuration TLVs for debug * @trigger_tlv: array of pointers to triggers TLVs for debug * @lmac_error_event_table: addrs of lmacs error tables * @umac_error_event_table: addr of umac error table * @tcm_error_event_table: address(es) of TCM error table(s) * @rcm_error_event_table: address(es) of RCM error table(s) * @error_event_table_tlv_status: bitmap that indicates what error table * pointers was recevied via TLV. uses enum &iwl_error_event_table_status * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state * @external_ini_cfg: external debug cfg state. 
Uses &enum iwl_ini_cfg_state * @fw_mon_cfg: debug buffer allocation configuration * @fw_mon_ini: DRAM buffer fragments per allocation id * @fw_mon: DRAM buffer for firmware monitor * @hw_error: equals true if hw error interrupt was received from the FW * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location * @active_regions: active regions * @debug_info_tlv_list: list of debug info TLVs * @time_point: array of debug time points * @periodic_trig_list: periodic triggers list * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON * @ucode_preset: preset based on ucode + * @dump_file_name_ext: dump file name extension + * @dump_file_name_ext_valid: dump file name extension if valid or not + * @num_pc: number of program counter for cpu + * @pc_data: details of the program counter */ struct iwl_trans_debug { u8 n_dest_reg; bool rec_on; const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv; const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv; u32 lmac_error_event_table[2]; u32 umac_error_event_table; u32 tcm_error_event_table[2]; u32 rcm_error_event_table[2]; unsigned int error_event_table_tlv_status; enum iwl_ini_cfg_state internal_ini_cfg; enum iwl_ini_cfg_state external_ini_cfg; struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM]; struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM]; struct iwl_dram_data fw_mon; bool hw_error; enum iwl_fw_ini_buffer_location ini_dest; u64 unsupported_region_msk; struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID]; struct list_head debug_info_tlv_list; struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM]; struct list_head periodic_trig_list; u32 domains_bitmap; u32 ucode_preset; bool restart_required; u32 last_tp_resetfw; struct iwl_imr_data imr_data; + u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME]; + bool dump_file_name_ext_valid; + u32 num_pc; + struct iwl_pc_data *pc_data; }; struct iwl_dma_ptr { dma_addr_t dma; void *addr; size_t size; }; struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; u32 flags; u32 tbs; }; /* * The FH will write back to the first TB only, so we need to copy some data * into the buffer regardless of whether it should be mapped or not. * This indicates how big the first TB must be to include the scratch buffer * and the assigned PN. * Since PN location is 8 bytes at offset 12, it's 20 now. * If we make it bigger then allocations will be bigger and copy slower, so * that's probably not useful. 
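 * Worked out: with the 8-byte PN starting at offset 12, the copied region
 * must reach offset 12 + 8 = 20 bytes, which is where the value below
 * comes from.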
*/ #define IWL_FIRST_TB_SIZE 20 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { void *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; struct iwl_cmd_meta meta; }; struct iwl_pcie_first_tb_buf { u8 buf[IWL_FIRST_TB_SIZE_ALIGN]; }; /** * struct iwl_txq - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) * @first_tb_bufs: start of command headers, including scratch buffers, for * the writeback -- this is DMA memory and an array holding one buffer * for each command on the queue * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock * @stuck_timer: timer that fires if queue gets stuck * @trans: pointer back to transport (for timer) * @need_update: indicates need to update read/write index * @ampdu: true if this queue is an ampdu queue for an specific RA/TID * @wd_timeout: queue watchdog timeout (jiffies) - per queue * @frozen: tx stuck queue timer is frozen * @frozen_expiry_remainder: remember how long until the timer fires * @bc_tbl: byte count table of the queue (relevant only for gen2 transport) * @write_ptr: 1-st empty entry (index) host_w * @read_ptr: last used entry (index) host_r * @dma_addr: physical addr for BD's * @n_window: safe queue window * @id: queue id * @low_mark: low watermark, resume queue if free space more than this * @high_mark: high watermark, stop queue if free space less than this * * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame * descriptors) and required locking structures. * * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless * there might be HW changes in the future). For the normal TX * queues, n_window, which is the size of the software queue data * is also 256; however, for the command queue, n_window is only * 32 since we don't need so many commands pending. Since the HW * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. * This means that we end up with the following: * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | * SW entries: | 0 | ... | 31 | * where N is a number between 0 and 7. This means that the SW * data is a window overlayed over the HW queue. 
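 *
 * Worked example (assuming the usual "index & (n_window - 1)" wrap used for
 * the software index): on the command queue, where n_window is 32, a HW
 * write index of 100 falls in window N = 3 (HW entries 96..127) and maps to
 * SW entry 100 & 31 = 4.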
*/ struct iwl_txq { void *tfds; struct iwl_pcie_first_tb_buf *first_tb_bufs; dma_addr_t first_tb_dma; struct iwl_pcie_txq_entry *entries; /* lock for syncing changes on the queue */ spinlock_t lock; unsigned long frozen_expiry_remainder; struct timer_list stuck_timer; struct iwl_trans *trans; bool need_update; bool frozen; bool ampdu; int block; unsigned long wd_timeout; struct sk_buff_head overflow_q; struct iwl_dma_ptr bc_tbl; int write_ptr; int read_ptr; dma_addr_t dma_addr; int n_window; u32 id; int low_mark; int high_mark; bool overflow_tx; }; /** * struct iwl_trans_txqs - transport tx queues data * * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @page_offs: offset from skb->cb to mac header page pointer * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer * @queue_used - bit mask of used queues * @queue_stopped - bit mask of stopped queues * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler * @queue_alloc_cmd_ver: queue allocation command version */ struct iwl_trans_txqs { unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; struct dma_pool *bc_pool; size_t bc_tbl_size; bool bc_table_dword; u8 page_offs; u8 dev_cmd_offs; struct iwl_tso_hdr_page __percpu *tso_hdr_page; struct { u8 fifo; u8 q_id; unsigned int wdg_timeout; } cmd; struct { u8 max_tbs; u16 size; u8 addr_size; } tfd; struct iwl_dma_ptr scd_bc_tbls; u8 queue_alloc_cmd_ver; }; /** * struct iwl_trans - transport common data * * @csme_own - true if we couldn't get ownership on the device * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @trans_cfg: the trans-specific configuration part * @cfg - pointer to the configuration * @drv - pointer to iwl_drv * @status: a bit-mask of transport status flags * @dev - pointer to struct device * that represents the device * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. * @hw_rf_id a u32 with the device RF ID + * @hw_crf_id a u32 with the device CRF ID + * @hw_wfpm_id a u32 with the device wfpm ID * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled + * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed + * @failed_to_load_reduce_power_image: set to true if pnvm loading failed * @wide_cmd_header: true when ucode supports wide command header format * @wait_command_queue: wait queue for sync commands * @num_rx_queues: number of RX queues allocated by the transport; * the transport must set this before calling iwl_drv_start() * @iml_len: the length of the image loader * @iml: a pointer to the image loader itself * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before * starting the firmware, used for tracing * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the * start of the 802.11 header in the @rx_mpdu_cmd * @dflt_pwr_limit: default power limit fetched from the platform (ACPI) * @system_pm_mode: the system-wide power management mode in use. 
* This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. * @iwl_trans_txqs: transport tx queues data. + * @mbx_addr_0_step: step address data 0 + * @mbx_addr_1_step: step address data 1 + * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), + * only valid for discrete (not integrated) NICs */ struct iwl_trans { bool csme_own; const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; const struct iwl_cfg *cfg; struct iwl_drv *drv; enum iwl_trans_state state; unsigned long status; struct device *dev; u32 max_skb_frags; u32 hw_rev; u32 hw_rev_step; u32 hw_rf_id; + u32 hw_crf_id; + u32 hw_cnv_id; + u32 hw_wfpm_id; u32 hw_id; char hw_id_str[52]; u32 sku_id[3]; u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; bool pm_support; bool ltr_enabled; u8 pnvm_loaded:1; + u8 fail_to_parse_pnvm_image:1; u8 reduce_power_loaded:1; + u8 failed_to_load_reduce_power_image:1; const struct iwl_hcmd_arr *command_groups; int command_groups_size; bool wide_cmd_header; wait_queue_head_t wait_command_queue; u8 num_rx_queues; size_t iml_len; u8 *iml; /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; char dev_cmd_pool_name[50]; struct dentry *dbgfs_dir; #ifdef CONFIG_LOCKDEP struct lockdep_map sync_cmd_lockdep_map; #endif struct iwl_trans_debug dbg; struct iwl_self_init_dram init_dram; enum iwl_plat_pm_mode system_pm_mode; const char *name; struct iwl_trans_txqs txqs; + u32 mbx_addr_0_step; + u32 mbx_addr_1_step; + + u8 pcie_link_speed; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[] __aligned(sizeof(void *)); }; const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); static inline void iwl_trans_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { trans->op_mode = trans_cfg->op_mode; trans->ops->configure(trans, trans_cfg); WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); } static inline int iwl_trans_start_hw(struct iwl_trans *trans) { might_sleep(); return trans->ops->start_hw(trans); } static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) { might_sleep(); if (trans->ops->op_mode_leave) trans->ops->op_mode_leave(trans); trans->op_mode = NULL; trans->state = IWL_TRANS_NO_FW; } static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) { might_sleep(); trans->state = IWL_TRANS_FW_ALIVE; trans->ops->fw_alive(trans, scd_addr); } static inline int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { int ret; might_sleep(); WARN_ON_ONCE(!trans->rx_mpdu_cmd); clear_bit(STATUS_FW_ERROR, &trans->status); ret = trans->ops->start_fw(trans, fw, run_in_rfkill); if (ret == 0) trans->state = IWL_TRANS_FW_STARTED; return ret; } static inline void iwl_trans_stop_device(struct iwl_trans *trans) { might_sleep(); trans->ops->stop_device(trans); trans->state = IWL_TRANS_NO_FW; } static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { might_sleep(); if (!trans->ops->d3_suspend) - return 0; + return -EOPNOTSUPP; return trans->ops->d3_suspend(trans, test, reset); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { might_sleep(); if (!trans->ops->d3_resume) - return 0; + return -EOPNOTSUPP; return trans->ops->d3_resume(trans, status, test, reset); } static 
inline struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx) { if (!trans->ops->dump_data) return NULL; return trans->ops->dump_data(trans, dump_mask, sanitize_ops, sanitize_ctx); } static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, struct iwl_device_tx_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->tx(trans, skb, dev_cmd, queue); } static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } trans->ops->reclaim(trans, queue, ssn, skbs); } static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } trans->ops->set_q_ptrs(trans, queue, ptr); } static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd) { trans->ops->txq_disable(trans, queue, configure_scd); } static inline bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int queue_wdg_timeout) { might_sleep(); if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return false; } return trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); } static inline int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) return -ENOTSUPP; return trans->ops->rxq_dma_data(trans, queue, data); } static inline void iwl_trans_txq_free(struct iwl_trans *trans, int queue) { if (WARN_ON_ONCE(!trans->ops->txq_free)) return; trans->ops->txq_free(trans, queue); } static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, u8 tid, int size, unsigned int wdg_timeout) { might_sleep(); if (WARN_ON_ONCE(!trans->ops->txq_alloc)) return -ENOTSUPP; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->txq_alloc(trans, flags, sta_mask, tid, size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, int queue, bool shared_mode) { if (trans->ops->txq_set_shared_mode) trans->ops->txq_set_shared_mode(trans, queue, shared_mode); } static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn, unsigned int queue_wdg_timeout) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = tid, .frame_limit = frame_limit, .aggregate = sta_id >= 0, }; iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout); } static 
inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, unsigned int queue_wdg_timeout) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = -1, .tid = IWL_MAX_TID_COUNT, .frame_limit = IWL_FRAME_LIMIT, .aggregate = false, }; iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); } static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } if (trans->ops->freeze_txq_timer) trans->ops->freeze_txq_timer(trans, txqs, freeze); } static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans, bool block) { if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return; } if (trans->ops->block_txq_ptrs) trans->ops->block_txq_ptrs(trans, block); } static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs) { if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) return -ENOTSUPP; /* No need to wait if the firmware is not alive */ if (trans->state != IWL_TRANS_FW_ALIVE) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->wait_tx_queues_empty(trans, txqs); } static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) { if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) return -ENOTSUPP; if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); return -EIO; } return trans->ops->wait_txq_empty(trans, queue); } static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trans->ops->write8(trans, ofs, val); } static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) { trans->ops->write32(trans, ofs, val); } static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) { return trans->ops->read32(trans, ofs); } static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) { return trans->ops->read_prph(trans, ofs); } static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) { return trans->ops->write_prph(trans, ofs, val); } static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { return trans->ops->read_mem(trans, addr, buf, dwords); } #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ do { \ if (__builtin_constant_p(bufsize)) \ BUILD_BUG_ON((bufsize) % sizeof(u32)); \ iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ } while (0) static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt) { if (trans->ops->imr_dma_data) return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt); return 0; } static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { u32 value; if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1))) return 0xa5a5a5a5; return value; } static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, const void *buf, int dwords) { return trans->ops->write_mem(trans, addr, buf, dwords); } static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, u32 val) { return iwl_trans_write_mem(trans, addr, &val, 1); } static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) { if (trans->ops->set_pmi) trans->ops->set_pmi(trans, state); } static inline int iwl_trans_sw_reset(struct iwl_trans *trans, bool 
retake_ownership) { if (trans->ops->sw_reset) return trans->ops->sw_reset(trans, retake_ownership); return 0; } static inline void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { trans->ops->set_bits_mask(trans, reg, mask, value); } #define iwl_trans_grab_nic_access(trans) \ __cond_lock(nic_access, \ likely((trans)->ops->grab_nic_access(trans))) static inline void __releases(nic_access) iwl_trans_release_nic_access(struct iwl_trans *trans) { trans->ops->release_nic_access(trans); __release(nic_access); } static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) { if (WARN_ON_ONCE(!trans->op_mode)) return; /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { iwl_op_mode_nic_error(trans->op_mode, sync); trans->state = IWL_TRANS_NO_FW; } } static inline bool iwl_trans_fw_running(struct iwl_trans *trans) { return trans->state == IWL_TRANS_FW_ALIVE; } static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) { if (trans->ops->sync_nmi) trans->ops->sync_nmi(trans); } void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, u32 sw_err_bit); -static inline int iwl_trans_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len) +static inline int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa) { - if (trans->ops->set_pnvm) { - int ret = trans->ops->set_pnvm(trans, data, len); - - if (ret) - return ret; - } - - trans->pnvm_loaded = true; - - return 0; + return trans->ops->load_pnvm(trans, pnvm_data, capa); } -static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len) +static inline void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) { - if (trans->ops->set_reduce_power) { - int ret = trans->ops->set_reduce_power(trans, data, len); + if (trans->ops->set_pnvm) + trans->ops->set_pnvm(trans, capa); +} - if (ret) - return ret; - } +static inline int iwl_trans_load_reduce_power + (struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) +{ + return trans->ops->load_reduce_power(trans, payloads, capa); +} - trans->reduce_power_loaded = true; - return 0; +static inline void +iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->ops->set_reduce_power) + trans->ops->set_reduce_power(trans, capa); } static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) { return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED || trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED; } static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) { if (trans->ops->interrupts) trans->ops->interrupts(trans, enable); } /***************************************************** * transport helper functions *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans); int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); +static inline bool iwl_trans_is_hw_error_value(u32 val) +{ + return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50); +} + /***************************************************** * driver (transport) register/unregister functions 
******************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); +void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan); #endif /* __iwl_trans_h__ */ diff --git a/sys/contrib/dev/iwlwifi/mei/iwl-mei.h b/sys/contrib/dev/iwlwifi/mei/iwl-mei.h index 789800075063..aec85c42d02a 100644 --- a/sys/contrib/dev/iwlwifi/mei/iwl-mei.h +++ b/sys/contrib/dev/iwlwifi/mei/iwl-mei.h @@ -1,138 +1,159 @@ /*- * Copyright (c) 2022 The FreeBSD Foundation * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _IWL_MEI_IWL_MEI_H #define _IWL_MEI_IWL_MEI_H #include #include #include #include enum mei_nvm_caps { MEI_NVM_CAPS_11AX_SUPPORT = BIT(0), MEI_NVM_CAPS_LARI_SUPPORT = BIT(1), }; struct iwl_mei_nvm { uint8_t n_hw_addrs; enum mei_nvm_caps caps; uint32_t nvm_version; uint32_t radio_cfg; uint32_t channels[110 /* IWL_NVM_NUM_CHANNELS_UHB */]; }; struct iwl_mei_conn_info { - int __dummy; + uint8_t lp_state; + uint8_t band; + uint8_t channel; + uint8_t ssid_len; + uint8_t bssid[ETH_ALEN]; + uint8_t ssid[IEEE80211_MAX_SSID_LEN]; }; struct iwl_mei_ops { void (*me_conn_status)(void *, const struct iwl_mei_conn_info *); void (*nic_stolen)(void *); void (*rfkill)(void *, bool); void (*roaming_forbidden)(void *, bool); void (*sap_connected)(void *); }; #if IS_ENABLED(CONFIG_IWLMEI) #error No MEI support in FreeBSD currently #else static __inline void iwl_mei_device_down(void) { } static __inline struct iwl_mei_nvm * iwl_mei_get_nvm(void) { return (NULL); } static __inline int iwl_mei_get_ownership(void) { return (0); } static __inline void iwl_mei_host_disassociated(void) { } static __inline bool iwl_mei_is_connected(void) { return (false); } static __inline void iwl_mei_set_country_code(uint16_t mcc __unused) { } static __inline void iwl_mei_set_netdev(struct net_device *netdevice __unused) { } static __inline void iwl_mei_set_nic_info(const uint8_t *addr __unused, const uint8_t *hw_addr __unused) { } static __inline void iwl_mei_set_rfkill_state(bool rf_killed __unused, bool sw_rfkill __unused) { } static __inline void iwl_mei_tx_copy_to_csme(struct sk_buff *skb __unused, unsigned int ivlen __unused) { } static __inline int iwl_mei_register(void *mvm __unused, const struct iwl_mei_ops *ops __unused) { return (0); } static __inline void iwl_mei_start_unregister(void) { } static __inline void iwl_mei_unregister_complete(void) { } + +static __inline void +iwl_mei_device_state(bool up __unused) +{ +} + +static __inline void +iwl_mei_alive_notif(bool x __unused) +{ +} + +static __inline bool +iwl_mei_pldr_req(void) +{ + return (false); +} #endif #endif /* _IWL_MEI_IWL_MEI_H */ diff --git a/sys/contrib/dev/iwlwifi/mvm/binding.c b/sys/contrib/dev/iwlwifi/mvm/binding.c index 0aac306304cb..458b97930059 100644 --- a/sys/contrib/dev/iwlwifi/mvm/binding.c +++ b/sys/contrib/dev/iwlwifi/mvm/binding.c @@ -1,163 +1,174 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2020 Intel Corporation * Copyright (C) 2016 Intel Deutschland GmbH + * Copyright (C) 2022 Intel Corporation */ #include #include "fw-api.h" #include "mvm.h" struct iwl_mvm_iface_iterator_data { struct ieee80211_vif *ignore_vif; int idx; struct iwl_mvm_phy_ctxt *phyctxt; u16 ids[MAX_MACS_IN_BINDING]; u16 colors[MAX_MACS_IN_BINDING]; }; static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, struct iwl_mvm_iface_iterator_data *data) { struct iwl_binding_cmd cmd; struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt; int i, ret; u32 status; int size; memset(&cmd, 0, sizeof(cmd)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { size = sizeof(cmd); - cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw, + cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm, phyctxt->channel->band)); } else { size = IWL_BINDING_CMD_SIZE_V1; } cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); cmd.action = cpu_to_le32(action); cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color)); for (i = 0; i < MAX_MACS_IN_BINDING; i++) cmd.macs[i] = 
cpu_to_le32(FW_CTXT_INVALID); for (i = 0; i < data->idx; i++) cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i], data->colors[i])); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, size, &cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n", action, ret); return ret; } if (status) { IWL_ERR(mvm, "Binding command failed: %u\n", status); ret = -EIO; } return ret; } static void iwl_mvm_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif == data->ignore_vif) return; - if (mvmvif->phy_ctxt != data->phyctxt) + if (mvmvif->deflink.phy_ctxt != data->phyctxt) return; if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) return; data->ids[data->idx] = mvmvif->id; data->colors[data->idx] = mvmvif->color; data->idx++; } static int iwl_mvm_binding_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_phy_ctxt *phyctxt, bool add) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_iface_iterator_data data = { .ignore_vif = vif, .phyctxt = phyctxt, }; u32 action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_iface_iterator, &data); /* * If there are no other interfaces yet we * need to create a new binding. */ if (data.idx == 0) { if (add) action = FW_CTXT_ACTION_ADD; else action = FW_CTXT_ACTION_REMOVE; } if (add) { if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING)) return -EINVAL; data.ids[data.idx] = mvmvif->id; data.colors[data.idx] = mvmvif->color; data.idx++; } return iwl_mvm_binding_cmd(mvm, action, &data); } int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt)) return -EINVAL; /* * Update SF - Disable if needed. if this fails, SF might still be on * while many macs are bound, which is forbidden - so fail the binding. 
*/ if (iwl_mvm_sf_update(mvm, vif, false)) return -EINVAL; - return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); + return iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt, + true); } int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt)) return -EINVAL; - ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); + ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt, + false); if (!ret) if (iwl_mvm_sf_update(mvm, vif, true)) IWL_ERR(mvm, "Failed to update SF state\n"); return ret; } + +u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band) +{ + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) || + band == NL80211_BAND_2GHZ) + return IWL_LMAC_24G_INDEX; + return IWL_LMAC_5G_INDEX; +} diff --git a/sys/contrib/dev/iwlwifi/mvm/coex.c b/sys/contrib/dev/iwlwifi/mvm/coex.c index 9b194cb8d65e..5a5b1128e75c 100644 --- a/sys/contrib/dev/iwlwifi/mvm/coex.c +++ b/sys/contrib/dev/iwlwifi/mvm/coex.c @@ -1,685 +1,723 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include #include #include #include "fw/api/coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" /* 20MHz / 40MHz below / 40Mhz above*/ static const __le64 iwl_ci_mask[][3] = { /* dummy entry for channel 0 */ {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)}, { cpu_to_le64(0x0000001FFFULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x00007FFFFFULL), }, { cpu_to_le64(0x000000FFFFULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x0003FFFFFFULL), }, { cpu_to_le64(0x000003FFFCULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x000FFFFFFCULL), }, { cpu_to_le64(0x00001FFFE0ULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x007FFFFFE0ULL), }, { cpu_to_le64(0x00007FFF80ULL), cpu_to_le64(0x00007FFFFFULL), cpu_to_le64(0x01FFFFFF80ULL), }, { cpu_to_le64(0x0003FFFC00ULL), cpu_to_le64(0x0003FFFFFFULL), cpu_to_le64(0x0FFFFFFC00ULL), }, { cpu_to_le64(0x000FFFF000ULL), cpu_to_le64(0x000FFFFFFCULL), cpu_to_le64(0x3FFFFFF000ULL), }, { cpu_to_le64(0x007FFF8000ULL), cpu_to_le64(0x007FFFFFE0ULL), cpu_to_le64(0xFFFFFF8000ULL), }, { cpu_to_le64(0x01FFFE0000ULL), cpu_to_le64(0x01FFFFFF80ULL), cpu_to_le64(0xFFFFFE0000ULL), }, { cpu_to_le64(0x0FFFF00000ULL), cpu_to_le64(0x0FFFFFFC00ULL), cpu_to_le64(0x0ULL), }, { cpu_to_le64(0x3FFFC00000ULL), cpu_to_le64(0x3FFFFFF000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFFFE000000ULL), cpu_to_le64(0xFFFFFF8000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFFF8000000ULL), cpu_to_le64(0xFFFFFE0000ULL), cpu_to_le64(0x0) }, { cpu_to_le64(0xFE00000000ULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x0ULL) }, }; static enum iwl_bt_coex_lut_type iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; enum iwl_bt_coex_lut_type ret; u16 phy_ctx_id; u32 primary_ch_phy_id, secondary_ch_phy_id; /* * Checking that we hold mvm->mutex is a good idea, but the rate * control can't acquire the mutex since it runs in Tx path. * So this is racy in that case, but in the worst case, the AMPDU * size limit will be wrong for a short time which is not a big * issue. 
*/ rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); + chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); if (!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) { rcu_read_unlock(); return BT_COEX_INVALID_LUT; } ret = BT_COEX_TX_DIS_LUT; if (mvm->cfg->bt_shared_single_ant) { rcu_read_unlock(); return ret; } phy_ctx_id = *((u16 *)chanctx_conf->drv_priv); primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id); secondary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id); if (primary_ch_phy_id == phy_ctx_id) ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut); else if (secondary_ch_phy_id == phy_ctx_id) ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut); /* else - default = TX TX disallowed */ rcu_read_unlock(); return ret; } int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm) { struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { switch (mvm->bt_force_ant_mode) { case BT_FORCE_ANT_BT: mode = BT_COEX_BT; break; case BT_FORCE_ANT_WIFI: mode = BT_COEX_WIFI; break; default: WARN_ON(1); mode = 0; } bt_cmd.mode = cpu_to_le32(mode); goto send_cmd; } bt_cmd.mode = cpu_to_le32(BT_COEX_NW); if (IWL_MVM_BT_COEX_SYNC2SCO) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); if (iwl_mvm_is_mplut_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); send_cmd: memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable) { struct iwl_bt_coex_reduced_txp_update_cmd cmd = {}; struct iwl_mvm_sta *mvmsta; u32 value; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) return 0; /* nothing to do */ if (mvmsta->bt_reduced_txpower == enable) return 0; - value = mvmsta->sta_id; + value = mvmsta->deflink.sta_id; if (enable) value |= BT_REDUCED_TX_POWER_BIT; IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n", enable ? "en" : "dis", sta_id); cmd.reduced_txp = cpu_to_le32(value); mvmsta->bt_reduced_txpower = enable; return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, sizeof(cmd), &cmd); } struct iwl_bt_iterator_data { struct iwl_bt_coex_profile_notif *notif; struct iwl_mvm *mvm; struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; bool primary_ll; u8 primary_load; u8 secondary_load; }; static inline void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool enable, int rssi) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->bf_data.last_bt_coex_event = rssi; mvmvif->bf_data.bt_coex_max_thold = enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0; mvmvif->bf_data.bt_coex_min_thold = enable ? 
-IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } #define MVM_COEX_TCM_PERIOD (HZ * 10) static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, struct iwl_bt_iterator_data *data) { unsigned long now = jiffies; if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) return; mvm->bt_coex_last_tcm_ts = now; /* We assume here that we don't have more than 2 vifs on 2.4GHz */ /* if the primary is low latency, it will stay primary */ if (data->primary_ll) return; if (data->primary_load >= data->secondary_load) return; swap(data->primary, data->secondary); } -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_bt_iterator_data *data, + unsigned int link_id) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; /* default smps_mode is AUTOMATIC - only used for client modes */ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 bt_activity_grading, min_ag_for_static_smps; + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_vif_link_info *link_info; + struct ieee80211_bss_conf *link_conf; int ave_rssi; lockdep_assert_held(&mvm->mutex); - switch (vif->type) { - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: + link_info = mvmvif->link[link_id]; + if (!link_info) return; - } - chanctx_conf = rcu_dereference(vif->chanctx_conf); + link_conf = rcu_dereference(vif->link_conf[link_id]); + /* This can happen due to races: if we receive the notification + * and have the mutex held, while mac80211 is stuck on our mutex + * in the middle of removing the link. + */ + if (!link_conf) + return; + + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); /* If channel context is invalid or not on 2.4GHz .. */ if ((!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) { if (vif->type == NL80211_IFTYPE_STATION) { /* ... relax constraints and disable rssi events */ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + smps_mode, link_id); + iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); + /* FIXME: should this be per link? 
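+ * (bf_data, which iwl_mvm_bt_coex_enable_rssi_event() updates, is still
+ * stored per vif rather than per link, so the call below stays per-vif
+ * for now.)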
*/ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); } return; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2)) min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC; else min_ag_for_static_smps = BT_HIGH_TRAFFIC; bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading); if (bt_activity_grading >= min_ag_for_static_smps) smps_mode = IEEE80211_SMPS_STATIC; else if (bt_activity_grading >= BT_LOW_TRAFFIC) smps_mode = IEEE80211_SMPS_DYNAMIC; /* relax SMPS constraints for next association */ - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (mvmvif->phy_ctxt && - (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) + if (link_info->phy_ctxt && + (mvm->last_bt_notif.rrc_status & BIT(link_info->phy_ctxt->id))) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_activity_grading %d smps_req %d\n", - mvmvif->id, bt_activity_grading, smps_mode); + "mac %d link %d: bt_activity_grading %d smps_req %d\n", + mvmvif->id, link_info->fw_link_id, + bt_activity_grading, smps_mode); if (vif->type == NL80211_IFTYPE_STATION) iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); + smps_mode, link_id); /* low latency is always primary */ if (iwl_mvm_vif_low_latency(mvmvif)) { data->primary_ll = true; data->secondary = data->primary; data->primary = chanctx_conf; } if (vif->type == NL80211_IFTYPE_AP) { if (!mvmvif->ap_ibss_active) return; if (chanctx_conf == data->primary) return; if (!data->primary_ll) { /* * downgrade the current primary no matter what its * type is. */ data->secondary = data->primary; data->primary = chanctx_conf; } else { /* there is low latency vif - we will be secondary */ data->secondary = chanctx_conf; } + /* FIXME: TCM load per interface? or need something per link? */ if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) data->secondary_load = mvm->tcm.result.load[mvmvif->id]; return; } /* * STA / P2P Client, try to be primary if first vif. If we are in low * latency mode, we are already in primary and just don't do much */ if (!data->primary || data->primary == chanctx_conf) data->primary = chanctx_conf; else if (!data->secondary) /* if secondary is not NULL, it might be a GO */ data->secondary = chanctx_conf; + /* FIXME: TCM load per interface? or need something per link? */ if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) data->secondary_load = mvm->tcm.result.load[mvmvif->id]; /* * don't reduce the Tx power if one of these is true: * we are in LOOSE * single share antenna product * BT is inactive * we are not associated */ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || + mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc || le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); + iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); + /* FIXME: should this be per link? 
*/ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); return; } /* try to get the avg rssi from fw */ ave_rssi = mvmvif->bf_data.ave_beacon_signal; /* if the RSSI isn't valid, fake it is very low */ if (!ave_rssi) ave_rssi = -100; if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) + if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, + true)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) + if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, + false)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } /* Begin to monitor the RSSI: it may influence the reduced Tx power */ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); } +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_AP: + if (!mvmvif->ap_ibss_active) + return; + break; + default: + return; + } + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) + iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id); +} + static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { .mvm = mvm, .notif = &mvm->last_bt_notif, }; struct iwl_bt_coex_ci_cmd cmd = {}; u8 ci_bw_idx; /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) return; rcu_read_lock(); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); iwl_mvm_bt_coex_tcm_based_ci(mvm, &data); if (data.primary) { struct ieee80211_chanctx_conf *chan = data.primary; if (WARN_ON(!chan->def.chan)) { rcu_read_unlock(); return; } if (chan->def.width < NL80211_CHAN_WIDTH_40) { ci_bw_idx = 0; } else { if (chan->def.center_freq1 > chan->def.chan->center_freq) ci_bw_idx = 2; else ci_bw_idx = 1; } cmd.bt_primary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; cmd.primary_ch_phy_id = cpu_to_le32(*((u16 *)data.primary->drv_priv)); } if (data.secondary) { struct ieee80211_chanctx_conf *chan = data.secondary; if (WARN_ON(!data.secondary->def.chan)) { rcu_read_unlock(); return; } if (chan->def.width < NL80211_CHAN_WIDTH_40) { ci_bw_idx = 0; } else { if (chan->def.center_freq1 > chan->def.chan->center_freq) ci_bw_idx = 2; else ci_bw_idx = 1; } cmd.bt_secondary_ci = iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx]; cmd.secondary_ch_phy_id = cpu_to_le32(*((u16 *)data.secondary->drv_priv)); } rcu_read_unlock(); /* Don't spam the fw with the same command over and over */ if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) { if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); } } void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", 
le32_to_cpu(notif->primary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", le32_to_cpu(notif->secondary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", le32_to_cpu(notif->bt_activity_grading)); /* remember this notification for future use: rssi fluctuations */ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); } void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) return; /* * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) + if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) return; /* No BT - reports should be disabled */ if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) return; IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid, rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW"); /* * Check if rssi is good enough for reduced Tx power, but not in loose * scheme. */ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + ret = iwl_mvm_bt_coex_reduced_txp(mvm, + mvmvif->deflink.ap_sta_id, false); else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); + ret = iwl_mvm_bt_coex_reduced_txp(mvm, + mvmvif->deflink.ap_sta_id, + true); if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); } #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) #define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200) u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) return LINK_QUAL_AGG_TIME_LIMIT_DEF; lut_type = iwl_get_coex_type(mvm, mvmsta->vif); if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT) return LINK_QUAL_AGG_TIME_LIMIT_DEF; /* tight coex, high bt traffic, reduce AGG time limit */ return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT; } bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return true; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC) return true; /* * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas * since BT is already killed. * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while * we Tx. * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO. 
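 * In short: only BT_COEX_LOOSE_LUT forbids MIMO here; TIGHT/TxTxDis and
 * BT_COEX_INVALID_LUT (5GHz) all allow it.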
*/ lut_type = iwl_get_coex_type(mvm, mvmsta->vif); return lut_type != BT_COEX_LOOSE_LUT; } bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) { /* there is no other antenna, shared antenna is always available */ if (mvm->cfg->bt_shared_single_ant) return true; if (ant & mvm->cfg->non_shared_ant) return true; return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) { /* there is no other antenna, shared antenna is always available */ if (mvm->cfg->bt_shared_single_ant) return true; return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band) { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); if (band != NL80211_BAND_2GHZ) return false; return bt_activity >= BT_LOW_TRAFFIC; } u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2) && (mvm->cfg->non_shared_ant & enabled_ants)) return mvm->cfg->non_shared_ant; return first_antenna(enabled_ants); } u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac) { __le16 fc = hdr->frame_control; bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm); if (info->band != NL80211_BAND_2GHZ) return 0; if (unlikely(mvm->bt_tx_prio)) return mvm->bt_tx_prio - 1; if (likely(ieee80211_is_data(fc))) { if (likely(ieee80211_is_data_qos(fc))) { switch (ac) { case IEEE80211_AC_BE: return mplut_enabled ? 1 : 0; case IEEE80211_AC_VI: return mplut_enabled ? 2 : 3; case IEEE80211_AC_VO: return 3; default: return 0; } } else if (is_multicast_ether_addr(hdr->addr1)) { return 3; } else return 0; } else if (ieee80211_is_mgmt(fc)) { return ieee80211_is_disassoc(fc) ? 
0 : 3; } else if (ieee80211_is_ctl(fc)) { /* ignore cfend and cfendack frames as we never send those */ return 3; } return 0; } void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { iwl_mvm_bt_coex_notif_handle(mvm); } diff --git a/sys/contrib/dev/iwlwifi/mvm/constants.h b/sys/contrib/dev/iwlwifi/mvm/constants.h index c604f9f39b24..243eccc68cb0 100644 --- a/sys/contrib/dev/iwlwifi/mvm/constants.h +++ b/sys/contrib/dev/iwlwifi/mvm/constants.h @@ -1,126 +1,122 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2014, 2018-2023 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H #define __MVM_CONSTANTS_H #include #include "fw-api.h" #define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 8 #define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 #define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50 #define IWL_MVM_PS_SNOOZE_INTERVAL 25 #define IWL_MVM_PS_SNOOZE_WINDOW 50 #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25 #define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64 #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 #define IWL_MVM_BT_COEX_SYNC2SCO 1 #define IWL_MVM_BT_COEX_MPLUT 1 #define IWL_MVM_BT_COEX_RRC 1 #define IWL_MVM_BT_COEX_TTC 1 #define IWL_MVM_BT_COEX_MPLUT_REG0 0x22002200 #define IWL_MVM_BT_COEX_MPLUT_REG1 0x11118451 #define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS 30 #define IWL_MVM_FW_MCAST_FILTER_PASS_ALL 0 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL 0 #define IWL_MVM_QUOTA_THRESHOLD 4 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ #define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */ #define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ #define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ #define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_NON_TRANSMITTING_AP 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 #define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES 3 #define IWL_MVM_RS_INITIAL_SISO_NUM_RATES 3 #define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES 2 #define IWL_MVM_RS_INITIAL_LEGACY_RETRIES 2 #define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES 1 #define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES 16 #define 
IWL_MVM_RS_SECONDARY_SISO_NUM_RATES 3 #define IWL_MVM_RS_SECONDARY_SISO_RETRIES 1 #define IWL_MVM_RS_RATE_MIN_FAILURE_TH 3 #define IWL_MVM_RS_RATE_MIN_SUCCESS_TH 8 #define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT 5 /* Seconds */ #define IWL_MVM_RS_IDLE_TIMEOUT 5 /* Seconds */ #define IWL_MVM_RS_MISSED_RATE_MAX 15 #define IWL_MVM_RS_LEGACY_FAILURE_LIMIT 160 #define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT 480 #define IWL_MVM_RS_LEGACY_TABLE_COUNT 160 #define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT 400 #define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT 4500 #define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT 1500 #define IWL_MVM_RS_SR_FORCE_DECREASE 15 /* percent */ #define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ #define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */ #define IWL_MVM_RS_AGG_DISABLE_START 3 #define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 #define IWL_MVM_ENABLE_EBS 1 #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true #define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false #define IWL_MVM_FTM_R2I_MAX_REP 7 #define IWL_MVM_FTM_I2R_MAX_REP 7 #define IWL_MVM_FTM_R2I_MAX_STS 1 #define IWL_MVM_FTM_I2R_MAX_STS 1 #define IWL_MVM_FTM_R2I_MAX_TOTAL_LTF 3 #define IWL_MVM_FTM_I2R_MAX_TOTAL_LTF 3 #define IWL_MVM_FTM_INITIATOR_SECURE_LTF false #define IWL_MVM_FTM_RESP_NDP_SUPPORT true #define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true #define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5 #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 #define IWL_MVM_USE_NSSN_SYNC 0 -#define IWL_MVM_PHY_FILTER_CHAIN_A 0 -#define IWL_MVM_PHY_FILTER_CHAIN_B 0 -#define IWL_MVM_PHY_FILTER_CHAIN_C 0 -#define IWL_MVM_PHY_FILTER_CHAIN_D 0 #define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false #define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40 /* 20016 pSec is 6 meter RTT, meaning 3 meter range */ #define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2 #define IWL_MVM_DISABLE_AP_FILS false #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ #endif /* __MVM_CONSTANTS_H */ diff --git a/sys/contrib/dev/iwlwifi/mvm/d3.c b/sys/contrib/dev/iwlwifi/mvm/d3.c index bcc4ed20fe5b..f6488b4bbe68 100644 --- a/sys/contrib/dev/iwlwifi/mvm/d3.c +++ b/sys/contrib/dev/iwlwifi/mvm/d3.c @@ -1,2797 +1,3319 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include "iwl-modparams.h" #include "fw-api.h" #include "mvm.h" #include "fw/img.h" void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mutex_lock(&mvm->mutex); mvmvif->rekey_data.kek_len = data->kek_len; mvmvif->rekey_data.kck_len = data->kck_len; memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len); memcpy(mvmvif->rekey_data.kck, data->kck, 
data->kck_len); mvmvif->rekey_data.akm = data->akm & 0xFF; mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; mutex_unlock(&mvm->mutex); } #if IS_ENABLED(CONFIG_IPV6) void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct inet6_ifaddr *ifa; int idx = 0; memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { mvmvif->target_ipv6_addrs[idx] = ifa->addr; if (ifa->flags & IFA_F_TENTATIVE) __set_bit(idx, mvmvif->tentative_addrs); idx++; if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) break; } read_unlock_bh(&idev->lock); mvmvif->num_target_ipv6_addrs = idx; } #endif void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->tx_key_idx = idx; } static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) { int i; for (i = 0; i < IWL_P1K_SIZE; i++) out[i] = cpu_to_le16(p1k[i]); } static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, struct iwl_mvm_key_pn *ptk_pn, struct ieee80211_key_seq *seq, int tid, int queues) { const u8 *ret = seq->ccmp.pn; int i; /* get the PN from mac80211, used on the default queue */ ieee80211_get_key_rx_seq(key, tid, seq); /* and use the internal data for the other queues */ for (i = 1; i < queues; i++) { const u8 *tmp = ptk_pn->q[i].pn[tid]; if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) ret = tmp; } return ret; } struct wowlan_key_reprogram_data { bool error; int wep_key_idx; }; static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct wowlan_key_reprogram_data *data = _data; int ret; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ struct { struct iwl_mvm_wep_key_cmd wep_key_cmd; struct iwl_mvm_wep_key wep_key; } __packed wkc = { .wep_key_cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .wep_key_cmd.num_keys = 1, /* firmware sets STA_KEY_FLG_WEP_13BYTES */ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, .wep_key.key_index = key->keyidx, .wep_key.key_size = key->keylen, }; /* * This will fail -- the key functions don't set support * pairwise WEP keys. However, that's better than silently * failing WoWLAN. Or maybe not? 
*/ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) break; memcpy(&wkc.wep_key.key[3], key->key, key->keylen); if (key->keyidx == mvmvif->tx_key_idx) { /* TX key must be at offset 0 */ wkc.wep_key.key_offset = 0; } else { /* others start at 1 */ data->wep_key_idx++; wkc.wep_key.key_offset = data->wep_key_idx; } mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); data->error = ret != 0; mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; mutex_unlock(&mvm->mutex); /* don't upload key again */ return; } default: data->error = true; return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: return; case WLAN_CIPHER_SUITE_AES_CMAC: /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have * support for the IGTK key renewal, but doesn't really use the * IGTK for anything. This means we could spuriously wake up or * be deauthenticated, but that was considered acceptable. */ return; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; } mutex_lock(&mvm->mutex); /* * The D3 firmware hardcodes the key offset 0 as the key it * uses to transmit packets to the AP, i.e. the PTK. */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); } else { /* * firmware only supports TSC/RSC for a single key, * so if there are multiple keep overwriting them * with new ones -- this relies on mac80211 doing * list_add_tail(). */ mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); } mutex_unlock(&mvm->mutex); data->error = ret != 0; } struct wowlan_key_rsc_tsc_data { struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; bool have_rsc_tsc; }; static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct wowlan_key_rsc_tsc_data *data = _data; struct aes_sc *aes_sc; struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; struct ieee80211_key_seq seq; int i; switch (key->cipher) { default: break; case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; pn64 = atomic64_read(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); } else { tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211 use TID 0 (as they need to to avoid replay attacks) * for checking the IV in the frames. 
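 * Each receive counter fetched below is split into the iv16/iv32 halves
 * the firmware expects, one entry per RSC slot.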
*/ for (i = 0; i < IWL_NUM_RSC; i++) { ieee80211_get_key_rx_seq(key, i, &seq); tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); } data->have_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (sta) { struct aes_sc *aes_tx_sc; u64 pn64; aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; pn64 = atomic64_read(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211/our RX code use TID 0 for checking the PN. */ if (sta && iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; const u8 *pn; mvmsta = iwl_mvm_sta_from_mac80211(sta); rcu_read_lock(); ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); if (WARN_ON(!ptk_pn)) { rcu_read_unlock(); break; } for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } rcu_read_unlock(); } else { for (i = 0; i < IWL_NUM_RSC; i++) { u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } } data->have_rsc_tsc = true; break; } } struct wowlan_key_rsc_v5_data { struct iwl_wowlan_rsc_tsc_params_cmd *rsc; bool have_rsc; int gtks; int gtk_ids[4]; }; static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct wowlan_key_rsc_v5_data *data = _data; struct ieee80211_key_seq seq; __le64 *rsc; int i; /* only for ciphers that can be PTK/GTK */ switch (key->cipher) { default: return; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; } if (sta) { rsc = data->rsc->ucast_rsc; } else { if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids))) return; data->gtk_ids[data->gtks] = key->keyidx; rsc = data->rsc->mcast_rsc[data->gtks % 2]; if (WARN_ON(key->keyidx >= ARRAY_SIZE(data->rsc->mcast_key_id_map))) return; data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2; if (data->gtks >= 2) { int prev = data->gtks - 2; int prev_idx = data->gtk_ids[prev]; data->rsc->mcast_key_id_map[prev_idx] = IWL_MCAST_KEY_MAP_INVALID; } data->gtks++; } switch (key->cipher) { default: WARN_ON(1); break; case WLAN_CIPHER_SUITE_TKIP: /* * For non-QoS this relies on the fact that both the uCode and * mac80211 use TID 0 (as they need to to avoid replay attacks) * for checking the IV in the frames. */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { ieee80211_get_key_rx_seq(key, i, &seq); rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) | seq.tkip.iv16); } data->have_rsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* * For non-QoS this relies on the fact that both the uCode and * mac80211/our RX code use TID 0 for checking the PN. 
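 * mac80211 returns the 6-byte PN with pn[0] as the most significant byte;
 * below it is packed into the little-endian 64-bit value the firmware
 * expects, i.e. pn[5] | pn[4]<<8 | ... | pn[0]<<40.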
*/ if (sta) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; const u8 *pn; mvmsta = iwl_mvm_sta_from_mac80211(sta); rcu_read_lock(); ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); if (WARN_ON(!ptk_pn)) { rcu_read_unlock(); break; } for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues); rsc[i] = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } rcu_read_unlock(); } else { for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); rsc[i] = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } } data->have_rsc = true; break; } } static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM, IWL_FW_CMD_VER_UNKNOWN); int ret; if (ver == 5) { struct wowlan_key_rsc_v5_data data = {}; int i; data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); if (!data.rsc) return -ENOMEM; memset(data.rsc, 0xff, sizeof(*data.rsc)); for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) data.rsc->mcast_key_id_map[i] = IWL_MCAST_KEY_MAP_INVALID; - data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_rsc_v5_data, &data); if (data.have_rsc) ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, CMD_ASYNC, sizeof(*data.rsc), data.rsc); else ret = 0; kfree(data.rsc); } else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { struct wowlan_key_rsc_tsc_data data = {}; int size; data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL); if (!data.rsc_tsc) return -ENOMEM; if (ver == 4) { size = sizeof(*data.rsc_tsc); - data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + data.rsc_tsc->sta_id = + cpu_to_le32(mvmvif->deflink.ap_sta_id); } else { /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ size = sizeof(data.rsc_tsc->params); } ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_rsc_tsc_data, &data); if (data.have_rsc_tsc) ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, CMD_ASYNC, size, data.rsc_tsc); else ret = 0; kfree(data.rsc_tsc); } else { ret = 0; WARN_ON_ONCE(1); } return ret; } struct wowlan_key_tkip_data { struct iwl_wowlan_tkip_params_cmd tkip; bool have_tkip_keys; }; static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct wowlan_key_tkip_data *data = _data; struct iwl_p1k_cache *rx_p1ks; u8 *rx_mic_key; struct ieee80211_key_seq seq; u32 cur_rx_iv32 = 0; u16 p1k[IWL_P1K_SIZE]; int i; switch (key->cipher) { default: break; case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; rx_p1ks = data->tkip.rx_uni; pn64 = atomic64_read(&key->tx_pn); ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), p1k); iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k); memcpy(data->tkip.mic_keys.tx, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); rx_mic_key = data->tkip.mic_keys.rx_unicast; } else { rx_p1ks = data->tkip.rx_multi; rx_mic_key = data->tkip.mic_keys.rx_mcast; } for (i = 0; i < IWL_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); /* wrapping isn't allowed, AP must rekey */ if (seq.tkip.iv32 > cur_rx_iv32) cur_rx_iv32 = 
seq.tkip.iv32; } ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32, p1k); iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k); ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid, cur_rx_iv32 + 1, p1k); iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); memcpy(rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); data->have_tkip_keys = true; break; } } struct wowlan_key_gtk_type_iter { struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; }; static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct wowlan_key_gtk_type_iter *data = _data; switch (key->cipher) { default: return; + case WLAN_CIPHER_SUITE_TKIP: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_TKIP); + return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); return; case WLAN_CIPHER_SUITE_CCMP: if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); - break; + return; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); - break; + return; } } static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan) { struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; if (!wowlan->n_patterns) return 0; cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); memcpy(&pattern_cmd->patterns[i].mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].mask_size = mask_len; pattern_cmd->patterns[i].pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_wowlan *wowlan) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_wowlan_patterns_cmd *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (!wowlan->n_patterns) return 0; cmd.len[0] = sizeof(*pattern_cmd) + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = wowlan->n_patterns; if (ver >= 3) - pattern_cmd->sta_id = mvmvif->ap_sta_id; + pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id; for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); pattern_cmd->patterns[i].pattern_type = WOWLAN_PATTERN_TYPE_BITMASK; memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].u.bitmask.mask_size = 
mask_len; pattern_cmd->patterns[i].u.bitmask.pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *ap_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *ctx; u8 chains_static, chains_dynamic; struct cfg80211_chan_def chandef; int ret, i; struct iwl_binding_cmd_v1 binding_cmd = {}; struct iwl_time_quota_cmd quota_cmd = {}; struct iwl_time_quota_data *quota; u32 status; if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm))) return -EINVAL; /* add back the PHY */ - if (WARN_ON(!mvmvif->phy_ctxt)) + if (WARN_ON(!mvmvif->deflink.phy_ctxt)) return -EINVAL; rcu_read_lock(); - ctx = rcu_dereference(vif->chanctx_conf); + ctx = rcu_dereference(vif->bss_conf.chanctx_conf); if (WARN_ON(!ctx)) { rcu_read_unlock(); return -EINVAL; } chandef = ctx->def; chains_static = ctx->rx_chains_static; chains_dynamic = ctx->rx_chains_dynamic; rcu_read_unlock(); - ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, + ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef, chains_static, chains_dynamic); if (ret) return ret; /* add back the MAC */ mvmvif->uploaded = false; - if (WARN_ON(!vif->bss_conf.assoc)) + if (WARN_ON(!vif->cfg.assoc)) return -EINVAL; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) return ret; /* add back binding - XXX refactor? */ binding_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); binding_cmd.phy = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); for (i = 1; i < MAX_MACS_IN_BINDING; i++) binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD, IWL_BINDING_CMD_SIZE_V1, &binding_cmd, &status); if (ret) { IWL_ERR(mvm, "Failed to add binding: %d\n", ret); return ret; } if (status) { IWL_ERR(mvm, "Binding command failed: %u\n", status); return -EIO; } ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); if (ret) return ret; - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id], + ap_sta); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) return ret; /* and some quota */ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0); quota->id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); for (i = 1; i < MAX_BINDINGS; i++) { quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i); quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID); } ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &quota_cmd); if (ret) IWL_ERR(mvm, "Failed to send quota: %d\n", ret); if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); return 0; } static int
iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_nonqos_seq_query_cmd query_cmd = { .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET), .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), }; struct iwl_host_cmd cmd = { .id = NON_QOS_TX_COUNTER_CMD, .flags = CMD_WANT_SKB, }; int err; u32 size; cmd.data[0] = &query_cmd; cmd.len[0] = sizeof(query_cmd); err = iwl_mvm_send_cmd(mvm, &cmd); if (err) return err; size = iwl_rx_packet_payload_len(cmd.resp_pkt); if (size < sizeof(__le16)) { err = -EINVAL; } else { err = le16_to_cpup((__le16 *)cmd.resp_pkt->data); /* firmware returns next, not last-used seqno */ err = (u16) (err - 0x10); } iwl_free_resp(&cmd); return err; } void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_nonqos_seq_query_cmd query_cmd = { .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET), .mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .value = cpu_to_le16(mvmvif->seqno), }; /* return if called during restart, not resume from D3 */ if (!mvmvif->seqno_valid) return; mvmvif->seqno_valid = false; if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0, sizeof(query_cmd), &query_cmd)) IWL_ERR(mvm, "failed to set non-QoS seqno\n"); } static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); iwl_mvm_stop_device(mvm); /* * Set the HW restart bit -- this is mostly true as we're * going to load new firmware and reprogram that, though * the reprogramming is going to be manual to avoid adding * all the MACs that aren't support. * We don't have to clear up everything though because the * reprogramming is manual. When we resume, we'll actually * go through a proper restart sequence again to switch * back to the runtime firmware image. 
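 * (The IWL_MVM_STATUS_IN_HW_RESTART bit set below is cleared again in
 * __iwl_mvm_suspend() once the D3 configuration command has been sent.)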
*/ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); /* the fw is reset, so all the keys are cleared */ memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); mvm->ptk_ivlen = 0; mvm->ptk_icvlen = 0; mvm->ptk_ivlen = 0; mvm->ptk_icvlen = 0; return iwl_mvm_load_d3_fw(mvm); } static int iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct iwl_wowlan_config_cmd *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta); /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ wowlan_config_cmd->is_11n_connection = ap_sta->deflink.ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) { /* Query the last used seqno and set it */ int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); if (ret < 0) return ret; wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); } iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); if (wowlan->disconnect) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE); if (wowlan->magic_pkt) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); if (wowlan->gtk_rekey_failure) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); if (wowlan->eap_identity_req) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); if (wowlan->four_way_handshake) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); if (wowlan->n_patterns) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); if (wowlan->rfkill_release) wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); if (wowlan->tcp) { /* * Set the "link change" (really "link lost") flag as well * since that implies losing the TCP connection. */ wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS | IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE | IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET | IWL_WOWLAN_WAKEUP_LINK_CHANGE); } if (wowlan->any) { wowlan_config_cmd->wakeup_filter |= cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | IWL_WOWLAN_WAKEUP_LINK_CHANGE | IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BCN_FILTERING); } return 0; } static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); struct wowlan_key_reprogram_data key_data = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; u8 cmd_ver; size_t cmd_size; if (!unified) { /* * if we have to configure keys, call ieee80211_iter_keys(), * as we need non-atomic context in order to take the * required locks. */ /* * Note that currently we don't use CMD_ASYNC in the iterator. * In case of key_data.configure_keys, all the configured * commands are SYNC, and iwl_mvm_wowlan_program_keys() will * take care of locking/unlocking mvm->mutex. 
*/ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys, &key_data); if (key_data.error) return -EIO; } ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif); if (ret) return ret; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) { int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM, IWL_FW_CMD_VER_UNKNOWN); struct wowlan_key_tkip_data tkip_data = {}; int size; if (ver == 2) { size = sizeof(tkip_data.tkip); tkip_data.tkip.sta_id = - cpu_to_le32(mvmvif->ap_sta_id); + cpu_to_le32(mvmvif->deflink.ap_sta_id); } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); } else { WARN_ON_ONCE(1); return -EINVAL; } ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data, &tkip_data); if (tkip_data.have_tkip_keys) { /* send relevant data according to CMD version */ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM, CMD_ASYNC, size, &tkip_data.tkip); if (ret) return ret; } } /* configure rekey data only if offloaded rekey is supported (d3) */ if (mvmvif->rekey_data.valid) { struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd; struct wowlan_key_gtk_type_iter gtk_type_data = { .kek_kck_cmd = _kek_kck_cmd, }; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, IWL_FW_CMD_VER_UNKNOWN); if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 && cmd_ver != IWL_FW_CMD_VER_UNKNOWN)) return -EINVAL; ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter, &gtk_type_data); memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck, mvmvif->rekey_data.kck_len); kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len); memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek, mvmvif->rekey_data.kek_len); kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); - kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id); + kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); if (cmd_ver == 4) { cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4); } else { if (cmd_ver == 3) cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3); else cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2); /* skip the sta_id at the beginning */ _kek_kck_cmd = (void *) ((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id)); } IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n", mvmvif->rekey_data.akm); ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL, CMD_ASYNC, cmd_size, _kek_kck_cmd); if (ret) return ret; } return 0; } static int iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct iwl_wowlan_config_cmd *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->offload_tid = wowlan_config_cmd->offloading_tid; if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); if (ret) return ret; } /* * This needs to be unlocked due to lock ordering * constraints. Since we're in the suspend path * that isn't really a problem though.
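 * (iwl_mvm_wowlan_config_key_params() iterates the keys through mac80211,
 * and iwl_mvm_wowlan_program_keys() re-takes mvm->mutex internally, hence
 * the unlock/lock pair around the call below.)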
*/ mutex_unlock(&mvm->mutex); ret = iwl_mvm_wowlan_config_key_params(mvm, vif); mutex_lock(&mvm->mutex); if (ret) return ret; ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd), wowlan_config_cmd); if (ret) return ret; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) ret = iwl_mvm_send_patterns(mvm, vif, wowlan); else ret = iwl_mvm_send_patterns_v1(mvm, wowlan); if (ret) return ret; return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); } static int iwl_mvm_netdetect_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct cfg80211_sched_scan_request *nd_config, struct ieee80211_vif *vif) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; } else { /* In theory, we wouldn't have to stop a running sched * scan in order to start another one (for * net-detect). But in practice this doesn't seem to * work properly, so stop any running sched_scan now. */ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); if (ret) return ret; } ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT); if (ret) return ret; if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) return -EBUSY; /* save the sched scan matchsets... */ if (nd_config->n_match_sets) { mvm->nd_match_sets = kmemdup(nd_config->match_sets, sizeof(*nd_config->match_sets) * nd_config->n_match_sets, GFP_KERNEL); if (mvm->nd_match_sets) mvm->n_nd_match_sets = nd_config->n_match_sets; } /* ...and the sched scan channels for later reporting */ mvm->nd_channels = kmemdup(nd_config->channels, sizeof(*nd_config->channels) * nd_config->n_channels, GFP_KERNEL); if (mvm->nd_channels) mvm->n_nd_channels = nd_config->n_channels; return 0; } static void iwl_mvm_free_nd(struct iwl_mvm *mvm) { kfree(mvm->nd_match_sets); mvm->nd_match_sets = NULL; mvm->n_nd_match_sets = 0; kfree(mvm->nd_channels); mvm->nd_channels = NULL; mvm->n_nd_channels = 0; } static int __iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan, bool test) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *vif = NULL; struct iwl_mvm_vif *mvmvif = NULL; struct ieee80211_sta *ap_sta = NULL; struct iwl_d3_manager_config d3_cfg_cmd_data = { /* * Program the minimum sleep time to 10 seconds, as many * platforms have issues processing a wakeup signal while * still being in the process of suspending. 
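 * (The value below is those 10 seconds expressed in microseconds:
 * 10 * 1000 * 1000.)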
*/ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), }; struct iwl_host_cmd d3_cfg_cmd = { .id = D3_CONFIG_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, .data[0] = &d3_cfg_cmd_data, .len[0] = sizeof(d3_cfg_cmd_data), }; int ret; int len __maybe_unused; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* * mac80211 shouldn't get here, but for D3 test * it doesn't warrant a warning */ WARN_ON(!test); return -EINVAL; } mutex_lock(&mvm->mutex); set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); synchronize_net(); vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) { ret = 1; goto out_noreset; } mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { + if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; goto out_noreset; } ret = iwl_mvm_netdetect_config( mvm, wowlan, wowlan->nd_config, vif); if (ret) goto out; mvm->net_detect = true; } else { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; - wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; + wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id; ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) { ret = -EINVAL; goto out_noreset; } ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out_noreset; ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out; mvm->net_detect = false; } ret = iwl_mvm_power_update_device(mvm); if (ret) goto out; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->d3_wake_sysassert) d3_cfg_cmd_data.wakeup_flags |= cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); #endif /* * Prior to 9000 device family the driver needs to stop the dbg * recording before entering D3. In later devices the FW stops the * recording automatically. 
*/ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) goto out; #ifdef CONFIG_IWLWIFI_DEBUGFS len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); if (len >= sizeof(u32)) { mvm->d3_test_pme_ptr = le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); } #endif iwl_free_resp(&d3_cfg_cmd); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_free_nd(mvm); if (!unified_image) { if (mvm->fw_restart > 0) { mvm->fw_restart--; ieee80211_restart_hw(mvm->hw); } } clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); } out_noreset: mutex_unlock(&mvm->mutex); return ret; } int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); return __iwl_mvm_suspend(hw, wowlan, false); } +struct iwl_multicast_key_data { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + u8 id; + u8 ipn[6]; +}; + /* converted data from the different status responses */ struct iwl_wowlan_status_data { u64 replay_ctr; u32 num_of_gtk_rekeys; u32 received_beacons; u32 wakeup_reasons; u32 wake_packet_length; u32 wake_packet_bufsize; u16 pattern_number; u16 non_qos_seq_ctr; u16 qos_seq_ctr[8]; u8 tid_tear_down; struct { /* including RX MIC key for TKIP */ u8 key[WOWLAN_KEY_MAX_SIZE]; u8 len; u8 flags; - } gtk; + u8 id; + } gtk[WOWLAN_GTK_KEYS_NUM]; struct { /* * We store both the TKIP and AES representations * coming from the firmware because we decode the * data from there before we iterate the keys and * know which one we need. */ struct { struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; } tkip, aes; /* * We use -1 for when we have valid data but don't know * the key ID from firmware, and thus it needs to be * installed with the last key (depending on rekeying). 
*/ s8 key_id; bool valid; } gtk_seq[2]; struct { /* Same as above */ struct { struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; u64 tx_pn; } tkip, aes; } ptk; - struct { - u64 ipn; - u8 key[WOWLAN_KEY_MAX_SIZE]; - u8 len; - u8 flags; - } igtk; + struct iwl_multicast_key_data igtk; - u8 wake_packet[]; + u8 *wake_packet; }; static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_wowlan_status_data *status) { struct sk_buff *pkt = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; u32 reasons = status->wakeup_reasons; if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { wakeup_report = NULL; goto report; } pm_wakeup_event(mvm->dev, 0); if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) wakeup.magic_pkt = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) wakeup.pattern_idx = status->pattern_number; if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) wakeup.disconnect = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) wakeup.gtk_rekey_failure = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) wakeup.eap_identity_req = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) wakeup.four_way_handshake = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) wakeup.tcp_connlost = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) wakeup.tcp_nomoretokens = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) wakeup.tcp_match = true; - if (status->wake_packet_bufsize) { + if (status->wake_packet) { int pktsize = status->wake_packet_bufsize; int pktlen = status->wake_packet_length; const u8 *pktdata = status->wake_packet; const struct ieee80211_hdr *hdr = (const void *)pktdata; int truncated = pktlen - pktsize; /* this would be a firmware bug */ if (WARN_ON_ONCE(truncated < 0)) truncated = 0; if (ieee80211_is_data(hdr->frame_control)) { int hdrlen = ieee80211_hdrlen(hdr->frame_control); int ivlen = 0, icvlen = 4; /* also FCS */ pkt = alloc_skb(pktsize, GFP_KERNEL); if (!pkt) goto report; skb_put_data(pkt, pktdata, hdrlen); pktdata += hdrlen; pktsize -= hdrlen; if (ieee80211_has_protected(hdr->frame_control)) { /* * This is unlocked and using gtk_i(c)vlen, * but since everything is under RTNL still * that's not really a problem - changing * it would be difficult. 
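 * (Multicast frames use the stored GTK IV/ICV lengths and unicast frames
 * the PTK lengths when the protection overhead is stripped below.)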
*/ if (is_multicast_ether_addr(hdr->addr1)) { ivlen = mvm->gtk_ivlen; icvlen += mvm->gtk_icvlen; } else { ivlen = mvm->ptk_ivlen; icvlen += mvm->ptk_icvlen; } } /* if truncated, FCS/ICV is (partially) gone */ if (truncated >= icvlen) { icvlen = 0; truncated -= icvlen; } else { icvlen -= truncated; truncated = 0; } pktsize -= ivlen + icvlen; pktdata += ivlen; skb_put_data(pkt, pktdata, pktsize); if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) goto report; wakeup.packet = pkt->data; wakeup.packet_present_len = pkt->len; wakeup.packet_len = pkt->len - truncated; wakeup.packet_80211 = false; } else { int fcslen = 4; if (truncated >= 4) { truncated -= 4; fcslen = 0; } else { fcslen -= truncated; truncated = 0; } pktsize -= fcslen; wakeup.packet = status->wake_packet; wakeup.packet_present_len = pktsize; wakeup.packet_len = pktlen - truncated; wakeup.packet_80211 = true; } } report: ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); kfree_skb(pkt); } static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq) { u64 pn = le64_to_cpu(le_pn); seq->ccmp.pn[0] = pn >> 40; seq->ccmp.pn[1] = pn >> 32; seq->ccmp.pn[2] = pn >> 24; seq->ccmp.pn[3] = pn >> 16; seq->ccmp.pn[4] = pn >> 8; seq->ccmp.pn[5] = pn; } static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc, struct ieee80211_key_seq *seq) { iwl_mvm_le64_to_aes_seq(sc->pn, seq); } static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq) { u64 pn = le64_to_cpu(le_pn); seq->tkip.iv16 = (u16)pn; seq->tkip.iv32 = (u32)(pn >> 16); } static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc, struct ieee80211_key_seq *seq) { seq->tkip.iv32 = le32_to_cpu(sc->iv32); seq->tkip.iv16 = le16_to_cpu(sc->iv16); } static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) { int tid; for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_set_key_rx_seq(key, tid, &seq[tid]); } static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm, struct iwl_wowlan_status_data *status, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_key_pn *ptk_pn; int tid; iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq); if (!iwl_mvm_has_new_rx_api(mvm)) return; rcu_read_lock(); ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); if (WARN_ON(!ptk_pn)) { rcu_read_unlock(); return; } for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { int i; for (i = 1; i < mvm->trans->num_rx_queues; i++) memcpy(ptk_pn->q[i].pn[tid], status->ptk.aes.seq[tid].ccmp.pn, IEEE80211_CCMP_PN_LEN); } rcu_read_unlock(); } static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status, union iwl_all_tsc_rsc *sc) { int i; BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT); BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC); /* GTK RX counters */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i], &status->gtk_seq[0].tkip.seq[i]); iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i], &status->gtk_seq[0].aes.seq[i]); } status->gtk_seq[0].valid = true; status->gtk_seq[0].key_id = -1; /* PTK TX counter */ status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) | ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16); status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn); /* PTK RX counters */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i], &status->ptk.tkip.seq[i]); iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i], &status->ptk.aes.seq[i]); } } static void 
iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status, struct iwl_wowlan_all_rsc_tsc_v5 *sc, unsigned int idx, unsigned int key_id) { int tid; for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid], &status->gtk_seq[idx].tkip.seq[tid]); iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid], &status->gtk_seq[idx].aes.seq[tid]); } status->gtk_seq[idx].valid = true; status->gtk_seq[idx].key_id = key_id; } static void iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status, struct iwl_wowlan_all_rsc_tsc_v5 *sc) { int i, tid; BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT); BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC); BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq)); /* GTK RX counters */ for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) { u8 entry = sc->mcast_key_id_map[i]; if (entry < ARRAY_SIZE(sc->mcast_rsc)) iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc, entry, i); } /* PTK TX counters not needed, assigned in device */ /* PTK RX counters */ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid], &status->ptk.tkip.seq[tid]); iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid], &status->ptk.aes.seq[tid]); } } static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key, struct iwl_wowlan_status_data *status, int idx) { switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq); break; default: WARN_ON(1); } } static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, struct iwl_wowlan_status_data *status, bool installed) { int i; for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) { if (!status->gtk_seq[i].valid) continue; /* Handle the case where we know the key ID */ if (status->gtk_seq[i].key_id == key->keyidx) { s8 new_key_id = -1; if (status->num_of_gtk_rekeys) - new_key_id = status->gtk.flags & + new_key_id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK; /* Don't install a new key's value to an old key */ if (new_key_id != key->keyidx) iwl_mvm_set_key_rx_seq_idx(key, status, i); continue; } /* handle the case where we didn't, last key only */ if (status->gtk_seq[i].key_id == -1 && (!status->num_of_gtk_rekeys || installed)) iwl_mvm_set_key_rx_seq_idx(key, status, i); } } struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status_data *status; - void *last_gtk; - u32 cipher; - bool find_phase, unhandled_cipher; + u32 gtk_cipher, igtk_cipher; + bool unhandled_cipher, igtk_support; int num_keys; }; -static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) +static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; - struct iwl_wowlan_status_data *status = data->status; if (data->unhandled_cipher) return; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: /* ignore WEP completely, nothing to do */ return; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_TKIP: /* we support these */ + data->gtk_cipher = key->cipher; + break; + case 
WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + /* we support these */ + if (data->igtk_support && + (key->keyidx == 4 || key->keyidx == 5)) { + data->igtk_cipher = key->cipher; + } else { + data->unhandled_cipher = true; + return; + } break; default: - /* everything else (even CMAC for MFP) - disconnect from AP */ + /* everything else - disconnect from AP */ data->unhandled_cipher = true; return; } data->num_keys++; +} - /* - * pairwise key - update sequence counters only; - * note that this assumes no TDLS sessions are active - */ - if (sta) { - if (data->find_phase) - return; +static void +iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, + struct ieee80211_key_seq *seq, u32 cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + BUILD_BUG_ON(sizeof(seq->aes_gmac.pn) != sizeof(key->ipn)); + memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn)); + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn)); + memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn)); + break; + } +} + +static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm_d3_gtk_iter_data *data = _data; + struct iwl_wowlan_status_data *status = data->status; + s8 keyidx; - switch (key->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: + if (data->unhandled_cipher) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* ignore WEP completely, nothing to do */ + return; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (sta) { atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn); iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key); - break; - case WLAN_CIPHER_SUITE_TKIP: + return; + } + fallthrough; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn); iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq); - break; + return; } + keyidx = key->keyidx; + /* The current key is always sent by the FW, even if it wasn't + * rekeyed during D3. 
+ * We remove an existing key if it has the same index as + * a new key + */ + if (status->num_of_gtk_rekeys && + ((status->gtk[0].len && keyidx == status->gtk[0].id) || + (status->gtk[1].len && keyidx == status->gtk[1].id))) { + ieee80211_remove_key(key); + } else { + iwl_mvm_set_key_rx_seq(key, data->status, false); + } + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (key->keyidx == 4 || key->keyidx == 5) { + /* remove rekeyed key */ + if (status->num_of_gtk_rekeys) { + ieee80211_remove_key(key); + } else { + struct ieee80211_key_seq seq; - /* that's it for this key */ - return; + iwl_mvm_d3_set_igtk_bigtk_ipn(&status->igtk, + &seq, + key->cipher); + ieee80211_set_key_rx_seq(key, 0, &seq); + } + } } +} - if (data->find_phase) { - data->last_gtk = key; - data->cipher = key->cipher; - return; +static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, + struct iwl_mvm *mvm, u32 gtk_cipher) +{ + int i; + struct ieee80211_key_conf *key; + struct { + struct ieee80211_key_conf conf; + u8 key[32]; + } conf = { + .conf.cipher = gtk_cipher, + }; + + BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); + BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk[0].key)); + + switch (gtk_cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + break; + case WLAN_CIPHER_SUITE_TKIP: + conf.conf.keylen = WLAN_KEY_LEN_TKIP; + break; + default: + WARN_ON(1); + } + + for (i = 0; i < ARRAY_SIZE(status->gtk); i++) { + if (!status->gtk[i].len) + continue; + + conf.conf.keyidx = status->gtk[i].id; + IWL_DEBUG_WOWLAN(mvm, + "Received from FW GTK cipher %d, key index %d\n", + conf.conf.cipher, conf.conf.keyidx); + memcpy(conf.conf.key, status->gtk[i].key, + sizeof(status->gtk[i].key)); + + key = ieee80211_gtk_rekey_add(vif, &conf.conf); + if (IS_ERR(key)) + return false; + iwl_mvm_set_key_rx_seq_idx(key, status, i); + } + + return true; +} + +static bool +iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, u32 cipher, + struct iwl_multicast_key_data *key_data) +{ + struct ieee80211_key_conf *key_config; + struct { + struct ieee80211_key_conf conf; + u8 key[WOWLAN_KEY_MAX_SIZE]; + } conf = { + .conf.cipher = cipher, + .conf.keyidx = key_data->id, + }; + struct ieee80211_key_seq seq; + + if (!key_data->len) + return true; + + iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf.conf.cipher); + + switch (cipher) { + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC; + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256; + break; + default: + WARN_ON(1); } + BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key)); + memcpy(conf.conf.key, key_data->key, conf.conf.keylen); + + key_config = ieee80211_gtk_rekey_add(vif, &conf.conf); + if (IS_ERR(key_config)) + return false; + ieee80211_set_key_rx_seq(key_config, 0, &seq); + return true; +} + +static int 
iwl_mvm_lookup_wowlan_status_ver(struct iwl_mvm *mvm) +{ + u8 notif_ver; - if (data->status->num_of_gtk_rekeys) - ieee80211_remove_key(key); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) + return 6; - if (data->last_gtk == key) - iwl_mvm_set_key_rx_seq(key, data->status, false); + /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, + WOWLAN_GET_STATUSES, 0); + if (!notif_ver) + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + WOWLAN_GET_STATUSES, 7); + + return notif_ver; } static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_wowlan_status_data *status) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_d3_gtk_iter_data gtkdata = { .mvm = mvm, .status = status, }; u32 disconnection_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; if (!status || !vif->bss_conf.bssid) return false; if (status->wakeup_reasons & disconnection_reasons) return false; + if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 || + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + 0)) + gtkdata.igtk_support = true; + /* find last GTK that we used initially, if any */ - gtkdata.find_phase = true; ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_keys, &gtkdata); + iwl_mvm_d3_find_last_keys, &gtkdata); /* not trying to keep connections with MFP/unhandled ciphers */ if (gtkdata.unhandled_cipher) return false; if (!gtkdata.num_keys) goto out; - if (!gtkdata.last_gtk) - return false; /* * invalidate all other GTKs that might still exist and update * the one that we used */ - gtkdata.find_phase = false; ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, &gtkdata); - IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", - status->num_of_gtk_rekeys); if (status->num_of_gtk_rekeys) { - struct ieee80211_key_conf *key; - struct { - struct ieee80211_key_conf conf; - u8 key[32]; - } conf = { - .conf.cipher = gtkdata.cipher, - .conf.keyidx = - status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK, - }; - __be64 replay_ctr; - - IWL_DEBUG_WOWLAN(mvm, - "Received from FW GTK cipher %d, key index %d\n", - conf.conf.cipher, conf.conf.keyidx); + __be64 replay_ctr = cpu_to_be64(status->replay_ctr); - BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); - BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key)); + IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", + status->num_of_gtk_rekeys); - memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key)); - - switch (gtkdata.cipher) { - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_GCMP: - conf.conf.keylen = WLAN_KEY_LEN_CCMP; - break; - case WLAN_CIPHER_SUITE_GCMP_256: - conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; - break; - case WLAN_CIPHER_SUITE_TKIP: - conf.conf.keylen = WLAN_KEY_LEN_TKIP; - break; - } - - key = ieee80211_gtk_rekey_add(vif, &conf.conf); - if (IS_ERR(key)) + if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher)) return false; - iwl_mvm_set_key_rx_seq(key, status, true); - replay_ctr = cpu_to_be64(status->replay_ctr); + if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif, + gtkdata.igtk_cipher, + &status->igtk)) + return false; ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); } out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, WOWLAN_GET_STATUSES, 0) < 10) { mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = status->non_qos_seq_ctr + 0x10; } return true; } +static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v2 *data) +{ + BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data->key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(data->tkip_mic_key) > + sizeof(status->gtk[0].key)); + + status->gtk[0].len = data->key_len; + status->gtk[0].flags = data->key_flags; + + memcpy(status->gtk[0].key, data->key, sizeof(data->key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk[0].len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data->tkip_mic_key, sizeof(data->tkip_mic_key)); +} + +static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v3 *data) +{ + int data_idx, status_idx = 0; + + BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data[0].key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(data[0].tkip_mic_key) > + sizeof(status->gtk[0].key)); + BUILD_BUG_ON(ARRAY_SIZE(status->gtk) < WOWLAN_GTK_KEYS_NUM); + for (data_idx = 0; data_idx < ARRAY_SIZE(status->gtk); data_idx++) { + if (!(data[data_idx].key_len)) + continue; + status->gtk[status_idx].len = data[data_idx].key_len; + status->gtk[status_idx].flags = data[data_idx].key_flags; + status->gtk[status_idx].id = status->gtk[status_idx].flags & + IWL_WOWLAN_GTK_IDX_MASK; + + memcpy(status->gtk[status_idx].key, data[data_idx].key, + sizeof(data[data_idx].key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk[status_idx].len == + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk[status_idx].key + + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data[data_idx].tkip_mic_key, + sizeof(data[data_idx].tkip_mic_key)); + status_idx++; + } +} + +static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_igtk_status *data) +{ + BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); + + if (!data->key_len) + return; + + status->igtk.len = data->key_len; + status->igtk.flags = data->key_flags; + status->igtk.id = u32_get_bits(data->key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_IGTK_MIN_INDEX; + + memcpy(status->igtk.key, data->key, sizeof(data->key)); + memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn)); +} + +static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif *data, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 i; + + if (!data) { + IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + status = NULL; + return; + } + + if (len < sizeof(*data)) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); + status = NULL; + return; + } + + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + iwl_mvm_convert_igtk(status, &data->igtk[0]); + + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) + status->qos_seq_ctr[i] = + le16_to_cpu(data->qos_seq_ctr[i]); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = 
le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; +} + /* Occasionally, templates would be nice. This is one of those times ... */ #define iwl_mvm_parse_wowlan_status_common(_ver) \ static struct iwl_wowlan_status_data * \ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ struct iwl_wowlan_status_ ##_ver *data,\ int len) \ { \ struct iwl_wowlan_status_data *status; \ int data_size, i; \ \ if (len < sizeof(*data)) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ if (len != sizeof(*data) + data_size) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ - status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ + status = kzalloc(sizeof(*status), GFP_KERNEL); \ if (!status) \ - return ERR_PTR(-ENOMEM); \ + return NULL; \ \ /* copy all the common fields */ \ status->replay_ctr = le64_to_cpu(data->replay_ctr); \ status->pattern_number = le16_to_cpu(data->pattern_number); \ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \ for (i = 0; i < 8; i++) \ status->qos_seq_ctr[i] = \ le16_to_cpu(data->qos_seq_ctr[i]); \ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \ status->num_of_gtk_rekeys = \ le32_to_cpu(data->num_of_gtk_rekeys); \ status->received_beacons = le32_to_cpu(data->received_beacons); \ status->wake_packet_length = \ le32_to_cpu(data->wake_packet_length); \ status->wake_packet_bufsize = \ le32_to_cpu(data->wake_packet_bufsize); \ - memcpy(status->wake_packet, data->wake_packet, \ - status->wake_packet_bufsize); \ + if (status->wake_packet_bufsize) { \ + status->wake_packet = \ + kmemdup(data->wake_packet, \ + status->wake_packet_bufsize, \ + GFP_KERNEL); \ + if (!status->wake_packet) { \ + kfree(status); \ + return NULL; \ + } \ + } else { \ + status->wake_packet = NULL; \ + } \ \ return status; \ } iwl_mvm_parse_wowlan_status_common(v6) iwl_mvm_parse_wowlan_status_common(v7) iwl_mvm_parse_wowlan_status_common(v9) iwl_mvm_parse_wowlan_status_common(v12) -static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_gtk_status_v2 *data) -{ - BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); - BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + - sizeof(data->tkip_mic_key) > - sizeof(status->gtk.key)); - - status->gtk.len = data->key_len; - status->gtk.flags = data->key_flags; - - memcpy(status->gtk.key, data->key, sizeof(data->key)); - - /* if it's as long as the TKIP encryption key, copy MIC key */ - if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) - memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, - data->tkip_mic_key, sizeof(data->tkip_mic_key)); -} - -static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_gtk_status_v3 *data) -{ - /* The parts we need are identical in v2 and v3 */ -#define CHECK(_f) do { \ - BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \ - offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \ - BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\ - offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \ -} while (0) - - CHECK(key); - CHECK(key_len); - CHECK(key_flags); - CHECK(tkip_mic_key); -#undef CHECK - - iwl_mvm_convert_gtk_v2(status, (void *)data); -} - -static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_igtk_status *data) -{ - const u8 *ipn = 
data->ipn; - - BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); - - status->igtk.len = data->key_len; - status->igtk.flags = data->key_flags; - - memcpy(status->igtk.key, data->key, sizeof(data->key)); - - status->igtk.ipn = ((u64)ipn[5] << 0) | - ((u64)ipn[4] << 8) | - ((u64)ipn[3] << 16) | - ((u64)ipn[2] << 24) | - ((u64)ipn[1] << 32) | - ((u64)ipn[0] << 40); -} - static struct iwl_wowlan_status_data * iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) { struct iwl_wowlan_status_data *status; struct iwl_wowlan_get_status_cmd get_status_cmd = { .sta_id = cpu_to_le32(sta_id), }; struct iwl_host_cmd cmd = { .id = WOWLAN_GET_STATUSES, .flags = CMD_WANT_SKB, .data = { &get_status_cmd, }, .len = { sizeof(get_status_cmd), }, }; int ret, len; u8 notif_ver; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) cmd.len[0] = 0; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); return ERR_PTR(ret); } len = iwl_rx_packet_payload_len(cmd.resp_pkt); /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ - notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - WOWLAN_GET_STATUSES, 0); - if (!notif_ver) - notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - WOWLAN_GET_STATUSES, 7); + notif_ver = iwl_mvm_lookup_wowlan_status_ver(mvm); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { + if (notif_ver < 7) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > - sizeof(status->gtk.key)); + sizeof(status->gtk[0].key)); BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + sizeof(v6->gtk.tkip_mic_key) > - sizeof(status->gtk.key)); + sizeof(status->gtk[0].key)); /* copy GTK info to the right place */ - memcpy(status->gtk.key, v6->gtk.decrypt_key, + memcpy(status->gtk[0].key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key)); - memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key)); iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc); /* hardcode the key length to 16 since v6 only supports 16 */ - status->gtk.len = 16; + status->gtk[0].len = 16; /* * The key index only uses 2 bits (values 0 to 3) and * we always set bit 7 which means this is the * currently used key. */ - status->gtk.flags = v6->gtk.key_index | BIT(7); + status->gtk[0].flags = v6->gtk.key_index | BIT(7); } else if (notif_ver == 7) { struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]); iwl_mvm_convert_igtk(status, &v7->igtk[0]); } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; /* these three command versions have same layout and size, the * difference is only in a few not used (reserved) fields. 
*/ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); iwl_mvm_convert_gtk_v2(status, &v9->gtk[0]); iwl_mvm_convert_igtk(status, &v9->igtk[0]); status->tid_tear_down = v9->tid_tear_down; } else if (notif_ver == 12) { struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); - iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]); + iwl_mvm_convert_gtk_v3(status, v12->gtk); iwl_mvm_convert_igtk(status, &v12->igtk[0]); status->tid_tear_down = v12->tid_tear_down; } else { IWL_ERR(mvm, "Firmware advertises unknown WoWLAN status response %d!\n", notif_ver); - status = ERR_PTR(-EIO); + status = NULL; } out_free_resp: iwl_free_resp(&cmd); return status; } -static struct iwl_wowlan_status_data * -iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) -{ - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD, - IWL_FW_CMD_VER_UNKNOWN); - __le32 station_id = cpu_to_le32(sta_id); - u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0; - - if (!mvm->net_detect) { - /* only for tracing for now */ - int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, - cmd_size, &station_id); - if (ret) - IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); - } - - return iwl_mvm_send_wowlan_get_status(mvm, sta_id); -} - /* releases the MVM mutex */ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_wowlan_status_data *status) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_wowlan_status_data *status; int i; bool keep; struct iwl_mvm_sta *mvm_ap_sta; - status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); - if (IS_ERR(status)) + if (!status) goto out_unlock; IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", status->wakeup_reasons); /* still at hard-coded place 0 for D3 image */ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); if (!mvm_ap_sta) - goto out_free; + goto out_unlock; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status->qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ seq += 0x10; mvm_ap_sta->tid_data[i].seq_number = seq; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { i = mvm->offload_tid; iwl_trans_set_q_ptrs(mvm->trans, mvm_ap_sta->tid_data[i].txq_id, mvm_ap_sta->tid_data[i].seq_number >> 4); } /* now we have all the data we need, unlock to avoid mac80211 issues */ mutex_unlock(&mvm->mutex); iwl_mvm_report_wakeup_reasons(mvm, vif, status); keep = iwl_mvm_setup_connection_keep(mvm, vif, status); - kfree(status); return keep; -out_free: - kfree(status); out_unlock: mutex_unlock(&mvm->mutex); return false; } #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ IWL_SCAN_MAX_PROFILES) -struct iwl_mvm_nd_query_results { +struct iwl_mvm_nd_results { u32 matched_profiles; u8 matches[ND_QUERY_BUF_LEN]; }; static int iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *results) + struct iwl_mvm_nd_results *results) { - struct iwl_scan_offload_profiles_query *query; + struct iwl_scan_offload_match_info *query; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, .flags = CMD_WANT_SKB, }; int ret, len; size_t 
query_len, matches_len; int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); return ret; } if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { - query_len = sizeof(struct iwl_scan_offload_profiles_query); + query_len = sizeof(struct iwl_scan_offload_match_info); matches_len = sizeof(struct iwl_scan_offload_profile_match) * max_profiles; } else { query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * max_profiles; } len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < query_len) { IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); ret = -EIO; goto out_free_resp; } query = (void *)cmd.resp_pkt->data; results->matched_profiles = le32_to_cpu(query->matched_profiles); memcpy(results->matches, query->matches, matches_len); #ifdef CONFIG_IWLWIFI_DEBUGFS mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); #endif out_free_resp: iwl_free_resp(&cmd); return ret; } static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *query, + struct iwl_mvm_nd_results *results, int idx) { int n_chans = 0, i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = - (struct iwl_scan_offload_profile_match *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } else { struct iwl_scan_offload_profile_match_v1 *matches = - (struct iwl_scan_offload_profile_match_v1 *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } return n_chans; } static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *query, + struct iwl_mvm_nd_results *results, struct cfg80211_wowlan_nd_match *match, int idx) { int i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = - (struct iwl_scan_offload_profile_match *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } else { struct iwl_scan_offload_profile_match_v1 *matches = - (struct iwl_scan_offload_profile_match_v1 *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } } +/** + * enum iwl_d3_notif - d3 notifications + * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received + * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received + * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received + * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received + * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received + */ +enum iwl_d3_notif { + IWL_D3_NOTIF_WOWLAN_INFO = BIT(0), + IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1), + IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2), + IWL_D3_ND_MATCH_INFO = BIT(3), + IWL_D3_NOTIF_D3_END_NOTIF = BIT(4) +}; + +/* manage d3 resume data */ +struct iwl_d3_data { + struct iwl_wowlan_status_data *status; + bool test; + u32 d3_end_flags; 
+ u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */ + u32 notif_received; /* bitmap - see &enum iwl_d3_notif */ + struct iwl_mvm_nd_results *nd_results; + bool nd_results_valid; +}; + static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_d3_data *d3_data) { struct cfg80211_wowlan_nd_info *net_detect = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; - struct iwl_wowlan_status_data *status; - struct iwl_mvm_nd_query_results query; unsigned long matched_profiles; u32 reasons = 0; int i, n_matches, ret; - status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); - if (!IS_ERR(status)) { - reasons = status->wakeup_reasons; - kfree(status); - } + if (WARN_ON(!d3_data || !d3_data->status)) + goto out; + + reasons = d3_data->status->wakeup_reasons; if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) goto out; - ret = iwl_mvm_netdetect_query_results(mvm, &query); - if (ret || !query.matched_profiles) { + if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, 0)) { + IWL_INFO(mvm, "Query FW for ND results\n"); + ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results); + + } else { + IWL_INFO(mvm, "Notification based ND results\n"); + ret = d3_data->nd_results_valid ? 0 : -1; + } + + if (ret || !d3_data->nd_results->matched_profiles) { wakeup_report = NULL; goto out; } - matched_profiles = query.matched_profiles; + matched_profiles = d3_data->nd_results->matched_profiles; if (mvm->n_nd_match_sets) { n_matches = hweight_long(matched_profiles); } else { IWL_ERR(mvm, "no net detect match information available\n"); n_matches = 0; } net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL); if (!net_detect || !n_matches) goto out_report_nd; for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct cfg80211_wowlan_nd_match *match; int idx, n_channels = 0; - n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); + n_channels = iwl_mvm_query_num_match_chans(mvm, + d3_data->nd_results, + i); match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); if (!match) goto out_report_nd; net_detect->matches[net_detect->n_matches++] = match; /* We inverted the order of the SSIDs in the scan * request, so invert the index here. */ idx = mvm->n_nd_match_sets - i - 1; match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len); if (mvm->n_nd_channels < n_channels) continue; - iwl_mvm_query_set_freqs(mvm, &query, match, i); + iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i); } out_report_nd: wakeup.net_detect = net_detect; out: iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); if (net_detect) { for (i = 0; i < net_detect->n_matches; i++) kfree(net_detect->matches[i]); kfree(net_detect); } } static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_resume_disconnect(vif); } static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) { struct error_table_start { /* cf. 
struct iwl_error_event_table */ u32 valid; __le32 err_id; } err_info; if (!base) return false; iwl_trans_read_mem_bytes(trans, base, &err_info, sizeof(err_info)); if (err_info.valid && err_id) *err_id = le32_to_cpu(err_info.err_id); return !!err_info.valid; } static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { u32 err_id; /* check for lmac1 error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.lmac_error_event_table[0], &err_id)) { if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { struct cfg80211_wowlan_wakeup wakeup = { .rfkill_release = true, }; ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); } return true; } /* check if we have lmac2 set and check for error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.lmac_error_event_table[1], NULL)) return true; /* check for umac error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.umac_error_event_table, NULL)) return true; return false; } +/* + * This function assumes: + * 1. The mutex is already held. + * 2. The callee functions unlock the mutex. + */ +static bool +iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_d3_data *d3_data) +{ + lockdep_assert_held(&mvm->mutex); + + /* if FW uses status notification, status shouldn't be NULL here */ + if (!d3_data->status) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : + mvmvif->deflink.ap_sta_id; + + d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id); + } + + if (mvm->net_detect) { + iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data); + } else { + bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif, + d3_data->status); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (keep) + mvm->keep_vif = vif; +#endif + + return keep; + } + return false; +} + +#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \ + IWL_WOWLAN_WAKEUP_BY_PATTERN | \ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD) + +static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm, + struct iwl_wowlan_wake_pkt_notif *notif, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length); + + if (len < sizeof(*notif)) { + IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n"); + return -EIO; + } + + if (WARN_ON(!status)) { + IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n"); + return -EIO; + } + + if (WARN_ON(!(status->wakeup_reasons & + IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) { + IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n", + status->wakeup_reasons); + return -EIO; + } + + data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet); + + /* data_size got the padding from the notification, remove it. 
*/ + if (packet_len < data_size) + data_size = packet_len; + + status->wake_packet = kmemdup(notif->wake_packet, data_size, + GFP_ATOMIC); + + if (!status->wake_packet) + return -ENOMEM; + + status->wake_packet_length = packet_len; + status->wake_packet_bufsize = data_size; + + return 0; +} + +static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm, + struct iwl_d3_data *d3_data, + struct iwl_scan_offload_match_info *notif, + u32 len) +{ + struct iwl_wowlan_status_data *status = d3_data->status; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_nd_results *results = d3_data->nd_results; + size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) * + iwl_umac_scan_get_max_profiles(mvm->fw); + + if (IS_ERR_OR_NULL(vif)) + return; + + if (len < sizeof(struct iwl_scan_offload_match_info)) { + IWL_ERR(mvm, "Invalid scan match info notification\n"); + return; + } + + if (!mvm->net_detect) { + IWL_ERR(mvm, "Unexpected scan match info notification\n"); + return; + } + + if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + IWL_ERR(mvm, + "Ignore scan match info notification: no reason\n"); + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done); +#endif + + results->matched_profiles = le32_to_cpu(notif->matched_profiles); + IWL_INFO(mvm, "number of matched profiles=%u\n", + results->matched_profiles); + + if (results->matched_profiles) { + memcpy(results->matches, notif->matches, matches_len); + d3_data->nd_results_valid = TRUE; + } + + /* no scan should be active at this point */ + mvm->scan_status = 0; + for (i = 0; i < mvm->max_scans; i++) + mvm->scan_uid_status[i] = 0; +} + +static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_d3_data *d3_data = data; + u32 len; + int ret; + int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, + PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + IWL_FW_CMD_VER_UNKNOWN); + + + switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): { + struct iwl_wowlan_info_notif *notif; + + if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) { + /* We might get two notifications due to dual bss */ + IWL_DEBUG_WOWLAN(mvm, + "Got additional wowlan info notification\n"); + break; + } + + if (wowlan_info_ver < 2) { + struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; + + notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); + if (!notif) + return false; + + notif->tid_tear_down = notif_v1->tid_tear_down; + notif->station_id = notif_v1->station_id; + memset_after(notif, 0, station_id); + } else { + notif = (void *)pkt->data; + } + + d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; + len = iwl_rx_packet_payload_len(pkt); + iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status, + len); + + if (wowlan_info_ver < 2) + kfree(notif); + + if (d3_data->status && + d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT) + /* We are supposed to get also wake packet notif */ + d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): { + struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data; + + if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) { + /* We shouldn't get two wake packet notifications */ + IWL_ERR(mvm, + 
"Got additional wowlan wake packet notification\n"); + } else { + d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + len = iwl_rx_packet_payload_len(pkt); + ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif, + d3_data->status, + len); + if (ret) + IWL_ERR(mvm, + "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n"); + } + + break; + } + case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): { + struct iwl_scan_offload_match_info *notif = (void *)pkt->data; + + if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) { + IWL_ERR(mvm, + "Got additional netdetect match info\n"); + break; + } + + d3_data->notif_received |= IWL_D3_ND_MATCH_INFO; + + /* explicitly set this in the 'expected' as well */ + d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO; + + len = iwl_rx_packet_payload_len(pkt); + iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len); + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): { + struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data; + + d3_data->d3_end_flags = __le32_to_cpu(notif->flags); + d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF; + + break; + } + default: + WARN_ON(1); + } + + return d3_data->notif_received == d3_data->notif_expected; +} + +static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) +{ + int ret; + enum iwl_d3_status d3_status; + struct iwl_host_cmd cmd = { + .id = D0I3_END_CMD, + .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, + }; + bool reset = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset); + if (ret) + return ret; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mvm, "Device was reset during suspend\n"); + return -ENOENT; + } + + /* + * We should trigger resume flow using command only for 22000 family + * AX210 and above don't need the command since they have + * the doorbell interrupt. 
+ */ + if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 && + fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) { + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", + ret); + } + + return ret; +} + +#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5) + +static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, + struct iwl_d3_data *d3_data) +{ + static const u16 d3_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION), + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION), + WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF), + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; + struct iwl_notification_wait wait_d3_notif; + int ret; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_resume_notif, ARRAY_SIZE(d3_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); + + ret = iwl_mvm_resume_firmware(mvm, d3_data->test); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif); + return ret; + } + + return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif, + IWL_MVM_D3_NOTIF_TIMEOUT); +} + +static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm) +{ + return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, 0) && + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_WAKE_PKT_NOTIFICATION, 0) && + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + D3_END_NOTIFICATION, 0); +} + static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; int ret = 1; - enum iwl_d3_status d3_status; - bool keep = false; + struct iwl_mvm_nd_results results = {}; + struct iwl_d3_data d3_data = { + .test = test, + .notif_expected = + IWL_D3_NOTIF_WOWLAN_INFO | + IWL_D3_NOTIF_D3_END_NOTIF, + .nd_results_valid = false, + .nd_results = &results, + }; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); + bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm); + bool keep = false; mutex_lock(&mvm->mutex); mvm->last_reset_or_resume_time_jiffies = jiffies; /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) goto err; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); if (iwl_mvm_check_rt_status(mvm, vif)) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); ret = 1; goto err; } - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); - if (ret) - goto err; - - if (d3_status != IWL_D3_STATUS_ALIVE) { - IWL_INFO(mvm, "Device was reset during suspend\n"); - goto err; - } - - if (d0i3_first) { - struct iwl_host_cmd cmd = { - .id = D0I3_END_CMD, - .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, - }; - int len; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret < 0) { - IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", - ret); + if (resume_notif_based) { + d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL); + if (!d3_data.status) { + IWL_ERR(mvm, "Failed to allocate wowlan status\n"); + ret = -ENOMEM; goto err; } - switch (mvm->cmd_ver.d0i3_resp) { - case 0: - break; - case 1: - len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len != sizeof(u32)) { - IWL_ERR(mvm, - "Error with D0I3_END_CMD response size (%d)\n", - len); - goto 
err; - } - if (IWL_D0I3_RESET_REQUIRE & - le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) { - iwl_write32(mvm->trans, CSR_RESET, - CSR_RESET_REG_FLAG_FORCE_NMI); - iwl_free_resp(&cmd); - } - break; - default: - WARN_ON(1); - } + + ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + if (ret) + goto err; + } else { + ret = iwl_mvm_resume_firmware(mvm, test); + if (ret < 0) + goto err; } /* after the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + /* when reset is required we can't send these following commands */ + if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) + goto query_wakeup_reasons; + /* * Query the current location and source from the D3 firmware so we * can play it back when we re-intiailize the D0 firmware */ iwl_mvm_update_changed_regdom(mvm); /* Re-configure PPAG settings */ iwl_mvm_ppag_send_cmd(mvm); if (!unified_image) /* Re-configure default SAR profile */ iwl_mvm_sar_select_profile(mvm, 1, 1); - if (mvm->net_detect) { + if (mvm->net_detect && unified_image) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that * fails, continue and try to get the wake-up reasons, * but trigger a HW restart by keeping a failure code * in ret. */ - if (unified_image) - ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, - false); - - iwl_mvm_query_netdetect_reasons(mvm, vif); - /* has unlocked the mutex, so skip that */ - goto out; - } else { - keep = iwl_mvm_query_wakeup_reasons(mvm, vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (keep) - mvm->keep_vif = vif; -#endif - /* has unlocked the mutex, so skip that */ - goto out_iterate; + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, + false); } +query_wakeup_reasons: + keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data); + /* has unlocked the mutex, so skip that */ + goto out; + err: - iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); +out: + if (d3_data.status) + kfree(d3_data.status->wake_packet); + kfree(d3_data.status); + iwl_mvm_free_nd(mvm); -out_iterate: - if (!test) + if (!d3_data.test && !mvm->net_detect) ieee80211_iterate_active_interfaces_mtx(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_disconnect_iter, + keep ? vif : NULL); -out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); /* no need to reset the device in unified images, if successful */ if (unified_image && !ret) { /* nothing else to do if we already sent D0I3_END_CMD */ if (d0i3_first) return 0; - ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); - if (!ret) + if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + D3_END_NOTIFICATION, 0)) { + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); + if (!ret) + return 0; + } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) { return 0; + } } /* * Reconfigure the device in one of the following cases: * 1. We are not using a unified image * 2. 
We are using a unified image but had an error while exiting D3 */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* regardless of what happened, we're now out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; return 1; } int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; ret = __iwl_mvm_resume(mvm, false); iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); return ret; } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); device_set_wakeup_enable(mvm->trans->dev, enabled); } #ifdef CONFIG_IWLWIFI_DEBUGFS static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; int err; if (mvm->d3_test_active) return -EBUSY; file->private_data = inode->i_private; iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); /* start pseudo D3 */ rtnl_lock(); wiphy_lock(mvm->hw->wiphy); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (err > 0) err = -EINVAL; if (err) return err; mvm->d3_test_active = true; mvm->keep_vif = NULL; return 0; } static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u32 pme_asserted; while (true) { /* read pme_ptr if available */ if (mvm->d3_test_pme_ptr) { pme_asserted = iwl_trans_read_mem32(mvm->trans, mvm->d3_test_pme_ptr); if (pme_asserted) break; } if (msleep_interruptible(100)) break; } return 0; } static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (_data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_connection_loss(vif); } static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->d3_test_active = false; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); rtnl_lock(); wiphy_lock(mvm->hw->wiphy); __iwl_mvm_resume(mvm, true); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); iwl_abort_notification_waits(&mvm->notif_wait); if (!unified_image) { int remaining_time = 10; ieee80211_restart_hw(mvm->hw); /* wait for restart and disconnect all interfaces */ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && remaining_time > 0) { remaining_time--; msleep(1000); } if (remaining_time == 0) IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); return 0; } const struct file_operations iwl_dbgfs_d3_test_ops = { .llseek = no_llseek, .open = iwl_mvm_d3_test_open, .read = iwl_mvm_d3_test_read, .release = iwl_mvm_d3_test_release, }; #endif diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c b/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c index efc3b1aa7792..e03951c843a8 100644 --- a/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c +++ b/sys/contrib/dev/iwlwifi/mvm/debugfs-vif.c @@ -1,785 +1,776 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ 
#include "mvm.h" #include "debugfs.h" #if defined(__FreeBSD__) #include #endif static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_dbgfs_pm_mask param, int val) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; dbgfs_pm->mask |= param; switch (param) { case MVM_DEBUGFS_PM_KEEP_ALIVE: { int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); if (val * MSEC_PER_SEC < 3 * dtimper_msec) IWL_WARN(mvm, "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", val * MSEC_PER_SEC, 3 * dtimper_msec); dbgfs_pm->keep_alive_seconds = val; break; } case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", val ? "enabled" : "disabled"); dbgfs_pm->skip_over_dtim = val; break; case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); dbgfs_pm->skip_dtim_periods = val; break; case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); dbgfs_pm->rx_data_timeout = val; break; case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); dbgfs_pm->tx_data_timeout = val; break; case MVM_DEBUGFS_PM_LPRX_ENA: IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled"); dbgfs_pm->lprx_ena = val; break; case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD: IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); dbgfs_pm->lprx_rssi_threshold = val; break; case MVM_DEBUGFS_PM_SNOOZE_ENABLE: IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); dbgfs_pm->snooze_ena = val; break; case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING: IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); dbgfs_pm->uapsd_misbehaving = val; break; case MVM_DEBUGFS_PM_USE_PS_POLL: IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); dbgfs_pm->use_ps_poll = val; break; } } static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum iwl_dbgfs_pm_mask param; int val, ret; if (!strncmp("keep_alive=", buf, 11)) { if (sscanf(buf + 11, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_KEEP_ALIVE; } else if (!strncmp("skip_over_dtim=", buf, 15)) { if (sscanf(buf + 15, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; } else if (!strncmp("skip_dtim_periods=", buf, 18)) { if (sscanf(buf + 18, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; } else if (!strncmp("rx_data_timeout=", buf, 16)) { if (sscanf(buf + 16, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; } else if (!strncmp("tx_data_timeout=", buf, 16)) { if (sscanf(buf + 16, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; } else if (!strncmp("lprx=", buf, 5)) { if (sscanf(buf + 5, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_LPRX_ENA; } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) { if (sscanf(buf + 20, "%d", &val) != 1) return -EINVAL; if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < POWER_LPRX_RSSI_THRESHOLD_MIN) return -EINVAL; param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; } else if (!strncmp("snooze_enable=", buf, 14)) { if (sscanf(buf + 14, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; } else if (!strncmp("uapsd_misbehaving=", buf, 18)) { if (sscanf(buf + 18, "%d", &val) != 1) return 
-EINVAL; param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; } else if (!strncmp("use_ps_poll=", buf, 12)) { if (sscanf(buf + 12, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_USE_PS_POLL; } else { return -EINVAL; } mutex_lock(&mvm->mutex); iwl_dbgfs_update_pm(mvm, vif, param, val); ret = iwl_mvm_power_update_mac(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos; pos = scnprintf(buf, bufsz, "bss limit = %d\n", vif->bss_conf.txpower); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_pm_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; char buf[512]; int bufsz = sizeof(buf); int pos; pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_mac_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 ap_sta_id; struct ieee80211_chanctx_conf *chanctx_conf; char buf[512]; int bufsz = sizeof(buf); int pos = 0; int i; mutex_lock(&mvm->mutex); - ap_sta_id = mvmvif->ap_sta_id; + ap_sta_id = mvmvif->deflink.ap_sta_id; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_ADHOC: pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n"); break; case NL80211_IFTYPE_STATION: pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n"); break; case NL80211_IFTYPE_AP: pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n"); break; case NL80211_IFTYPE_P2P_CLIENT: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n"); break; case NL80211_IFTYPE_P2P_GO: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n"); break; case NL80211_IFTYPE_P2P_DEVICE: pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n"); break; default: break; } pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", mvmvif->id, mvmvif->color); pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", vif->bss_conf.bssid); pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n", mvm->tcm.result.load[mvmvif->id]); pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); - for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) + for (i = 0; i < ARRAY_SIZE(mvmvif->deflink.queue_params); i++) pos += scnprintf(buf+pos, bufsz-pos, "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", - i, mvmvif->queue_params[i].txop, - mvmvif->queue_params[i].cw_min, - mvmvif->queue_params[i].cw_max, - mvmvif->queue_params[i].aifs, - mvmvif->queue_params[i].uapsd); + i, mvmvif->deflink.queue_params[i].txop, + mvmvif->deflink.queue_params[i].cw_min, + mvmvif->deflink.queue_params[i].cw_max, + mvmvif->deflink.queue_params[i].aifs, + mvmvif->deflink.queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && ap_sta_id != IWL_MVM_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); if (mvm_sta) { pos += scnprintf(buf+pos, bufsz-pos, "ap_sta_id %d - reduced Tx power %d\n", ap_sta_id, mvm_sta->bt_reduced_txpower); } } rcu_read_lock(); chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); if (chanctx_conf) pos += 
scnprintf(buf+pos, bufsz-pos, "idle rx chains %d, active rx chains: %d\n", chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic); rcu_read_unlock(); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, enum iwl_dbgfs_bf_mask param, int value) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; dbgfs_bf->mask |= param; switch (param) { case MVM_DEBUGFS_BF_ENERGY_DELTA: dbgfs_bf->bf_energy_delta = value; break; case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: dbgfs_bf->bf_roaming_energy_delta = value; break; case MVM_DEBUGFS_BF_ROAMING_STATE: dbgfs_bf->bf_roaming_state = value; break; case MVM_DEBUGFS_BF_TEMP_THRESHOLD: dbgfs_bf->bf_temp_threshold = value; break; case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: dbgfs_bf->bf_temp_fast_filter = value; break; case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: dbgfs_bf->bf_temp_slow_filter = value; break; case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: dbgfs_bf->bf_enable_beacon_filter = value; break; case MVM_DEBUGFS_BF_DEBUG_FLAG: dbgfs_bf->bf_debug_flag = value; break; case MVM_DEBUGFS_BF_ESCAPE_TIMER: dbgfs_bf->bf_escape_timer = value; break; case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: dbgfs_bf->ba_enable_beacon_abort = value; break; case MVM_DEBUGFS_BA_ESCAPE_TIMER: dbgfs_bf->ba_escape_timer = value; break; } } static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum iwl_dbgfs_bf_mask param; int value, ret = 0; if (!strncmp("bf_energy_delta=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ENERGY_DELTA_MIN || value > IWL_BF_ENERGY_DELTA_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ENERGY_DELTA; } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { if (sscanf(buf+24, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; } else if (!strncmp("bf_roaming_state=", buf, 17)) { if (sscanf(buf+17, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ROAMING_STATE_MIN || value > IWL_BF_ROAMING_STATE_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ROAMING_STATE; } else if (!strncmp("bf_temp_threshold=", buf, 18)) { if (sscanf(buf+18, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_THRESHOLD_MIN || value > IWL_BF_TEMP_THRESHOLD_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { if (sscanf(buf+20, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_FAST_FILTER_MIN || value > IWL_BF_TEMP_FAST_FILTER_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { if (sscanf(buf+20, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || value > IWL_BF_TEMP_SLOW_FILTER_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { if (sscanf(buf+24, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; } else if (!strncmp("bf_debug_flag=", buf, 14)) { if (sscanf(buf+14, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BF_DEBUG_FLAG; } else if 
(!strncmp("bf_escape_timer=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BF_ESCAPE_TIMER_MIN || value > IWL_BF_ESCAPE_TIMER_MAX) return -EINVAL; param = MVM_DEBUGFS_BF_ESCAPE_TIMER; } else if (!strncmp("ba_escape_timer=", buf, 16)) { if (sscanf(buf+16, "%d", &value) != 1) return -EINVAL; if (value < IWL_BA_ESCAPE_TIMER_MIN || value > IWL_BA_ESCAPE_TIMER_MAX) return -EINVAL; param = MVM_DEBUGFS_BA_ESCAPE_TIMER; } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { if (sscanf(buf+23, "%d", &value) != 1) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; } else { return -EINVAL; } mutex_lock(&mvm->mutex); iwl_dbgfs_update_bf(vif, param, value); if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); else ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_bf_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[256]; int pos = 0; const size_t bufsz = sizeof(buf); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), .ba_enable_beacon_abort = cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), }; iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); if (mvmvif->bf_data.bf_enabled) cmd.bf_enable_beacon_filter = cpu_to_le32(1); else cmd.bf_enable_beacon_filter = 0; pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", le32_to_cpu(cmd.bf_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", le32_to_cpu(cmd.bf_roaming_energy_delta)); pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", le32_to_cpu(cmd.bf_roaming_state)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", le32_to_cpu(cmd.bf_temp_threshold)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", le32_to_cpu(cmd.bf_temp_fast_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", le32_to_cpu(cmd.bf_temp_slow_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", le32_to_cpu(cmd.bf_enable_beacon_filter)); pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", le32_to_cpu(cmd.bf_debug_flag)); pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", le32_to_cpu(cmd.bf_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", le32_to_cpu(cmd.ba_escape_timer)); pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", le32_to_cpu(cmd.ba_enable_beacon_abort)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -#if defined(__linux__) -static inline char *iwl_dbgfs_is_match(char *name, char *buf) -{ - int len = strlen(name); - - return !strncmp(name, buf, len) ? 
buf + len : NULL; -} -#endif - static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u32 curr_gp2; u64 curr_os; s64 diff; char buf[64]; const size_t bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL); mutex_unlock(&mvm->mutex); do_div(curr_os, NSEC_PER_USEC); diff = curr_os - curr_gp2; pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 value; int ret; ret = kstrtou8(buf, 0, &value); if (ret) return ret; if (value > 1) return -EINVAL; mutex_lock(&mvm->mutex); iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u8 value; int ret; ret = kstrtou8(buf, 0, &value); if (ret) return ret; if (value > NUM_LOW_LATENCY_FORCE) return -EINVAL; mutex_lock(&mvm->mutex); if (value == LOW_LATENCY_FORCE_UNSET) { iwl_mvm_update_low_latency(mvm, vif, false, LOW_LATENCY_DEBUGFS_FORCE); iwl_mvm_update_low_latency(mvm, vif, false, LOW_LATENCY_DEBUGFS_FORCE_ENABLE); } else { iwl_mvm_update_low_latency(mvm, vif, value == LOW_LATENCY_FORCE_ON, LOW_LATENCY_DEBUGFS_FORCE); iwl_mvm_update_low_latency(mvm, vif, true, LOW_LATENCY_DEBUGFS_FORCE_ENABLE); } mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_low_latency_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n" "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n"; /* * all values in format are boolean so the size of format is enough * for holding the result string */ char buf[sizeof(format) + 1] = {}; int len; len = scnprintf(buf, sizeof(buf) - 1, format, !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), !!(mvmvif->low_latency & LOW_LATENCY_VCMD), !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE), !!(mvmvif->low_latency_actual)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[20]; int len; #if defined(__linux__) - len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid); + len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_ap_addr); #elif defined(__FreeBSD__) - len = sprintf(buf, "%6D\n", mvmvif->uapsd_misbehaving_bssid, ":"); + len = sprintf(buf, "%6D\n", mvmvif->uapsd_misbehaving_ap_addr, ":"); #endif return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t 
iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; bool ret; mutex_lock(&mvm->mutex); - ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid); + ret = mac_pton(buf, mvmvif->uapsd_misbehaving_ap_addr); mutex_unlock(&mvm->mutex); return ret ? count : -EINVAL; } static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; struct ieee80211_chanctx_conf *chanctx_conf; struct iwl_mvm_phy_ctxt *phy_ctxt; u16 value; int ret; ret = kstrtou16(buf, 0, &value); if (ret) return ret; mutex_lock(&mvm->mutex); rcu_read_lock(); chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); /* make sure the channel context is assigned */ if (!chanctx_conf) { rcu_read_unlock(); mutex_unlock(&mvm->mutex); return -EINVAL; } phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv]; rcu_read_unlock(); mvm->dbgfs_rx_phyinfo = value; ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def, chanctx_conf->rx_chains_static, chanctx_conf->rx_chains_dynamic); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[8]; int len; len = scnprintf(buf, sizeof(buf), "0x%04x\n", mvmvif->mvm->dbgfs_rx_phyinfo); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static void iwl_dbgfs_quota_check(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int *ret = data; if (mvmvif->dbgfs_quota_min) *ret = -EINVAL; } static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; u16 value; int ret; ret = kstrtou16(buf, 0, &value); if (ret) return ret; if (value > 95) return -EINVAL; mutex_lock(&mvm->mutex); mvmvif->dbgfs_quota_min = 0; ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_dbgfs_quota_check, &ret); if (ret == 0) { mvmvif->dbgfs_quota_min = value; iwl_mvm_update_quotas(mvm, false, NULL); } mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_quota_min_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); char buf[10]; int len; len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, vif, \ &iwl_dbgfs_##name##_ops); \ } while (0) MVM_DEBUGFS_READ_FILE_OPS(mac_params); MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); 
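The *_FILE_OPS declarations around this point bind the handlers above to per-vif debugfs nodes. Every write handler in this group (pm_params, bf_params, and the like) follows the same convention: match a literal "name=" prefix with strncmp(), pull one integer with sscanf(), range-check it, and only then take the mutex and push the update to firmware. Below is a minimal userspace sketch of that parsing convention; the parameter table and bounds in it are invented for illustration and are not part of the driver.

/*
 * Standalone sketch (not driver code): the "name=value" write-handler
 * convention used by iwl_dbgfs_pm_params_write()/iwl_dbgfs_bf_params_write().
 * The table contents and bounds below are hypothetical.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct dbgfs_param {
	const char *prefix;	/* literal "name=" prefix to match */
	int min, max;		/* accepted range, inclusive */
};

static const struct dbgfs_param params[] = {
	{ "bf_energy_delta=", 0, 255 },		/* hypothetical bounds */
	{ "ba_escape_timer=", 0, 1024 },	/* hypothetical bounds */
};

/* Return 0 and fill *idx/*val on success, -EINVAL on any parse failure. */
static int parse_param(const char *buf, size_t *idx, int *val)
{
	size_t i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
		size_t len = strlen(params[i].prefix);

		if (strncmp(params[i].prefix, buf, len))
			continue;			/* prefix mismatch */
		if (sscanf(buf + len, "%d", val) != 1)
			return -EINVAL;			/* no integer after '=' */
		if (*val < params[i].min || *val > params[i].max)
			return -EINVAL;			/* out of range */
		*idx = i;
		return 0;
	}
	return -EINVAL;					/* unknown parameter */
}

int main(void)
{
	size_t idx;
	int val;

	if (!parse_param("bf_energy_delta=12", &idx, &val))
		printf("matched %s%d\n", params[idx].prefix, val);
	return 0;
}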
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct dentry *dbgfs_dir = vif->debugfs_dir; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); #if defined(__linux__) char buf[100]; #endif /* * Check if debugfs directory already exist before creating it. * This may happen when, for example, resetting hw or suspend-resume */ if (!dbgfs_dir || mvmvif->dbgfs_dir) return; mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", dbgfs_dir); return; } if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || (vif->type == NL80211_IFTYPE_STATION && vif->p2p))) MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); #if defined(__linux__) /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. 
For example, under * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/ * find * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ */ snprintf(buf, 100, "../../../%pd3/%pd", dbgfs_dir, mvmvif->dbgfs_dir); mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, mvm->debugfs_dir, buf); #endif } void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); debugfs_remove(mvmvif->dbgfs_slink); mvmvif->dbgfs_slink = NULL; debugfs_remove_recursive(mvmvif->dbgfs_dir); mvmvif->dbgfs_dir = NULL; } diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs.c b/sys/contrib/dev/iwlwifi/mvm/debugfs.c index 360b00a78148..2ba63198c70a 100644 --- a/sys/contrib/dev/iwlwifi/mvm/debugfs.c +++ b/sys/contrib/dev/iwlwifi/mvm/debugfs.c @@ -1,1953 +1,2299 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include +#include #include "mvm.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" #include "iwl-modparams.h" +#include "iwl-drv.h" #include "fw/error-dump.h" +#include "fw/api/phy-ctxt.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, budget; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); mutex_unlock(&mvm->mutex); if (budget < 0) return budget; pos = scnprintf(buf, sizeof(buf), "%d\n", budget); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; iwl_mvm_enter_ctkill(mvm); return count; } static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) return -EINVAL; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF) ? : count; mutex_unlock(&mvm->mutex); return ret; } IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? 
: count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta; int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) return -EINVAL; if (drain < 0 || drain > 1) return -EINVAL; mutex_lock(&mvm->mutex); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) ret = -ENOENT; else ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; const struct fw_img *img; unsigned int ofs, len; size_t ret; u8 *ptr; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; /* default is to dump the entire data segment */ img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; if (mvm->dbgfs_sram_len) { ofs = mvm->dbgfs_sram_offset; len = mvm->dbgfs_sram_len; } ptr = kzalloc(len, GFP_KERNEL); if (!ptr) return -ENOMEM; iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); kfree(ptr); return ret; } static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { const struct fw_img *img; u32 offset, len; u32 img_offset, img_len; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; if (sscanf(buf, "%x,%x", &offset, &len) == 2) { if ((offset & 0x3) || (len & 0x3)) return -EINVAL; if (offset + len > img_offset + img_len) return -EINVAL; mvm->dbgfs_sram_offset = offset; mvm->dbgfs_sram_len = len; } else { mvm->dbgfs_sram_offset = 0; mvm->dbgfs_sram_len = 0; } return count; } static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos; if (!mvm->temperature_test) - pos = scnprintf(buf , sizeof(buf), "disabled\n"); + pos = scnprintf(buf, sizeof(buf), "disabled\n"); else - pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); + pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } /* * Set NIC Temperature * Cause the driver to ignore the actual NIC temperature reported by the FW * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE */ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int temperature; if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) return -EIO; if (kstrtoint(buf, 10, &temperature)) return -EINVAL; /* not a legal temperature */ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) return -EINVAL; mutex_lock(&mvm->mutex); if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { if (!mvm->temperature_test) goto out; mvm->temperature_test = false; /* Since we can't read the temp while awake, just set * it to zero 
until we get the next RX stats from the * firmware. */ mvm->temperature = 0; } else { mvm->temperature_test = true; mvm->temperature = temperature; } IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", - mvm->temperature_test ? "En" : "Dis" , + mvm->temperature_test ? "En" : "Dis", mvm->temperature); /* handle the temperature change */ iwl_mvm_tt_handler(mvm); out: mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, ret; s32 temp; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); if (ret) return -EIO; - pos = scnprintf(buf , sizeof(buf), "%d\n", temp); + pos = scnprintf(buf, sizeof(buf), "%d\n", temp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #ifdef CONFIG_ACPI static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[256]; int pos = 0; int bufsz = sizeof(buf); int tbl_idx; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (tbl_idx < 0) { mutex_unlock(&mvm->mutex); return tbl_idx; } if (!tbl_idx) { pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, - "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + "2.4GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, - "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + "5.2GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } + +static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int err, pos; + char buf[12]; + u32 value; + + err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ENABLE_6E, + &iwl_guid, &value); + if (err) + return err; + + pos = sprintf(buf, "0x%08x\n", value); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} #endif static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct ieee80211_sta *sta; char buf[400]; int i, pos = 0, bufsz = sizeof(buf); mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta) pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); else if (IS_ERR(sta)) pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta)); else pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr); } 
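Like this stations dump, every read handler in the file formats its report into a bounded buffer with scnprintf() and then hands it to simple_read_from_buffer(), which takes care of the user's file offset and short reads. The following is a rough userspace approximation of those semantics for reference; it is not the kernel helper itself (the real one copies to user space and can fault).

/*
 * Userspace approximation of simple_read_from_buffer() semantics: copy at
 * most "count" bytes starting at *ppos from an already formatted buffer,
 * advance *ppos, and return the number of bytes produced (0 at EOF).
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t simple_read(char *to, size_t count, off_t *ppos,
			   const char *from, size_t available)
{
	off_t pos = *ppos;
	size_t n;

	if (pos < 0 || (size_t)pos >= available)
		return 0;			/* nothing left to read */
	n = available - (size_t)pos;
	if (n > count)
		n = count;			/* honour the caller's count */
	memcpy(to, from + pos, n);
	*ppos = pos + (off_t)n;
	return (ssize_t)n;
}

int main(void)
{
	char report[64], chunk[8];
	off_t pos = 0;
	int len = snprintf(report, sizeof(report),
			   "00: aa:bb:cc:dd:ee:ff\n01: N/A\n");
	ssize_t n;

	/* Drain the formatted report in small chunks, as cat(1) would. */
	while ((n = simple_read(chunk, sizeof(chunk), &pos, report,
				(size_t)len)) > 0)
		fwrite(chunk, 1, (size_t)n, stdout);
	return 0;
}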
mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, +static ssize_t iwl_dbgfs_rs_data_read(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, + char __user *user_buf, size_t count, loff_t *ppos) { - struct ieee80211_sta *sta = file->private_data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; - struct iwl_mvm *mvm = lq_sta->pers.drv; + struct iwl_lq_sta_rs_fw *lq_sta = &mvm_link_sta->lq_sta.rs_fw; static const size_t bufsz = 2048; char *buff; int desc = 0; ssize_t ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; - mutex_lock(&mvm->mutex); - desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); desc += scnprintf(buff + desc, bufsz - desc, "A-MPDU size limit %d\n", lq_sta->pers.dbg_agg_frame_count_lim); desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : ""); desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags); desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags); if (desc < bufsz - 1) buff[desc++] = '\n'; - mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } -static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, +static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, char *buf, size_t count, loff_t *ppos) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; u16 amsdu_len; if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; /* only change from debug set <-> debug unset */ - if (amsdu_len && mvmsta->orig_amsdu_len) + if (amsdu_len && mvm_link_sta->orig_amsdu_len) return -EBUSY; if (amsdu_len) { - mvmsta->orig_amsdu_len = sta->max_amsdu_len; - sta->max_amsdu_len = amsdu_len; - for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) - sta->max_tid_amsdu_len[i] = amsdu_len; + mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len; + link_sta->agg.max_amsdu_len = amsdu_len; + link_sta->agg.max_amsdu_len = amsdu_len; + for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++) + link_sta->agg.max_tid_amsdu_len[i] = amsdu_len; } else { - sta->max_amsdu_len = mvmsta->orig_amsdu_len; - mvmsta->orig_amsdu_len = 0; + link_sta->agg.max_amsdu_len = mvm_link_sta->orig_amsdu_len; + mvm_link_sta->orig_amsdu_len = 0; } + + ieee80211_sta_recalc_aggregates(link_sta->sta); + return count; } -static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, +static ssize_t iwl_dbgfs_amsdu_len_read(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, char __user *user_buf, size_t count, loff_t *ppos) { - struct ieee80211_sta *sta = file->private_data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - char buf[32]; int pos; - pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); + pos = scnprintf(buf, sizeof(buf), "current %d ", + link_sta->agg.max_amsdu_len); pos += scnprintf(buf + 
pos, sizeof(buf) - pos, "stored %d\n", - mvmsta->orig_amsdu_len); + mvm_link_sta->orig_amsdu_len); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos = 0; pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", mvm->disable_power_off); pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret, val; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!strncmp("disable_power_off_d0=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off = val; } else if (!strncmp("disable_power_off_d3=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off_d3 = val; } else { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_power_update_device(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); BT_MBOX_PRINT(0, LE_PROF1, false); BT_MBOX_PRINT(0, LE_PROF2, false); BT_MBOX_PRINT(0, LE_PROF_OTHER, false); BT_MBOX_PRINT(0, CHL_SEQ_N, false); BT_MBOX_PRINT(0, INBAND_S, false); BT_MBOX_PRINT(0, LE_MIN_RSSI, false); BT_MBOX_PRINT(0, LE_SCAN, false); BT_MBOX_PRINT(0, LE_ADV, false); BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); BT_MBOX_PRINT(0, OPEN_CON_1, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); BT_MBOX_PRINT(1, IP_SR, false); BT_MBOX_PRINT(1, LE_MSTR, false); BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); BT_MBOX_PRINT(1, MSG_TYPE, false); BT_MBOX_PRINT(1, SSN, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); BT_MBOX_PRINT(2, SNIFF_ACT, false); BT_MBOX_PRINT(2, PAG, false); BT_MBOX_PRINT(2, INQUIRY, false); BT_MBOX_PRINT(2, CONN, false); BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); BT_MBOX_PRINT(2, DISC, false); BT_MBOX_PRINT(2, SCO_TX_ACT, false); BT_MBOX_PRINT(2, SCO_RX_ACT, false); BT_MBOX_PRINT(2, ESCO_RE_TX, false); BT_MBOX_PRINT(2, SCO_DURATION, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); BT_MBOX_PRINT(3, OPEN_CON_2, false); BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); BT_MBOX_PRINT(3, UPDATE_REQUEST, true); return pos; } static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); pos 
+= scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", le32_to_cpu(notif->primary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", le32_to_cpu(notif->secondary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef BT_MBOX_PRINT static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_primary_ci)); pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 bt_tx_prio; if (sscanf(buf, "%u", &bt_tx_prio) != 1) return -EINVAL; if (bt_tx_prio > 4) return -EINVAL; mvm->bt_tx_prio = bt_tx_prio; return count; } static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { static const char * const modes_str[BT_FORCE_ANT_MAX] = { [BT_FORCE_ANT_DIS] = "dis", [BT_FORCE_ANT_AUTO] = "auto", [BT_FORCE_ANT_BT] = "bt", [BT_FORCE_ANT_WIFI] = "wifi", }; int ret, bt_force_ant_mode; ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); if (ret < 0) return ret; bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) goto out; mvm->bt_force_ant_mode = bt_force_ant_mode; IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_bt_init_conf(mvm); else ret = 0; out: mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; int ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", - mvm->trans->cfg->fw_name_pre); + iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre)); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->name); pos += scnprintf(pos, endpos - pos, "Bus: %s\n", #if defined(__linux__) mvm->fwrt.dev->bus->name); #elif defined(__FreeBSD__) ""); #endif ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } +static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = 
file->private_data; + struct iwl_mvm_tas_status_resp tas_rsp; + struct iwl_mvm_tas_status_resp *rsp = &tas_rsp; + static const size_t bufsz = 1024; + char *buff, *pos, *endpos; + const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = { + [TAS_DISABLED_DUE_TO_BIOS] = + "Due To BIOS", + [TAS_DISABLED_DUE_TO_SAR_6DBM] = + "Due To SAR Limit Less Than 6 dBm", + [TAS_DISABLED_REASON_INVALID] = + "N/A", + }; + const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = { + [TAS_DYNA_INACTIVE] = "INACTIVE", + [TAS_DYNA_INACTIVE_MVM_MODE] = + "inactive due to mvm mode", + [TAS_DYNA_INACTIVE_TRIGGER_MODE] = + "inactive due to trigger mode", + [TAS_DYNA_INACTIVE_BLOCK_LISTED] = + "inactive due to block listed", + [TAS_DYNA_INACTIVE_UHB_NON_US] = + "inactive due to uhb non US", + [TAS_DYNA_ACTIVE] = "ACTIVE", + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DEBUG_GROUP, GET_TAS_STATUS), + .flags = CMD_WANT_SKB, + .len = { 0, }, + .data = { NULL, }, + }; + int ret, i, tmp; + bool tas_enabled = false; + unsigned long dyn_status; + + if (!iwl_mvm_firmware_running(mvm)) + return -ENODEV; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, &hcmd); + mutex_unlock(&mvm->mutex); + if (ret < 0) + return ret; + + buff = kzalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + pos = buff; + endpos = pos + bufsz; + + rsp = (void *)hcmd.resp_pkt->data; + + pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n"); + for (i = 0; i < rsp->in_dual_radio + 1; i++) { + if (rsp->tas_status_mac[i].band != TAS_LMAC_BAND_INVALID && + rsp->tas_status_mac[i].dynamic_status & BIT(TAS_DYNA_ACTIVE)) { + pos += scnprintf(pos, endpos - pos, "\tON for "); + switch (rsp->tas_status_mac[i].band) { + case TAS_LMAC_BAND_HB: + pos += scnprintf(pos, endpos - pos, "HB\n"); + break; + case TAS_LMAC_BAND_LB: + pos += scnprintf(pos, endpos - pos, "LB\n"); + break; + case TAS_LMAC_BAND_UHB: + pos += scnprintf(pos, endpos - pos, "UHB\n"); + break; + case TAS_LMAC_BAND_INVALID: + pos += scnprintf(pos, endpos - pos, + "INVALID BAND\n"); + break; + default: + pos += scnprintf(pos, endpos - pos, + "Unsupported band (%d)\n", + rsp->tas_status_mac[i].band); + goto out; + } + tas_enabled = true; + } + } + if (!tas_enabled) + pos += scnprintf(pos, endpos - pos, "\tOFF\n"); + + pos += scnprintf(pos, endpos - pos, "TAS Report\n"); + pos += scnprintf(pos, endpos - pos, "TAS FW version: %d\n", + rsp->tas_fw_version); + pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n", + rsp->is_uhb_for_usa_enable ? "True" : "False"); + pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n", + le16_to_cpu(rsp->curr_mcc)); + + pos += scnprintf(pos, endpos - pos, "Block list entries:"); + for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++) + pos += scnprintf(pos, endpos - pos, " 0x%x", + le16_to_cpu(rsp->block_list[i])); + + pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n", + dmi_get_system_info(DMI_SYS_VENDOR)); + pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n", + iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO"); + pos += scnprintf(pos, endpos - pos, + "\tDo TAS Support Dual Radio?: %s\n", + rsp->in_dual_radio ? 
"TRUE" : "FALSE"); + + for (i = 0; i < rsp->in_dual_radio + 1; i++) { + if (rsp->tas_status_mac[i].static_status == 0) { + pos += scnprintf(pos, endpos - pos, + "Static status: disabled\n"); + pos += scnprintf(pos, endpos - pos, + "Static disabled reason: %s (0)\n", + tas_dis_reason[0]); + goto out; + } + + pos += scnprintf(pos, endpos - pos, "TAS status for "); + switch (rsp->tas_status_mac[i].band) { + case TAS_LMAC_BAND_HB: + pos += scnprintf(pos, endpos - pos, "High band\n"); + break; + case TAS_LMAC_BAND_LB: + pos += scnprintf(pos, endpos - pos, "Low band\n"); + break; + case TAS_LMAC_BAND_UHB: + pos += scnprintf(pos, endpos - pos, + "Ultra high band\n"); + break; + case TAS_LMAC_BAND_INVALID: + pos += scnprintf(pos, endpos - pos, + "INVALID band\n"); + break; + default: + pos += scnprintf(pos, endpos - pos, + "Unsupported band (%d)\n", + rsp->tas_status_mac[i].band); + goto out; + } + pos += scnprintf(pos, endpos - pos, "Static status: %sabled\n", + rsp->tas_status_mac[i].static_status ? + "En" : "Dis"); + pos += scnprintf(pos, endpos - pos, + "\tStatic Disabled Reason: "); + if (rsp->tas_status_mac[i].static_dis_reason < TAS_DISABLED_REASON_MAX) + pos += scnprintf(pos, endpos - pos, "%s (%d)\n", + tas_dis_reason[rsp->tas_status_mac[i].static_dis_reason], + rsp->tas_status_mac[i].static_dis_reason); + else + pos += scnprintf(pos, endpos - pos, + "unsupported value (%d)\n", + rsp->tas_status_mac[i].static_dis_reason); + + pos += scnprintf(pos, endpos - pos, "Dynamic status:\n"); + dyn_status = (rsp->tas_status_mac[i].dynamic_status); + for_each_set_bit(tmp, &dyn_status, sizeof(dyn_status)) { + if (tmp >= 0 && tmp < TAS_DYNA_STATUS_MAX) + pos += scnprintf(pos, endpos - pos, + "\t%s (%d)\n", + tas_current_status[tmp], tmp); + } + + pos += scnprintf(pos, endpos - pos, + "Is near disconnection?: %s\n", + rsp->tas_status_mac[i].near_disconnection ? + "True" : "False"); + tmp = le16_to_cpu(rsp->tas_status_mac[i].max_reg_pwr_limit); + pos += scnprintf(pos, endpos - pos, + "Max. 
regulatory pwr limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + tmp = le16_to_cpu(rsp->tas_status_mac[i].sar_limit); + pos += scnprintf(pos, endpos - pos, + "SAR limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + } + +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + iwl_free_resp(&hcmd); + return ret; +} + static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buf; size_t bufsz; int pos; ssize_t ret; bufsz = mvm->fw->phy_integration_ver_len + 2; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len, mvm->fw->phy_integration_ver); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #define PRINT_STATS_LE32(_struct, _memb) \ pos += scnprintf(buf + pos, bufsz - pos, \ fmt_table, #_memb, \ le32_to_cpu(_struct->_memb)) static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; static const char *fmt_table = "\t%-30s %10u\n"; static const char *fmt_header = "%-32s\n"; int pos = 0; char *buf; int ret; size_t bufsz; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1; else /* 43 = size of each data line; 33 = size of each header */ bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) iwl_mvm_request_statistics(mvm, false); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm; PRINT_STATS_LE32(ofdm, ina_cnt); PRINT_STATS_LE32(ofdm, fina_cnt); PRINT_STATS_LE32(ofdm, plcp_err); PRINT_STATS_LE32(ofdm, crc32_err); PRINT_STATS_LE32(ofdm, overrun_err); PRINT_STATS_LE32(ofdm, early_overrun_err); PRINT_STATS_LE32(ofdm, crc32_good); PRINT_STATS_LE32(ofdm, false_alarm_cnt); PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); PRINT_STATS_LE32(ofdm, sfd_timeout); PRINT_STATS_LE32(ofdm, fina_timeout); PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ack_cnt); PRINT_STATS_LE32(ofdm, sent_cts_cnt); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, mh_format_err); PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); PRINT_STATS_LE32(ofdm, reserved); } else { struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm; PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck; PRINT_STATS_LE32(cck, ina_cnt); PRINT_STATS_LE32(cck, fina_cnt); PRINT_STATS_LE32(cck, plcp_err); PRINT_STATS_LE32(cck, crc32_err); PRINT_STATS_LE32(cck, overrun_err); PRINT_STATS_LE32(cck, early_overrun_err); PRINT_STATS_LE32(cck, crc32_good); PRINT_STATS_LE32(cck, false_alarm_cnt); PRINT_STATS_LE32(cck, fina_sync_err_cnt); PRINT_STATS_LE32(cck, sfd_timeout); PRINT_STATS_LE32(cck, fina_timeout); 
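The PRINT_STATS_LE32() lines here rely on two small tricks: the macro stringifies the member name (#_memb) so each row labels itself, and le32_to_cpu() converts the firmware's little-endian counters to host order. A toy userspace model of the same macro follows; the struct is an invented subset, and le32toh()/htole32() stand in for the kernel byte-order helpers.

/*
 * Toy model (userspace, invented struct) of the PRINT_STATS_LE32() helper:
 * #_memb stringifies the member name so each row labels itself, and
 * le32toh() plays the role of le32_to_cpu().  On FreeBSD the byte-order
 * macros live in <sys/endian.h> rather than <endian.h>.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct rx_phy_stats {		/* illustrative subset only */
	uint32_t plcp_err;
	uint32_t crc32_err;
};

#define PRINT_STATS_LE32(_s, _memb)					\
	printf("\t%-30s %10u\n", #_memb, (unsigned int)le32toh((_s)->_memb))

int main(void)
{
	struct rx_phy_stats ofdm = {
		.plcp_err  = htole32(7),	/* pretend firmware-endian counters */
		.crc32_err = htole32(42),
	};

	printf("Statistics_Rx - OFDM\n");
	PRINT_STATS_LE32(&ofdm, plcp_err);
	PRINT_STATS_LE32(&ofdm, crc32_err);
	return 0;
}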
PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ack_cnt); PRINT_STATS_LE32(cck, sent_cts_cnt); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, mh_format_err); PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); PRINT_STATS_LE32(cck, reserved); } else { struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck; PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - GENERAL"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_non_phy_v3 *general = &mvm->rx_stats_v3.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_bssid_frames); PRINT_STATS_LE32(general, filtered_frames); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, dsp_false_alarms); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); PRINT_STATS_LE32(general, directed_data_mpdu); } else { struct mvm_statistics_rx_non_phy *general = &mvm->rx_stats.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_ht_phy_v1 *ht = &mvm->rx_stats_v3.ofdm_ht; PRINT_STATS_LE32(ht, plcp_err); PRINT_STATS_LE32(ht, overrun_err); PRINT_STATS_LE32(ht, early_overrun_err); PRINT_STATS_LE32(ht, crc32_good); PRINT_STATS_LE32(ht, crc32_err); PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_crc32_good); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } else { struct mvm_statistics_rx_ht_phy *ht = &mvm->rx_stats.ofdm_ht; PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_mpdu_cnt); 
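The buffer sizing earlier in this reader budgets 43 bytes per counter line and 33 bytes per section header (four headers), plus a terminating NUL, which is where the ((sizeof(...) / sizeof(__le32)) * 43) + (4 * 33) + 1 expression comes from. A small worked example of that arithmetic, using a made-up member count:

/*
 * Worked example (made-up member count) of the buffer sizing used by
 * iwl_dbgfs_fw_rx_stats_read(): one 43-byte line per __le32 counter,
 * 33 bytes for each of the four section headers, plus one NUL.
 */
#include <stdio.h>

#define BYTES_PER_DATA_LINE	43
#define BYTES_PER_HEADER	33
#define NUM_HEADERS		4

static size_t rx_stats_bufsz(size_t n_le32_members)
{
	return n_le32_members * BYTES_PER_DATA_LINE +
	       NUM_HEADERS * BYTES_PER_HEADER + 1;
}

int main(void)
{
	/* e.g. a hypothetical statistics blob holding 100 32-bit counters */
	printf("bufsz = %zu\n", rx_stats_bufsz(100));	/* 100*43 + 4*33 + 1 = 4433 */
	return 0;
}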
PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef PRINT_STAT_LE32 static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, struct iwl_mvm_frame_stats *stats) { char *buff, *pos, *endpos; int idx, i; int ret; static const size_t bufsz = 1024; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; spin_lock_bh(&mvm->drv_stats_lock); pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames, stats->ht_frames, stats->vht_frames); pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames, stats->bw_40_frames, stats->bw_80_frames); pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames); pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames, stats->mimo2_frames); pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames, stats->success_frames); pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames); pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count); pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", stats->ampdu_count > 0 ? (stats->agg_frames / stats->ampdu_count) : 0); pos += scnprintf(pos, endpos - pos, "Last Rates\n"); idx = stats->last_frame_idx - 1; for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); if (stats->last_rates[idx] == 0) continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); pos += rs_pretty_print_rate_v1(pos, endpos - pos, stats->last_rates[idx]); if (pos < endpos - 1) *pos++ = '\n'; } spin_unlock_bh(&mvm->drv_stats_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats); } static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int __maybe_unused ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); /* allow one more restart that we're provoking here */ if (mvm->fw_restart >= 0) mvm->fw_restart++; if (count == 6 && !strcmp(buf, "nolog\n")) { set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); } /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LONG_GROUP, REPLY_ERROR), 0, 0, NULL); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (count == 6 && !strcmp(buf, "nolog\n")) set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); iwl_force_nmi(mvm->trans); return count; } static ssize_t iwl_dbgfs_scan_ant_rxchain_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); /* print which antennas were set for the scan command by the user */ pos += scnprintf(buf + pos, bufsz - pos, 
"Antennas for scan: "); if (mvm->scan_rx_ant & ANT_A) pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); - pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); + pos += scnprintf(buf + pos, bufsz - pos, " (%x)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 scan_rx_ant; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) return -EINVAL; if (scan_rx_ant > ANT_ABC) return -EINVAL; if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) return -EINVAL; if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } return count; } static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; ret = hex2bin(cmd.indirection_table, buf, nbytes); if (ret) return ret; /* * The input is the redirection table, partial or full. * Repeat the pattern if needed. * For example, input of 01020F will be repeated 42 times, * indirecting RSS hash results to queues 1, 2, 15 (skipping * queues 3 - 14). */ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; for (i = 1; i < num_repeats; i++) memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes); /* handle cut in the middle pattern for the last places */ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); else ret = 0; mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_op_mode *opmode = container_of((void *)mvm, struct iwl_op_mode, op_mode_specific); struct iwl_rx_cmd_buffer rxb = { ._rx_page_order = 0, .truesize = 0, /* not used */ ._offset = 0, }; struct iwl_rx_packet *pkt; int bin_len = count / 2; int ret = -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; /* supporting only MQ RX */ if (!mvm->trans->trans_cfg->mq_rx_supported) return -ENOTSUPP; rxb._page = alloc_pages(GFP_ATOMIC, 0); if (!rxb._page) return -ENOMEM; pkt = rxb_addr(&rxb); ret = hex2bin(page_address(rxb._page), buf, bin_len); if (ret) goto out; /* avoid invalid memory access and malformed packet */ if (bin_len < sizeof(*pkt) || bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt)) goto out; local_bh_disable(); iwl_mvm_rx_mq(opmode, NULL, &rxb); local_bh_enable(); ret = 0; out: iwl_free_rxb(&rxb); return ret ?: count; } static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) { struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; struct sk_buff *beacon; struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; + unsigned int link_id; u8 rate; int i; len /= 2; /* Element len should be represented by u8 */ if (len >= U8_MAX) 
return -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!iwl_mvm_has_new_tx_api(mvm) && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; mutex_lock(&mvm->mutex); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); if (!vif) continue; if (vif->type == NL80211_IFTYPE_AP) break; } if (i == NUM_MAC_INDEX_DRIVER || !vif) goto out_err; mvm->hw->extra_beacon_tailroom = len; beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); if (!beacon) goto out_err; if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { dev_kfree_skb(beacon); goto out_err; } mvm->beacon_inject_active = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); - rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); - - beacon_cmd.flags = - cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); - beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + beacon_cmd.flags = + cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, + rate)); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12) + beacon_cmd.link_id = + cpu_to_le32(mvmvif->link[link_id]->fw_link_id); + else + beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id); - iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, - &beacon_cmd.tim_size, - beacon->data, beacon->len); + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); - iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, - sizeof(beacon_cmd)); + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); + } mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); return 0; out_err: mutex_unlock(&mvm->mutex); return -EINVAL; } static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); mvm->hw->extra_beacon_tailroom = 0; return ret ?: count; } static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); mvm->hw->extra_beacon_tailroom = 0; mvm->beacon_inject_active = false; return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int conf; char buf[8]; const size_t bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { unsigned int conf_id; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = kstrtouint(buf, 0, &conf_id); if (ret) return ret; if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (count == 0) return 0; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL); 
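/*
 * Illustrative sketch of how this file is driven from user space: writing
 * any non-empty string fires the USER_TRIGGER time point above and then
 * collects a FW_DBG_TRIGGER_USER dump below, with the written text (less
 * its final character, typically the trailing newline, hence "count - 1")
 * passed along as the trigger description. A hypothetical invocation,
 * assuming the usual debugfs layout, which may differ per platform:
 *   echo "tx stall repro" > /sys/kernel/debug/iwlwifi/<pci-dev>/iwlmvm/fw_dbg_collect
 */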
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); return count; } static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 timepoint; if (kstrtou32(buf, 0, &timepoint)) return -EINVAL; if (timepoint == IWL_FW_INI_TIME_POINT_INVALID || timepoint >= IWL_FW_INI_TIME_POINT_NUM) return -EINVAL; iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL); return count; } #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, mvm, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) -#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) -#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) +static ssize_t +_iwl_dbgfs_link_sta_wrap_write(ssize_t (*real)(struct ieee80211_link_sta *, + struct iwl_mvm_sta *, + struct iwl_mvm *, + struct iwl_mvm_link_sta *, + char *, + size_t, loff_t *), + struct file *file, + char *buf, size_t buf_size, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta); + struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm; + struct iwl_mvm_link_sta *mvm_link_sta; + ssize_t ret; -#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ - debugfs_create_file(alias, mode, parent, sta, \ - &iwl_dbgfs_##name##_ops); \ - } while (0) -#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ - MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) + mutex_lock(&mvm->mutex); + + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!mvm_link_sta)) { + mutex_unlock(&mvm->mutex); + return -ENODEV; + } + + ret = real(link_sta, mvmsta, mvm, mvm_link_sta, buf, buf_size, ppos); + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t +_iwl_dbgfs_link_sta_wrap_read(ssize_t (*real)(struct ieee80211_link_sta *, + struct iwl_mvm_sta *, + struct iwl_mvm *, + struct iwl_mvm_link_sta *, + char __user *, + size_t, loff_t *), + struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta); + struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm; + struct iwl_mvm_link_sta *mvm_link_sta; + ssize_t ret; + + mutex_lock(&mvm->mutex); + + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!mvm_link_sta)) { + mutex_unlock(&mvm->mutex); + return -ENODEV; + } + + ret = real(link_sta, mvmsta, mvm, mvm_link_sta, user_buf, count, ppos); + + mutex_unlock(&mvm->mutex); + + return ret; +} + +#define MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, buflen) \ +static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + char buf[buflen] = {}; \ + size_t buf_size = min(count, sizeof(buf) - 1); \ + \ + if (copy_from_user(buf, user_buf, sizeof(buf))) 
\ + return -EFAULT; \ + \ + return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \ + file, \ + buf, buf_size, ppos); \ +} \ + +#define MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +static ssize_t _iwl_dbgfs_link_sta_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + return _iwl_dbgfs_link_sta_wrap_read(iwl_dbgfs_##name##_read, \ + file, \ + user_buf, count, ppos); \ +} \ + +#define MVM_DEBUGFS_WRITE_LINK_STA_FILE_OPS(name, bufsz) \ +MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .write = _iwl_dbgfs_link_sta_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(name) \ +MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .read = _iwl_dbgfs_link_sta_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(name, bufsz) \ +MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .read = _iwl_dbgfs_link_sta_##name##_read, \ + .write = _iwl_dbgfs_link_sta_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, parent, link_sta, \ + &iwl_dbgfs_link_sta_##name##_ops) +#define MVM_DEBUGFS_ADD_LINK_STA_FILE(name, parent, mode) \ + MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(#name, name, parent, mode) static ssize_t iwl_dbgfs_prph_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); if (!mvm->dbgfs_prph_reg_addr) return -EINVAL; pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", mvm->dbgfs_prph_reg_addr, iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 args; u32 value; args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); /* if we only want to set the reg address - nothing more to do */ if (args == 1) goto out; /* otherwise, make sure we have both address and value */ if (args != 2) return -EINVAL; iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); out: return count; } static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); mutex_unlock(&mvm->mutex); return ret ?: count; } struct iwl_mvm_sniffer_apply { struct iwl_mvm *mvm; u8 *bssid; u16 aid; }; static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm_sniffer_apply *apply = data; apply->mvm->cur_aid = cpu_to_le16(apply->aid); memcpy(apply->mvm->cur_bssid, apply->bssid, sizeof(apply->mvm->cur_bssid)); return true; } static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_notification_wait wait; struct iwl_he_monitor_cmd he_mon_cmd = {}; struct iwl_mvm_sniffer_apply apply = { .mvm = mvm, }; u16 wait_cmds[] = { 
WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), }; u32 aid; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1], &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); if (ret != 7) return -EINVAL; he_mon_cmd.aid = cpu_to_le16(aid); apply.aid = aid; apply.bssid = (void *)he_mon_cmd.bssid; mutex_lock(&mvm->mutex); /* * Use the notification waiter to get our function triggered * in sequence with other RX. This ensures that frames we get * on the RX queue _before_ the new configuration is applied * still have mvm->cur_aid pointing to the old AID, and that * frames on the RX queue _after_ the firmware processed the * new configuration (and sent the response, synchronously) * get mvm->cur_aid correctly set to the new AID. */ iwl_init_notification_wait(&mvm->notif_wait, &wait, wait_cmds, ARRAY_SIZE(wait_cmds), iwl_mvm_sniffer_apply, &apply); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), 0, sizeof(he_mon_cmd), &he_mon_cmd); /* no need to really wait, we already did anyway */ iwl_remove_notification(&mvm->notif_wait, &wait); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[32]; int len; len = scnprintf(buf, sizeof(buf), "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0], mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3], mvm->cur_bssid[4], mvm->cur_bssid[5]); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; unsigned int pos = 0; size_t bufsz = sizeof(buf); int i; mutex_lock(&mvm->mutex); for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", mvm->uapsd_noagg_bssids[i].addr); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; struct iwl_ltr_config_cmd ltr_config = {0}; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x", <r_config.flags, <r_config.static_long, <r_config.static_short, <r_config.ltr_cfg_values[0], <r_config.ltr_cfg_values[1], <r_config.ltr_cfg_values[2], <r_config.ltr_cfg_values[3]) != 7) { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config), <r_config); mutex_unlock(&mvm->mutex); if (ret) IWL_ERR(mvm, "failed to send ltr configuration cmd\n"); return ret ?: count; } static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = 0; u16 op_id; if (kstrtou16(buf, 10, &op_id)) return -EINVAL; /* value zero triggers re-sending the default table to the device */ if (!op_id) { mutex_lock(&mvm->mutex); ret = iwl_rfi_send_config_cmd(mvm, NULL); mutex_unlock(&mvm->mutex); } else { ret = -EOPNOTSUPP; /* in the future a new table will be added */ } return ret ?: count; } /* The size computation is as follows: * each number needs at most 3 characters, number of rows is the size of * the table; So, need 5 chars 
for the "freq: " part and each tuple afterwards * needs 6 characters for numbers and 5 for the punctuation around. */ #define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\ (5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5))) static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_rfi_freq_table_resp_cmd *resp; u32 status; char buf[IWL_RFI_BUF_SIZE]; int i, j, pos = 0; resp = iwl_rfi_get_freq_table(mvm); if (IS_ERR(resp)) return PTR_ERR(resp); status = le32_to_cpu(resp->status); if (status != RFI_FREQ_TABLE_OK) { scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status); goto out; } for (i = 0; i < ARRAY_SIZE(resp->table); i++) { pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ", resp->table[i].freq); for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++) pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "(%d, %d) ", resp->table[i].channels[j], resp->table[i].bands[j]); pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n"); } out: kfree(resp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); -MVM_DEBUGFS_READ_FILE_OPS(rs_data); +MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); +MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); +MVM_DEBUGFS_READ_FILE_OPS(wifi_6e_enable); #endif -MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); +MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(amsdu_len, 16); MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16); static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd cmd = {}; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, }; size_t delta; ssize_t ret, len; if (!iwl_mvm_firmware_running(mvm)) return -EIO; 
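/*
 * Worked example with illustrative numbers: a read at *ppos = 0x00801003
 * for count = 6 gives delta = 3, so the command built below requests the
 * dword-aligned address 0x00801000 and ALIGN(6 + 3, 4) / 4 = 3 dwords.
 * The top byte of the offset (0x00 here) selects LMAC memory; a non-zero
 * top byte selects UMAC. Only the requested "count" bytes, starting at
 * offset "delta" inside the response, are copied back to user space.
 */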
hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ); /* Take care of alignment of both the position and the length */ delta = *ppos & 0x3; cmd.addr = cpu_to_le32(*ppos - delta); cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); if (ret < 0) return ret; + if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) { + ret = -EIO; + goto out; + } + rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } len = min((size_t)le32_to_cpu(rsp->len) << 2, iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp)); len = min(len - delta, count); if (len < 0) { ret = -EFAULT; goto out; } ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len); *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static ssize_t iwl_dbgfs_mem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd *cmd; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = {}; size_t cmd_size; size_t data_size; u32 op, len; ssize_t ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); if (*ppos & 0x3 || count < 4) { op = DEBUG_MEM_OP_WRITE_BYTES; len = min(count, (size_t)(4 - (*ppos & 0x3))); data_size = len; } else { op = DEBUG_MEM_OP_WRITE; len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS); data_size = len << 2; } cmd_size = sizeof(*cmd) + ALIGN(data_size, 4); cmd = kzalloc(cmd_size, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->op = cpu_to_le32(op); cmd->len = cpu_to_le32(len); cmd->addr = cpu_to_le32(*ppos); if (copy_from_user((void *)cmd->data, user_buf, data_size)) { kfree(cmd); return -EFAULT; } hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, hcmd.data[0] = (void *)cmd; hcmd.len[0] = cmd_size; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); kfree(cmd); if (ret < 0) return ret; + if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) { + ret = -EIO; + goto out; + } + rsp = (void *)hcmd.resp_pkt->data; if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } ret = data_size; *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static const struct file_operations iwl_dbgfs_mem_ops = { .read = iwl_dbgfs_mem_read, .write = iwl_dbgfs_mem_write, .open = simple_open, .llseek = default_llseek, }; -void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir) +void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) { - MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); + MVM_DEBUGFS_ADD_LINK_STA_FILE(rs_data, dir, 0400); } - MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); + + MVM_DEBUGFS_ADD_LINK_STA_FILE(amsdu_len, dir, 0600); } void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; spin_lock_init(&mvm->drv_stats_lock); MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); 
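/*
 * The mode argument follows the usual octal permission convention: 0200
 * is a write-only control, 0400 a read-only dump, and 0600 allows both;
 * since the group and other bits are clear and debugfs entries are owned
 * by root, access is effectively limited to root.
 */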
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600); if (mvm->fw->phy_integration_ver) MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(tas_get_status, mvm->debugfs_dir, 0400); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(wifi_6e_enable, mvm->debugfs_dir, 0400); #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode); MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert); debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, &mvm->last_netdetect_scans); #endif debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled); debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob); debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob); debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob); debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob); debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob); debugfs_create_blob("nvm_reg", S_IRUSR, mvm->debugfs_dir, &mvm->nvm_reg_blob); debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, &iwl_dbgfs_mem_ops); #if defined(__linux__) /* * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) 
*/ if (!IS_ERR(mvm->debugfs_dir)) { char buf[100]; snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); } #endif } diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c index 66140af891d3..bc7451e77f25 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c +++ b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c @@ -1,1371 +1,1407 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include #include #include #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" #include "constants.h" struct iwl_mvm_loc_entry { struct list_head list; u8 addr[ETH_ALEN]; u8 lci_len, civic_len; u8 buf[]; }; struct iwl_mvm_smooth_entry { struct list_head list; u8 addr[ETH_ALEN]; s64 rtt_avg; u64 host_time; }; +enum iwl_mvm_pasn_flags { + IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0), +}; + struct iwl_mvm_ftm_pasn_entry { struct list_head list; u8 addr[ETH_ALEN]; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; u8 cipher; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u32 flags; }; int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len) { struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn), GFP_KERNEL); u32 expected_tk_len; lockdep_assert_held(&mvm->mutex); if (!pasn) return -ENOBUFS; pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher); switch (pasn->cipher) { case IWL_LOCATION_CIPHER_CCMP_128: case IWL_LOCATION_CIPHER_GCMP_128: expected_tk_len = WLAN_KEY_LEN_CCMP; break; case IWL_LOCATION_CIPHER_GCMP_256: expected_tk_len = WLAN_KEY_LEN_GCMP_256; break; default: goto out; } /* * If associated to this AP and already have security context, * the TK is already configured for this station, so it * shouldn't be set again here. 
*/ - if (vif->bss_conf.assoc && - !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) { + if (vif->cfg.assoc) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; struct ieee80211_sta *sta; + u8 sta_id; rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (!IS_ERR_OR_NULL(sta) && sta->mfp) - expected_tk_len = 0; + for_each_vif_active_link(vif, link_conf, link_id) { + if (memcmp(addr, link_conf->bssid, ETH_ALEN)) + continue; + + sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (!IS_ERR_OR_NULL(sta) && sta->mfp) + expected_tk_len = 0; + break; + } rcu_read_unlock(); } - if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) { + if (tk_len != expected_tk_len || + (hltk_len && hltk_len != sizeof(pasn->hltk))) { IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", tk_len, hltk_len); goto out; } + if (!expected_tk_len && !hltk_len) { + IWL_ERR(mvm, "TK and HLTK not set\n"); + goto out; + } + memcpy(pasn->addr, addr, sizeof(pasn->addr)); - memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); + + if (hltk_len) { + memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); + pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK; + } if (tk && tk_len) memcpy(pasn->tk, tk, sizeof(pasn->tk)); list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list); return 0; out: kfree(pasn); return -EINVAL; } void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr) { struct iwl_mvm_ftm_pasn_entry *entry, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(entry->addr, addr, sizeof(entry->addr))) continue; list_del(&entry->list); kfree(entry); return; } } static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) { struct iwl_mvm_loc_entry *e, *t; mvm->ftm_initiator.req = NULL; mvm->ftm_initiator.req_wdev = NULL; memset(mvm->ftm_initiator.responses, 0, sizeof(mvm->ftm_initiator.responses)); list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { list_del(&e->list); kfree(e); } } void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) { struct cfg80211_pmsr_result result = { .status = NL80211_PMSR_STATUS_FAILURE, .final = 1, .host_time = ktime_get_boottime_ns(), .type = NL80211_PMSR_TYPE_FTM, }; int i; lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) return; for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, ETH_ALEN); result.ftm.burst_index = mvm->ftm_initiator.responses[i]; cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); } cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm) { INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp); IWL_DEBUG_INFO(mvm, "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n", IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH, IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA, IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ, IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT, IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT); } void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm) { struct iwl_mvm_smooth_entry *se, *st; list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp, list) { list_del(&se->list); kfree(se); } } static int iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) { switch (s) { case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS: return 0; 
case IWL_TOF_RANGE_REQUEST_STATUS_BUSY: return -EBUSY; default: WARN_ON_ONCE(1); return -EIO; } } static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v5 *cmd, struct cfg80211_pmsr_request *req) { int i; cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; /* use maximum for "no timeout" or bigger than what we can do */ if (!req->timeout || req->timeout > 255 * 100) cmd->req_timeout = 255; else cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100); /* * We treat it always as random, since if not we'll * have filled our local address there instead. */ cmd->macaddr_random = 1; memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); else eth_broadcast_addr(cmd->range_req_bssid); } static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, #if defined(__linux__) struct iwl_tof_range_req_cmd_v9 *cmd, #elif defined(__FreeBSD__) struct iwl_tof_range_req_cmd_v9 *cmd, /* XXX-BZ Probably better solved by a common struct in fw for top parts of the struct. */ #endif struct cfg80211_pmsr_request *req) { int i; cmd->initiator_flags = cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; /* * Use a large value for "no timeout". Don't use the maximum value * because of fw limitations. */ if (req->timeout) cmd->req_timeout_ms = cpu_to_le32(req->timeout); else cmd->req_timeout_ms = cpu_to_le32(0xfffff); memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); /* AP's TSF is only relevant if associated */ for (i = 0; i < req->n_peers; i++) { if (req->peers[i].report_ap_tsf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); cmd->tsf_mac_id = cpu_to_le32(mvmvif->id); return; } } } else { eth_broadcast_addr(cmd->range_req_bssid); } /* Don't report AP's TSF */ cmd->tsf_mac_id = cpu_to_le32(0xff); } static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v8 *cmd, struct cfg80211_pmsr_request *req) { iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req); } static int iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, u8 *channel, u8 *bandwidth, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; *channel = ieee80211_frequency_to_channel(freq); switch (peer->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: *bandwidth = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: *bandwidth = IWL_TOF_BW_20_HT; break; case NL80211_CHAN_WIDTH_40: *bandwidth = IWL_TOF_BW_40; break; case NL80211_CHAN_WIDTH_80: *bandwidth = IWL_TOF_BW_80; break; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); return -EINVAL; } *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; return 0; } static int iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, u8 *channel, u8 *format_bw, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; u8 cmd_ver; *channel = ieee80211_frequency_to_channel(freq); switch (peer->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_20: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_40: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_80: *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_160: cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver >= 13) { *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; break; } fallthrough; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); return -EINVAL; } /* non EDCA based measurement must use HE preamble */ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE; *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; return 0; } static int iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v2 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->measure_type = 0; /* regular two-sided FTM */ target->retries_per_sample = peer->ftm.ftmr_retries; target->asap_mode = peer->ftm.asap; target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK; if (peer->ftm.request_lci) target->location_req |= IWL_TOF_LOC_LCI; if (peer->ftm.request_civicloc) target->location_req |= IWL_TOF_LOC_CIVIC; target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO; return 0; } #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) static void iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v6 *target) { memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->ftmr_max_retries = peer->ftm.ftmr_retries; target->initiator_ap_flags = cpu_to_le32(0); if (peer->ftm.asap) FTM_PUT_FLAG(ASAP); if (peer->ftm.request_lci) FTM_PUT_FLAG(LCI_REQUEST); if (peer->ftm.request_civicloc) FTM_PUT_FLAG(CIVIC_REQUEST); if (IWL_MVM_FTM_INITIATOR_DYNACK) FTM_PUT_FLAG(DYN_ACK); if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) FTM_PUT_FLAG(ALGO_LR); else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) FTM_PUT_FLAG(ALGO_FFT); if (peer->ftm.trigger_based) FTM_PUT_FLAG(TB); else if (peer->ftm.non_trigger_based) FTM_PUT_FLAG(NON_TB); if ((peer->ftm.trigger_based || 
peer->ftm.non_trigger_based) && peer->ftm.lmr_feedback) FTM_PUT_FLAG(LMR_FEEDBACK); } static int iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v3 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; /* * Versions 3 and 4 has some common fields, so * iwl_mvm_ftm_put_target_common() can be used for version 7 too. */ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); return 0; } static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v4 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, &target->format_bw, &target->ctrl_ch_position); if (ret) return ret; iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); return 0; } static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v6 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, &target->format_bw, &target->ctrl_ch_position); if (ret) return ret; iwl_mvm_ftm_put_target_common(mvm, peer, target); - if (vif->bss_conf.assoc && - !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { + if (vif->cfg.assoc) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_sta *sta; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) + continue; + + target->sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return PTR_ERR_OR_ZERO(sta); + } - sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) - FTM_PUT_FLAG(PMF); - + if (sta->mfp && (peer->ftm.trigger_based || + peer->ftm.non_trigger_based)) + FTM_PUT_FLAG(PMF); + break; + } rcu_read_unlock(); - - target->sta_id = mvmvif->ap_sta_id; } else { target->sta_id = IWL_MVM_INVALID_STA; } /* * TODO: Beacon interval is currently unknown, so use the common value * of 100 TUs. 
*/ target->beacon_interval = cpu_to_le16(100); return 0; } static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd) { u32 status; int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status); if (!err && status) { IWL_ERR(mvm, "FTM range request command failure, status: %u\n", status); err = iwl_ftm_range_request_status_to_err(status); } return err; } static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v5 cmd_v5; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v5, .len[0] = sizeof(cmd_v5), }; u8 i; int err; iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); for (i = 0; i < cmd_v5.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v7 cmd_v7; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v7, .len[0] = sizeof(cmd_v7), }; u8 i; int err; /* * Versions 7 and 8 has the same structure except from the responders * list, so iwl_mvm_ftm_cmd() can be used for version 7 too. */ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req); for (i = 0; i < cmd_v7.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v8 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v9 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static void iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct iwl_tof_range_req_ap_entry_v6 *target = data; if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) return; WARN_ON(!sta->mfp); if (WARN_ON(key->keylen > sizeof(target->tk))) return; memcpy(target->tk, key->key, key->keylen); target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); } static void iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct 
iwl_tof_range_req_ap_entry_v7 *target) { struct iwl_mvm_ftm_pasn_entry *entry; u32 flags = le32_to_cpu(target->initiator_ap_flags); if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | IWL_INITIATOR_AP_FLAGS_TB))) return; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) continue; target->cipher = entry->cipher; - memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); - if (vif->bss_conf.assoc && + if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK) + memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); + else + memset(target->hltk, 0, sizeof(target->hltk)); + + if (vif->cfg.assoc && !memcmp(vif->bss_conf.bssid, target->bssid, sizeof(target->bssid))) ieee80211_iter_keys(mvm->hw, vif, iter, target); else memcpy(target->tk, entry->tk, sizeof(target->tk)); memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); target->initiator_ap_flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); return; } } static int iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v7 *target) { int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); if (err) return err; iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); return err; } static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v11 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static void iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, struct iwl_tof_range_req_ap_entry_v8 *target) { /* Only 2 STS are supported on Tx */ u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 
1 : IWL_MVM_FTM_I2R_MAX_STS; target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS); target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | (i2r_max_sts << IWL_LOCATION_MAX_STS_POS); target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF; target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; } static int iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v8 *target) { u32 flags; int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); if (ret) return ret; iwl_mvm_ftm_set_ndp_params(mvm, target); /* * If secure LTF is turned off, replace the flag with PMF only */ flags = le32_to_cpu(target->initiator_ap_flags); if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; flags |= IWL_INITIATOR_AP_FLAGS_PMF; target->initiator_ap_flags = cpu_to_le32(flags); } return 0; } static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v12 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v13 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target); if (err) return err; if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) target->bss_color = peer->ftm.bss_color; if (peer->ftm.non_trigger_based) { target->min_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); target->burst_period = cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); } else { target->min_time_between_msr = cpu_to_le16(0); } target->band = iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); int err; lockdep_assert_held(&mvm->mutex); if (mvm->ftm_initiator.req) return -EBUSY; if (new_api) { u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { case 13: err = iwl_mvm_ftm_start_v13(mvm, vif, req); break; case 12: err = iwl_mvm_ftm_start_v12(mvm, vif, req); break; case 11: err = iwl_mvm_ftm_start_v11(mvm, vif, req); break; case 9: case 10: err = iwl_mvm_ftm_start_v9(mvm, vif, req); break; case 8: err = iwl_mvm_ftm_start_v8(mvm, vif, req); break; default: err = iwl_mvm_ftm_start_v7(mvm, vif, req); break; } } 
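/*
 * The TOF_RANGE_REQ_CMD version advertised by the firmware selects the
 * matching command layout in the switch above (v13 down to the v7
 * fallback); firmware without IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ only
 * understands the original v5 request built in the branch below.
 */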
else { err = iwl_mvm_ftm_start_v5(mvm, vif, req); } if (!err) { mvm->ftm_initiator.req = req; mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); } return err; } void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_abort_cmd cmd = { .request_id = req->cookie, }; lockdep_assert_held(&mvm->mutex); if (req != mvm->ftm_initiator.req) return; iwl_mvm_ftm_reset(mvm); if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "failed to abort FTM process\n"); } static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, const u8 *addr) { int i; for (i = 0; i < req->n_peers; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; if (ether_addr_equal_unaligned(peer->addr, addr)) return i; } return -ENOENT; } static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts) { u32 gp2_ts = le32_to_cpu(fw_gp2_ts); u32 curr_gp2, diff; u64 now_from_boot_ns; iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &now_from_boot_ns, NULL); if (curr_gp2 >= gp2_ts) diff = curr_gp2 - gp2_ts; else diff = curr_gp2 + (U32_MAX - gp2_ts + 1); return now_from_boot_ns - (u64)diff * 1000; } static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { struct iwl_mvm_loc_entry *entry; list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) { if (!ether_addr_equal_unaligned(res->addr, entry->addr)) continue; if (entry->lci_len) { res->ftm.lci_len = entry->lci_len; res->ftm.lci = entry->buf; } if (entry->civic_len) { res->ftm.civicloc_len = entry->civic_len; res->ftm.civicloc = entry->buf + entry->lci_len; } /* we found the entry we needed */ break; } } static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, u8 num_of_aps) { lockdep_assert_held(&mvm->mutex); if (request_id != (u8)mvm->ftm_initiator.req->cookie) { IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", request_id, (u8)mvm->ftm_initiator.req->cookie); return -EINVAL; } if (num_of_aps > mvm->ftm_initiator.req->n_peers) { IWL_ERR(mvm, "FTM range response invalid\n"); return -EINVAL; } return 0; } static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { - struct iwl_mvm_smooth_entry *resp; + struct iwl_mvm_smooth_entry *resp = NULL, *iter; s64 rtt_avg, rtt = res->ftm.rtt_avg; u32 undershoot, overshoot; u8 alpha; - bool found; if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) return; WARN_ON(rtt < 0); if (res->status != NL80211_PMSR_STATUS_SUCCESS) { IWL_DEBUG_INFO(mvm, ": %pM: ignore failed measurement. 
Status=%u\n", res->addr, res->status); return; } - found = false; - list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) { - if (!memcmp(res->addr, resp->addr, ETH_ALEN)) { - found = true; + list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) { + if (!memcmp(res->addr, iter->addr, ETH_ALEN)) { + resp = iter; break; } } - if (!found) { + if (!resp) { resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return; memcpy(resp->addr, res->addr, ETH_ALEN); list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp); resp->rtt_avg = rtt; IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n", resp->addr, resp->rtt_avg); goto update_time; } if (res->host_time - resp->host_time > IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) { resp->rtt_avg = rtt; IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n", resp->addr, resp->rtt_avg); goto update_time; } /* Smooth the results based on the tracked RTT average */ undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT; overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", resp->addr, resp->rtt_avg, rtt_avg, rtt); /* * update the responder's average RTT results regardless of * the under/over shoot logic below */ resp->rtt_avg = rtt_avg; /* smooth the results */ if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) { res->ftm.rtt_avg = rtt_avg; IWL_DEBUG_INFO(mvm, "undershoot: val=%lld\n", (rtt_avg - rtt)); } else if (rtt_avg < rtt && (rtt - rtt_avg) > overshoot) { res->ftm.rtt_avg = rtt_avg; IWL_DEBUG_INFO(mvm, "overshoot: val=%lld\n", (rtt - rtt_avg)); } update_time: resp->host_time = res->host_time; } static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); IWL_DEBUG_INFO(mvm, "entry %d\n", index); IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); - IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index); + IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index); IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); - IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread); + IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread); IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); } static void iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) { struct iwl_mvm_ftm_pasn_entry *entry; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr))) continue; memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn)); memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn)); return; } } static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) { if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ)) return 5; /* Starting from version 8, the FW advertises the version */ if (mvm->cmd_ver.range_resp >= 8) return mvm->cmd_ver.range_resp; else if (fw_has_api(&mvm->fw->ucode_capa, 
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) return 7; /* The first version of the new range request API */ return 6; } static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) { switch (ver) { case 9: case 8: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); case 7: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7); case 6: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6); case 5: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5); default: WARN_ONCE(1, "FTM: unsupported range response version %u", ver); return false; } } void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data; int i; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); u8 num_of_aps, last_in_batch; u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm); lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) { return; } if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len))) return; if (new_api) { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id, fw_resp_v8->num_of_aps)) return; num_of_aps = fw_resp_v8->num_of_aps; last_in_batch = fw_resp_v8->last_report; } else { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, fw_resp_v5->num_of_aps)) return; num_of_aps = fw_resp_v5->num_of_aps; last_in_batch = fw_resp_v5->last_in_batch; } IWL_DEBUG_INFO(mvm, "Range response received\n"); - IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n", + IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n", mvm->ftm_initiator.req->cookie, num_of_aps); for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { struct cfg80211_pmsr_result result = {}; struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap; int peer_idx; if (new_api) { if (notif_ver >= 8) { fw_ap = &fw_resp_v8->ap[i]; iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); } else if (notif_ver == 7) { fw_ap = (void *)&fw_resp_v7->ap[i]; } else { fw_ap = (void *)&fw_resp_v6->ap[i]; } result.final = fw_ap->last_burst; result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); result.ap_tsf_valid = 1; } else { /* the first part is the same for old and new APIs */ fw_ap = (void *)&fw_resp_v5->ap[i]; /* * FIXME: the firmware needs to report this, we don't * even know the number of bursts the responder picked * (if we asked it to) */ result.final = 0; } peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req, fw_ap->bssid); if (peer_idx < 0) { IWL_WARN(mvm, "Unknown address (%pM, target #%d) in FTM response\n", fw_ap->bssid, i); continue; } switch (fw_ap->measure_status) { case IWL_TOF_ENTRY_SUCCESS: result.status = NL80211_PMSR_STATUS_SUCCESS; break; case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: result.status = NL80211_PMSR_STATUS_TIMEOUT; break; case IWL_TOF_ENTRY_NO_RESPONSE: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; break; case IWL_TOF_ENTRY_REQUEST_REJECTED: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_PEER_BUSY; result.ftm.busy_retry_time = fw_ap->refusal_period; break; default: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = 
NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; break; } memcpy(result.addr, fw_ap->bssid, ETH_ALEN); result.host_time = iwl_mvm_ftm_get_host_time(mvm, fw_ap->timestamp); result.type = NL80211_PMSR_TYPE_FTM; result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx]; mvm->ftm_initiator.responses[peer_idx]++; result.ftm.rssi_avg = fw_ap->rssi; result.ftm.rssi_avg_valid = 1; result.ftm.rssi_spread = fw_ap->rssi_spread; result.ftm.rssi_spread_valid = 1; result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); result.ftm.rtt_avg_valid = 1; result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); result.ftm.rtt_variance_valid = 1; result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); result.ftm.rtt_spread_valid = 1; iwl_mvm_ftm_get_lci_civic(mvm, &result); iwl_mvm_ftm_rtt_smoothing(mvm, &result); cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) - IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n", + IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n", fw_ap->rttConfidence); iwl_mvm_debug_range_resp(mvm, i, &result); } if (last_in_batch) { cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } } void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); const struct ieee80211_mgmt *mgmt = (void *)pkt->data; size_t len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_loc_entry *entry; const u8 *ies, *lci, *civic, *msr_ie; size_t ies_len, lci_len = 0, civic_len = 0; size_t baselen = IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.ftm); static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI; static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC; if (len <= baselen) return; lockdep_assert_held(&mvm->mutex); ies = mgmt->u.action.u.ftm.variable; ies_len = len - baselen; msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_lci, 1, 4); if (msr_ie) { lci = msr_ie + 2; lci_len = msr_ie[1]; } msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_civic, 1, 4); if (msr_ie) { civic = msr_ie + 2; civic_len = msr_ie[1]; } entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL); if (!entry) return; memcpy(entry->addr, mgmt->bssid, ETH_ALEN); entry->lci_len = lci_len; if (lci_len) memcpy(entry->buf, lci, lci_len); entry->civic_len = civic_len; if (civic_len) memcpy(entry->buf + lci_len, civic, civic_len); list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list); } diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c index 4cb15046ec5c..769a344ced8e 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c +++ b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c @@ -1,481 +1,495 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include #include #include "mvm.h" #include "constants.h" struct iwl_mvm_pasn_sta { struct list_head list; struct iwl_mvm_int_sta int_sta; u8 addr[ETH_ALEN]; }; struct iwl_mvm_pasn_hltk_data { u8 *addr; u8 cipher; u8 *hltk; }; static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef, u8 *bw, u8 *ctrl_ch_position) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: *bw = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: *bw = IWL_TOF_BW_20_HT; break; 
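[Aside, not part of the patch] On the constant 6666 in iwl_mvm_debug_range_resp() above: rtt_avg is kept in picoseconds, and light covers roughly 0.03 cm per picosecond, so a target 1 cm away adds about 66.7 ps of round-trip time; multiplying by 100 and dividing by 6666 is the integer form of that picoseconds-to-centimetres conversion. A tiny illustrative helper:

#include <stdio.h>

/* Round-trip time in picoseconds -> one-way distance in centimetres.
 * 2 cm of path (1 cm each way) takes ~66.7 ps, hence the 100/6666 scale. */
static long long rtt_ps_to_cm(long long rtt_ps)
{
        return rtt_ps * 100 / 6666;
}

int main(void)
{
        /* e.g. a 666600 ps round trip corresponds to roughly 100 m */
        printf("%lld cm\n", rtt_ps_to_cm(666600));
        return 0;
}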
case NL80211_CHAN_WIDTH_40: *bw = IWL_TOF_BW_40; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_80: *bw = IWL_TOF_BW_80; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; default: return -ENOTSUPP; } return 0; } static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, u8 *format_bw, u8 *ctrl_ch_position, u8 cmd_ver) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_20: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_40: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_80: *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_160: if (cmd_ver >= 9) { *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; } fallthrough; default: return -ENOTSUPP; } return 0; } static void iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, struct iwl_tof_responder_config_cmd_v9 *cmd) { /* Up to 2 R2I STS are allowed on the responder */ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? IWL_MVM_FTM_R2I_MAX_STS : 1; cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | (r2i_max_sts << IWL_RESPONDER_STS_POS) | (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) | (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); cmd->cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS); } static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_chan_def *chandef) + struct cfg80211_chan_def *chandef, + struct ieee80211_bss_conf *link_conf) { u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* * The command structure is the same for versions 6, 7 and 8 (only the * field interpretation is different), so the same struct can be use * for all cases. 
*/ struct iwl_tof_responder_config_cmd_v9 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | IWL_TOF_RESPONDER_CMD_VALID_BSSID | IWL_TOF_RESPONDER_CMD_VALID_STA_ID), - .sta_id = mvmvif->bcast_sta.sta_id, + .sta_id = mvmvif->link[link_conf->link_id]->bcast_sta.sta_id, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6); int err; int cmd_size; lockdep_assert_held(&mvm->mutex); /* Use a default of bss_color=1 for now */ if (cmd_ver == 9) { cmd.cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); cmd.bss_color = 1; cmd.min_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); cmd.max_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9); } else { /* All versions up to version 8 have the same size */ cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8); } if (cmd_ver >= 8) iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, &cmd.ctrl_ch_position, cmd_ver); else err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw, &cmd.ctrl_ch_position); if (err) { IWL_ERR(mvm, "Failed to set responder bandwidth\n"); return err; } memcpy(cmd.bssid, vif->addr, ETH_ALEN); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); } static int iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params) { struct iwl_tof_responder_dyn_config_cmd_v2 cmd = { .lci_len = cpu_to_le32(params->lci_len + 2), .civic_len = cpu_to_le32(params->civicloc_len + 2), }; u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0}; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = &data, /* .len[1] set later */ /* may not be able to DMA from stack */ .dataflags[1] = IWL_HCMD_DFL_DUP, }; u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4); u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4); u8 *pos = data; lockdep_assert_held(&mvm->mutex); if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) { IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n", params->lci_len, params->civicloc_len); return -ENOBUFS; } pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->lci_len; memcpy(pos + 2, params->lci, params->lci_len); pos += aligned_lci_len; pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->civicloc_len; memcpy(pos + 2, params->civicloc, params->civicloc_len); hcmd.len[1] = aligned_lci_len + aligned_civicloc_len; return iwl_mvm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params, struct iwl_mvm_pasn_hltk_data *hltk_data) { struct iwl_tof_responder_dyn_config_cmd cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), /* may not be able to DMA from stack */ .dataflags[0] = IWL_HCMD_DFL_DUP, }; lockdep_assert_held(&mvm->mutex); cmd.valid_flags = 0; if (params) { if (params->lci_len + 2 > sizeof(cmd.lci_buf) || params->civicloc_len + 2 > sizeof(cmd.civic_buf)) { IWL_ERR(mvm, "LCI/civic data too big (lci=%zd, civic=%zd)\n", params->lci_len, params->civicloc_len); return -ENOBUFS; } cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT; cmd.lci_buf[1] = params->lci_len; memcpy(cmd.lci_buf + 2, 
params->lci, params->lci_len); cmd.lci_len = params->lci_len + 2; cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT; cmd.civic_buf[1] = params->civicloc_len; memcpy(cmd.civic_buf + 2, params->civicloc, params->civicloc_len); cmd.civic_len = params->civicloc_len + 2; cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI | IWL_RESPONDER_DYN_CFG_VALID_CIVIC; } if (hltk_data) { if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) { IWL_ERR(mvm, "invalid cipher: %u\n", hltk_data->cipher); return -EINVAL; } cmd.cipher = hltk_data->cipher; memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr)); memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf)); cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA; } return iwl_mvm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params) { int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), 2); switch (cmd_ver) { case 2: ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif, params); break; case 3: ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, params, NULL); break; default: IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n", cmd_ver); ret = -ENOTSUPP; } return ret; } static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_pasn_sta *sta) { list_del(&sta->list); iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta); kfree(sta); } #if defined(__linux__) int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len) { int ret; struct iwl_mvm_pasn_sta *sta = NULL; struct iwl_mvm_pasn_hltk_data hltk_data = { .addr = addr, .hltk = hltk, }; + struct iwl_mvm_pasn_hltk_data *hltk_data_ptr = NULL; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), 2); lockdep_assert_held(&mvm->mutex); if (cmd_ver < 3) { IWL_ERR(mvm, "Adding PASN station not supported by FW\n"); return -ENOTSUPP; } - hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); - if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { - IWL_ERR(mvm, "invalid cipher: %u\n", cipher); + if ((!hltk || !hltk_len) && (!tk || !tk_len)) { + IWL_ERR(mvm, "TK and HLTK not set\n"); return -EINVAL; } + if (hltk && hltk_len) { + hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); + if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { + IWL_ERR(mvm, "invalid cipher: %u\n", cipher); + return -EINVAL; + } + + hltk_data_ptr = &hltk_data; + } + if (tk && tk_len) { sta = kzalloc(sizeof(*sta), GFP_KERNEL); if (!sta) return -ENOBUFS; ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr, cipher, tk, tk_len); if (ret) { kfree(sta); return ret; } memcpy(sta->addr, addr, ETH_ALEN); list_add_tail(&sta->list, &mvm->resp_pasn_list); } - ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data); + ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, hltk_data_ptr); if (ret && sta) iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); return ret; } int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr) { struct iwl_mvm_pasn_sta *sta, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) { if (!memcmp(sta->addr, addr, ETH_ALEN)) { iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); return 0; } } IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr); return 
-EINVAL; } #endif -int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_ftm_responder_params *params; struct ieee80211_chanctx_conf ctx, *pctx; u16 *phy_ctxt_id; struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; - params = vif->bss_conf.ftmr_params; + params = bss_conf->ftmr_params; lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder)) + if (WARN_ON_ONCE(!bss_conf->ftm_responder)) return -EINVAL; if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active) { IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); return -EIO; } rcu_read_lock(); - pctx = rcu_dereference(vif->chanctx_conf); + pctx = rcu_dereference(bss_conf->chanctx_conf); /* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care * about changes in the ctx after releasing the lock because the driver * is still protected by the mutex. */ ctx = *pctx; phy_ctxt_id = (u16 *)pctx->drv_priv; rcu_read_unlock(); phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, ctx.rx_chains_static, ctx.rx_chains_dynamic); if (ret) return ret; - ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def); + ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def, bss_conf); if (ret) return ret; if (params) ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params); return ret; } void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_pasn_sta *sta, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); } void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) { - if (!vif->bss_conf.ftm_responder) + if (!bss_conf->ftm_responder) return; iwl_mvm_ftm_responder_clear(mvm, vif); - iwl_mvm_ftm_start_responder(mvm, vif); + iwl_mvm_ftm_start_responder(mvm, vif, bss_conf); } void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_ftm_responder_stats *resp = (void *)pkt->data; struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats; u32 flags = le32_to_cpu(resp->flags); if (resp->success_ftm == resp->ftm_per_burst) stats->success_num++; else if (resp->success_ftm >= 2) stats->partial_num++; else stats->failed_num++; if ((flags & FTM_RESP_STAT_ASAP_REQ) && (flags & FTM_RESP_STAT_ASAP_RESP)) stats->asap_num++; if (flags & FTM_RESP_STAT_NON_ASAP_RESP) stats->non_asap_num++; stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC; if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN) stats->unknown_triggers_num++; if (flags & FTM_RESP_STAT_DUP) stats->reschedule_requests_num++; if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN) stats->out_of_window_triggers_num++; } diff --git a/sys/contrib/dev/iwlwifi/mvm/fw.c b/sys/contrib/dev/iwlwifi/mvm/fw.c index 766bd7c7f9cf..1d5ee4330f29 100644 --- a/sys/contrib/dev/iwlwifi/mvm/fw.c +++ b/sys/contrib/dev/iwlwifi/mvm/fw.c @@ -1,1743 +1,1818 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel 
Deutschland GmbH */ #include #include #include #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" #include "mvm.h" #include "fw/dbg.h" #include "iwl-phy-db.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" +#include "time-sync.h" #define MVM_UCODE_ALIVE_TIMEOUT (HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) #define IWL_TAS_US_MCC 0x5553 #define IWL_TAS_CANADA_MCC 0x4341 struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; }; static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) { struct iwl_tx_ant_cfg_cmd tx_ant_cmd = { .valid = cpu_to_le32(valid_tx_ant), }; IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, sizeof(tx_ant_cmd), &tx_ant_cmd); } static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) { int i; struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), }; if (mvm->trans->num_rx_queues == 1) return 0; /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = 1 + (i % (mvm->trans->num_rx_queues - 1)); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), }; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD); int ret; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); if (ret) IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); else IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); return ret; } void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; __le32 *dump_data = mfu_dump_notif->data; int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); for (i = 0; i < n_words; i++) IWL_DEBUG_INFO(mvm, "MFUART assert dump, dword %u: 0x%08x\n", le16_to_cpu(mfu_dump_notif->index_num) * n_words + i, le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; struct iwl_umac_alive *umac; struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; u16 status; u32 lmac_error_event_table, umac_error_table; u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, UCODE_ALIVE_NTFY, 0); u32 i; + if (version == 6) { struct iwl_alive_ntf_v6 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; mvm->trans->dbg.imr_data.imr_enable = le32_to_cpu(palive->imr.enabled); mvm->trans->dbg.imr_data.imr_size = le32_to_cpu(palive->imr.size); mvm->trans->dbg.imr_data.imr2sram_remainbyte = mvm->trans->dbg.imr_data.imr_size; mvm->trans->dbg.imr_data.imr_base_addr = 
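[Aside, not part of the patch] On iwl_send_rss_cfg_cmd() near the top of this hunk: queue 0 is kept as the fallback/default RX queue, so the RSS indirection table only cycles through queues 1..N-1. A standalone sketch of that fill; the table size and queue count here are arbitrary example values:

#include <stdio.h>

#define INDIRECTION_TABLE_SIZE 128      /* arbitrary for the example */

int main(void)
{
        unsigned char table[INDIRECTION_TABLE_SIZE];
        unsigned int num_rx_queues = 4; /* example value */
        unsigned int i;

        /* never direct RSS traffic to queue 0, the fallback queue */
        for (i = 0; i < INDIRECTION_TABLE_SIZE; i++)
                table[i] = 1 + (i % (num_rx_queues - 1));

        for (i = 0; i < 8; i++)         /* first entries cycle 1 2 3 1 2 3 ... */
                printf("%u ", table[i]);
        printf("\n");
        return 0;
}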
palive->imr.base_addr; mvm->trans->dbg.imr_data.imr_curr_addr = le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr); IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n", mvm->trans->dbg.imr_data.imr_enable, mvm->trans->dbg.imr_data.imr_size, le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr)); if (!mvm->trans->dbg.imr_data.imr_enable) { for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) { struct iwl_ucode_tlv *reg_tlv; struct iwl_fw_ini_region_tlv *reg; reg_tlv = mvm->trans->dbg.active_regions[i]; if (!reg_tlv) continue; reg = (void *)reg_tlv->data; /* * We have only one DRAM IMR region, so we * can break as soon as we find the first * one. */ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) { mvm->trans->dbg.unsupported_region_msk |= BIT(i); break; } } } } if (version >= 5) { struct iwl_alive_ntf_v5 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", mvm->trans->sku_id[0], mvm->trans->sku_id[1], mvm->trans->sku_id[2]); } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { struct iwl_alive_ntf_v4 *palive; if (pkt_len < sizeof(*palive)) return false; palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; lmac2 = &palive->lmac_data[1]; status = le16_to_cpu(palive->status); } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v3)) { struct iwl_alive_ntf_v3 *palive3; if (pkt_len < sizeof(*palive3)) return false; palive3 = (void *)pkt->data; umac = &palive3->umac_data; lmac1 = &palive3->lmac_data; status = le16_to_cpu(palive3->status); } else { WARN(1, "unsupported alive notification (size %d)\n", iwl_rx_packet_payload_len(pkt)); /* get timeout later */ return false; } lmac_error_event_table = le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table); if (lmac2) mvm->trans->dbg.lmac_error_event_table[1] = le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & ~FW_ADDR_CACHE_CONTROL; if (umac_error_table) { if (umac_error_table >= mvm->trans->cfg->min_umac_error_event_table) { iwl_fw_umac_set_alive_err_table(mvm->trans, umac_error_table); } else { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", umac_error_table, (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? 
"Init" : "RT"); } } alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mvm, "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", status, lmac1->ver_type, lmac1->ver_subtype); if (lmac2) IWL_DEBUG_FW(mvm, "Alive ucode CDB\n"); IWL_DEBUG_FW(mvm, "UMAC version: Major - 0x%x, Minor - 0x%x\n", le32_to_cpu(umac->umac_major), le32_to_cpu(umac->umac_minor)); iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac); return true; } static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_phy_db *phy_db = data; if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) { WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF); return true; } WARN_ON(iwl_phy_db_set_section(phy_db, pkt)); return false; } static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) { +#define IWL_FW_PRINT_REG_INFO(reg_name) \ + IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) + struct iwl_trans *trans = mvm->trans; enum iwl_device_family device_family = trans->trans_cfg->device_family; if (device_family < IWL_DEVICE_FAMILY_8000) return; if (device_family <= IWL_DEVICE_FAMILY_9000) - IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION); else - IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION); - IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n", - iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE)); + IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE); + /* print OPT info */ + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR); + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA); } static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; struct iwl_mvm_alive_data alive_data = {}; const struct fw_img *fw; int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; bool run_in_rfkill = ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); + u8 count; + struct iwl_pc_data *pc_data; if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, alive_cmd, ARRAY_SIZE(alive_cmd), iwl_alive_fn, &alive_data); /* * We want to load the INIT firmware even in RFKILL * For the unified firmware case, the ucode_type is not * INIT, but we still need to run it. */ ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill); if (ret) { iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } /* * Some things may run in the background now, but we * just wait for the ALIVE notification here. 
*/ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); + + if (mvm->trans->trans_cfg->device_family == + IWL_DEVICE_FAMILY_AX210) { + /* print these registers regardless of alive fail/success */ + IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION)); + IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION)); + IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n", + iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG)); + IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n", + iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9)); + } + if (ret) { struct iwl_trans *trans = mvm->trans; /* SecBoot info */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS)); } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); } iwl_mvm_print_pd_notification(mvm); /* LMAC/UMAC PC info */ if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22000) { + pc_data = trans->dbg.pc_data; + for (count = 0; count < trans->dbg.num_pc; + count++, pc_data++) + IWL_ERR(mvm, "%s: 0x%x\n", + pc_data->pc_name, + pc_data->pc_address); + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { IWL_ERR(mvm, "UMAC PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_UMAC_CURRENT_PC)); IWL_ERR(mvm, "LMAC PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_LMAC1_CURRENT_PC)); if (iwl_mvm_is_cdb_supported(mvm)) IWL_ERR(mvm, "LMAC2 PC: 0x%x\n", iwl_read_umac_prph(trans, UREG_LMAC2_CURRENT_PC)); } - if (ret == -ETIMEDOUT) + if (ret == -ETIMEDOUT && !mvm->pldr_sync) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } - ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait); + /* if reached this point, Alive notification was received */ + iwl_mei_alive_notif(true); + + ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, + &mvm->fw->ucode_capa); if (ret) { IWL_ERR(mvm, "Timeout waiting for PNVM load!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they * could be stopped, so wake them up. In firmware restart, * mac80211 will have the queues stopped as well until the * reconfiguration completes. During normal startup, they * will be empty. */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); /* * Set a 'fake' TID for the command queue, since we use the * hweight() of the tid_bitmap as a refcount now. Not that * we ever even consider the command queue as one we might * want to reuse, but be safe nevertheless. */ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_fw_set_dbg_rec_on(&mvm->fwrt); #endif /* * All the BSSes in the BSS table include the GP2 in the system * at the beacon Rx time, this is of course no longer relevant * since we are resetting the firmware. 
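[Aside, not part of the patch] On the 'fake' TID comment just above: the driver treats the popcount (hweight) of a queue's tid_bitmap as a reference count, so parking one bit above the valid TID range on the command queue keeps its count nonzero and the queue permanently "in use". A small standalone illustration; the bit positions are illustrative only:

#include <stdio.h>

#define MAX_TID_COUNT   8
#define BIT(n)          (1U << (n))

static unsigned int queue_refcount(unsigned int tid_bitmap)
{
        /* number of set bits == number of TIDs using the queue */
        return (unsigned int)__builtin_popcount(tid_bitmap);
}

int main(void)
{
        /* a bit outside the real TID range keeps the refcount at least 1 */
        unsigned int cmd_queue_tids = BIT(MAX_TID_COUNT + 2);

        printf("cmd queue refcount = %u\n", queue_refcount(cmd_queue_tids));
        return 0;
}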
* Purge all the BSS table. */ cfg80211_bss_flush(mvm->hw->wiphy); return 0; } +static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, + struct iwl_phy_specific_cfg *phy_filters) +{ +#ifdef CONFIG_ACPI + *phy_filters = mvm->phy_filters; +#endif /* CONFIG_ACPI */ +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.sgom_table, + .len[0] = sizeof(mvm->fwrt.sgom_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mvm->fwrt.sgom_enabled) { + IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); + return 0; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != 2) { + IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n", + cmd_ver); + return 0; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); + + return ret; +} +#else + +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif + +static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) +{ + u32 cmd_id = PHY_CONFIGURATION_CMD; + struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; + enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; + u8 cmd_ver; + size_t cmd_size; + + if (iwl_mvm_has_unified_ucode(mvm) && + !mvm->trans->cfg->tx_with_siso_diversity) + return 0; + + if (mvm->trans->cfg->tx_with_siso_diversity) { + /* + * TODO: currently we don't set the antenna but letting the NIC + * to decide which antenna to use. This should come from BIOS. + */ + phy_cfg_cmd.phy_cfg = + cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); + } + + /* Set parameters */ + phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); + + /* set flags extra PHY configuration flags from the device's cfg */ + phy_cfg_cmd.phy_cfg |= + cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); + + phy_cfg_cmd.calib_control.event_trigger = + mvm->fw->default_calib[ucode_type].event_trigger; + phy_cfg_cmd.calib_control.flow_trigger = + mvm->fw->default_calib[ucode_type].flow_trigger; + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver >= 3) + iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg); + + IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", + phy_cfg_cmd.phy_cfg); + cmd_size = (cmd_ver == 3) ? 
sizeof(struct iwl_phy_cfg_cmd_v3) : + sizeof(struct iwl_phy_cfg_cmd_v1); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); +} + static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait init_wait; struct iwl_nvm_access_complete_cmd nvm_complete = {}; struct iwl_init_extended_cfg_cmd init_cfg = { .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)), }; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, }; int ret; if (mvm->trans->cfg->tx_with_siso_diversity) init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY)); lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &init_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_init_complete, NULL); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); goto error; } iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); /* Send init config command to mark that we are sending NVM access * commands */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), CMD_SEND_IN_RFKILL, sizeof(init_cfg), &init_cfg); if (ret) { IWL_ERR(mvm, "Failed to run init config command: %d\n", ret); goto error; } /* Load NVM to NIC if needed */ if (mvm->nvm_file_name) { ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) goto error; ret = iwl_mvm_load_nvm_to_nic(mvm); if (ret) goto error; } if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto error; } } ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), CMD_SEND_IN_RFKILL, sizeof(nvm_complete), &nvm_complete); if (ret) { IWL_ERR(mvm, "Failed to run complete NVM access: %d\n", ret); goto error; } + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to run PHY configuration: %d\n", + ret); + goto error; + } + /* We wait for the INIT complete notification */ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, MVM_UCODE_ALIVE_TIMEOUT); if (ret) return ret; /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); return ret; } } mvm->rfkill_safe_init_done = true; return 0; error: iwl_remove_notification(&mvm->notif_wait, &init_wait); return ret; } -#ifdef CONFIG_ACPI -static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, - struct iwl_phy_specific_cfg *phy_filters) -{ - /* - * TODO: read specific phy config from BIOS - * ACPI table for this feature has not been defined yet, - * so for now we use hardcoded values. 
- */ - - if (IWL_MVM_PHY_FILTER_CHAIN_A) { - phy_filters->filter_cfg_chain_a = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A); - } - if (IWL_MVM_PHY_FILTER_CHAIN_B) { - phy_filters->filter_cfg_chain_b = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B); - } - if (IWL_MVM_PHY_FILTER_CHAIN_C) { - phy_filters->filter_cfg_chain_c = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C); - } - if (IWL_MVM_PHY_FILTER_CHAIN_D) { - phy_filters->filter_cfg_chain_d = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D); - } -} -#else /* CONFIG_ACPI */ - -static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, - struct iwl_phy_specific_cfg *phy_filters) -{ -} -#endif /* CONFIG_ACPI */ - -#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) -static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) -{ - u8 cmd_ver; - int ret; - struct iwl_host_cmd cmd = { - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, - SAR_OFFSET_MAPPING_TABLE_CMD), - .flags = 0, - .data[0] = &mvm->fwrt.sgom_table, - .len[0] = sizeof(mvm->fwrt.sgom_table), - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - - if (!mvm->fwrt.sgom_enabled) { - IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); - return 0; - } - - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, - IWL_FW_CMD_VER_UNKNOWN); - - if (cmd_ver != 2) { - IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n", - cmd_ver); - return 0; - } - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret < 0) - IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); - - return ret; -} -#else - -static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) -{ - return 0; -} -#endif - -static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) -{ - u32 cmd_id = PHY_CONFIGURATION_CMD; - struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; - struct iwl_phy_specific_cfg phy_filters = {}; - u8 cmd_ver; - size_t cmd_size; - - if (iwl_mvm_has_unified_ucode(mvm) && - !mvm->trans->cfg->tx_with_siso_diversity) - return 0; - - if (mvm->trans->cfg->tx_with_siso_diversity) { - /* - * TODO: currently we don't set the antenna but letting the NIC - * to decide which antenna to use. This should come from BIOS. - */ - phy_cfg_cmd.phy_cfg = - cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); - } - - /* Set parameters */ - phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); - - /* set flags extra PHY configuration flags from the device's cfg */ - phy_cfg_cmd.phy_cfg |= - cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); - - phy_cfg_cmd.calib_control.event_trigger = - mvm->fw->default_calib[ucode_type].event_trigger; - phy_cfg_cmd.calib_control.flow_trigger = - mvm->fw->default_calib[ucode_type].flow_trigger; - - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 3) { - iwl_mvm_phy_filter_init(mvm, &phy_filters); - memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters, - sizeof(struct iwl_phy_specific_cfg)); - } - - IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", - phy_cfg_cmd.phy_cfg); - cmd_size = (cmd_ver == 3) ? 
sizeof(struct iwl_phy_cfg_cmd_v3) : - sizeof(struct iwl_phy_cfg_cmd_v1); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); -} - int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait calib_wait; static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); lockdep_assert_held(&mvm->mutex); mvm->rfkill_safe_init_done = false; iwl_init_notification_wait(&mvm->notif_wait, &calib_wait, init_complete, ARRAY_SIZE(init_complete), iwl_wait_phy_db_entry, mvm->phy_db); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); if (ret) { IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); goto remove_notif; } if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto remove_notif; } /* Read the NVM only at driver load time, no need to do this twice */ if (!mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto remove_notif; } } /* In case we read the NVM from external file, load it to the NIC */ if (mvm->nvm_file_name) { ret = iwl_mvm_load_nvm_to_nic(mvm); if (ret) goto remove_notif; } WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, "Too old NVM version (0x%0x, required = 0x%0x)", mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); /* * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); goto remove_notif; } mvm->rfkill_safe_init_done = true; /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto remove_notif; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); goto remove_notif; } /* * Some things may run in the background now, but we * just wait for the calibration complete notification. 
*/ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); if (!ret) goto out; if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 0; } else { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); } goto out; remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->rfkill_safe_init_done = false; if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + sizeof(struct ieee80211_channel) + sizeof(struct ieee80211_rate), GFP_KERNEL); if (!mvm->nvm_data) return -ENOMEM; mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels; mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = (void *)(mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } return ret; } static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), }; if (!mvm->trans->ltr_enabled) return 0; return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(cmd), &cmd); } #ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { u32 cmd_id = REDUCE_TX_POWER_CMD; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; __le16 *per_chain; int ret; u16 len = 0; u32 n_subbands; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v7.per_chain[0][0]; cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags); } else if (cmd_ver == 6) { len = sizeof(cmd.v6); n_subbands = IWL_NUM_SUB_BANDS_V2; per_chain = cmd.v6.per_chain[0][0]; } else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) { len = sizeof(cmd.v5); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v5.per_chain[0][0]; } else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) { len = sizeof(cmd.v4); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v4.per_chain[0][0]; } else { len = sizeof(cmd.v3); n_subbands = IWL_NUM_SUB_BANDS_V1; per_chain = cmd.v3.per_chain[0][0]; } /* all structs have the same common part, add it */ len += sizeof(cmd.common); ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, IWL_NUM_CHAIN_TABLES, n_subbands, prof_a, prof_b); /* return on error or if the profile is disabled (positive number) */ if (ret) return ret; iwl_mei_set_power_limit(per_chain); IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { union iwl_geo_tx_power_profiles_cmd geo_tx_cmd; struct iwl_geo_tx_power_profiles_resp *resp; u16 len; int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), .flags = CMD_WANT_SKB, .data = { &geo_tx_cmd }, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); /* the ops field is at the same spot for all versions, so set in v1 */ geo_tx_cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE); if (cmd_ver == 5) len = sizeof(geo_tx_cmd.v5); else if (cmd_ver == 4) len = sizeof(geo_tx_cmd.v4); else if (cmd_ver == 3) len = sizeof(geo_tx_cmd.v3); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) len = sizeof(geo_tx_cmd.v2); else len = 
sizeof(geo_tx_cmd.v1); if (!iwl_sar_geo_support(&mvm->fwrt)) return -EOPNOTSUPP; cmd.len[0] = len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); return ret; } resp = (void *)cmd.resp_pkt->data; ret = le32_to_cpu(resp->profile_idx); if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3)) ret = -EIO; iwl_free_resp(&cmd); return ret; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); union iwl_geo_tx_power_profiles_cmd cmd; u16 len; u32 n_bands; u32 n_profiles; u32 sk = 0; int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops)); /* the ops field is at the same spot for all versions, so set in v1 */ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); if (cmd_ver == 5) { len = sizeof(cmd.v5); n_bands = ARRAY_SIZE(cmd.v5.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES_REV3; } else if (cmd_ver == 4) { len = sizeof(cmd.v4); n_bands = ARRAY_SIZE(cmd.v4.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES_REV3; } else if (cmd_ver == 3) { len = sizeof(cmd.v3); n_bands = ARRAY_SIZE(cmd.v3.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) { len = sizeof(cmd.v2); n_bands = ARRAY_SIZE(cmd.v2.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } else { len = sizeof(cmd.v1); n_bands = ARRAY_SIZE(cmd.v1.table[0]); n_profiles = ACPI_NUM_GEO_PROFILES; } BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) || offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); /* the table is at the same position for all versions, so set use v1 */ ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands, n_profiles); /* * It is a valid scenario to not support SAR, or miss wgds table, * but in that case there is no need to send the command. */ if (ret) return 0; /* Only set to South Korea if the table revision is 1 */ if (mvm->fwrt.geo_rev == 1) sk = 1; /* * Set the table_revision to South Korea (1) or not (0). The * element name is misleading, as it doesn't contain the table * revision number, but whether the South Korea variation * should be used. * This must be done after calling iwl_sar_geo_init(). 
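[Aside, not part of the patch] The BUILD_BUG_ON()/offsetof() chain above is what lets iwl_mvm_sar_geo_init() write the 'ops' and 'table' members through the v1 view even when a newer command version is selected: the build breaks if any version ever moves those members. A compact standalone sketch of the same trick using _Static_assert; the struct names here are invented:

#include <stddef.h>
#include <stdint.h>

/* two hypothetical versions of the same command */
struct cmd_v1 { uint32_t ops; uint8_t table[4]; };
struct cmd_v2 { uint32_t ops; uint8_t table[8]; uint32_t table_revision; };

union cmd { struct cmd_v1 v1; struct cmd_v2 v2; };

/* compile-time guarantee that the shared members never move */
_Static_assert(offsetof(struct cmd_v1, ops) == offsetof(struct cmd_v2, ops),
               "ops must stay at the same offset in every version");
_Static_assert(offsetof(struct cmd_v1, table) == offsetof(struct cmd_v2, table),
               "table must stay at the same offset in every version");

void fill_common(union cmd *c, uint32_t ops_value)
{
        /* safe for either version thanks to the asserts above */
        c->v1.ops = ops_value;
}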
*/ if (cmd_ver == 5) cmd.v5.table_revision = cpu_to_le32(sk); else if (cmd_ver == 4) cmd.v4.table_revision = cpu_to_le32(sk); else if (cmd_ver == 3) cmd.v3.table_revision = cpu_to_le32(sk); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) cmd.v2.table_revision = cpu_to_le32(sk); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { union iwl_ppag_table_cmd cmd; int ret, cmd_size; ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size); /* Not supporting PPAG table is a valid scenario */ - if(ret < 0) + if (ret < 0) return 0; IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), 0, cmd_size, &cmd); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); return ret; } static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { /* no need to read the table, done in INIT stage */ if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt))) return 0; return iwl_mvm_ppag_send_cmd(mvm); } static const struct dmi_system_id dmi_tas_approved_list[] = { { .ident = "HP", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), }, }, { .ident = "SAMSUNG", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), }, }, { .ident = "LENOVO", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), }, }, { .ident = "DELL", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), }, }, - + { .ident = "MSFT", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + }, + }, + { .ident = "Acer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + }, + }, + { .ident = "ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "MSI", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), + }, + }, + { .ident = "Honor", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), + }, + }, /* keep last */ {} }; +bool iwl_mvm_is_vendor_in_approved_list(void) +{ + return dmi_check_system(dmi_tas_approved_list); +} + static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) { int i; u32 size = le32_to_cpu(*le_size); /* Verify that there is room for another country */ if (size >= IWL_TAS_BLOCK_LIST_MAX) return false; for (i = 0; i < size; i++) { if (list[i] == cpu_to_le32(mcc)) return true; } list[size++] = cpu_to_le32(mcc); *le_size = cpu_to_le32(size); return true; } static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); int ret; union iwl_tas_config_cmd cmd = {}; int cmd_size, fw_ver; BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) < APCI_WTAS_BLACK_LIST_MAX); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n"); return; } fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. 
(%d)\n", ret); return; } if (ret == 0) return; - if (!dmi_check_system(dmi_tas_approved_list)) { + if (!iwl_mvm_is_vendor_in_approved_list()) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", dmi_get_system_info(DMI_SYS_VENDOR)); if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, &cmd.v4.block_list_size, IWL_TAS_US_MCC)) || (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, &cmd.v4.block_list_size, IWL_TAS_CANADA_MCC))) { IWL_DEBUG_RADIO(mvm, "Unable to add US/Canada to TAS block list, disabling TAS\n"); return; } + } else { + IWL_DEBUG_RADIO(mvm, + "System vendor '%s' is in the approved list.\n", + dmi_get_system_info(DMI_SYS_VENDOR)); } /* v4 is the same size as v3, so no need to differentiate here */ cmd_size = fw_ver < 3 ? sizeof(struct iwl_tas_config_cmd_v2) : sizeof(struct iwl_tas_config_cmd_v3); ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { u8 value; int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE, &iwl_rfi_guid, &value); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret); } else if (value >= DSM_VALUE_RFI_MAX) { IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n", value); } else if (value == DSM_VALUE_RFI_ENABLE) { IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n"); return DSM_VALUE_RFI_ENABLE; } IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n"); /* default behaviour is disabled */ return DSM_VALUE_RFI_DISABLE; } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; struct iwl_lari_config_change_cmd_v6 cmd = {}; cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT, &iwl_guid, &value); if (!ret) cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_UNII4_CHAN, &iwl_guid, &value); if (!ret) cmd.oem_unii4_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ACTIVATE_CHANNEL, &iwl_guid, &value); if (!ret) cmd.chan_state_active_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_ENABLE_6E, &iwl_guid, &value); if (!ret) cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_FORCE_DISABLE_CHANNELS, &iwl_guid, &value); if (!ret) cmd.force_disable_channels_bitmap = cpu_to_le32(value); if (cmd.config_bitmap || cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || cmd.oem_unii4_allow_bitmap || cmd.chan_state_active_bitmap || cmd.force_disable_channels_bitmap) { size_t cmd_size; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), 1); switch (cmd_ver) { case 6: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); break; case 5: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); break; case 4: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); break; case 3: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); break; case 2: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); break; default: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); break; } IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", le32_to_cpu(cmd.config_bitmap), le32_to_cpu(cmd.oem_11ax_allow_bitmap)); IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, 
oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", le32_to_cpu(cmd.oem_unii4_allow_bitmap), le32_to_cpu(cmd.chan_state_active_bitmap), cmd_ver); IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", le32_to_cpu(cmd.oem_uhb_allow_bitmap), le32_to_cpu(cmd.force_disable_channels_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { int ret; /* read PPAG table */ ret = iwl_acpi_get_ppag_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "PPAG BIOS table invalid or unavailable. (%d)\n", ret); } /* read SAR tables */ ret = iwl_sar_get_wrds_table(&mvm->fwrt); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "WRDS SAR BIOS table invalid or unavailable. (%d)\n", ret); /* * If not available, don't fail and don't bother with EWRD and * WGDS */ if (!iwl_sar_get_wgds_table(&mvm->fwrt)) { /* * If basic SAR is not available, we check for WGDS, * which should *not* be available either. If it is * available, issue an error, because we can't use SAR * Geo without basic SAR. */ IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); } } else { ret = iwl_sar_get_ewrd_table(&mvm->fwrt); /* if EWRD is not available, we can still use * WRDS, so don't fail */ if (ret < 0) IWL_DEBUG_RADIO(mvm, "EWRD SAR BIOS table invalid or unavailable. (%d)\n", ret); /* read geo SAR table */ if (iwl_sar_geo_support(&mvm->fwrt)) { ret = iwl_sar_get_wgds_table(&mvm->fwrt); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Geo SAR BIOS table invalid or unavailable. (%d)\n", ret); /* we don't fail if the table is not available */ } } + + iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters); } #else /* CONFIG_ACPI */ inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { return 1; } inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; } int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { return -ENOENT; } static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) { return 0; } static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { } static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { } +bool iwl_mvm_is_vendor_in_approved_list(void) +{ + return false; +} + static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { return DSM_VALUE_RFI_DISABLE; } void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { } #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) { u32 error_log_size = mvm->fw->ucode_capa.error_log_size; int ret; u32 resp; struct iwl_fw_error_recovery_cmd recovery_cmd = { .flags = cpu_to_le32(flags), .buf_size = 0, }; struct iwl_host_cmd host_cmd = { .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), .flags = CMD_WANT_SKB, .data = {&recovery_cmd, }, .len = {sizeof(recovery_cmd), }, }; /* no error log was defined in TLV */ if (!error_log_size) return; if (flags & ERROR_RECOVERY_UPDATE_DB) { /* no buf was allocated while HW reset */ if (!mvm->error_recovery_buf) return; host_cmd.data[1] = mvm->error_recovery_buf; host_cmd.len[1] = error_log_size; host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; recovery_cmd.buf_size = cpu_to_le32(error_log_size); } ret = iwl_mvm_send_cmd(mvm, &host_cmd); kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; if (ret) { IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret); return; } /* skb respond is 
only relevant in ERROR_RECOVERY_UPDATE_DB */ if (flags & ERROR_RECOVERY_UPDATE_DB) { resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); if (resp) IWL_ERR(mvm, "Failed to send recovery cmd blob was invalid %d\n", resp); } } static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { return iwl_mvm_sar_select_profile(mvm, 1, 1); } static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); ret = iwl_run_init_mvm_ucode(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); if (iwlmvm_mod_params.init_dbg) return 0; return ret; } iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; mvm->rfkill_safe_init_done = false; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) return ret; mvm->rfkill_safe_init_done = true; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, NULL); return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband = NULL; + u32 sb_cfg; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; + sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); + mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK); + if (mvm->pldr_sync && iwl_mei_pldr_req()) + return -EBUSY; + ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - if (ret != -ERFKILL) + if (ret != -ERFKILL && !mvm->pldr_sync) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } + /* FW loaded successfully */ + mvm->pldr_sync = false; + iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); if (!iwl_trans_dbg_ini_valid(mvm->trans)) { mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg.dest_tlv) mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); } ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; if (!iwl_mvm_has_unified_ucode(mvm)) { /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; } - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; - ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto error; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) { ret = iwl_set_soc_latency(&mvm->fwrt); if (ret) goto error; } + iwl_mvm_lari_cfg(mvm); + /* Init RSS configuration */ ret = iwl_configure_rxq(&mvm->fwrt); if (ret) goto error; if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", ret); goto error; } } /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); + } + + for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++) + RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); + + memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map)); mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing 
buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) { ret = iwl_mvm_send_dqa_cmd(mvm); if (ret) goto error; } /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses * internal aux station for all aux activities that don't * requires a dedicated data queue. */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { /* * In old version the aux station uses mac id like other * station and not lmac id */ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); if (ret) goto error; } /* Add all the PHY contexts */ i = 0; while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; if (WARN_ON_ONCE(!sband)) { ret = -ENODEV; goto error; } chan = &sband->channels[0]; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); for (i = 0; i < NUM_PHY_CTX; i++) { /* * The channel used here isn't relevant as it's * going to be overwritten in the other flows. * For now use the first channel we have. */ ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i], &chandef, 1, 1); if (ret) goto error; } if (iwl_mvm_is_tt_in_fw(mvm)) { /* in order to give the responsibility of ct-kill and * TX backoff to FW we need to send empty temperature reporting * cmd during init time */ iwl_mvm_send_temp_report_ths_cmd(mvm); } else { /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); } #ifdef CONFIG_THERMAL /* TODO: read the budget from BIOS / Platform NVM */ /* * In case there is no budget from BIOS / Platform NVM the default * budget should be 2000mW (cooling state 0). */ if (iwl_mvm_is_ctdp_supported(mvm)) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); if (ret) goto error; } #endif if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) WARN_ON(iwl_mvm_config_ltr(mvm)); ret = iwl_mvm_power_update_device(mvm); if (ret) goto error; - iwl_mvm_lari_cfg(mvm); /* * RTNL is not taken during Ct-kill, but we don't need to scan/Tx * anyway, so don't init MCC. 
*/ if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { ret = iwl_mvm_init_mcc(mvm); if (ret) goto error; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; } - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); + if (mvm->time_sync.active) + iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr, + IWL_TIME_SYNC_PROTOCOL_TM | + IWL_TIME_SYNC_PROTOCOL_FTM); + } + + if (!mvm->ptp_data.ptp_clock) + iwl_mvm_ptp_init(mvm); + if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); ret = iwl_mvm_ppag_init(mvm); if (ret) goto error; ret = iwl_mvm_sar_init(mvm); if (ret == 0) ret = iwl_mvm_sar_geo_init(mvm); if (ret < 0) goto error; ret = iwl_mvm_sgom_init(mvm); if (ret) goto error; iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); - iwl_mvm_ftm_initiator_smooth_config(mvm); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) { + if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) iwl_rfi_send_config_cmd(mvm, NULL); } + iwl_mvm_mei_device_state(mvm, true); + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); return ret; } int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) { int ret, i; lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) return ret; ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN); if (ret) { IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret); goto error; } ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) goto error; /* Send phy db control command and then phy db calibration*/ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; ret = iwl_send_phy_cfg_cmd(mvm); if (ret) goto error; /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); + } - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses * internal aux station for all aux activities that don't * requires a dedicated data queue. 
* In old version the aux station uses mac id like other * station and not lmac id */ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX); if (ret) goto error; } return 0; error: iwl_mvm_stop_device(mvm); return ret; } void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; IWL_DEBUG_INFO(mvm, "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n", le32_to_cpu(mfuart_notif->installed_ver), le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif)) IWL_DEBUG_INFO(mvm, "MFUART: image size: 0x%08x\n", le32_to_cpu(mfuart_notif->image_size)); } diff --git a/sys/contrib/dev/iwlwifi/mvm/link.c b/sys/contrib/dev/iwlwifi/mvm/link.c new file mode 100644 index 000000000000..ace82e2c5bd9 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/link.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include "mvm.h" +#include "time-event.h" + +static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvm_vif) +{ + u32 link_id; + + lockdep_assert_held(&mvm->mutex); + + link_id = ffz(mvm->fw_link_ids_map); + + /* this case can happen if there're deactivated but not removed links */ + if (link_id > IWL_MVM_FW_MAX_LINK_ID) + return IWL_MVM_FW_LINK_ID_INVALID; + + mvm->fw_link_ids_map |= BIT(link_id); + return link_id; +} + +static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id) +{ + lockdep_assert_held(&mvm->mutex); + + if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID)) + mvm->fw_link_ids_map &= ~BIT(link_id); +} + +static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm, + struct iwl_link_config_cmd *cmd, + enum iwl_ctxt_action action) +{ + int ret; + + cmd->action = cpu_to_le32(action); + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD), 0, + sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n", + action, ret); + return ret; +} + +int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_link_config_cmd cmd = {}; + struct iwl_mvm_phy_ctxt *phyctxt; + + if (WARN_ON_ONCE(!link_info)) + return -EINVAL; + + if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) { + link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm, + mvmvif); + if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) + return -EINVAL; + + rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id], + link_conf); + } + + /* Update SF - Disable if needed. if this fails, SF might still be on + * while many macs are bound, which is forbidden - so fail the binding. 
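+ *
+ * (SF here is the smart FIFO; iwl_mvm_sf_update() is the same helper
+ * whose failure is reported as "Failed to initialize Smart Fifo" in
+ * iwl_mvm_up().)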
+ */ + if (iwl_mvm_sf_update(mvm, vif, false)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + cmd.mac_id = cpu_to_le32(mvmvif->id); + cmd.spec_link_id = link_conf->link_id; + /* P2P-Device already has a valid PHY context during add */ + phyctxt = link_info->phy_ctxt; + if (phyctxt) + cmd.phy_id = cpu_to_le32(phyctxt->id); + else + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + + memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); + + if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) + memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); + + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + + return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD); +} + +int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u32 changes, bool active) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_mvm_phy_ctxt *phyctxt; + struct iwl_link_config_cmd cmd = {}; + u32 ht_flag, flags = 0, flags_mask = 0; + int ret; + + if (WARN_ON_ONCE(!link_info || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + if (changes & LINK_CONTEXT_MODIFY_ACTIVE) { + /* When activating a link, phy context should be valid; + * when deactivating a link, it also should be valid since + * the link was active before. So, do nothing in this case. + * Since a link is added first with FW_CTXT_INVALID, then we + * can get here in case it's removed before it was activated. + */ + if (!link_info->phy_ctxt) + return 0; + + /* Catch early if driver tries to activate or deactivate a link + * twice. + */ + WARN_ON_ONCE(active == link_info->active); + + /* When deactivating a link session protection should + * be stopped + */ + if (!active && vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_stop_session_protection(mvm, vif); + } + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + + /* The phy_id, link address and listen_lmac can be modified only until + * the link becomes active, otherwise they will be ignored. 
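+ *
+ * The caller's "changes" bitmap ends up in cmd.modify_mask below, so
+ * a caller that only needs to (de)activate the link passes just
+ * LINK_CONTEXT_MODIFY_ACTIVE; iwl_mvm_disable_link() further down
+ * does exactly that with active == false before removing the link.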
+ */ + phyctxt = link_info->phy_ctxt; + if (phyctxt) + cmd.phy_id = cpu_to_le32(phyctxt->id); + else + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + cmd.mac_id = cpu_to_le32(mvmvif->id); + + memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); + + cmd.active = cpu_to_le32(active); + + if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) + memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); + + iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf, + &cmd.cck_rates, &cmd.ofdm_rates); + + cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble); + cmd.short_slot = cpu_to_le32(link_conf->use_short_slot); + + /* The fw does not distinguish between ht and fat */ + ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT; + iwl_mvm_set_fw_protection_flags(mvm, vif, link_conf, + &cmd.protection_flags, + ht_flag, LINK_PROT_FLG_TGG_PROTECT); + + iwl_mvm_set_fw_qos_params(mvm, vif, link_conf, cmd.ac, + &cmd.qos_flags); + + + cmd.bi = cpu_to_le32(link_conf->beacon_int); + cmd.dtim_interval = cpu_to_le32(link_conf->beacon_int * + link_conf->dtim_period); + + if (!link_conf->he_support || iwlwifi_mod_params.disable_11ax || + (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) { + changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS; + goto send_cmd; + } + + cmd.htc_trig_based_pkt_ext = link_conf->htc_trig_based_pkt_ext; + + if (link_conf->uora_exists) { + cmd.rand_alloc_ecwmin = + link_conf->uora_ocw_range & 0x7; + cmd.rand_alloc_ecwmax = + (link_conf->uora_ocw_range >> 3) & 0x7; + } + + /* TODO how to set ndp_fdbk_buff_th_exp? */ + + if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id], + &cmd.trig_based_txf[0])) { + flags |= LINK_FLG_MU_EDCA_CW; + flags_mask |= LINK_FLG_MU_EDCA_CW; + } + + if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be) + cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing); + else + /* This flag can be set only if the MAC has eht support */ + changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + + cmd.bss_color = link_conf->he_bss_color.color; + + if (!link_conf->he_bss_color.enabled) { + flags |= LINK_FLG_BSS_COLOR_DIS; + flags_mask |= LINK_FLG_BSS_COLOR_DIS; + } + + cmd.frame_time_rts_th = cpu_to_le16(link_conf->frame_time_rts_th); + + /* Block 26-tone RU OFDMA transmissions */ + if (link_info->he_ru_2mhz_block) { + flags |= LINK_FLG_RU_2MHZ_BLOCK; + flags_mask |= LINK_FLG_RU_2MHZ_BLOCK; + } + + if (link_conf->nontransmitted) { + ether_addr_copy(cmd.ref_bssid_addr, + link_conf->transmitter_bssid); + cmd.bssid_index = link_conf->bssid_index; + } + +send_cmd: + cmd.modify_mask = cpu_to_le32(changes); + cmd.flags = cpu_to_le32(flags); + cmd.flags_mask = cpu_to_le32(flags_mask); + cmd.spec_link_id = link_conf->link_id; + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + + ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY); + if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE)) + link_info->active = active; + + return ret; +} + +int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_link_config_cmd cmd = {}; + int ret; + + if (WARN_ON(!link_info || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], + NULL); + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + iwl_mvm_release_fw_link_id(mvm, 
link_info->fw_link_id); + link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + cmd.spec_link_id = link_conf->link_id; + + ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE); + + if (!ret) + if (iwl_mvm_sf_update(mvm, vif, true)) + IWL_ERR(mvm, "Failed to update SF state\n"); + + return ret; +} + +/* link should be deactivated before removal, so in most cases we need to + * perform these two operations together + */ +int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + int ret; + + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (ret) + return ret; + + ret = iwl_mvm_remove_link(mvm, vif, link_conf); + if (ret) + return ret; + + return ret; +} diff --git a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c index 99624505c132..702f3bcd3549 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c @@ -1,1677 +1,1903 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-io.h" #include "iwl-prph.h" #include "fw-api.h" #include "mvm.h" #include "time-event.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, IWL_MVM_TX_FIFO_VI, IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_BK, }; const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_EDCA_TX_FIFO_VO, IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK, IWL_GEN2_TRIG_TX_FIFO_VO, IWL_GEN2_TRIG_TX_FIFO_VI, IWL_GEN2_TRIG_TX_FIFO_BE, IWL_GEN2_TRIG_TX_FIFO_BK, }; struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; enum iwl_tsf_id preferred_tsf; bool found_vif; }; static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 min_bi; /* Skip the interface for which we are trying to assign a tsf_id */ if (vif == data->vif) return; /* * The TSF is a hardware/firmware resource, there are 4 and * the driver should assign and free them as needed. However, * there are cases where 2 MACs should share the same TSF ID * for the purpose of clock sync, an optimization to avoid * clock drift causing overlapping TBTTs/DTIMs for a GO and * client in the system. * * The firmware will decide according to the MAC type which * will be the leader and follower. Clients that need to sync * with a remote station will be the leader, and an AP or GO * will be the follower. * * Depending on the new interface type it can be following * or become the leader of an existing interface. */ switch (data->vif->type) { case NL80211_IFTYPE_STATION: /* * The new interface is a client, so if the one we're iterating * is an AP, and the beacon interval of the AP is a multiple or * divisor of the beacon interval of the client, the same TSF * should be used to avoid drift between the new client and * existing AP. The existing AP will get drift updates from the * new client context in this case. 
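 *
 * For example, a new client beaconing every 100 TU can share the
 * TSF of an existing AP beaconing every 200 TU: min_bi is 100 and
 * (100 - 200) % 100 == 0, so that AP's tsf_id becomes the preferred
 * one.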
*/ if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; case NL80211_IFTYPE_AP: /* * The new interface is AP/GO, so if its beacon interval is a * multiple or a divisor of the beacon interval of an existing * interface, it should get drift updates from an existing * client or use the same TSF as an existing GO. There's no * drift between TSFs internally but if they used different * TSFs then a new client MAC could update one of them and * cause drift that way. */ if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; default: /* * For all other interface types there's no need to * take drift into account. Either they're exclusive * like IBSS and monitor, or we don't care much about * their TSF (like P2P Device), but we won't be able * to share the TSF resource. */ break; } /* * Unless we exited above, we can't share the TSF resource * that the virtual interface we're iterating over is using * with the new one, so clear the available bit and if this * was the preferred one, reset that as well. */ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); if (data->preferred_tsf == mvmvif->tsf_id) data->preferred_tsf = NUM_TSF_IDS; } static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { data->found_vif = true; return; } /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not * compatible with the new interface type. * No locking or atomic bit operations are needed since the * data is on the stack of the caller function. 
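 *
 * For example, starting from available_mac_ids =
 * (1 << NUM_MAC_INDEX_DRIVER) - 1 (all IDs free), each interface
 * visited here clears its own bit, and iwl_mvm_mac_ctxt_init() later
 * picks the lowest bit still set with find_first_bit().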
*/ __clear_bit(mvmvif->id, data->available_mac_ids); /* find a suitable tsf_id */ iwl_mvm_mac_tsf_id_iter(_data, mac, vif); } void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, }; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_tsf_id_iter, &data); if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); } int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; int ret, i; lockdep_assert_held(&mvm->mutex); /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. */ /* * Before the iterator, we start with all MAC IDs and TSFs available. * * During iteration, all MAC IDs are cleared that are in use by other * virtual interfaces, and all TSF IDs are cleared that can't be used * by this new virtual interface because they're used by an interface * that can't share it with the new one. * At the same time, we check if there's a preferred TSF in the case * that we should share it with another interface. */ - /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ - switch (vif->type) { - case NL80211_IFTYPE_ADHOC: - break; - case NL80211_IFTYPE_STATION: - if (!vif->p2p) + /* MAC ID 0 should be used only for the managed/IBSS vif with non-MLO + * FW API + */ + if (!mvm->mld_api_is_used) { + switch (vif->type) { + case NL80211_IFTYPE_ADHOC: break; - fallthrough; - default: - __clear_bit(0, data.available_mac_ids); + case NL80211_IFTYPE_STATION: + if (!vif->p2p) + break; + fallthrough; + default: + __clear_bit(0, data.available_mac_ids); + } } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find * the vif being added already. * We don't want to reassign any IDs in either case since doing * so would probably assign different IDs (as interfaces aren't * necessarily added in the same order), but the old IDs were * preserved anyway, so skip ID assignment for both resume and * recovery. 
*/ if (data.found_vif) return 0; /* Therefore, in recovery, we can't get here */ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return -EBUSY; mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER); if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); ret = -EIO; goto exit_fail; } if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); if (mvmvif->tsf_id == NUM_TSF_IDS) { IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); ret = -EIO; goto exit_fail; } mvmvif->color = 0; INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). */ - mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) - mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; + mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; return 0; exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); return ret; } static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; unsigned long basic = vif->bss_conf.basic_rates; int lowest_present_ofdm = 100; int lowest_present_cck = 100; u8 cck = 0; u8 ofdm = 0; int i; sband = mvm->hw->wiphy->bands[band]; for_each_set_bit(i, &basic, BITS_PER_LONG) { int hw = sband->bitrates[i].hw_value; if (hw >= IWL_FIRST_OFDM_RATE) { ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); if (lowest_present_ofdm > hw) lowest_present_ofdm = hw; } else { BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); cck |= BIT(hw); if (lowest_present_cck > hw) lowest_present_cck = hw; } } /* * Now we've got the basic rates as bitmaps in the ofdm and cck * variables. This isn't sufficient though, as there might not * be all the right rates in the bitmap. E.g. if the only basic * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps * and 6 Mbps because the 802.11-2007 standard says in 9.6: * * [...] a STA responding to a received frame shall transmit * its Control Response frame [...] at the highest rate in the * BSSBasicRateSet parameter that is less than or equal to the * rate of the immediately previous frame in the frame exchange * sequence ([...]) and that is of the same modulation class * ([...]) as the received frame. If no rate contained in the * BSSBasicRateSet parameter meets these conditions, then the * control frame sent in response to a received frame shall be * transmitted at the highest mandatory rate of the PHY that is * less than or equal to the rate of the received frame, and * that is of the same modulation class as the received frame. 
* * As a consequence, we need to add all mandatory rates that are * lower than all of the basic rates to these bitmaps. */ if (IWL_RATE_24M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; if (IWL_RATE_12M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; /* 6M already there or needed so always add */ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; /* * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. * Note, however: * - if no CCK rates are basic, it must be ERP since there must * be some basic rates at all, so they're OFDM => ERP PHY * (or we're in 5 GHz, and the cck bitmap will never be used) * - if 11M is a basic rate, it must be ERP as well, so add 5.5M * - if 5.5M is basic, 1M and 2M are mandatory * - if 2M is basic, 1M is mandatory * - if 1M is basic, that's the only valid ACK rate. * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. */ if (IWL_RATE_11M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_5M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_2M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; *cck_rates = cck; *ofdm_rates = ofdm; } -static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd) +void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *cck_rates, __le32 *ofdm_rates) +{ + struct ieee80211_chanctx_conf *chanctx; + u8 cck_ack_rates = 0, ofdm_ack_rates = 0; + + rcu_read_lock(); + chanctx = rcu_dereference(link_conf->chanctx_conf); + iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band + : NL80211_BAND_2GHZ, + &cck_ack_rates, &ofdm_ack_rates); + + rcu_read_unlock(); + + *cck_rates = cpu_to_le32((u32)cck_ack_rates); + *ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); +} + +void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *protection_flags, u32 ht_flag, + u32 tgg_flag) { /* for both sta and ap, ht_operation_mode hold the protection_mode */ - u8 protection_mode = vif->bss_conf.ht_operation_mode & + u8 protection_mode = link_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; - /* The fw does not distinguish between ht and fat */ - u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; + bool ht_enabled = !!(link_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION); + + if (link_conf->use_cts_prot) + *protection_flags |= cpu_to_le32(tgg_flag); + + IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", + link_conf->use_cts_prot, + link_conf->ht_operation_mode); + + if (!ht_enabled) + return; IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); /* * See section 9.23.3.1 of IEEE 80211-2012. * Nongreenfield HT STAs Present is not supported. 
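 *
 * In short: NONMEMBER and NONHT_MIXED set the HT/FAT protection
 * flag unconditionally, 20MHZ sets it only when the channel is
 * wider than 20 MHz, and NONE leaves the protection flags untouched.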
*/ switch (protection_mode) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - cmd->protection_flags |= cpu_to_le32(ht_flag); + *protection_flags |= cpu_to_le32(ht_flag); break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ - if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) - cmd->protection_flags |= cpu_to_le32(ht_flag); + if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20) + *protection_flags |= cpu_to_le32(ht_flag); break; default: IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode); break; } } -static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd, - const u8 *bssid_override, - u32 action) +void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct iwl_ac_qos *ac, __le32 *qos_flags) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_chanctx_conf *chanctx; - bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & - IEEE80211_HT_OP_MODE_PROTECTION); - u8 cck_ack_rates, ofdm_ack_rates; - const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; int i; - cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - cmd->action = cpu_to_le32(action); + if (!mvm_link) + return; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); + u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); + + ac[ucode_ac].cw_min = + cpu_to_le16(mvm_link->queue_params[i].cw_min); + ac[ucode_ac].cw_max = + cpu_to_le16(mvm_link->queue_params[i].cw_max); + ac[ucode_ac].edca_txop = + cpu_to_le16(mvm_link->queue_params[i].txop * 32); + ac[ucode_ac].aifsn = mvm_link->queue_params[i].aifs; + ac[ucode_ac].fifos_mask = BIT(txf); + } + + if (link_conf->qos) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); + + if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); +} + +int iwl_mvm_get_mac_type(struct ieee80211_vif *vif) +{ + u32 mac_type = FW_MAC_TYPE_BSS_STA; switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->p2p) - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); + mac_type = FW_MAC_TYPE_P2P_STA; else - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); + mac_type = FW_MAC_TYPE_BSS_STA; break; case NL80211_IFTYPE_AP: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); + mac_type = FW_MAC_TYPE_GO; break; case NL80211_IFTYPE_MONITOR: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); + mac_type = FW_MAC_TYPE_LISTENER; break; case NL80211_IFTYPE_P2P_DEVICE: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); + mac_type = FW_MAC_TYPE_P2P_DEVICE; break; case NL80211_IFTYPE_ADHOC: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); + mac_type = FW_MAC_TYPE_IBSS; break; default: WARN_ON_ONCE(1); } + return mac_type; +} + +static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd, + const u8 *bssid_override, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; + u32 ht_flag; + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd->action = cpu_to_le32(action); + cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif)); cmd->tsf_id = 
cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); if (bssid) memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); - rcu_read_lock(); - chanctx = rcu_dereference(vif->chanctx_conf); - iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band - : NL80211_BAND_2GHZ, - &cck_ack_rates, &ofdm_ack_rates); - rcu_read_unlock(); - - cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); - cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); + iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates, + &cmd->ofdm_rates); cmd->cck_short_preamble = cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0); cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? MAC_FLG_SHORT_SLOT : 0); cmd->filter_flags = 0; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); - u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); - - cmd->ac[ucode_ac].cw_min = - cpu_to_le16(mvmvif->queue_params[i].cw_min); - cmd->ac[ucode_ac].cw_max = - cpu_to_le16(mvmvif->queue_params[i].cw_max); - cmd->ac[ucode_ac].edca_txop = - cpu_to_le16(mvmvif->queue_params[i].txop * 32); - cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs; - cmd->ac[ucode_ac].fifos_mask = BIT(txf); - } - - if (vif->bss_conf.qos) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - - if (vif->bss_conf.use_cts_prot) - cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); + iwl_mvm_set_fw_qos_params(mvm, vif, &vif->bss_conf, cmd->ac, + &cmd->qos_flags); - IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", - vif->bss_conf.use_cts_prot, - vif->bss_conf.ht_operation_mode); - if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); - if (ht_enabled) - iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); + /* The fw does not distinguish between ht and fat */ + ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; + iwl_mvm_set_fw_protection_flags(mvm, vif, &vif->bss_conf, + &cmd->protection_flags, + ht_flag, MAC_PROT_FLG_TGG_PROTECT); } static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_ctx_cmd *cmd) { int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd); if (ret) - IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", + IWL_ERR(mvm, "Failed to send MAC_CONTEXT_CMD (action:%d): %d\n", le32_to_cpu(cmd->action), ret); return ret; } +void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le64 *dtim_tsf, __le32 *dtim_time, + __le32 *assoc_beacon_arrive_time) +{ + u32 dtim_offs; + + /* + * The DTIM count counts down, so when it is N that means N + * more beacon intervals happen until the DTIM TBTT. Therefore + * add this to the current time. If that ends up being in the + * future, the firmware will handle it. + * + * Also note that the system_timestamp (which we get here as + * "sync_device_ts") and TSF timestamp aren't at exactly the + * same offset in the frame -- the TSF is at the first symbol + * of the TSF, the system timestamp is at signal acquisition + * time. This means there's an offset between them of at most + * a few hundred microseconds (24 * 8 bits + PLCP time gives + * 384us in the longest case), this is currently not relevant + * as the firmware wakes up around 2ms before the TBTT. 
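+ *
+ * Worked example: with sync_dtim_count == 2 and a 100 TU beacon
+ * interval, dtim_offs below is 200 TU == 204800 usec, added to both
+ * the synced TSF and the synced device timestamp.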
+ */ + dtim_offs = link_conf->sync_dtim_count * + link_conf->beacon_int; + /* convert TU to usecs */ + dtim_offs *= 1024; + + *dtim_tsf = + cpu_to_le64(link_conf->sync_tsf + dtim_offs); + *dtim_time = + cpu_to_le32(link_conf->sync_device_ts + dtim_offs); + *assoc_beacon_arrive_time = + cpu_to_le32(link_conf->sync_device_ts); + + IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", + le64_to_cpu(*dtim_tsf), + le32_to_cpu(*dtim_time), + dtim_offs); +} + +__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct ieee80211_p2p_noa_attr *noa = + &vif->bss_conf.p2p_noa_attr; + + return cpu_to_le32(noa->oppps_ctwindow & + IEEE80211_P2P_OPPPS_CTWINDOW_MASK); +} + +u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 twt_policy = 0; + + if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) + twt_policy |= TWT_SUPPORTED; + if (vif->bss_conf.twt_protected) + twt_policy |= PROTECTED_TWT_SUPPORTED; + if (vif->bss_conf.twt_broadcast) + twt_policy |= BROADCAST_TWT_SUPPORTED; + + return twt_policy; +} + static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, const u8 *bssid_override) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_data_sta *ctxt_sta; WARN_ON(vif->type != NL80211_IFTYPE_STATION); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); + /* + * We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + if (vif->p2p) { - struct ieee80211_p2p_noa_attr *noa = - &vif->bss_conf.p2p_noa_attr; + cmd.p2p_sta.ctwin = + iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif); - cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & - IEEE80211_P2P_OPPPS_CTWINDOW_MASK); ctxt_sta = &cmd.p2p_sta.sta; } else { ctxt_sta = &cmd.sta; } /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + if (vif->cfg.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 ap_sta_id = mvmvif->ap_sta_id; - u32 dtim_offs; - /* - * The DTIM count counts down, so when it is N that means N - * more beacon intervals happen until the DTIM TBTT. Therefore - * add this to the current time. If that ends up being in the - * future, the firmware will handle it. - * - * Also note that the system_timestamp (which we get here as - * "sync_device_ts") and TSF timestamp aren't at exactly the - * same offset in the frame -- the TSF is at the first symbol - * of the TSF, the system timestamp is at signal acquisition - * time. This means there's an offset between them of at most - * a few hundred microseconds (24 * 8 bits + PLCP time gives - * 384us in the longest case), this is currently not relevant - * as the firmware wakes up around 2ms before the TBTT. 
- */ - dtim_offs = vif->bss_conf.sync_dtim_count * - vif->bss_conf.beacon_int; - /* convert TU to usecs */ - dtim_offs *= 1024; - - ctxt_sta->dtim_tsf = - cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); - ctxt_sta->dtim_time = - cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); - ctxt_sta->assoc_beacon_arrive_time = - cpu_to_le32(vif->bss_conf.sync_device_ts); - - IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", - le64_to_cpu(ctxt_sta->dtim_tsf), - le32_to_cpu(ctxt_sta->dtim_time), - dtim_offs); + iwl_mvm_set_fw_dtim_tbtt(mvm, vif, &vif->bss_conf, + &ctxt_sta->dtim_tsf, + &ctxt_sta->dtim_time, + &ctxt_sta->assoc_beacon_arrive_time); ctxt_sta->is_assoc = cpu_to_le32(1); if (!mvmvif->authorized && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) ctxt_sta->data_policy |= cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); - - /* - * allow multicast data frames only as long as the station is - * authorized, i.e., GTK keys are already installed (if needed) - */ - if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { - struct ieee80211_sta *sta; - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvmsta = - iwl_mvm_sta_from_mac80211(sta); - - if (mvmsta->sta_state == - IEEE80211_STA_AUTHORIZED) - cmd.filter_flags |= - cpu_to_le32(MAC_FILTER_ACCEPT_GRP); - } - - rcu_read_unlock(); - } } else { ctxt_sta->is_assoc = cpu_to_le32(0); /* Allow beacons to pass through as long as we are not * associated, or we do not have dtim period information. */ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); } ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); - ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); + ctxt_sta->assoc_id = cpu_to_le32(vif->cfg.aid); - if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) + if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) - ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); - if (vif->bss_conf.twt_protected) - ctxt_sta->data_policy |= - cpu_to_le32(PROTECTED_TWT_SUPPORTED); - if (vif->bss_conf.twt_broadcast) - ctxt_sta->data_policy |= - cpu_to_le32(BROADCAST_TWT_SUPPORTED); + ctxt_sta->data_policy |= + cpu_to_le32(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif)); } return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; - u32 tfd_queue_msk = BIT(mvm->snif_queue); + u32 tfd_queue_msk = 0; int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_IN_CRC32 | MAC_FILTER_ACCEPT_GRP); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + /* + * the queue mask is only relevant for old TX API, and + * mvm->snif_queue isn't set here (it's still set to + * IWL_MVM_INVALID_QUEUE so the BIT() of it is UB) + */ + if (!iwl_mvm_has_new_tx_api(mvm)) + tfd_queue_msk = BIT(mvm->snif_queue); + /* Allocate sniffer station */ ret = 
iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, vif->type, IWL_STA_GENERAL_PURPOSE); if (ret) return ret; return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_ACCEPT_GRP); /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); /* TODO: Assumes that the beacon id == mac context id */ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } struct iwl_mvm_go_iterator_data { bool go_active; }; static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_go_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_ibss_active) data->go_active = true; } -static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) +__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { - struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mvm_go_iterator_data data = {}; - WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); - - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - /* Override the filter flags to accept only probe requests */ - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - /* * This flag should be set to true when the P2P Device is * discoverable and there is at least another active P2P GO. Settings * this flag will allow the P2P Device to be discoverable on other * channels in addition to its listen channel. * Note that this flag should not be set in other cases as it opens the * Rx filters on all MAC and increases the number of interrupts. */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_go_iterator, &data); - cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0); + return cpu_to_le32(data.go_active ? 1 : 0); +} + +static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + cmd.p2p_dev.is_disc_extended = + iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif); + + /* Override the filter flags to accept only probe requests */ + cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size) { u32 tim_idx; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; /* The index is relative to frame start but we start looking at the * variable-length part of the beacon. 
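 *
 * For a normal beacon this means the search starts at offset 36:
 * the 24-byte 802.11 header plus the fixed timestamp (8 bytes),
 * beacon interval (2) and capability info (2) fields.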
*/ tim_idx = mgmt->u.beacon.variable - beacon; /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ while ((tim_idx < (frame_size - 2)) && (beacon[tim_idx] != WLAN_EID_TIM)) tim_idx += beacon[tim_idx+1] + 2; /* If TIM field was found, set variables */ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { *tim_index = cpu_to_le32(tim_idx); *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]); } else { IWL_WARN(mvm, "Unable to find TIM Element in beacon\n"); } } static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) { struct ieee80211_mgmt *mgmt = (void *)beacon; const u8 *ie; if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) return 0; frame_size -= mgmt->u.beacon.variable - beacon; ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size); if (!ie) return 0; return ie - beacon; } -u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_supported_band *sband; + unsigned long basic = vif->bss_conf.basic_rates; + u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT; + u32 link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + u8 band = info->band; u8 rate; - if (info->band == NL80211_BAND_2GHZ && !vif->p2p) - rate = IWL_FIRST_CCK_RATE; - else - rate = IWL_FIRST_OFDM_RATE; + u32 i; + + if (link_id == IEEE80211_LINK_UNSPECIFIED && ieee80211_vif_is_mld(vif)) { + for (i = 0; i < ARRAY_SIZE(mvmvif->link); i++) { + if (!mvmvif->link[i]) + continue; + /* shouldn't do this when >1 link is active */ + WARN_ON_ONCE(link_id != IEEE80211_LINK_UNSPECIFIED); + link_id = i; + } + } + + if (link_id < IEEE80211_LINK_UNSPECIFIED) { + struct ieee80211_bss_conf *link_conf; + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (link_conf) { + basic = link_conf->basic_rates; + if (link_conf->chandef.chan) + band = link_conf->chandef.chan->band; + } + rcu_read_unlock(); + } + + sband = mvm->hw->wiphy->bands[band]; + for_each_set_bit(i, &basic, BITS_PER_LONG) { + u16 hw = sband->bitrates[i].hw_value; + + if (hw >= IWL_FIRST_OFDM_RATE) { + if (lowest_ofdm > hw) + lowest_ofdm = hw; + } else if (lowest_cck > hw) { + lowest_cck = hw; + } + } + + if (band == NL80211_BAND_2GHZ && !vif->p2p && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) { + if (lowest_cck != IWL_RATE_COUNT) + rate = lowest_cck; + else if (lowest_ofdm != IWL_RATE_COUNT) + rate = lowest_ofdm; + else + rate = IWL_RATE_1M_INDEX; + } else if (lowest_ofdm != IWL_RATE_COUNT) { + rate = lowest_ofdm; + } else { + rate = IWL_RATE_6M_INDEX; + } return rate; } u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) { u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10; if (rate_idx <= IWL_FIRST_CCK_RATE) flags |= is_new_rate ? 
IWL_MAC_BEACON_CCK : IWL_MAC_BEACON_CCK_V1; return flags; } +u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband = + mvm->hw->wiphy->bands[info->band]; + u32 legacy = vif->bss_conf.beacon_tx_rate.control[info->band].legacy; + + /* if beacon rate was configured try using it */ + if (hweight32(legacy) == 1) { + u32 rate = ffs(legacy) - 1; + + return sband->bitrates[rate].hw_value; + } + + return iwl_mvm_mac_ctxt_get_lowest_rate(mvm, info, vif); +} + static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info; u8 rate; u32 tx_flags; info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ tx->len = cpu_to_le16((u16)beacon->len); - tx->sta_id = mvmvif->bcast_sta.sta_id; + tx->sta_id = mvmvif->deflink.bcast_sta.sta_id; tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate)); if (rate == IWL_FIRST_CCK_RATE) tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1); } int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len) { struct iwl_host_cmd cmd = { .id = BEACON_TEMPLATE_CMD, .flags = CMD_ASYNC, }; cmd.len[0] = len; cmd.data[0] = data; cmd.dataflags[0] = 0; cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif, - struct sk_buff *beacon) + struct sk_buff *beacon, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; - u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + u8 rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); u16 flags; struct ieee80211_chanctx_conf *ctx; int channel; flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate); /* Enable FILS on PSC channels only */ rcu_read_lock(); - ctx = rcu_dereference(vif->chanctx_conf); + ctx = rcu_dereference(link_conf->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && !IWL_MVM_DISABLE_AP_FILS) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? IWL_MAC_BEACON_FILS : IWL_MAC_BEACON_FILS_V1; beacon_cmd.short_ssid = - cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, - vif->bss_conf.ssid_len)); + cpu_to_le32(~crc32_le(~0, vif->cfg.ssid, + vif->cfg.ssid_len)); } rcu_read_unlock(); beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (WARN_ON(!mvmvif->link[link_conf->link_id])) + return -EINVAL; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12) + beacon_cmd.link_id = + cpu_to_le32(mvmvif->link[link_conf->link_id]->fw_link_id); + else + beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } -int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct ieee80211_bss_conf *link_conf) { if (WARN_ON(!beacon)) return -EINVAL; if (IWL_MVM_NON_TRANSMITTING_AP) return 0; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) - return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); + return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon, + link_conf); return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); } /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct sk_buff *beacon; int ret; WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC); - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, + link_conf->link_id); if (!beacon) return -ENOMEM; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->beacon_inject_active) { dev_kfree_skb(beacon); return -EBUSY; } #endif - ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon, link_conf); 
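/* the beacon template skb is always freed here, whether or not the command was sent successfully */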
dev_kfree_skb(beacon); return ret; } struct iwl_mvm_mac_ap_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; u32 beacon_device_ts; u16 beacon_int; }; /* Find the beacon_device_ts and beacon_int for a managed interface */ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_ap_iterator_data *data = _data; - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) return; /* Station client has higher priority over P2P client*/ if (vif->p2p && data->beacon_device_ts) return; data->beacon_device_ts = vif->bss_conf.sync_device_ts; data->beacon_int = vif->bss_conf.beacon_int; } +/* + * Fill the filter flags for mac context of type AP or P2P GO. + */ +void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + __le32 *filter_flags, + int accept_probe_req_flag, + int accept_beacon_flag) +{ + /* + * in AP mode, pass probe requests and beacons from other APs + * (needed for ht protection); when there're no any associated + * station don't ask FW to pass beacons to prevent unnecessary + * wake-ups. + */ + *filter_flags |= cpu_to_le32(accept_probe_req_flag); + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { + *filter_flags |= cpu_to_le32(accept_beacon_flag); + IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); + } else { + IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); + } +} + /* * Fill the specific data for mac context of type AP of P2P GO */ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, struct iwl_mac_data_ap *ctxt_ap, bool add) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_ap_iterator_data data = { .mvm = mvm, .vif = vif, .beacon_device_ts = 0 }; /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); - /* - * in AP mode, pass probe requests and beacons from other APs - * (needed for ht protection); when there're no any associated - * station don't ask FW to pass beacons to prevent unnecessary - * wake-ups. - */ - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); - } else { - IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); - } + iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif, + &cmd->filter_flags, + MAC_FILTER_IN_PROBE_REQUEST, + MAC_FILTER_IN_BEACON); ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue); + ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->deflink.cab_queue); /* * Only set the beacon time when the MAC is being added, when we * just modify the MAC then we should keep the time -- the firmware * can otherwise have a "jumping" TBTT. */ if (add) { /* * If there is a station/P2P client interface which is * associated, set the AP's TBTT far enough from the station's * TBTT. 
Otherwise, set it to the current system time */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_ap_iterator, &data); if (data.beacon_device_ts) { - u32 rand = (prandom_u32() % (64 - 36)) + 36; + u32 rand = get_random_u32_inclusive(36, 63); mvmvif->ap_beacon_time = data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100); } else { mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm); } } ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); ctxt_ap->beacon_tsf = 0; /* unused */ /* TODO: Assume that the beacon id == mac context id */ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); } static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for ap mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for GO mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, const u8 *bssid_override) { switch (vif->type) { case NL80211_IFTYPE_STATION: return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, force_assoc_off, bssid_override); case NL80211_IFTYPE_AP: if (!vif->p2p) return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); else return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); case NL80211_IFTYPE_MONITOR: return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); case NL80211_IFTYPE_P2P_DEVICE: return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action); case NL80211_IFTYPE_ADHOC: return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action); default: break; } return -EOPNOTSUPP; } int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, true, NULL); if (ret) return ret; /* will only do anything at resume from D3 time */ iwl_mvm_set_last_nonqos_seq(mvm, vif); mvmvif->uploaded = true; return 0; } int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, force_assoc_off, bssid_override); } int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_ctx_cmd cmd; int ret; if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", vif->addr, ieee80211_vif_type_p2p(vif))) return -EIO; memset(&cmd, 0, sizeof(cmd)); cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); - ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, - sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); + ret = iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); + if (ret) return ret; - } mvmvif->uploaded = false; if (vif->type == NL80211_IFTYPE_MONITOR) { __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); iwl_mvm_dealloc_snif_sta(mvm); } return 0; } static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, struct ieee80211_vif *csa_vif, u32 gp2, bool tx_success) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); /* Don't start to countdown from a failed beacon */ if (!tx_success && !mvmvif->csa_countdown) return; mvmvif->csa_countdown = true; if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { int c = ieee80211_beacon_update_cntdwn(csa_vif); - iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); + iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif, + &csa_vif->bss_conf); if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) { u32 rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO; u32 apply_time = gp2 + rel_time * 1024; iwl_mvm_schedule_csa_period(mvm, csa_vif, IWL_MVM_CHANNEL_SWITCH_TIME_GO - IWL_MVM_CHANNEL_SWITCH_MARGIN, apply_time); } } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { /* we don't have CSA NoA scheduled yet, switch now */ ieee80211_csa_finish(csa_vif); RCU_INIT_POINTER(mvm->csa_vif, NULL); } } void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; struct agg_tx_status *agg_status; u16 status; lockdep_assert_held(&mvm->mutex); mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { struct iwl_mvm_tx_resp *beacon_notify_hdr = &beacon_v5->beacon_notify_hdr; if (unlikely(pkt_len < sizeof(*beacon_v5))) return; mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0; agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2, le32_to_cpu(beacon_notify_hdr->initial_rate)); } else { if (unlikely(pkt_len < sizeof(*beacon))) return; mvm->ibss_manager = beacon->ibss_mgr_status != 0; status = le32_to_cpu(beacon->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x tsf:0x%016llX gp2:0x%X\n", status, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2); } csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); - if (unlikely(csa_vif && csa_vif->csa_active)) + if (unlikely(csa_vif && csa_vif->bss_conf.csa_active)) iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, (status == TX_STATUS_SUCCESS)); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, 
lockdep_is_held(&mvm->mutex)); if (unlikely(tx_blocked_vif)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); /* * The channel switch is started and we have blocked the * stations. If this is the first beacon (the timeout wasn't * set), set the unblock timeout, otherwise countdown */ if (!mvm->csa_tx_block_bcn_timeout) mvm->csa_tx_block_bcn_timeout = IWL_MVM_CS_UNBLOCK_TX_TIMEOUT; else mvm->csa_tx_block_bcn_timeout--; /* Check if the timeout is expired, and unblock tx */ if (mvm->csa_tx_block_bcn_timeout == 0) { iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); } } } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; struct ieee80211_vif *vif; - u32 id = le32_to_cpu(mb->mac_id); + /* Id can be mac/link id depending on the notification version */ + u32 id = le32_to_cpu(mb->link_id); union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; + u32 mac_type; + u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + MISSED_BEACONS_NOTIFICATION, + 0); + + rcu_read_lock(); + + /* before version four the ID in the notification refers to mac ID */ + if (notif_ver < 4) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + } else { + struct ieee80211_bss_conf *bss_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + + if (!bss_conf) + goto out; + + vif = bss_conf->vif; + } IWL_DEBUG_INFO(mvm, - "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", - le32_to_cpu(mb->mac_id), + "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n", + notif_ver < 4 ? "mac" : "link", + id, le32_to_cpu(mb->consec_missed_beacons), le32_to_cpu(mb->consec_missed_beacons_since_last_rx), le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons)); - rcu_read_lock(); - - vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) goto out; + mac_type = iwl_mvm_get_mac_type(vif); + + IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u,\n", mac_type); + + mvm->trans->dbg.dump_file_name_ext_valid = true; + snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "MacId_%d_MacType_%d", id, mac_type); + rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = le32_to_cpu(mb->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. 
*/ if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) iwl_mvm_connection_loss(mvm, vif, "missed beacons"); else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data); trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) goto out; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); stop_trig_missed_bcon_since_rx = le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); /* TODO: implement start trigger */ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) #if defined(__linux__) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); #elif defined(__FreeBSD__) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, ""); #endif out: rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data; struct ieee80211_rx_status rx_status; struct sk_buff *skb; u8 *data; u32 size = le32_to_cpu(sb->byte_count); int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF), 0); if (size == 0) return; /* handle per-version differences */ if (ver <= 2) { struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data; if (pkt_len < struct_size(sb_v2, data, size)) return; data = sb_v2->data; } else { struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data; if (pkt_len < struct_size(sb_v3, data, size)) return; data = sb_v3->data; } skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } /* update rx_status according to the notification's metadata */ memset(&rx_status, 0, sizeof(rx_status)); rx_status.mactime = le64_to_cpu(sb->tsf); /* TSF as indicated by the firmware is at INA time */ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band); /* copy the data */ skb_put_data(skb, data, size); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; struct iwl_probe_resp_data *old_data, *new_data; u32 id = le32_to_cpu(notif->mac_id); struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", notif->noa_active, notif->csa_counter); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); if (!vif) return; mvmvif = iwl_mvm_vif_from_mac80211(vif); new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); if (!new_data) return; memcpy(&new_data->notif, notif, sizeof(new_data->notif)); /* noa_attr contains 1 reserved byte, need to substruct it */ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + sizeof(new_data->notif.noa_attr) - 1; /* * If it's a one time NoA, only one descriptor is needed, * adjust the length according to len_low. 
*/ if (new_data->notif.noa_attr.len_low == sizeof(struct ieee80211_p2p_noa_desc) + 2) new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc); - old_data = rcu_dereference_protected(mvmvif->probe_resp_data, - lockdep_is_held(&mvmvif->mvm->mutex)); - rcu_assign_pointer(mvmvif->probe_resp_data, new_data); + old_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvmvif->mvm->mutex)); + rcu_assign_pointer(mvmvif->deflink.probe_resp_data, new_data); if (old_data) kfree_rcu(old_data, rcu_head); if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && notif->csa_counter >= 1) ieee80211_beacon_set_cntdwn(vif, notif->csa_counter); } void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; struct ieee80211_vif *csa_vif, *vif; - struct iwl_mvm_vif *mvmvif; - u32 id_n_color, csa_id, mac_id; + struct iwl_mvm_vif *mvmvif, *csa_mvmvif; + u32 id_n_color, csa_id; + /* save mac_id or link_id to use later to cancel csa if needed */ + u32 id; + u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_START_NOTIF, 0); + bool csa_active; - id_n_color = le32_to_cpu(notif->id_and_color); - mac_id = id_n_color & FW_CTXT_ID_MSK; + rcu_read_lock(); - if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER)) - return; + if (notif_ver < 3) { + struct iwl_channel_switch_start_notif_v1 *notif = (void *)pkt->data; + u32 mac_id; + + id_n_color = le32_to_cpu(notif->id_and_color); + mac_id = id_n_color & FW_CTXT_ID_MSK; + + vif = iwl_mvm_rcu_dereference_vif_id(mvm, mac_id, true); + if (!vif) + goto out_unlock; + + id = mac_id; + csa_active = vif->bss_conf.csa_active; + } else { + struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; + u32 link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *bss_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_id, true); + + if (!bss_conf) + goto out_unlock; + + id = link_id; + vif = bss_conf->vif; + csa_active = bss_conf->csa_active; + } - rcu_read_lock(); - vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]); mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (notif_ver >= 3) + id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference(mvm->csa_vif); - if (WARN_ON(!csa_vif || !csa_vif->csa_active || + if (WARN_ON(!csa_vif || !csa_vif->bss_conf.csa_active || csa_vif != vif)) goto out_unlock; - csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); + csa_mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); + csa_id = FW_CMD_ID_AND_COLOR(csa_mvmvif->id, csa_mvmvif->color); if (WARN(csa_id != id_n_color, "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", csa_id, id_n_color)) goto out_unlock; IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n"); schedule_delayed_work(&mvm->cs_tx_unblock_dwork, msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * csa_vif->bss_conf.beacon_int)); ieee80211_csa_finish(csa_vif); rcu_read_unlock(); RCU_INIT_POINTER(mvm->csa_vif, NULL); return; case NL80211_IFTYPE_STATION: /* * if we don't know about an ongoing channel switch, * make sure FW cancels it */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, - 0) && !vif->csa_active) { + 0) && !csa_active) { IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n"); - iwl_mvm_cancel_channel_switch(mvm, vif, mac_id); + iwl_mvm_cancel_channel_switch(mvm, vif, id); break; } 
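The channel-switch-start handler above now parses the notification by version: before version 3 the payload identifies the interface by a MAC id_and_color, from version 3 on it carries a firmware link ID that is first resolved to a link's bss_conf. The sketch below models only that version dispatch; the struct layouts, mask and helper here are invented for illustration and do not match the real firmware ABI.

	/* Illustrative model of version-dependent notification parsing. */
	#include <stdint.h>
	#include <stdio.h>

	struct notif_v1 { uint32_t id_and_color; };	/* MAC index in the low bits */
	struct notif_v3 { uint32_t link_id; };		/* firmware link ID */

	#define CTXT_ID_MASK 0xff	/* invented stand-in for the id/color mask */

	static int resolve_interface(const void *payload, unsigned int notif_ver)
	{
		if (notif_ver < 3) {
			const struct notif_v1 *n = payload;

			return n->id_and_color & CTXT_ID_MASK;	/* MAC index */
		} else {
			const struct notif_v3 *n = payload;

			return (int)n->link_id;			/* link index */
		}
	}

	int main(void)
	{
		struct notif_v1 n_old = { .id_and_color = 0x0502 };
		struct notif_v3 n_new = { .link_id = 3 };

		printf("v1 -> mac %d\n", resolve_interface(&n_old, 1));
		printf("v3 -> link %d\n", resolve_interface(&n_new, 3));
		return 0;
	}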
iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(vif, true); break; default: /* should never happen */ WARN_ON_ONCE(1); break; } out_unlock: rcu_read_unlock(); } void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_channel_switch_error_notif *notif = (void *)pkt->data; struct ieee80211_vif *vif; - u32 id = le32_to_cpu(notif->mac_id); + u32 id = le32_to_cpu(notif->link_id); u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) { rcu_read_unlock(); return; } - IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n", + IWL_DEBUG_INFO(mvm, "FW reports CSA error: id=%u, csa_err_mask=%u\n", id, csa_err_mask); if (csa_err_mask & (CS_ERR_COUNT_ERROR | CS_ERR_LONG_DELAY_AFTER_CS | CS_ERR_TX_BLOCK_TIMER_EXPIRED)) ieee80211_channel_switch_disconnect(vif, true); rcu_read_unlock(); } void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_vap_notif *mb = (void *)pkt->data; struct ieee80211_vif *vif; u32 id = le32_to_cpu(mb->mac_id); IWL_DEBUG_INFO(mvm, "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n", le32_to_cpu(mb->mac_id), mb->num_beacon_intervals_elapsed, mb->profile_periodicity); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (vif) iwl_mvm_connection_loss(mvm, vif, "missed vap beacon"); rcu_read_unlock(); } diff --git a/sys/contrib/dev/iwlwifi/mvm/mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mac80211.c index f8ddf87f3eaa..9c3f74d63317 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac80211.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac80211.c @@ -1,5522 +1,6318 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #endif #include #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" #include "sta.h" #include "time-event.h" #include "iwl-eeprom-parse.h" #include "iwl-phy-db.h" #ifdef CONFIG_NL80211_TESTMODE #include "testmode.h" #endif #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" +#include "time-sync.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE), }, }; static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { { .num_different_channels = 2, .max_interfaces = 3, .limits = iwl_mvm_limits, .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, }; static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, .randomize_mac_addr = 1, .ftm = { .supported = 1, .asap = 1, .non_asap = 1, .request_lci = 1, .request_civicloc = 1, .trigger_based = 1, .non_trigger_based = 1, .max_bursts_exponent = -1, /* all supported */ .max_ftms_per_burst = 0, /* no limits */ .bandwidths = 
BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), .preambles = BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | BIT(NL80211_PREAMBLE_HE), }, }; static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { int i; memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); for (i = 0; i < NUM_PHY_CTX; i++) { mvm->phy_ctxts[i].id = i; mvm->phy_ctxts[i].ref = 0; } } struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed) { struct ieee80211_regdomain *regd = NULL; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mcc_update_resp *resp; + struct iwl_mcc_update_resp_v8 *resp; u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); lockdep_assert_held(&mvm->mutex); resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); resp = NULL; goto out; } if (changed) { u32 status = le32_to_cpu(resp->status); *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, MCC_UPDATE_CMD, 0); IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), - __le16_to_cpu(resp->cap), resp_ver); + le32_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); goto out; } IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); mvm->lar_regdom_set = true; mvm->mcc_src = src_id; iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); out: kfree(resp); return regd; } void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) { bool changed; struct ieee80211_regdomain *regd; if (!iwl_mvm_is_lar_supported(mvm)) return; regd = iwl_mvm_get_current_regdomain(mvm, &changed); if (!IS_ERR_OR_NULL(regd)) { /* only update the regulatory core if changed */ if (changed) regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } } struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed) { return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", iwl_mvm_is_wifi_mcc_supported(mvm) ? 
MCC_SOURCE_GET_CURRENT : MCC_SOURCE_OLD_FW, changed); } int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; int ret; bool changed; const struct ieee80211_regdomain *r = wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); if (!r) return -ENOENT; /* save the last source in case we overwrite it below */ used_src = mvm->mcc_src; if (iwl_mvm_is_wifi_mcc_supported(mvm)) { /* Notify the firmware we support wifi location updates */ regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (!IS_ERR_OR_NULL(regd)) kfree(regd); } /* Now set our last stored MCC and source */ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, &changed); if (IS_ERR_OR_NULL(regd)) return -EIO; /* update cfg80211 if the regdomain was changed */ if (changed) ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); else ret = 0; kfree(regd); return ret; } +/* Each capability added here should also be add to tm_if_types_ext_capa_sta */ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, - [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, }; -static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { +static const u8 tm_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT | + WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +/* Additional interface types for which extended capabilities are + * specified separately + */ + +#define IWL_MVM_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \ + IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \ + __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \ + IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \ + __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)) + +static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = he_if_types_ext_capa_sta, .extended_capabilities_mask = he_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = IWL_MVM_EMLSR_CAPA, + }, + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = tm_if_types_ext_capa_sta, + .extended_capabilities_mask = tm_if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = IWL_MVM_EMLSR_CAPA, }, }; -static int -iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); return 0; } int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; static const u32 mvm_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; #ifdef CONFIG_PM_SLEEP bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); #endif + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, 
sec_key_id, 0); /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, TIMING_BEACON_ONLY); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); - ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); ieee80211_hw_set(hw, STA_MMPDU_TXQ); + + /* Set this early since we need to have it for the check below */ + if (mvm->mld_api_is_used && + mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + + /* With MLD FW API, it tracks timing by itself, + * no need for any timing from the host + */ + if (!mvm->mld_api_is_used) + ieee80211_hw_set(hw, TIMING_BEACON_ONLY); + + /* We should probably have this, but mac80211 + * currently doesn't support it for MLO. + */ + if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) + ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); + /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO * gets out of sync and isn't a TX command, so that we have an * assert EDC. * * It's not clear where the bug is, but since we didn't used to * support A-MSDU until moving the mac80211 iTXQs, just leave it * for older devices. We also don't see this issue on any newer * devices. 
*/ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, HAS_RATE_CONTROL); } if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { /* * we absolutely need this for the new TX API since that comes * with many more queues than the current code can deal with * for station powersave */ return -EINVAL; } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->queues = IEEE80211_NUM_ACS; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; /* this is the case for CCK frames, it's better (only 8) for OFDM */ hw->radiotap_timestamp.accuracy = 22; if (!iwl_mvm_has_tlc_offload(mvm)) hw->rate_control_algorithm = RS_NAME; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP_256; hw->wiphy->n_cipher_suites++; } if (iwlwifi_mod_params.swcrypto) IWL_ERR(mvm, "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); if (!iwlwifi_mod_params.bt_coex_active) IWL_ERR(mvm, "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); ieee80211_hw_set(hw, MFP_CAPABLE); mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; hw->wiphy->n_cipher_suites++; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_128; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_256; hw->wiphy->n_cipher_suites++; } + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; } - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) + if (sec_key_ver && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION); + else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM)) + hw->wiphy->hw_timestamp_max_peers = 1; + 
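The registration code above picks the beacon-protection level to advertise: full protection when the new SEC_KEY command exists and the firmware reports BIGTK TX support, client-only protection with the older BIGTK capability, nothing otherwise. A minimal sketch of that decision, assuming invented flag and enum names rather than the driver's real symbols:

	#include <stdbool.h>
	#include <stdio.h>

	enum beacon_prot {
		BEACON_PROT_NONE,
		BEACON_PROT_CLIENT_ONLY,	/* protect beacons we receive */
		BEACON_PROT_FULL,		/* also protect beacons we transmit */
	};

	static enum beacon_prot pick_beacon_prot(bool has_sec_key_cmd,
						 bool fw_bigtk_tx, bool fw_bigtk_rx)
	{
		if (has_sec_key_cmd && fw_bigtk_tx)
			return BEACON_PROT_FULL;
		if (fw_bigtk_rx)
			return BEACON_PROT_CLIENT_ONLY;
		return BEACON_PROT_NONE;
	}

	int main(void)
	{
		printf("%d\n", pick_beacon_prot(true, true, true));	/* FULL */
		printf("%d\n", pick_beacon_prot(false, false, true));	/* CLIENT_ONLY */
		return 0;
	}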
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); /* The new Tx API does not allow to pass the key or keyid of a MPDU to * the hw, preventing us to control which key(id) to use per MPDU. * Till that's fixed we can't use Extended Key ID for the newer cards. */ if (!iwl_mvm_has_new_tx_api(mvm)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; else hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mvm_iface_combinations); hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); hw->wiphy->addresses = mvm->addresses; hw->wiphy->n_addresses = 1; /* Extract additional MAC addresses if available */ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 
min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; for (i = 1; i < num_mac; i++) { memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, ETH_ALEN); mvm->addresses[i].addr[5]++; hw->wiphy->n_addresses++; } iwl_mvm_reset_phy_ctxts(mvm); hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { hw->wiphy->bands[NL80211_BAND_5GHZ] = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_6GHZ] = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; hw->wiphy->hw_version = mvm->trans->hw_id; if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); /* we create the 802.11 header and zero length SSID IE. */ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; hw->wiphy->max_sched_scan_plan_interval = U16_MAX; /* * the firmware uses u8 for num of iterations, but 0xff is saved for * infinite loop, so the maximum number of iterations is actually 254. 
*/ hw->wiphy->max_sched_scan_plan_iterations = 254; hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, IWL_FW_CMD_VER_UNKNOWN) == 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); } if (iwl_mvm_is_oce_supported(mvm)) { u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); /* Old firmware also supports probe deferral and suppression */ if (scan_ver < 15) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } + hw->wiphy->iftype_ext_capab = NULL; + hw->wiphy->num_iftype_ext_capab = 0; + if (mvm->nvm_data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) { - hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = - ARRAY_SIZE(he_iftypes_ext_capa); + ARRAY_SIZE(add_iftypes_ext_capa) - 1; ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } + if (iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + IWL_FW_CMD_VER_UNKNOWN) >= 1) { + IWL_DEBUG_INFO(mvm->trans, "Timing measurement supported\n"); + + if (!hw->wiphy->iftype_ext_capab) { + hw->wiphy->num_iftype_ext_capab = 1; + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + + ARRAY_SIZE(add_iftypes_ext_capa) - 1; + } else { + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + 1; + } + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE | WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.max_nd_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); hw->wiphy->wowlan = &mvm->wowlan; } #endif ret = iwl_mvm_leds_init(mvm); if (ret) return ret; if 
(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; ieee80211_hw_set(hw, TDLS_WIDER_BW); } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); iwl_mvm_vendor_cmds_register(mvm); hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); ret = ieee80211_register_hw(mvm->hw); if (ret) { iwl_mvm_leds_exit(mvm); } return ret; } static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { if (likely(sta)) { if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) return; } else { if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) return; } ieee80211_free_txskb(mvm->hw, skb); } -static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; + u32 link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + struct ieee80211_sta *tmp_sta = sta; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; /* * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs * so we treat the others as broadcast */ if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); - u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); + u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id); if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { /* mac80211 holds rcu read lock */ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (IS_ERR_OR_NULL(sta)) goto drop; } } + if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED && + !ieee80211_is_probe_resp(hdr->frame_control)) { + /* translate MLD addresses to LINK addresses */ + struct ieee80211_link_sta *link_sta = + rcu_dereference(tmp_sta->link[link_id]); + struct ieee80211_bss_conf *link_conf = + rcu_dereference(info->control.vif->link_conf[link_id]); + struct ieee80211_mgmt *mgmt; + + if (WARN_ON(!link_sta || !link_conf)) + goto drop; + + /* if sta is NULL, the frame is a management frame */ + mgmt = (void *)hdr; + memcpy(mgmt->da, link_sta->addr, ETH_ALEN); + memcpy(mgmt->sa, link_conf->addr, ETH_ALEN); + memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN); + } + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: 
ieee80211_free_txskb(hw, skb); } void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); struct sk_buff *skb = NULL; /* * No need for threads to be pending here, they can leave the first * taker all the work. * * mvmtxq->tx_request logic: * * If 0, no one is currently TXing, set to 1 to indicate current thread * will now start TX and other threads should quit. * * If 1, another thread is currently TXing, set to 2 to indicate to * that thread that there was another request. Since that request may * have raced with the check whether the queue is empty, the TXing * thread should check the queue's status one more time before leaving. * This check is done in order to not leave any TX hanging in the queue * until the next TX invocation (which may not even happen). * * If 2, another thread is currently TXing, and it will already double * check the queue, so do nothing. */ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) return; rcu_read_lock(); do { - while (likely(!mvmtxq->stopped && + while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL, + &mvmtxq->state) && + !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, + &mvmtxq->state) && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); if (!skb) { if (txq->sta) IWL_DEBUG_TX(mvm, "TXQ of sta %pM tid %d is now empty\n", txq->sta->addr, txq->tid); break; } iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); } -static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, - struct ieee80211_txq *txq) +void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* - * Please note that racing is handled very carefully here: - * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is - * deleted afterwards. - * This means that if: - * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): - * queue is allocated and we can TX. - * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): - * a race, should defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): - * need to allocate the queue and defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): - * queue is already scheduled for allocation, no need to allocate, - * should defer the frame. - */ - - /* If the queue is allocated TX and return. */ - if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { - /* - * Check that list is empty to avoid a race where txq_id is - * already updated, but the queue allocation work wasn't - * finished - */ - if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) - return; - + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || + !txq->sta) { iwl_mvm_mac_itxq_xmit(hw, txq); return; } - /* The list is being deleted only after the queue is fully allocated. */ - if (!list_empty(&mvmtxq->list)) - return; + /* iwl_mvm_mac_itxq_xmit() will later be called by the worker + * to handle any packets we leave on the txq now + */ - list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); - schedule_work(&mvm->add_stream_wk); + spin_lock_bh(&mvm->add_stream_lock); + /* The list is being deleted only after the queue is fully allocated. 
*/ + if (list_empty(&mvmtxq->list) && + /* recheck under lock */ + !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) { + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); + } + spin_unlock_bh(&mvm->add_stream_lock); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ do { \ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ break; \ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, enum ieee80211_ampdu_mlme_action action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, "TX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, tid_data->ssn); break; } case IEEE80211_AMPDU_TX_STOP_CONT: CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, "TX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; case IEEE80211_AMPDU_RX_START: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, "RX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, rx_ba_ssn); break; case IEEE80211_AMPDU_RX_STOP: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, "RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; default: break; } } -static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) +int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; mutex_lock(&mvm->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == - iwl_mvm_sta_from_mac80211(sta)->sta_id) { + if (iwl_mvm_vif_from_mac80211(vif)->deflink.ap_sta_id == + iwl_mvm_sta_from_mac80211(sta)->deflink.sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; mdata->opened_rx_ba_sessions = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, timeout); break; case IEEE80211_AMPDU_RX_STOP: ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = 
iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } if (!ret) { u16 rx_ba_ssn = 0; if (action == IEEE80211_AMPDU_RX_START) rx_ba_ssn = *ssn; iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, rx_ba_ssn, action); } mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; + unsigned int link_id; mvmvif->uploaded = false; - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); - memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + mvmvif->link[link_id]->phy_ctxt = NULL; + mvmvif->link[link_id]->active = 0; + mvmvif->link[link_id]->igtk = NULL; + } + + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvm->mutex)); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); mvm->cur_aid = 0; mvm->scan_status = 0; mvm->ps_disabled = false; mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_ftm_restart(mvm); /* * cleanup all interfaces, even inactive ones, as some might have * gone down during the HW restart */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ieee80211_wake_queues(mvm->hw); - mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->beacon_inject_active = false; +#endif /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); } int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_mei_get_ownership(mvm); if (ret) return ret; if (mvm->mei_nvm_data) { /* We got the NIC, we can now free the MEI NVM data */ kfree(mvm->mei_nvm_data); mvm->mei_nvm_data = NULL; /* * We can't free the nvm_data we allocated based on the SAP * data because we registered to cfg80211 with the channels * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data * just in order to be able free it later. * NULLify nvm_data so that we will read the NVM from the * firmware this time. */ mvm->temp_nvm_data = mvm->nvm_data; mvm->nvm_data = NULL; } if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART * so later code will - from now on - see that we're doing it. 
*/ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } ret = iwl_mvm_up(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, NULL); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, NULL); mvm->last_reset_or_resume_time_jiffies = jiffies; if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below * would do. */ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); } return ret; } -static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; int retry, max_retry = 0; mutex_lock(&mvm->mutex); /* we are starting the mac not in error flow, and restart is enabled */ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && iwlwifi_mod_params.fw_restart) { max_retry = IWL_MAX_INIT_RETRY; /* * This will prevent mac80211 recovery flows to trigger during * init failures */ set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); } for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); if (!ret) break; + /* + * In PLDR sync PCI re-enumeration is needed. no point to retry + * mac start before that. + */ + if (mvm->pldr_sync) { + iwl_mei_alive_notif(false); + iwl_trans_pcie_remove(mvm->trans, true); + break; + } + IWL_ERR(mvm, "mac start retry %d\n", retry); } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); mutex_unlock(&mvm->mutex); iwl_mvm_mei_set_sw_rfkill_state(mvm); return ret; } static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* * If we have TDLS peers, remove them. We don't know the last seqno/PN * of packets the FW sent out, so we must reconnect. */ iwl_mvm_teardown_tdls_peers(mvm); mutex_unlock(&mvm->mutex); } -static void -iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, - enum ieee80211_reconfig_type reconfig_type) +void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (reconfig_type) { case IEEE80211_RECONFIG_TYPE_RESTART: iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: break; } } void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_mvm_ftm_initiator_smooth_stop(mvm); /* firmware counters are obviously reset now, but we shouldn't * partially track so also clear the fw_reset_accu counters. */ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); /* async_handlers_wk is now blocked */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) + if (!iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ /* * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 * won't execute the restart. 
* But make sure to cleanup interfaces that have gone down before/during * HW restart was requested. */ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } } -static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); /* * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. This is important here because one of those * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); /* * The work item could be running or queued if the * ROC time event stops just as we get here. */ flush_work(&mvm->roc_done_wk); iwl_mvm_mei_set_sw_rfkill_state(mvm); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); /* * The worker might have been waiting for the mutex, let it run and * discover that its list is now empty. 
*/ cancel_work_sync(&mvm->async_handlers_wk); } -static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) +struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) { u16 i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < NUM_PHY_CTX; i++) if (!mvm->phy_ctxts[i].ref) return &mvm->phy_ctxts[i]; IWL_ERR(mvm, "No available PHY context\n"); return NULL; } -static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - s16 tx_power) +int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power) { u32 cmd_id = REDUCE_TX_POWER_CMD; int len; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); if (cmd_ver == 7) len = sizeof(cmd.v7); else if (cmd_ver == 6) len = sizeof(cmd.v6); else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v4); else len = sizeof(cmd.v3); /* all structs have the same common part, add it */ len += sizeof(cmd.common); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } -static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); + mvmvif->deflink.ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; goto out_unlock; } iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (mvm->mld_api_is_used) + iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + else + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) goto out_unlock; iwl_mvm_stop_session_protection(mvm, vif); } } mvmvif->ps_disabled = false; ret = iwl_mvm_power_update_ps(mvm); out_unlock: if (mvmvif->csa_failed) ret = -EIO; mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), }; /* * In the new flow since FW is in charge of the timing, * if driver has canceled the channel switch he will receive the * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); mutex_lock(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, 
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_remove_csa_period(mvm, vif); else WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); iwl_mvm_post_channel_switch(hw, vif); } -static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) +void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) { struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); /* Trigger disconnect (should clear the CSA state) */ ieee80211_chswitch_done(vif, false); } +static u8 +iwl_mvm_chandef_get_primary_80(struct cfg80211_chan_def *chandef) +{ + int data_start; + int control_start; + int bw; + + if (chandef->width == NL80211_CHAN_WIDTH_320) + bw = 320; + else if (chandef->width == NL80211_CHAN_WIDTH_160) + bw = 160; + else + return 0; + + /* data is bw wide so the start is half the width */ + data_start = chandef->center_freq1 - bw / 2; + /* control is 20Mhz width */ + control_start = chandef->chan->center_freq - 10; + + return (control_start - data_start) / 80; +} + +static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); + if (ret) { + IWL_ERR(mvm, "Failed to allocate bcast sta\n"); + return ret; + } + + /* Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.mcast_sta, 0, + vif->type, + IWL_STA_MULTICAST); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + mutex_lock(&mvm->mutex); + mvmvif->mvm = mvm; - RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + + /* the first link always points to the default one */ + mvmvif->link[0] = &mvmvif->deflink; /* * Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. */ - mutex_lock(&mvm->mutex); - /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) - goto out_unlock; + goto out; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); - /* Counting number of interfaces is needed for legacy PM */ - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count++; + /* Currently not much to do for NAN */ + if (vif->type == NL80211_IFTYPE_NAN) { + ret = 0; + goto out; + } /* * The AP binding flow can be done only after the beacon * template is configured (which happens only in the mac80211 * start_ap() flow), and adding the broadcast station can happen * only after the binding. * In addition, since modifying the MAC before adding a bcast * station is not allowed by the FW, delay the adding of MAC context to * the point where we can also add the bcast station. 
* In short: there's not much we can do at this point, other than * allocating resources :) */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - ret = iwl_mvm_alloc_bcast_sta(mvm, vif); - if (ret) { - IWL_ERR(mvm, "Failed to allocate bcast sta\n"); - goto out_release; - } - - /* - * Only queue for this station is the mcast queue, - * which shouldn't be in TFD mask anyway - */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type, - IWL_STA_MULTICAST); - if (ret) - goto out_release; - iwl_mvm_vif_dbgfs_register(mvm, vif); - goto out_unlock; + ret = 0; + goto out; } mvmvif->features |= hw->netdev_features; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) - goto out_release; + goto out_unlock; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out_remove_mac; /* beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) goto out_remove_mac; if (!mvm->bf_allowed_vif && vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; } /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->phy_ctxt) { + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) { ret = -ENOSPC; goto out_free_bf; } - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_unref_phy; ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped */ mvm->p2p_device_vif = vif; } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, iwl_mvm_channel_switch_disconnect_wk); - if (vif->type == NL80211_IFTYPE_MONITOR) + if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; + mvm->monitor_p80 = + iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); + } iwl_mvm_vif_dbgfs_register(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && !mvm->csme_vif && mvm->mei_registered) { iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); mvm->csme_vif = vif; } +out: + if (!ret && (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) + ret = iwl_mvm_alloc_bcast_mcast_sta(mvm, vif); + goto out_unlock; out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); - out_release: - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * 
Flush the ROC worker which will flush the OFFCHANNEL queue. * We assume here that all the packets sent to the OFFCHANNEL * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); } } -static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +/* This function is doing the common part of removing the interface for + * both - MLD and non-MLD modes. Returns true if removing the interface + * is done + */ +static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); if (!(vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); mutex_lock(&mvm->mutex); if (vif == mvm->csme_vif) { iwl_mei_set_netdev(NULL); mvm->csme_vif = NULL; } - probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, lockdep_is_held(&mvm->mutex)); - RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); if (probe_data) kfree_rcu(probe_data, rcu_head); if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); iwl_mvm_vif_dbgfs_clean(mvm, vif); /* * For AP/GO interface, the tear down of the resources allocated to the * interface is be handled as part of the stop_ap flow. */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { #ifdef CONFIG_NL80211_TESTMODE if (vif == mvm->noa_vif) { mvm->noa_vif = NULL; mvm->noa_duration = 0; } #endif - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); - iwl_mvm_dealloc_bcast_sta(mvm, vif); - goto out_release; + return true; } + iwl_mvm_power_update_mac(mvm); + return false; +} + +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (iwl_mvm_mac_remove_interface_common(hw, vif)) + goto out; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - mvmvif->phy_ctxt = NULL; + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; } - if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; - - iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; -out_release: - mutex_unlock(&mvm->mutex); -} +out: + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta); + iwl_mvm_dealloc_bcast_sta(mvm, vif); + } -static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - return 0; + mutex_unlock(&mvm->mutex); } struct iwl_mvm_mc_iter_data { struct iwl_mvm *mvm; int port_id; }; static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd 
= mvm->mcast_filter_cmd; struct iwl_host_cmd hcmd = { .id = MCAST_FILTER_CMD, .flags = CMD_ASYNC, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret, len; /* if we don't have free ports, mcast frames will be dropped */ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) return; if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) + !vif->cfg.assoc) return; cmd->port_id = data->port_id++; memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); hcmd.len[0] = len; hcmd.data[0] = cmd; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); } static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) { struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); /* * Send a (synchronous) ech command so that we wait for the * multiple asynchronous MCAST_FILTER_CMD commands sent by * the interface iterator. Otherwise, we might get here over * and over again (by userspace just sending a lot of these) * and the CPU can send them faster than the firmware can * process them. * Note that the CPU is still faster - but with this we'll * actually send fewer commands overall because the CPU will * not schedule the work in mac80211 as frequently if it's * still running when rescheduled (possibly multiple times). */ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); if (ret) IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } -static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, - struct netdev_hw_addr_list *mc_list) +u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; int addr_count; bool pass_all; int len; addr_count = netdev_hw_addr_list_count(mc_list); pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || IWL_MVM_FW_MCAST_FILTER_PASS_ALL; if (pass_all) addr_count = 0; len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); if (!cmd) return 0; if (pass_all) { cmd->pass_all = 1; #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } netdev_hw_addr_list_for_each(addr, mc_list) { #if defined(__linux__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", cmd->count, addr->addr); #elif defined(__FreeBSD__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n", cmd->count, addr->addr, ":"); #endif memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], addr->addr, ETH_ALEN); cmd->count++; } #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } -static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) +void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); #if defined(__linux__) struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; #elif defined(__FreeBSD__) struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast; #endif mutex_lock(&mvm->mutex); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = cmd; if (!cmd) 
goto out; if (changed_flags & FIF_ALLMULTI) cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); if (cmd->pass_all) cmd->count = 0; iwl_mvm_recalc_multicast(mvm); out: mutex_unlock(&mvm->mutex); *total_flags = 0; } static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int filter_flags, unsigned int changed_flags) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* We support only filter for probe requests */ if (!(changed_flags & FIF_PROBE_REQ)) return; /* Supported only for p2p client interfaces */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc || !vif->p2p) return; mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); } -static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mu_group_mgmt_cmd cmd = {}; memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, WLAN_MEMBERSHIP_LEN); memcpy(cmd.user_position, vif->bss_conf.mu_group.position, WLAN_USER_POSITION_LEN); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, UPDATE_MU_GROUPS_CMD), 0, sizeof(cmd), &cmd); } static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { - if (vif->mu_mimo_owner) { + if (vif->bss_conf.mu_mimo_owner) { struct iwl_mu_group_mgmt_notif *notif = _data; /* * MU-MIMO Group Id action frame is little endian. We treat * the data received from firmware as if it came from the * action frame, so no conversion is needed. */ - ieee80211_update_mu_groups(vif, + ieee80211_update_mu_groups(vif, 0, (u8 *)&notif->membership_status, (u8 *)&notif->user_position); } } void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mu_mimo_iface_iterator, notif); } static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) { u8 byte_num = ppe_pos_bit / 8; u8 bit_num = ppe_pos_bit % 8; u8 residue_bits; u8 res; if (bit_num <= 5) return (ppe[byte_num] >> bit_num) & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); /* * If bit_num > 5, we have to combine bits with next byte. * Calculate how many bits we need to take from current byte (called * here "residue_bits"), and add them to bits from next byte.
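 * For example (an illustrative case only, assuming the usual 3-bit
 * PPET field, i.e. IEEE80211_PPE_THRES_INFO_PPET_SIZE == 3): with
 * ppe_pos_bit == 14 we get byte_num = 1 and bit_num = 6, so
 * residue_bits = 2; the two low bits of the result come from
 * ppe[1] >> 6 and the remaining high bit comes from ppe[2] & 0x1,
 * shifted left by residue_bits.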
*/ residue_bits = 8 - bit_num; res = (ppe[byte_num + 1] & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << residue_bits; res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); return res; } static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, - u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) + u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit, + bool inheritance) { int i; /* * FW currently supports only nss == MAX_HE_SUPP_NSS * * If nss > MAX: we can ignore values we don't support * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { - IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, - MAX_HE_SUPP_NSS); + IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } for (i = 0; i < nss; i++) { u8 ru_index_tmp = ru_index_bitmap << 1; u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { ru_index_tmp >>= 1; - if (!(ru_index_tmp & 1)) - continue; + /* + * According to the 11be spec, if for a specific BW the PPE Thresholds + * isn't present - it should inherit the thresholds from the last + * BW for which we had PPE Thresholds. In 11ax though, we don't have + * this inheritance - continue in this case + */ + if (!(ru_index_tmp & 1)) { + if (inheritance) + goto set_thresholds; + else + continue; + } high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; +set_thresholds: pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } } static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_he_pkt_ext_v2 *pkt_ext) + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext, + bool inheritance) { - u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; + u8 nss = (link_sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &link_sta->he_cap.ppe_thres[0]; u8 ru_index_bitmap = u8_get_bits(*ppe, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); /* Starting after PPE header */ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; - iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit, + inheritance); } -static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, - u8 nominal_padding, - u32 *flags) +static int +iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) { int low_th = -1; int high_th = -1; int i; + /* all the macros are the same for EHT and HE */ switch (nominal_padding) { - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } + if (low_th < 0 || 
high_th < 0) + return -EINVAL; + /* Set the PPE thresholds accordingly */ - if (low_th >= 0 && high_th >= 0) { - for (i = 0; i < MAX_HE_SUPP_NSS; i++) { - u8 bw; + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; - for (bw = 0; - bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); - bw++) { - pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; - pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; - } + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } + + return 0; +} + +static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) +{ + int i; + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; + + for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + if (nominal_padding > + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && + qam_th[1] == IWL_HE_PKT_EXT_NONE) + qam_th[1] = IWL_HE_PKT_EXT_4096QAM; + else if (nominal_padding == + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && + qam_th[0] == IWL_HE_PKT_EXT_NONE && + qam_th[1] == IWL_HE_PKT_EXT_NONE) + qam_th[0] = IWL_HE_PKT_EXT_4096QAM; + } + } +} + +/* Set the pkt_ext field according to PPE Thresholds element */ +int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm, + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext) +{ + u8 nominal_padding; + int i, ret = 0; + + if (WARN_ON(!link_sta)) + return -EINVAL; + + /* Initialize the PPE thresholds to "None" (7), as described in Table + * 9-262ac of 802.11ax/D3.0. + */ + memset(pkt_ext, IWL_HE_PKT_EXT_NONE, + sizeof(struct iwl_he_pkt_ext_v2)); + + if (link_sta->eht_cap.has_eht) { + nominal_padding = + u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); + + /* If PPE Thresholds exist, parse them into a FW-familiar + * format. + */ + if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] & + IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0]; + u8 ru_index_bitmap = + u16_get_bits(*ppe, + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + /* Starting after PPE header */ + u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; + + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, + ppe, ppe_pos_bit, true); + /* EHT PPE Thresholds don't exist - set the API according to + * HE PPE Thresholds + */ + } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + /* Even though HE Capabilities IE doesn't contain PPE + * Thresholds for BW 320 MHz, thresholds for this BW will + * be filled in with the same values as 160 MHz, due to + * the inheritance, as required. + */ + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext, + true); + + /* According to the requirements, for MCSs 12-13 the + * maximum value between HE PPE Threshold and Common + * Nominal Packet Padding needs to be taken + */ + iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding); + + /* if PPE Thresholds aren't present in either the EHT IE or the HE IE - + * take the Thresholds from the Common Nominal Packet Padding field + */ + } else { + ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext, + nominal_padding); + } + } else if (link_sta->he_cap.has_he) { + /* If PPE Thresholds exist, parse them into a FW-familiar format.
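+ * (As a rough sketch of that format, going by the IWL_DEBUG_HT dump
+ * further down: pkt_ext->pkt_ext_qam_th[nss][bw] holds a {PPET8, PPET16}
+ * pair per spatial stream and bandwidth, left at IWL_HE_PKT_EXT_NONE (7)
+ * wherever no threshold applies.)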
*/ + if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext, + false); + /* PPE Thresholds don't exist - set the API PPE values + * according to Common Nominal Packet Padding field. + */ + } else { + nominal_padding = + u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) + ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext, + nominal_padding); + } + } + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + int bw; + + for (bw = 0; + bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = + &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + IWL_DEBUG_HT(mvm, + "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n", + i, bw, qam_th[0], qam_th[1]); + } + } + return ret; +} + +/* + * This function sets the MU EDCA parameters and returns whether MU EDCA + * is enabled or not + */ +bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm, + const struct iwl_mvm_vif_link_info *link_info, + struct iwl_he_backoff_conf *trig_based_txf) +{ + int i; + /* Mark MU EDCA as enabled, unless none detected on some AC */ + bool mu_edca_enabled = true; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = + &link_info->queue_params[i].mu_edca_param_rec; + u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); + + if (!link_info->queue_params[i].mu_edca) { + mu_edca_enabled = false; + break; } - *flags |= STA_CTXT_HE_PACKET_EXT; + trig_based_txf[ac].cwmin = + cpu_to_le16(mu_edca->ecw_min_max & 0xf); + trig_based_txf[ac].cwmax = + cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); + trig_based_txf[ac].aifsn = + cpu_to_le16(mu_edca->aifsn & 0xf); + trig_based_txf[ac].mu_time = + cpu_to_le16(mu_edca->mu_edca_timer); + } + + return mu_edca_enabled; +} + +bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + const struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *own_he_cap = NULL; + + /* This capability is the same for all bands, + * so take it from one of them.
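+ * (The 2.4 GHz sband below is only that representative band; what is
+ * actually checked is our own HE MAC capability bit
+ * IEEE80211_HE_MAC_CAP2_ACK_EN for this interface type.)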
+ */ + sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; + own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); + + return (own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN)); +} + +__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta) +{ + u8 *mac_cap_info = + &link_sta->he_cap.he_cap_elem.mac_cap_info[0]; + __le32 htc_flags = 0; + + if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) + htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); + if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || + (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { + u8 link_adap = + ((mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + + (mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + + if (link_adap == 2) + htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); + else if (link_adap == 3) + htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } + if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); + if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); + if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); + + return htc_flags; } static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.he_bss_color.color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); int size; struct ieee80211_sta *sta; u32 flags; int i; - const struct ieee80211_sta_he_cap *own_he_cap = NULL; - struct ieee80211_chanctx_conf *chanctx_conf; - const struct ieee80211_supported_band *sband; void *cmd; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) ver = 1; switch (ver) { case 1: /* same layout as v2 except some data at the end */ cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v1); break; case 2: cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v2); break; case 3: cmd = &sta_ctxt_cmd; size = sizeof(struct iwl_he_sta_context_cmd_v3); break; default: IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); return; } rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - return; - } - - sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; - own_he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; } if (!sta->deflink.he_cap.has_he) { rcu_read_unlock(); return; } flags = 0; /* Block 26-tone RU OFDMA transmissions */ - if (mvmvif->he_ru_2mhz_block) + if (mvmvif->deflink.he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ - if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & - IEEE80211_HE_MAC_CAP0_HTC_HE) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); - if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & - IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || - 
(sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { - u8 link_adap = - ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + - (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & - IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + sta_ctxt_cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, &sta->deflink); - if (link_adap == 2) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); - else if (link_adap == 3) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); - } - if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); - if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_OMI_CONTROL) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); - if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); - - /* - * Initialize the PPE thresholds to "None" (7), as described in Table - * 9-262ac of 80211.ax/D3.0. - */ - memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, - sizeof(sta_ctxt_cmd.pkt_ext)); - - /* If PPE Thresholds exist, parse them into a FW-familiar format. */ - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { - iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, - &sta_ctxt_cmd.pkt_ext); + /* PPE Thresholds */ + if (!iwl_mvm_set_sta_pkt_ext(mvm, &sta->deflink, &sta_ctxt_cmd.pkt_ext)) flags |= STA_CTXT_HE_PACKET_EXT; - /* PPE Thresholds doesn't exist - set the API PPE values - * according to Common Nominal Packet Padding fiels. */ - } else { - u8 nominal_padding = - u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], - IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); - if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) - iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, - nominal_padding, - &flags); - } if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; rcu_read_unlock(); - /* Mark MU EDCA as enabled, unless none detected on some AC */ - flags |= STA_CTXT_HE_MU_EDCA_CW; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = - &mvmvif->queue_params[i].mu_edca_param_rec; - u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); - - if (!mvmvif->queue_params[i].mu_edca) { - flags &= ~STA_CTXT_HE_MU_EDCA_CW; - break; - } - - sta_ctxt_cmd.trig_based_txf[ac].cwmin = - cpu_to_le16(mu_edca->ecw_min_max & 0xf); - sta_ctxt_cmd.trig_based_txf[ac].cwmax = - cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); - sta_ctxt_cmd.trig_based_txf[ac].aifsn = - cpu_to_le16(mu_edca->aifsn); - sta_ctxt_cmd.trig_based_txf[ac].mu_time = - cpu_to_le16(mu_edca->mu_edca_timer); - } - + if (iwl_mvm_set_fw_mu_edca_params(mvm, &mvmvif->deflink, + &sta_ctxt_cmd.trig_based_txf[0])) + flags |= STA_CTXT_HE_MU_EDCA_CW; if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; sta_ctxt_cmd.rand_alloc_ecwmin = vif->bss_conf.uora_ocw_range & 0x7; sta_ctxt_cmd.rand_alloc_ecwmax = (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } - if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_ACK_EN)) + if (!iwl_mvm_is_nic_ack_enabled(mvm, vif)) flags |= 
STA_CTXT_HE_NIC_NOT_ACK_ENABLED; if (vif->bss_conf.nontransmitted) { flags |= STA_CTXT_HE_REF_BSSID_VALID; ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, vif->bss_conf.transmitter_bssid); sta_ctxt_cmd.max_bssid_indicator = vif->bss_conf.bssid_indicator; sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; sta_ctxt_cmd.profile_periodicity = vif->bss_conf.profile_periodicity; } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (ver < 3) { /* fields before pkt_ext */ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, offsetof(typeof(sta_ctxt_cmd), pkt_ext)); /* pkt_ext */ for (i = 0; i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); bw++) { BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); } } /* fields after pkt_ext */ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != sizeof(sta_ctxt_cmd_v2) - offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy((u8 *)&sta_ctxt_cmd_v2 + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), (u8 *)&sta_ctxt_cmd + offsetofend(typeof(sta_ctxt_cmd), pkt_ext), sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); sta_ctxt_cmd_v2.reserved3 = 0; } if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } -static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration_override) +void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 duration_override) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (duration_override > duration) duration = duration_override; /* Try really hard to protect the session and hear a beacon * The new session protection command allows us to protect the * session for a much longer time since the firmware will internally * create two events: a 300TU one with a very high priority that * won't be fragmented which should be enough for 99% of the cases, * and another one (which we configure here to be 900TU long) which * will have a slightly lower priority, but more importantly, can be * fragmented so that it'll allow other activities to run. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, min_duration, false); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); } +/* Handle association common part to MLD and non-MLD modes */ +void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + /* The firmware tracks the MU-MIMO group on its own. + * However, on HW restart we should restore this data. 
+ */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) { + ret = iwl_mvm_update_mu_groups(mvm, vif); + if (ret) + IWL_ERR(mvm, + "failed to update VHT MU_MIMO groups\n"); + } + + iwl_mvm_recalc_multicast(mvm); + + /* reset rssi values */ + mvmvif->bf_data.ave_beacon_signal = 0; + + iwl_mvm_bt_coex_vif_change(mvm); + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT, + IEEE80211_SMPS_AUTOMATIC); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); +} + +/* Execute the common part for MLD and non-MLD modes */ +void +iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (changes & BSS_CHANGED_BEACON_INFO) { + /* We received a beacon from the associated AP so + * remove the session protection. + */ + iwl_mvm_stop_session_protection(mvm, vif); + + iwl_mvm_sf_update(mvm, vif, false); + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + } + + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | + /* Send power command on every beacon change, + * because we may have not enabled beacon abort yet. + */ + BSS_CHANGED_BEACON_INFO)) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + + if (changes & BSS_CHANGED_CQM) { + IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); + /* reset cqm events tracking */ + mvmvif->bf_data.last_cqm_event = 0; + if (mvmvif->bf_data.bf_enabled) { + /* FIXME: need to update per link when FW API will + * support it + */ + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + IWL_ERR(mvm, + "failed to update CQM thresholds\n"); + } + } + + if (changes & BSS_CHANGED_BANDWIDTH) + iwl_mvm_update_link_smps(vif, link_conf); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; /* * Re-calculate the tsf id, as the leader-follower relations depend * on the beacon interval, which was not known when the station * interface was added. */ - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { - if (vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) { + if ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be)) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && - bss_conf->assoc && vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + vif->cfg.assoc && + ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be))) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC * branch for disassociation below. 
*/ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->deflink.bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); - mvmvif->associated = bss_conf->assoc; + memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN); + mvmvif->associated = vif->cfg.assoc; if (changes & BSS_CHANGED_ASSOC) { - if (bss_conf->assoc) { + if (vif->cfg.assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); - memset(&mvmvif->beacon_stats, 0, - sizeof(mvmvif->beacon_stats)); + memset(&mvmvif->deflink.beacon_stats, 0, + sizeof(mvmvif->deflink.beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) { IWL_ERR(mvm, "failed to update quotas\n"); return; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with * the AP. It will attempt to synchronise by * itself, but we can make it more reliable by * scheduling a session protection time event. * * The firmware needs to receive a beacon to * catch up with synchronisation, use 110% of * the beacon interval. * * Set a large maximum delay to allow for more * than a single interface. * * For new firmware versions, rely on the * firmware. This is relevant for DCM scenarios * only anyway. */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !vif->bss_conf.dtim_period) { /* * If we're not restarting and still haven't * heard a beacon (dtim period unknown) then * make sure we still have enough minimum time * remaining in the time event, since the auth * might actually have taken quite a while * (especially for SAE) and so the remaining * time could be small without us having heard * a beacon yet. */ iwl_mvm_protect_assoc(mvm, vif, 0); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC); + IEEE80211_SMPS_DYNAMIC, 0); } - } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { + } else if (mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ ret = iwl_mvm_sf_update(mvm, vif, false); WARN_ONCE(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); /* * If we get an assert during the connection (after the * station has been added, but before the vif is set * to associated), mac80211 will re-add the station and * then configure the vif. Since the vif is not * associated, we would remove the station here and * this would fail the recovery. 
*/ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* first remove remaining keys */ + iwl_mvm_sec_key_remove_ap(mvm, vif, + &mvmvif->deflink, 0); + /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, - mvmvif->ap_sta_id); + mvmvif->deflink.ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) IWL_ERR(mvm, "failed to update MAC %pM (clear after unassoc)\n", vif->addr); } - /* - * The firmware tracks the MU-MIMO group on its own. - * However, on HW restart we should restore this data. - */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { - ret = iwl_mvm_update_mu_groups(mvm, vif); - if (ret) - IWL_ERR(mvm, - "failed to update VHT MU_MIMO groups\n"); - } + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); + } - iwl_mvm_recalc_multicast(mvm); + iwl_mvm_bss_info_changed_station_common(mvm, vif, &vif->bss_conf, + changes); +} - /* reset rssi values */ - mvmvif->bf_data.ave_beacon_signal = 0; +bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int *ret) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i; - iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, - IEEE80211_SMPS_AUTOMATIC); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_config_scan(mvm); - } + lockdep_assert_held(&mvm->mutex); - if (changes & BSS_CHANGED_BEACON_INFO) { - /* - * We received a beacon from the associated AP so - * remove the session protection. - */ - iwl_mvm_stop_session_protection(mvm, vif); + mvmvif->ap_assoc_sta_count = 0; - iwl_mvm_sf_update(mvm, vif, false); - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - } + /* must be set before quota calculations */ + mvmvif->ap_ibss_active = true; - if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | - /* - * Send power command on every beacon change, - * because we may have not enabled beacon abort yet. 
- */ - BSS_CHANGED_BEACON_INFO)) { - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); + /* send all the early keys to the device now */ + for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { + struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; + + if (!key) + continue; + + mvmvif->ap_early_keys[i] = NULL; + + *ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); + if (*ret) + return true; } - if (changes & BSS_CHANGED_CQM) { - IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); - /* reset cqm events tracking */ - mvmvif->bf_data.last_cqm_event = 0; - if (mvmvif->bf_data.bf_enabled) { - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) - IWL_ERR(mvm, - "failed to update CQM thresholds\n"); - } + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, true, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } - if (changes & BSS_CHANGED_BANDWIDTH) - iwl_mvm_apply_fw_smps_request(vif); + /* power updated needs to be done before quotas */ + iwl_mvm_power_update_mac(mvm); + + return false; } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret, i; + int ret; mutex_lock(&mvm->mutex); - /* Send the beacon template */ - ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); - if (ret) - goto out_unlock; - /* * Re-calculate the tsf id, as the leader-follower relations depend on * the beacon interval, which was not known when the AP interface * was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - mvmvif->ap_assoc_sta_count = 0; + /* For older devices need to send beacon template before adding mac + * context. For the newer, the beacon is a resource that belongs to a + * MAC, so need to send beacon template after adding the mac. + */ + if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_22000) { + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; - /* Add the mac context */ - ret = iwl_mvm_mac_ctxt_add(mvm, vif); - if (ret) - goto out_unlock; + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + } else { + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + } /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_remove; /* * This is not very nice, but the simplest: * For older FWs adding the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW so make the order of removal depend on * the TLV */ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) { iwl_mvm_rm_mcast_sta(mvm, vif); goto out_unbind; } } else { /* * Send the bcast station. 
At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) { iwl_mvm_send_rm_bcast_sta(mvm, vif); goto out_unbind; } } - /* must be set before quota calculations */ - mvmvif->ap_ibss_active = true; - - /* send all the early keys to the device now */ - for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { - struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; - - if (!key) - continue; - - mvmvif->ap_early_keys[i] = NULL; - - ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); - if (ret) - goto out_quota_failed; - } - - if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { - iwl_mvm_vif_set_low_latency(mvmvif, true, - LOW_LATENCY_VIF_TYPE); - iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); - } - - /* power updated needs to be done before quotas */ - iwl_mvm_power_update_mac(mvm); + if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret)) + goto out_failed; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) - goto out_quota_failed; + goto out_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); - iwl_mvm_ftm_restart_responder(mvm, vif); + iwl_mvm_ftm_restart_responder(mvm, vif, &vif->bss_conf); goto out_unlock; -out_quota_failed: +out_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { return iwl_mvm_start_ap_ibss(hw, vif, link_conf); } static int iwl_mvm_start_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf); } -static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *link_conf) +/* Common part for MLD and non-MLD ops */ +void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_prepare_mac_removal(mvm, vif); + lockdep_assert_held(&mvm->mutex); - mutex_lock(&mvm->mutex); + iwl_mvm_prepare_mac_removal(mvm, vif); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); mvm->csa_tx_block_bcn_timeout = 0; } mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); } iwl_mvm_bt_coex_vif_change(mvm); +} + +static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + 
iwl_mvm_stop_ap_ibss_common(mvm, vif); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_ftm_responder_clear(mvm, vif); /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW (which will stop beaconing when removing * bcast station). * So make the order of removal depend on the TLV */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); mutex_unlock(&mvm->mutex); } static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf) { iwl_mvm_stop_ap_ibss(hw, vif, link_conf); } static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf); } static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Changes will be applied when the AP/IBSS is started */ if (!mvmvif->ap_ibss_active) return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && - iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) + iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, &vif->bss_conf)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_FTM_RESPONDER) { - int ret = iwl_mvm_ftm_start_responder(mvm, vif); + int ret = iwl_mvm_ftm_start_responder(mvm, vif, &vif->bss_conf); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", ret); } } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) +{ + static const struct iwl_mvm_bss_info_changed_ops callbacks = { + .bss_info_changed_sta = iwl_mvm_bss_info_changed_station, + .bss_info_changed_ap_ibss = iwl_mvm_bss_info_changed_ap_ibss, + }; + + iwl_mvm_bss_info_changed_common(hw, vif, bss_conf, &callbacks, + changes); +} + +void +iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + const struct iwl_mvm_bss_info_changed_ops *callbacks, + u64 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) + if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: - iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); + callbacks->bss_info_changed_sta(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); + callbacks->bss_info_changed_ap_ibss(mvm, vif, bss_conf, + changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) iwl_mvm_update_mu_groups(mvm, vif); 
break; default: /* shouldn't happen */ WARN_ON_ONCE(1); } if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } mutex_unlock(&mvm->mutex); } -static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) +int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can * happen, for instance, if we stopped the scan ourselves, * called ieee80211_scan_completed() and the userspace called * cancel scan scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. */ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } -static void +void iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from mac80211 */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, false); } -static void +void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); } static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); unsigned long txqs = 0, tids = 0; int tid; /* * If we have TVQM then we get too high queue numbers - luckily * we really shouldn't get here with that because such hardware * should have firmware supporting buffer station offload. */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(mvm, tid_data) == 0) continue; __set_bit(tid, &tids); } switch (cmd) { case STA_NOTIFY_SLEEP: for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx * queues to this station will not be transmitted. The fw will * send a Tx response with TX_STATUS_FAIL_DEST_PS. 
*/ break; case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); iwl_mvm_sta_modify_ps_wake(mvm, sta); break; default: break; } spin_unlock_bh(&mvmsta->lock); } -static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) +void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { __iwl_mvm_mac_sta_notify(hw, cmd, sta); } void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta->vif || mvmsta->vif->type != NL80211_IFTYPE_AP) { rcu_read_unlock(); return; } if (mvmsta->sleeping != sleeping) { mvmsta->sleeping = sleeping; __iwl_mvm_mac_sta_notify(mvm->hw, sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, sta); ieee80211_sta_ps_transition(sta, sleeping); } if (sleeping) { switch (notif->type) { case IWL_MVM_PM_EVENT_AWAKE: case IWL_MVM_PM_EVENT_ASLEEP: break; case IWL_MVM_PM_EVENT_UAPSD: ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); break; case IWL_MVM_PM_EVENT_PS_POLL: ieee80211_sta_pspoll(sta); break; default: break; } } rcu_read_unlock(); } -static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected * station pointer. The rest of the code will thus no longer * be able to find the station this way, and we don't rely * on further RCU synchronisation after the sta_state() * callback deleted the station. + * Since there's mvm->mutex here, no need to have RCU lock for + * mvm_sta->link access. 
*/ mutex_lock(&mvm->mutex); - if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], - ERR_PTR(-ENOENT)); + for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { + struct iwl_mvm_link_sta *link_sta; + u32 sta_id; + if (!mvm_sta->link[link_id]) + continue; + + link_sta = rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + sta_id = link_sta->sta_id; + if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) { + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], + ERR_PTR(-ENOENT)); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); + } + } mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { int i; if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_tcm_mac *mdata; mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); mdata->opened_rx_ba_sessions = false; } if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } if (!vif->p2p && (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } } vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } static void iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *peer_addr, enum nl80211_tdls_operation action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_TDLS); if (!trig) return; tdls_trig = (void *)trig->data; if (!(tdls_trig->action_bitmap & BIT(action))) return; if (tdls_trig->peer_mode && memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "TDLS event occurred, peer %pM, action %d", peer_addr, action); } struct iwl_mvm_he_obss_narrow_bw_ru_data { bool tolerated; }; static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, struct cfg80211_bss *bss, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; const struct cfg80211_bss_ies *ies; const struct element *elem; rcu_read_lock(); ies = rcu_dereference(bss->ies); elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } rcu_read_unlock(); } -static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void +iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { .tolerated = true, }; - if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { - mvmvif->he_ru_2mhz_block = false; + if (WARN_ON_ONCE(!link_conf->chandef.chan || + !mvmvif->link[link_id])) + return; + + if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) { + mvmvif->link[link_id]->he_ru_2mhz_block = false; return; } - 
cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, + cfg80211_bss_iter(hw->wiphy, &link_conf->chandef, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); /* * If there is at least one AP on radar channel that cannot * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. */ - mvmvif->he_ru_2mhz_block = !iter_data.tolerated; + mvmvif->link[link_id]->he_ru_2mhz_block = !iter_data.tolerated; } static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; if (vif->type != NL80211_IFTYPE_STATION) return; if (!mvm->cca_40mhz_workaround) return; /* decrement and check that we reached zero */ mvm->cca_40mhz_workaround--; if (mvm->cca_40mhz_workaround) return; sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; he->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } } static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) { #if IS_ENABLED(CONFIG_IWLMEI) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mei_conn_info conn_info = { - .ssid_len = vif->bss_conf.ssid_len, - .channel = vif->bss_conf.chandef.chan->hw_value, + .ssid_len = vif->cfg.ssid_len, }; if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return; if (!mvm->mei_registered) return; + /* FIXME: MEI needs to be updated for MLO */ + if (!vif->bss_conf.chandef.chan) + return; + + conn_info.channel = vif->bss_conf.chandef.chan->hw_value; + switch (mvm_sta->pairwise_cipher) { + case WLAN_CIPHER_SUITE_TKIP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_TKIP; + break; case WLAN_CIPHER_SUITE_CCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; break; case WLAN_CIPHER_SUITE_GCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; break; case WLAN_CIPHER_SUITE_GCMP_256: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; break; case 0: /* open profile */ break; default: /* cipher not supported, don't send anything to iwlmei */ return; } switch (mvmvif->rekey_data.akm) { case WLAN_AKM_SUITE_SAE & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; break; case WLAN_AKM_SUITE_PSK & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; break; case WLAN_AKM_SUITE_8021X & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; break; case 0: /* open profile */ conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; break; default: /* auth method / AKM not supported */ /* TODO: All the FT versions of these?
*/ return; } - memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); + memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len); memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); /* TODO: add support for collocated AP data */ iwl_mei_host_associated(&conn_info, NULL); #endif } +static int iwl_mvm_mac_ctxt_changed_wrapper(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool force_assoc_off) +{ + return iwl_mvm_mac_ctxt_changed(mvm, vif, force_assoc_off, NULL); +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) +{ + static const struct iwl_mvm_sta_state_ops callbacks = { + .add_sta = iwl_mvm_add_sta, + .update_sta = iwl_mvm_update_sta, + .rm_sta = iwl_mvm_rm_sta, + .mac_ctxt_changed = iwl_mvm_mac_ctxt_changed_wrapper, + }; + + return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state, + &callbacks); +} + +/* FIXME: temporary making two assumptions in all sta handling functions: + * (1) when setting sta state, the link exists and protected + * (2) if a link is valid in sta then it's valid in vif (can + * use same index in the link array) + */ +static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id; + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct ieee80211_bss_conf *conf = + link_conf_dereference_check(vif, link_id); + struct ieee80211_link_sta *link_sta = + link_sta_dereference_check(sta, link_id); + + if (!conf || !link_sta || !mvmvif->link[link_id]->phy_ctxt) + continue; + + iwl_mvm_rs_rate_init(mvm, vif, sta, conf, link_sta, + mvmvif->link[link_id]->phy_ctxt->channel->band); + } +} + +#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 + +static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + + /* Beacon interval check - firmware will crash if the beacon + * interval is less than 16. We can't avoid connecting at all, + * so refuse the station state change, this will cause mac80211 + * to abandon attempts to connect to this AP, and eventually + * wpa_s will blocklist the AP... 
+ */ + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (!link_conf) + continue; + + if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) { + IWL_ERR(mvm, + "Beacon interval %d for AP %pM is too small\n", + link_conf->beacon_int, link_sta->addr); + return false; + } + + link_conf->he_support = link_sta->he_cap.has_he; + } + + return true; +} + +static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool is_sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (!link_conf || !mvmvif->link[link_id]) + continue; + + link_conf->he_support = link_sta->he_cap.has_he; + + if (is_sta) { + mvmvif->link[link_id]->he_ru_2mhz_block = false; + if (link_sta->he_cap.has_he) + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, + link_id, + link_conf); + } + } +} + +static int +iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int i; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_STATION && + !iwl_mvm_vif_conf_from_sta(mvm, vif, sta)) + return -EINVAL; + + if (sta->tdls && + (vif->p2p || + iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || + iwl_mvm_phy_ctx_count(mvm) > 1)) { + IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); + return -EBUSY; + } + + ret = callbacks->add_sta(mvm, vif, sta); + if (sta->tdls && ret == 0) { + iwl_mvm_recalc_tdls_state(mvm, vif, true); + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_SETUP); + } + + for_each_sta_active_link(vif, sta, link_sta, i) + link_sta->agg.max_rc_amsdu_len = 1; + + ieee80211_sta_recalc_aggregates(sta); + + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mvmvif->ap_sta = sta; + + return 0; +} + +static int +iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, + struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_AP) { + iwl_mvm_vif_set_he_support(hw, vif, sta, false); + mvmvif->ap_assoc_sta_count++; + callbacks->mac_ctxt_changed(mvm, vif, false); + + /* since the below is not for MLD API, it's ok to use + * the default bss_conf + */ + if (!mvm->mld_api_is_used && + ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be))) + iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id); + } else if (vif->type == NL80211_IFTYPE_STATION) { + iwl_mvm_vif_set_he_support(hw, vif, sta, true); + + callbacks->mac_ctxt_changed(mvm, vif, false); + + if (!mvm->mld_api_is_used) + goto out; + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if 
(WARN_ON(!link_conf)) + return -EINVAL; + if (!mvmvif->link[link_id]) + continue; + + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ALL & + ~LINK_CONTEXT_MODIFY_ACTIVE, + true); + } + } + +out: + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + return callbacks->update_sta(mvm, vif, sta); +} + +static int +iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + lockdep_assert_held(&mvm->mutex); + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + if (sta->tdls) { + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_ENABLE_LINK); + } else { + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + + mvmvif->authorized = 1; + + callbacks->mac_ctxt_changed(mvm, vif, false); + iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + } + + mvm_sta->authorized = true; + + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + return 0; +} + +static int +iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + lockdep_assert_held(&mvm->mutex); + + mvmsta->authorized = false; + + /* once we move into assoc state, need to update rate scale to + * disable using wide bandwidth + */ + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + if (!sta->tdls) { + /* Set this but don't call iwl_mvm_mac_ctxt_changed() + * yet to avoid sending high prio again for a little + * time. + */ + mvmvif->authorized = 0; + + /* disable beacon filtering */ + iwl_mvm_disable_beacon_filter(mvm, vif, 0); + } + + return 0; +} + +/* Common part for MLD and non-MLD modes */ +int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state, + const struct iwl_mvm_sta_state_ops *callbacks) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); - /* this would be a mac80211 bug ... but don't crash */ - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; - /* * If we are in a STA removal flow and in DQA mode: * * This is after the sync_rcu part, so the queues have already been * flushed. No more TXs on their way in mac80211's path, and no more in * the queues. * Also, we won't be getting any new TX frames for this station. * What we might have are deferred TX frames that need to be taken care * of. * * Drop any still-queued deferred-frame before removing the STA, and * make sure the worker is no longer handling frames for this STA. 
*/ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { flush_work(&mvm->add_stream_wk); /* * No need to make sure deferred TX indication is off since the * worker will already remove it if it was on */ /* * Additionally, reset the 40 MHz capability if we disconnected * from the AP now. */ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); + + /* Also free dup data just in case any assertions below fail */ + kfree(mvm_sta->dup_data); } mutex_lock(&mvm->mutex); - /* track whether or not the station is associated */ - mvm_sta->sta_state = new_state; - if (old_state == IEEE80211_STA_NOTEXIST && - new_state == IEEE80211_STA_NONE) { - /* - * Firmware bug - it'll crash if the beacon interval is less - * than 16. We can't avoid connecting at all, so refuse the - * station state change, this will cause mac80211 to abandon - * attempts to connect to this AP, and eventually wpa_s will - * blocklist the AP... - */ - if (vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.beacon_int < 16) { - IWL_ERR(mvm, - "AP %pM beacon interval is %d, refusing due to firmware bug!\n", - sta->addr, vif->bss_conf.beacon_int); - ret = -EINVAL; - goto out_unlock; - } - - if (vif->type == NL80211_IFTYPE_STATION) - vif->bss_conf.he_support = sta->deflink.he_cap.has_he; - - if (sta->tdls && - (vif->p2p || - iwl_mvm_tdls_sta_count(mvm, NULL) == - IWL_MVM_TDLS_STA_COUNT || - iwl_mvm_phy_ctx_count(mvm) > 1)) { - IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); - ret = -EBUSY; - goto out_unlock; - } - - ret = iwl_mvm_add_sta(mvm, vif, sta); - if (sta->tdls && ret == 0) { - iwl_mvm_recalc_tdls_state(mvm, vif, true); - iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, - NL80211_TDLS_SETUP); + /* this would be a mac80211 bug ... but don't crash */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) { + mutex_unlock(&mvm->mutex); + return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status) ? 0 : -EINVAL; } + } - sta->max_rc_amsdu_len = 1; + /* track whether or not the station is associated */ + mvm_sta->sta_state = new_state; + + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) { + ret = iwl_mvm_sta_state_notexist_to_none(mvm, vif, sta, + callbacks); + if (ret < 0) + goto out_unlock; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* * EBS may be disabled due to previous failures reported by FW. * Reset EBS status here assuming environment has been changed. 
*/ mvm->last_ebs_successful = true; iwl_mvm_check_uapsd(mvm, vif, sta->addr); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { - if (vif->type == NL80211_IFTYPE_AP) { - vif->bss_conf.he_support = sta->deflink.he_cap.has_he; - mvmvif->ap_assoc_sta_count++; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - if (vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); - } else if (vif->type == NL80211_IFTYPE_STATION) { - vif->bss_conf.he_support = sta->deflink.he_cap.has_he; - - mvmvif->he_ru_2mhz_block = false; - if (sta->deflink.he_cap.has_he) - iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); - ret = iwl_mvm_update_sta(mvm, vif, sta); + ret = iwl_mvm_sta_state_auth_to_assoc(hw, mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { - ret = 0; - - /* we don't support TDLS during DCM */ - if (iwl_mvm_phy_ctx_count(mvm) > 1) - iwl_mvm_teardown_tdls_peers(mvm); - - if (sta->tdls) { - iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, - NL80211_TDLS_ENABLE_LINK); - } else { - /* enable beacon filtering */ - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - - mvmvif->authorized = 1; - - /* - * Now that the station is authorized, i.e., keys were already - * installed, need to indicate to the FW that - * multicast data frames can be forwarded to the driver - */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); - } - - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { - /* once we move into assoc state, need to update rate scale to - * disable using wide bandwidth - */ - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); - if (!sta->tdls) { - /* Multicast data frames are no longer allowed */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - - /* - * Set this after the above iwl_mvm_mac_ctxt_changed() - * to avoid sending high prio again for a little time. 
- */ - mvmvif->authorized = 0; - - /* disable beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - WARN_ON(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status)); - } - ret = 0; + ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + callbacks->mac_ctxt_changed(mvm, vif, false); } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { iwl_mvm_stop_session_protection(mvm, vif); - ret = iwl_mvm_rm_sta(mvm, vif, sta); + mvmvif->ap_sta = NULL; + } + ret = callbacks->rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) ret = 0; } else { ret = -EIO; } out_unlock: mutex_unlock(&mvm->mutex); if (sta->tdls && ret == 0) { if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); } return ret; } -static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mvm->rts_threshold = value; return 0; } -static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u32 changed) +void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED)) - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) iwl_mvm_sf_update(mvm, vif, false); } static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmvif->queue_params[ac] = *params; + mvmvif->deflink.queue_params[ac] = *params; /* * No need to update right away, we'll get BSS_CHANGED_QOS * The exception is P2P_DEVICE interface which needs immediate update. 
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } return 0; } -static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_prep_tx_info *info) +void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_protect_assoc(mvm, vif, info->duration); mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_prep_tx_info *info) +void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* for successful cases (auth/assoc), don't cancel session protection */ if (info->success) return; mutex_lock(&mvm->mutex); iwl_mvm_stop_session_protection(mvm, vif); mutex_unlock(&mvm->mutex); } -static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies) +int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); - if (!vif->bss_conf.idle) { + if (!vif->cfg.idle) { ret = -EBUSY; goto out; } ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); out: mutex_unlock(&mvm->mutex); return ret; } -static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a sched_scan when it's already stopped. This * can happen, for instance, if we stopped the scan ourselves, * called ieee80211_sched_scan_stopped() and the userspace called * stop sched scan scan before ieee80211_sched_scan_stopped_work() * could run. To handle this, simply return if the scan is * not running. 
*/ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; } static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = NULL; - struct iwl_mvm_key_pn *ptk_pn; + struct iwl_mvm_key_pn *ptk_pn = NULL; int keyidx = key->keyidx; + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); int ret, i; u8 key_offset; if (sta) mvmsta = iwl_mvm_sta_from_mac80211(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; } else { IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); return -EOPNOTSUPP; } break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!iwl_mvm_has_new_tx_api(mvm)) key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (vif->type == NL80211_IFTYPE_STATION) break; if (iwl_mvm_has_new_tx_api(mvm)) return -EOPNOTSUPP; /* support HW crypto on TX */ return 0; default: return -EOPNOTSUPP; } switch (cmd) { case SET_KEY: - if (keyidx == 6 || keyidx == 7) + if (vif->type == NL80211_IFTYPE_STATION && + (keyidx == 6 || keyidx == 7)) rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], key); if ((vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP) && !sta) { /* * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. * CMAC/GMAC in AP/IBSS modes must be done in software. + * + * Except, of course, beacon protection - it must be + * offloaded since we just set a beacon template. */ - if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { + if (keyidx < 6 && + (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) { ret = -EOPNOTSUPP; break; } if (key->cipher != WLAN_CIPHER_SUITE_GCMP && key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && !iwl_mvm_has_new_tx_api(mvm)) { key->hw_key_idx = STA_KEY_IDX_INVALID; ret = 0; break; } if (!mvmvif->ap_ibss_active) { for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (!mvmvif->ap_early_keys[i]) { mvmvif->ap_early_keys[i] = key; break; } } if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) ret = -ENOSPC; else ret = 0; break; } } /* During FW restart, in order to restore the state as it was, * don't try to reprogram keys we previously failed for. 
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && key->hw_key_idx == STA_KEY_IDX_INVALID) { IWL_DEBUG_MAC80211(mvm, "skip invalid idx key programming during restart\n"); ret = 0; break; } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { struct ieee80211_key_seq seq; int tid, q; WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), GFP_KERNEL); if (!ptk_pn) { ret = -ENOMEM; break; } for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { ieee80211_get_key_rx_seq(key, tid, &seq); for (q = 0; q < mvm->trans->num_rx_queues; q++) memcpy(ptk_pn->q[q].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); } rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); } /* in HW restart reuse the index, otherwise request a new one */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) key_offset = key->hw_key_idx; else key_offset = STA_KEY_IDX_INVALID; if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) mvmsta->pairwise_cipher = key->cipher; - IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key (sta:%pM, id:%d)\n", + sta ? sta->addr : NULL, key->keyidx); + + if (sec_key_ver) + ret = iwl_mvm_sec_key_add(mvm, vif, sta, key); + else + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); + if (ret) { IWL_WARN(mvm, "set key failed\n"); key->hw_key_idx = STA_KEY_IDX_INVALID; + if (ptk_pn) { + RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); + kfree(ptk_pn); + } /* * can't add key for RX, but we don't need it * in the device for TX so still return 0, * unless we have new TX API where we cannot * put key material into the TX_CMD */ if (iwl_mvm_has_new_tx_api(mvm)) ret = -EOPNOTSUPP; else ret = 0; } break; case DISABLE_KEY: - if (keyidx == 6 || keyidx == 7) + if (vif->type == NL80211_IFTYPE_STATION && + (keyidx == 6 || keyidx == 7)) RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], NULL); ret = -ENOENT; for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (mvmvif->ap_early_keys[i] == key) { mvmvif->ap_early_keys[i] = NULL; ret = 0; } } /* found in pending list - don't do anything else */ if (ret == 0) break; if (key->hw_key_idx == STA_KEY_IDX_INVALID) { ret = 0; break; } if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); if (ptk_pn) kfree_rcu(ptk_pn, rcu_head); } IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); - ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); + if (sec_key_ver) + ret = iwl_mvm_sec_key_del(mvm, vif, sta, key); + else + ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; default: ret = -EINVAL; } return ret; } -static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, - enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = 
IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) +void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) return; iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); } static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_hs20_roc_res *resp; int resp_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_time_event_data *te_data = data; if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); return true; } resp = (void *)pkt->data; IWL_DEBUG_TE(mvm, "Aux ROC: Received response from ucode: status=%d uid=%d\n", resp->status, resp->event_unique_id); te_data->uid = le32_to_cpu(resp->event_unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); spin_lock_bh(&mvm->time_event_lock); list_add_tail(&te_data->list, &mvm->aux_roc_te_list); spin_unlock_bh(&mvm->time_event_lock); return true; } #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration) { int res; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; u32 dtim_interval = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), }; struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &aux_roc_req.channel_info); u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, iwl_mvm_phy_band_from_nl80211(channel->band), - PHY_VHT_CHANNEL_MODE20, + IWL_PHY_CHANNEL_MODE20, 0); /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and * then start the time event, this will potentially allow us to * remain off-channel for the max duration. * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. 
*/ - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= req_dur) { req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; if (req_dur <= AUX_ROC_MIN_DURATION) req_dur = dtim_interval - AUX_ROC_MIN_SAFETY_BUFFER; } } tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums\n", channel->hw_value, req_dur); IWL_DEBUG_TE(mvm, "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", duration, delay, dtim_interval); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = duration; te_data->id = HOT_SPOT_CMD; spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. */ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(res); if (res) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return res; } +static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) +{ + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { + IWL_ERR(mvm, "hotspot not supported\n"); + return -EINVAL; + } + + if (iwl_mvm_has_new_station_api(mvm->fw)) { + ret = iwl_mvm_add_aux_sta(mvm, lmac_id); + WARN(ret, "Failed to allocate aux station"); + } + + return ret; +} + +static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + /* Unbind the P2P_DEVICE from the current PHY context, + * and if the PHY context is not used remove it. 
+ */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + return ret; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + /* Bind the P2P_DEVICE to the current PHY Context */ + mvmvif->deflink.phy_ctxt = new_phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + WARN(ret, "Failed binding P2P_DEVICE\n"); + return ret; +} + static int iwl_mvm_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) +{ + static const struct iwl_mvm_roc_ops ops = { + .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20, + .switch_phy_ctxt = iwl_mvm_roc_switch_binding, + }; + + return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); +} + +/* Execute the common part for MLD and non-MLD modes */ +int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type, + const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; bool band_change_removal; int ret, i; + u32 lmac_id; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); /* * Flush the done work, just in case it's still pending, so that * the work it does can complete and we can accept new frames. */ flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { - /* Use aux roc framework (HS20) */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { - u32 lmac_id; - - lmac_id = iwl_mvm_get_lmac_id(mvm->fw, - channel->band); - ret = iwl_mvm_add_aux_sta(mvm, lmac_id); - if (WARN(ret, - "Failed to allocate aux station")) - goto out_unlock; - } + lmac_id = iwl_mvm_get_lmac_id(mvm, channel->band); + + /* Use aux roc framework (HS20) */ + ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); + if (!ret) ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); - goto out_unlock; - } - IWL_ERR(mvm, "hotspot not supported\n"); - ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); ret = -EINVAL; goto out_unlock; } for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; - if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) + if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt) continue; if (phy_ctxt->ref && channel == phy_ctxt->channel) { - /* - * Unbind the P2P_DEVICE from the current PHY context, - * and if the PHY context is not used remove it. 
- */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the current PHY Context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) + ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); + if (ret) goto out_unlock; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); goto schedule_time_event; } } /* Need to update the PHY context only if the ROC channel changed */ - if (channel == mvmvif->phy_ctxt->channel) + if (channel == mvmvif->deflink.phy_ctxt->channel) goto schedule_time_event; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); /* * Check if the remain-on-channel is on a different band and that * requires context removal, see iwl_mvm_phy_ctxt_changed(). If * so, we'll need to release and then re-configure here, since we * must not remove a PHY context that's part of a binding. */ band_change_removal = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && - mvmvif->phy_ctxt->channel->band != chandef.chan->band; + mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band; - if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { + if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) { /* * Change the PHY context configuration as it is currently * referenced only by the P2P Device MAC (and we can modify it) */ - ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, + ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt, &chandef, 1, 1); if (ret) goto out_unlock; } else { /* * The PHY context is shared with other MACs (or we're trying to * switch bands), so remove the P2P Device from the binding, * allocate a new PHY context and create a new binding.
*/ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out_unlock; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 1, 1); if (ret) { IWL_ERR(mvm, "Failed to change PHY context\n"); goto out_unlock; } - /* Unbind the P2P_DEVICE from the current PHY context */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the new allocated PHY context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) + ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); + if (ret) goto out_unlock; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); } schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return ret; } -static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); IWL_DEBUG_MAC80211(mvm, "enter\n"); mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm, vif); mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; } struct iwl_mvm_ftm_responder_iter_data { bool responder; struct ieee80211_chanctx_conf *ctx; }; static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_ftm_responder_iter_data *data = _data; - if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && + if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx && vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) data->responder = true; } static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm_ftm_responder_iter_data data = { .responder = false, .ctx = ctx, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ftm_responder_chanctx_iter, &data); return data.responder; } static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; } -static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) +int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_add_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; lockdep_assert_held(&mvm->mutex); iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); } -static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) +void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_remove_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); } -static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - u32 changed) +void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | IEEE80211_CHANCTX_CHANGE_RX_CHAINS | IEEE80211_CHANCTX_CHANGE_RADAR | IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), "Cannot change PHY. Ref=%d, changed=0x%X\n", phy_ctxt->ref, changed)) return; mutex_lock(&mvm->mutex); /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { if (phy_ctxt->width == def->width) goto out_unlock; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) goto out_unlock; } iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); out_unlock: mutex_unlock(&mvm->mutex); } -static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) +/* + * This function executes the common part for MLD and non-MLD modes. 
+ * + * Returns true if we're done assigning the chanctx + * (either on failure or success) + */ +static bool +__iwl_mvm_assign_vif_chanctx_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx, int *ret) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; lockdep_assert_held(&mvm->mutex); - mvmvif->phy_ctxt = phy_ctxt; + mvmvif->deflink.phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: /* only needed if we're switching chanctx (i.e. during CSA) */ if (switching_chanctx) { mvmvif->ap_ibss_active = true; break; } fallthrough; case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow * (in bss_info_changed), similarly for IBSS. */ - ret = 0; - goto out; + *ret = 0; + return true; case NL80211_IFTYPE_STATION: - mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ mvmvif->ps_disabled = true; break; default: - ret = -EINVAL; - goto out; + *ret = -EINVAL; + return true; } + return false; +} + +static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON(!link_conf)) + return -EINVAL; + + if (__iwl_mvm_assign_vif_chanctx_common(mvm, vif, ctx, + switching_chanctx, &ret)) + goto out; ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out; /* * Power state must be updated before quotas, * otherwise fw will complain. */ iwl_mvm_power_update_mac(mvm); /* Setting the quota at this stage is only required for monitor * interfaces. For the other types, the bss_info changed flow * will handle quota settings. */ if (vif->type == NL80211_IFTYPE_MONITOR) { mvmvif->monitor_active = true; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_remove_binding; ret = iwl_mvm_add_snif_sta(mvm, vif); if (ret) goto out_remove_binding; } /* Handle binding during CSA */ if (vif->type == NL80211_IFTYPE_AP) { iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION) { + if (!switching_chanctx) { + mvmvif->csa_bcn_pending = false; + goto out; + } + mvmvif->csa_bcn_pending = true; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { u32 duration = 3 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. 
*/ iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); } iwl_mvm_update_quotas(mvm, false, NULL); } goto out; out_remove_binding: iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); out: if (ret) - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; return ret; } + static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); + ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); mutex_unlock(&mvm->mutex); return ret; } -static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) +/* + * This function executes the common part for MLD and non-MLD modes. + * + * Returns true if the chanctx unassign is done + * (either on failure or success) + */ +static bool __iwl_mvm_unassign_vif_chanctx_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool switching_chanctx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_vif *disabled_vif = NULL; lockdep_assert_held(&mvm->mutex); - iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); + iwl_mvm_remove_time_event(mvm, mvmvif, + &mvmvif->time_event_data); switch (vif->type) { case NL80211_IFTYPE_ADHOC: - goto out; + return true; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; - iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ if (!switching_chanctx || !mvmvif->ap_ibss_active) - goto out; + return true; mvmvif->csa_countdown = false; /* Set CS bit on all the stations */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); /* Save blocked iface, the timeout is set on the next beacon */ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); mvmvif->ap_ibss_active = false; break; - case NL80211_IFTYPE_STATION: - if (!switching_chanctx) - break; + default: + break; + } + return false; +} - disabled_vif = vif; +static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_vif *disabled_vif = NULL; + + if (__iwl_mvm_unassign_vif_chanctx_common(mvm, vif, switching_chanctx)) + goto out; + + if (vif->type == NL80211_IFTYPE_MONITOR) + iwl_mvm_rm_snif_sta(mvm, vif); + + if (vif->type == NL80211_IFTYPE_STATION && switching_chanctx) { + disabled_vif = vif; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); - break; - default: - break; } iwl_mvm_update_quotas(mvm, false, disabled_vif); iwl_mvm_binding_remove_vif(mvm, vif); out: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && switching_chanctx) return; - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
+ __iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) + struct ieee80211_vif_chanctx_switch *vifs, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { int ret; mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); if (ret) { IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); goto out_reassign; } - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); + ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_remove; } /* we don't support TDLS during DCM - can be caused by channel switch */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); goto out; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); out_reassign: if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); goto out_restart; } - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { + if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) + struct ieee80211_vif_chanctx_switch *vifs, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { int ret; mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); + ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_reassign; } goto out; out_reassign: - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { + if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } -static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif_chanctx_switch *vifs, - int n_vifs, - enum ieee80211_chanctx_switch_mode mode) +/* Execute the common part for both MLD and non-MLD modes */ +int +iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; /* we only support a single-vif right now */ if (n_vifs > 1) return -EOPNOTSUPP; switch (mode) { case CHANCTX_SWMODE_SWAP_CONTEXTS: - ret = iwl_mvm_switch_vif_chanctx_swap(mvm, 
vifs); + ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs, ops); break; case CHANCTX_SWMODE_REASSIGN_VIF: - ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); + ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs, ops); break; default: ret = -EOPNOTSUPP; break; } return ret; } -static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) +static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + static const struct iwl_mvm_switch_vif_chanctx_ops ops = { + .__assign_vif_chanctx = __iwl_mvm_assign_vif_chanctx, + .__unassign_vif_chanctx = __iwl_mvm_unassign_vif_chanctx, + }; + + return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops); +} + +int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return mvm->ibss_manager; } -static int iwl_mvm_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - bool set) +int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (!mvm_sta || !mvm_sta->vif) { IWL_ERR(mvm, "Station is not associated to a vif\n"); return -EINVAL; } - return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); + return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif, + &mvm_sta->vif->bss_conf); } #ifdef CONFIG_NL80211_TESTMODE static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, }; static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void *data, int len) { struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; int err; u32 noa_duration; err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, NULL); if (err) return err; if (!tb[IWL_MVM_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { case IWL_MVM_TM_CMD_SET_NOA: if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || !vif->bss_conf.enable_beacon || !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) return -EINVAL; noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); if (noa_duration >= vif->bss_conf.beacon_int) return -EINVAL; mvm->noa_duration = noa_duration; mvm->noa_vif = vif; return iwl_mvm_update_quotas(mvm, true, NULL); case IWL_MVM_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || + !vif->cfg.assoc || !vif->bss_conf.dtim_period || !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) return -EINVAL; if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) return iwl_mvm_enable_beacon_filter(mvm, vif, 0); return iwl_mvm_disable_beacon_filter(mvm, vif, 0); } return -EOPNOTSUPP; } -static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - void *data, int len) +int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int err; mutex_lock(&mvm->mutex); err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); mutex_unlock(&mvm->mutex); return err; } #endif -static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +void 
iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { /* By implementing this operation, we prevent mac80211 from * starting its own channel switch timer, so that we can call * ieee80211_chswitch_done() ourselves at the right time * (which is when the absence time event starts). */ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "dummy channel switch op\n"); } static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; lockdep_assert_held(&mvm->mutex); if (chsw->delay) cmd.cs_delayed_bcn_count = DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd); } static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 apply_time; /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. In case count <= 1 immediately schedule the * TE (this might result with some packet loss or connection * loss). */ if (chsw->count <= 1) apply_time = 0; else apply_time = chsw->device_timestamp + ((vif->bss_conf.beacon_int * (chsw->count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); if (mvmvif->bf_data.bf_enabled) { int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) return ret; } iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, apply_time); return 0; } #define IWL_MAX_CSA_BLOCK_TX 1500 -static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); mvmvif->csa_failed = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); - if (WARN_ONCE(csa_vif && csa_vif->csa_active, + if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, "Another CSA is already in progress")) { ret = -EBUSY; goto out_unlock; } /* we still didn't unblock tx. 
prevent new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex))) { ret = -EBUSY; goto out_unlock; } rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, "Previous CSA countdown didn't complete")) { ret = -EBUSY; goto out_unlock; } mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; break; case NL80211_IFTYPE_STATION: /* * In the new flow FW is in charge of timing the switch so there * is no need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) break; /* * We haven't configured the firmware to be associated yet since * we don't know the dtim period. In this case, the firmware can't * track the beacons. */ - if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { + if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { ret = -EBUSY; goto out_unlock; } if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, 0); if (chsw->block_tx) { /* * In case of undetermined / long time with immediate * quiet monitor status to gracefully disconnect */ if (!chsw->count || chsw->count * vif->bss_conf.beacon_int > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); } if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) goto out_unlock; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } mvmvif->csa_count = chsw->count; mvmvif->csa_misbehave = false; break; default: break; } mvmvif->ps_disabled = true; ret = iwl_mvm_power_update_ps(mvm); if (ret) goto out_unlock; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); out_unlock: mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; /* * In the new flow FW is in charge of timing the switch so there is no * need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { /* Second time, give up on this AP*/ iwl_mvm_abort_channel_switch(hw, vif); ieee80211_chswitch_done(vif, false); mvmvif->csa_misbehave = false; return; } mvmvif->csa_misbehave = true; } mvmvif->csa_count = chsw->count; mutex_lock(&mvm->mutex); if (mvmvif->csa_failed) goto out_unlock; WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); out_unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) { int i; if (!iwl_mvm_has_new_tx_api(mvm)) { if (drop) { mutex_lock(&mvm->mutex); 
iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues); mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { struct ieee80211_sta *sta; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; if (drop) iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); else iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u32 queues, bool drop) +void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; + bool ap_sta_done = false; int i; u32 msk = 0; if (!vif) { iwl_mvm_flush_no_vif(mvm, queues, drop); return; } if (vif->type != NL80211_IFTYPE_STATION) return; /* Make sure we're done with the deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; + if (sta == mvmvif->ap_sta) { + if (ap_sta_done) + continue; + ap_sta_done = true; + } + /* make sure only TDLS peers or the AP are flushed */ - WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); + WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls); if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false)) IWL_ERR(mvm, "flush request fail\n"); } else { - msk |= mvmsta->tfd_queue_msk; if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); + else /* only used for !iwl_mvm_has_new_tx_api() below */ + msk |= mvmsta->tfd_queue_msk; } } mutex_unlock(&mvm->mutex); /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ if (!drop && !iwl_mvm_has_new_tx_api(mvm)) iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } -static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, - struct survey_info *survey) +void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int i; + + mutex_lock(&mvm->mutex); + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { + struct iwl_mvm_sta *mvmsta; + struct ieee80211_sta *tmp; + + tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (tmp != sta) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + IWL_ERR(mvm, "flush request fail\n"); + } + mutex_unlock(&mvm->mutex); +} + +int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; memset(survey, 0, sizeof(*survey)); /* only support global statistics right now */ if (idx != 0) return -ENOENT; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) { ret = iwl_mvm_request_statistics(mvm, false); if (ret) goto out; } survey->filled = 
SURVEY_INFO_TIME | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_SCAN; survey->time = mvm->accu_radio_stats.on_time_rf + mvm->radio_stats.on_time_rf; do_div(survey->time, USEC_PER_MSEC); survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; do_div(survey->time_rx, USEC_PER_MSEC); survey->time_tx = mvm->accu_radio_stats.tx_time + mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); ret = 0; out: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + u32 gi_ltf; switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: rinfo->bw = RATE_INFO_BW_20; break; case RATE_MCS_CHAN_WIDTH_40: rinfo->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rinfo->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rinfo->bw = RATE_INFO_BW_160; break; + case RATE_MCS_CHAN_WIDTH_320: + rinfo->bw = RATE_INFO_BW_320; + break; } if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); /* add the offset needed to get to the legacy ofdm indices */ if (format == RATE_MCS_LEGACY_OFDM_MSK) rate += IWL_FIRST_OFDM_RATE; switch (rate) { case IWL_RATE_1M_INDEX: rinfo->legacy = 10; break; case IWL_RATE_2M_INDEX: rinfo->legacy = 20; break; case IWL_RATE_5M_INDEX: rinfo->legacy = 55; break; case IWL_RATE_11M_INDEX: rinfo->legacy = 110; break; case IWL_RATE_6M_INDEX: rinfo->legacy = 60; break; case IWL_RATE_9M_INDEX: rinfo->legacy = 90; break; case IWL_RATE_12M_INDEX: rinfo->legacy = 120; break; case IWL_RATE_18M_INDEX: rinfo->legacy = 180; break; case IWL_RATE_24M_INDEX: rinfo->legacy = 240; break; case IWL_RATE_36M_INDEX: rinfo->legacy = 360; break; case IWL_RATE_48M_INDEX: rinfo->legacy = 480; break; case IWL_RATE_54M_INDEX: rinfo->legacy = 540; } return; } rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; rinfo->mcs = format == RATE_MCS_HT_MSK ? RATE_HT_MCS_INDEX(rate_n_flags) : u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); - if (format == RATE_MCS_HE_MSK) { - u32 gi_ltf = u32_get_bits(rate_n_flags, - RATE_MCS_HE_GI_LTF_MSK); + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + + switch (format) { + case RATE_MCS_EHT_MSK: + /* TODO: GI/LTF/RU. How does the firmware encode them? 
*/ + rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; + break; + case RATE_MCS_HE_MSK: + gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; if (rate_n_flags & RATE_MCS_HE_106T_MSK) { rinfo->bw = RATE_INFO_BW_HE_RU; rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; } switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { case RATE_MCS_HE_TYPE_SU: case RATE_MCS_HE_TYPE_EXT_SU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else if (gi_ltf == 3) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; break; case RATE_MCS_HE_TYPE_MU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; case RATE_MCS_HE_TYPE_TRIG: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; } if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; - return; - } - - if (rate_n_flags & RATE_MCS_SGI_MSK) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; - - if (format == RATE_MCS_HT_MSK) { + break; + case RATE_MCS_HT_MSK: rinfo->flags |= RATE_INFO_FLAGS_MCS; - - } else if (format == RATE_MCS_VHT_MSK) { + break; + case RATE_MCS_VHT_MSK: rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + break; } - } -static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) +void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->avg_energy) { - sinfo->signal_avg = -(s8)mvmsta->avg_energy; + if (mvmsta->deflink.avg_energy) { + sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (iwl_mvm_has_tlc_offload(mvm)) { - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->deflink.lq_sta.rs_fw; iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; mutex_lock(&mvm->mutex); - if (mvmvif->ap_sta_id != mvmsta->sta_id) + if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id) goto unlock; if (iwl_mvm_request_statistics(mvm, false)) goto unlock; - sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + - mvmvif->beacon_stats.accu_num_beacons; + sinfo->rx_beacon = mvmvif->deflink.beacon_stats.num_beacons + + mvmvif->deflink.beacon_stats.accu_num_beacons; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); - if (mvmvif->beacon_stats.avg_signal) { + if (mvmvif->deflink.beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ - sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; + sinfo->rx_beacon_signal_avg = + mvmvif->deflink.beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm 
*mvm, struct ieee80211_vif *vif, const struct ieee80211_mlme_event *mlme) { if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); return; } if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_DEASSOC, NULL); return; } } static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ do { \ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ break; \ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; if (iwl_trans_dbg_ini_valid(mvm->trans)) { iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) return; trig_mlme = (void *)trig->data; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_assoc_denied, "DENIED ASSOC: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_assoc_timeout, "ASSOC TIMEOUT"); } else if (event->u.mlme.data == AUTH_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_auth_denied, "DENIED AUTH: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_auth_timeout, "AUTH TIMEOUT"); } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { CHECK_MLME_TRIGGER(stop_rx_deauth, "DEAUTH RX %d", event->u.mlme.reason); } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { CHECK_MLME_TRIGGER(stop_tx_deauth, "DEAUTH TX %d", event->u.mlme.reason); } #undef CHECK_MLME_TRIGGER } static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR received from %pM, tid %d, ssn %d", event->u.ba.sta->addr, event->u.ba.tid, event->u.ba.ssn); } -static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) +void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (event->type) { case MLME_EVENT: iwl_mvm_event_mlme_callback(mvm, vif, event); break; case BAR_RX_EVENT: iwl_mvm_event_bar_rx_callback(mvm, vif, event); break; case BA_FRAME_TIMEOUT: iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, event->u.ba.tid); break; default: break; } } void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size) { struct { struct iwl_rxq_sync_cmd cmd; struct iwl_mvm_internal_rxq_notif notif; } __packed cmd = { .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), .cmd.count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + size), .notif.type = type, .notif.sync = sync, }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DATA_PATH_GROUP, 
TRIGGER_RX_QUEUES_NOTIF_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, .flags = sync ? 0 : CMD_ASYNC, }; int ret; /* size must be a multiple of DWORD */ if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) return; if (!iwl_mvm_has_new_rx_api(mvm)) return; if (sync) { cmd.notif.cookie = mvm->queue_sync_cookie; mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, READ_ONCE(mvm->queue_sync_state) == 0 || iwl_mvm_is_radio_killed(mvm), HZ); WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), "queue sync: failed to sync, state is 0x%lx\n", mvm->queue_sync_state); } out: if (sync) { mvm->queue_sync_state = 0; mvm->queue_sync_cookie++; } } -static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) +void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); mutex_unlock(&mvm->mutex); } -static int +int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *stats) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) return -EINVAL; mutex_lock(&mvm->mutex); *stats = mvm->ftm_resp_stats; mutex_unlock(&mvm->mutex); stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | BIT(NL80211_FTM_STATS_PARTIAL_NUM) | BIT(NL80211_FTM_STATS_FAILED_NUM) | BIT(NL80211_FTM_STATS_ASAP_NUM) | BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); return 0; } -static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_pmsr_request *request) +int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_ftm_start(mvm, vif, request); mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_pmsr_request *request) +void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_ftm_abort(mvm, request); mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) { u8 protocol = ip_hdr(skb)->protocol; if (!IS_ENABLED(CONFIG_INET)) return false; return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; } static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, struct sk_buff *head, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - return iwl_mvm_tx_csum_bz(mvm, head, true) == - iwl_mvm_tx_csum_bz(mvm, skb, true); - /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; if (!iwl_mvm_is_csum_supported(mvm)) return true; return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); } 
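/*
 * Editorial aside: the hunks above convert several formerly-static mac80211
 * callbacks into shared "common" helpers that are parameterized by a small
 * ops table (iwl_mvm_roc_ops, iwl_mvm_switch_vif_chanctx_ops), so the MLD
 * and non-MLD paths can reuse the same locking and ordering logic.  What
 * follows is a minimal, self-contained sketch of that pattern only; the
 * types and function names below are invented for illustration and are not
 * part of the driver.
 */
#include <stdio.h>

struct fake_mvm { int phy_ctxt_refs; };
struct fake_vif { const char *name; };

/* per-mode hooks, mirroring the shape of iwl_mvm_switch_vif_chanctx_ops */
struct chanctx_ops {
	int  (*assign)(struct fake_mvm *mvm, struct fake_vif *vif);
	void (*unassign)(struct fake_mvm *mvm, struct fake_vif *vif);
};

/* shared logic, loosely analogous to iwl_mvm_switch_vif_chanctx_common() */
static int switch_chanctx_common(struct fake_mvm *mvm, struct fake_vif *vif,
				 const struct chanctx_ops *ops)
{
	ops->unassign(mvm, vif);	/* drop the old binding first */
	return ops->assign(mvm, vif);	/* then bind to the new context */
}

/* non-MLD ("legacy") callbacks; an MLD build would supply its own pair */
static int legacy_assign(struct fake_mvm *mvm, struct fake_vif *vif)
{
	mvm->phy_ctxt_refs++;
	printf("assign %s, refs=%d\n", vif->name, mvm->phy_ctxt_refs);
	return 0;
}

static void legacy_unassign(struct fake_mvm *mvm, struct fake_vif *vif)
{
	mvm->phy_ctxt_refs--;
	printf("unassign %s, refs=%d\n", vif->name, mvm->phy_ctxt_refs);
}

int main(void)
{
	static const struct chanctx_ops legacy_ops = {
		.assign = legacy_assign,
		.unassign = legacy_unassign,
	};
	struct fake_mvm mvm = { .phy_ctxt_refs = 1 };
	struct fake_vif vif = { .name = "wlan0" };

	/* keeping the wrappers thin is what lets the MLD code share this path */
	return switch_chanctx_common(&mvm, &vif, &legacy_ops);
}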
+int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_set_hw_timestamp *hwts) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 protocols = 0; + int ret; + + /* HW timestamping is only supported for a specific station */ + if (!hwts->macaddr) + return -EOPNOTSUPP; + + if (hwts->enable) + protocols = + IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); + mutex_unlock(&mvm->mutex); + + return ret; +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, .add_interface = iwl_mvm_mac_add_interface, .remove_interface = iwl_mvm_mac_remove_interface, .config = iwl_mvm_mac_config, .prepare_multicast = iwl_mvm_prepare_multicast, .configure_filter = iwl_mvm_configure_filter, .config_iface_filter = iwl_mvm_config_iface_filter, .bss_info_changed = iwl_mvm_bss_info_changed, .hw_scan = iwl_mvm_mac_hw_scan, .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, .sta_state = iwl_mvm_mac_sta_state, .sta_notify = iwl_mvm_mac_sta_notify, .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, .sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, + .flush_sta = iwl_mvm_mac_flush_sta, .sched_scan_start = iwl_mvm_mac_sched_scan_start, .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, .set_key = iwl_mvm_mac_set_key, .update_tkip_key = iwl_mvm_mac_update_tkip_key, .remain_on_channel = iwl_mvm_roc, .cancel_remain_on_channel = iwl_mvm_cancel_roc, .add_chanctx = iwl_mvm_add_chanctx, .remove_chanctx = iwl_mvm_remove_chanctx, .change_chanctx = iwl_mvm_change_chanctx, .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, .start_ap = iwl_mvm_start_ap, .stop_ap = iwl_mvm_stop_ap, .join_ibss = iwl_mvm_start_ibss, .leave_ibss = iwl_mvm_stop_ibss, .tx_last_beacon = iwl_mvm_tx_last_beacon, .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, .tdls_channel_switch = iwl_mvm_tdls_channel_switch, .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, .event_callback = iwl_mvm_mac_event_callback, .sync_rx_queues = iwl_mvm_sync_rx_queues, CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, .resume = iwl_mvm_resume, .set_wakeup = iwl_mvm_set_wakeup, .set_rekey_data = iwl_mvm_set_rekey_data, #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = iwl_mvm_ipv6_addr_change, #endif .set_default_unicast_key = iwl_mvm_set_default_unicast_key, #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = 
iwl_mvm_mac_sta_statistics, .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, .start_pmsr = iwl_mvm_start_pmsr, .abort_pmsr = iwl_mvm_abort_pmsr, .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS - .sta_add_debugfs = iwl_mvm_sta_add_debugfs, + .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif + .set_hw_timestamp = iwl_mvm_set_hw_timestamp, }; diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-key.c b/sys/contrib/dev/iwlwifi/mvm/mld-key.c new file mode 100644 index 000000000000..2c9f2f71b083 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/mld-key.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include +#include +#include "mvm.h" +#include "fw/api/context.h" +#include "fw/api/datapath.h" + +static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = &mvmvif->deflink; + + lockdep_assert_held(&mvm->mutex); + + if (keyconf->link_id >= 0) { + link_info = mvmvif->link[keyconf->link_id]; + if (!link_info) + return 0; + } + + /* AP group keys are per link and should be on the mcast STA */ + if (vif->type == NL80211_IFTYPE_AP && + !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return BIT(link_info->mcast_sta.sta_id); + + /* for client mode use the AP STA also for group keys */ + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mvmvif->ap_sta; + + /* During remove the STA was removed and the group keys come later + * (which sounds like a bad sequence, but remember that to mac80211 the + * group keys have no sta pointer), so we don't have a STA now. + * Since this happens for group keys only, just use the link_info as + * the group keys are per link; make sure that is the case by checking + * we do have a link_id or are not doing MLO. + * Of course the same can be done during add as well, but we must do + * it during remove, since we don't have the mvmvif->ap_sta pointer. 
+ */ + if (!sta && (keyconf->link_id >= 0 || !ieee80211_vif_is_mld(vif))) + return BIT(link_info->ap_sta_id); + + /* STA should be non-NULL now, but iwl_mvm_sta_fw_id_mask() checks */ + + /* pass link_id to filter by it if not -1 (GTK on client) */ + return iwl_mvm_sta_fw_id_mask(mvm, sta, keyconf->link_id); +} + +u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 flags = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + flags |= IWL_SEC_KEY_FLAG_MCAST_KEY; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP104: + flags |= IWL_SEC_KEY_FLAG_KEY_SIZE; + fallthrough; + case WLAN_CIPHER_SUITE_WEP40: + flags |= IWL_SEC_KEY_FLAG_CIPHER_WEP; + break; + case WLAN_CIPHER_SUITE_TKIP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_CCMP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + flags |= IWL_SEC_KEY_FLAG_KEY_SIZE; + fallthrough; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP; + break; + } + + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mvmvif->ap_sta; + + if (!IS_ERR_OR_NULL(sta) && sta->mfp) + flags |= IWL_SEC_KEY_FLAG_MFP; + + return flags; +} + +struct iwl_mvm_sta_key_update_data { + struct ieee80211_sta *sta; + u32 old_sta_mask; + u32 new_sta_mask; + int err; +}; + +static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + struct iwl_mvm_sta_key_update_data *data = _data; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask), + .u.modify.key_id = cpu_to_le32(key->keyidx), + .u.modify.key_flags = + cpu_to_le32(iwl_mvm_get_sec_flags(mvm, vif, sta, key)), + }; + int err; + + /* only need to do this for pairwise keys (link_id == -1) */ + if (sta != data->sta || key->link_id >= 0) + return; + + err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd); + + if (err) + data->err = err; +} + +int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_mvm_sta_key_update_data data = { + .sta = sta, + .old_sta_mask = old_sta_mask, + .new_sta_mask = new_sta_mask, + }; + + ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key, + &data); + return data.err; +} + +static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask, + u32 key_flags, u32 keyidx, u32 flags) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .u.remove.sta_mask = cpu_to_le32(sta_mask), + .u.remove.key_id = cpu_to_le32(keyidx), + .u.remove.key_flags = cpu_to_le32(key_flags), + }; + + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, flags, sizeof(cmd), &cmd); +} + +int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags, + struct ieee80211_key_conf *keyconf) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); 
+ struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .u.add.sta_mask = cpu_to_le32(sta_mask), + .u.add.key_id = cpu_to_le32(keyconf->keyidx), + .u.add.key_flags = cpu_to_le32(key_flags), + .u.add.tx_seq = cpu_to_le64(atomic64_read(&keyconf->tx_pn)), + }; + int max_key_len = sizeof(cmd.u.add.key); + int ret; + + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + max_key_len -= IWL_SEC_WEP_KEY_OFFSET; + + if (WARN_ON(keyconf->keylen > max_key_len)) + return -EINVAL; + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + memcpy(cmd.u.add.key + IWL_SEC_WEP_KEY_OFFSET, keyconf->key, + keyconf->keylen); + else + memcpy(cmd.u.add.key, keyconf->key, keyconf->keylen); + + if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { + memcpy(cmd.u.add.tkip_mic_rx_key, + keyconf->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + 8); + memcpy(cmd.u.add.tkip_mic_tx_key, + keyconf->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, + 8); + } + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + if (ret) + return ret; + + /* + * For WEP, the same key is used for multicast and unicast so need to + * upload it again. If this fails, remove the original as well. + */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + cmd.u.add.key_flags ^= cpu_to_le32(IWL_SEC_KEY_FLAG_MCAST_KEY); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + if (ret) + __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, + keyconf->keyidx, 0); + } + + return ret; +} + +int iwl_mvm_sec_key_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf); + u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = NULL; + int ret; + + if (keyconf->keyidx == 4 || keyconf->keyidx == 5) { + unsigned int link_id = 0; + + /* set to -1 for non-MLO right now */ + if (keyconf->link_id >= 0) + link_id = keyconf->link_id; + + mvm_link = mvmvif->link[link_id]; + if (WARN_ON(!mvm_link)) + return -EINVAL; + + if (mvm_link->igtk) { + IWL_DEBUG_MAC80211(mvm, "remove old IGTK %d\n", + mvm_link->igtk->keyidx); + ret = iwl_mvm_sec_key_del(mvm, vif, sta, + mvm_link->igtk); + if (ret) + IWL_ERR(mvm, + "failed to remove old IGTK (ret=%d)\n", + ret); + } + + WARN_ON(mvm_link->igtk); + } + + ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf); + if (ret) + return ret; + + if (mvm_link) + mvm_link->igtk = keyconf; + + /* We don't really need this, but need it to be not invalid, + * and if we switch links multiple times it might go to be + * invalid when removed. 
+ */ + keyconf->hw_key_idx = 0; + + return 0; +} + +static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + u32 flags) +{ + u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf); + u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + if (keyconf->keyidx == 4 || keyconf->keyidx == 5) { + struct iwl_mvm_vif_link_info *mvm_link; + unsigned int link_id = 0; + + /* set to -1 for non-MLO right now */ + if (keyconf->link_id >= 0) + link_id = keyconf->link_id; + + mvm_link = mvmvif->link[link_id]; + if (WARN_ON(!mvm_link)) + return -EINVAL; + + if (mvm_link->igtk == keyconf) { + /* no longer in HW - mark for later */ + mvm_link->igtk->hw_key_idx = STA_KEY_IDX_INVALID; + mvm_link->igtk = NULL; + } + } + + ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx, + flags); + if (ret) + return ret; + + /* For WEP, delete the key again as unicast */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + key_flags ^= IWL_SEC_KEY_FLAG_MCAST_KEY; + ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, + keyconf->keyidx, flags); + } + + return ret; +} + +int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + return _iwl_mvm_sec_key_del(mvm, vif, sta, keyconf, 0); +} + +static void iwl_mvm_sec_key_remove_ap_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + unsigned int link_id = (uintptr_t)data; + + if (key->hw_key_idx == STA_KEY_IDX_INVALID) + return; + + if (sta) + return; + + if (key->link_id >= 0 && key->link_id != link_id) + return; + + _iwl_mvm_sec_key_del(mvm, vif, NULL, key, CMD_ASYNC); + key->hw_key_idx = STA_KEY_IDX_INVALID; +} + +void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *link, + unsigned int link_id) +{ + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); + + if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION || + link->ap_sta_id == IWL_MVM_INVALID_STA)) + return; + + if (!sec_key_ver) + return; + + ieee80211_iter_keys_rcu(mvm->hw, vif, + iwl_mvm_sec_key_remove_ap_iter, + (void *)(uintptr_t)link_id); +} diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-mac.c b/sys/contrib/dev/iwlwifi/mvm/mld-mac.c new file mode 100644 index 000000000000..f313a8d771e4 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/mld-mac.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include "mvm.h" + +static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd) +{ + if (vif->type == NL80211_IFTYPE_AP) + cmd->he_ap_support = cpu_to_le16(1); + else + cmd->he_support = cpu_to_le16(1); +} + +static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + cmd->id_and_color = cpu_to_le32(mvmvif->id); + cmd->action = 
cpu_to_le32(action); + + cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif)); + + memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN); + + cmd->he_support = 0; + cmd->eht_support = 0; + + /* should be set by specific context type handler */ + cmd->filter_flags = 0; + + cmd->nic_not_ack_enabled = + cpu_to_le32(!iwl_mvm_is_nic_ack_enabled(mvm, vif)); + + if (iwlwifi_mod_params.disable_11ax) + return; + + /* If we have MLO enabled, then the firmware needs to enable + * address translation for the station(s) we add. That depends + * on having EHT enabled in firmware, which in turn depends on + * mac80211 in the code below. + * However, mac80211 doesn't enable HE/EHT until it has parsed + * the association response successfully, so just skip all that + * and enable both when we have MLO. + */ + if (ieee80211_vif_is_mld(vif)) { + iwl_mvm_mld_set_he_support(mvm, vif, cmd); + cmd->eht_support = cpu_to_le32(1); + return; + } + + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) { + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (!link_conf) + continue; + + if (link_conf->he_support) + iwl_mvm_mld_set_he_support(mvm, vif, cmd); + + /* it's not reasonable to have EHT without HE and FW API doesn't + * support it. Ignore EHT in this case. + */ + if (!link_conf->he_support && link_conf->eht_support) + continue; + + if (link_conf->eht_support) { + cmd->eht_support = cpu_to_le32(1); + break; + } + } + rcu_read_unlock(); +} + +static int iwl_mvm_mld_mac_ctxt_send_cmd(struct iwl_mvm *mvm, + struct iwl_mac_config_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), + 0, sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send MAC_CONFIG_CMD (action:%d): %d\n", + le32_to_cpu(cmd->action), ret); + return ret; +} + +static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action, bool force_assoc_off) +{ + struct iwl_mac_config_cmd cmd = {}; + u16 esr_transition_timeout; + + WARN_ON(vif->type != NL80211_IFTYPE_STATION); + + /* Fill the common data for all mac context types */ + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* + * We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. + */ + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP); + + if (vif->p2p) + cmd.client.ctwin = + iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif); + + if (vif->cfg.assoc && !force_assoc_off) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + cmd.client.is_assoc = 1; + + if (!mvmvif->authorized && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) + cmd.client.data_policy |= + cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE); + + } else { + cmd.client.is_assoc = 0; + + /* Allow beacons to pass through as long as we are not + * associated, or we do not have dtim period information. 
+ */ + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON); + } + + cmd.client.assoc_id = cpu_to_le16(vif->cfg.aid); + if (ieee80211_vif_is_mld(vif)) { + esr_transition_timeout = + u16_get_bits(vif->cfg.eml_cap, + IEEE80211_EML_CAP_TRANSITION_TIMEOUT); + + cmd.client.esr_transition_timeout = + min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU, + esr_transition_timeout); + cmd.client.medium_sync_delay = + cpu_to_le16(vif->cfg.eml_med_sync_delay); + } + + if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p) + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) + cmd.client.data_policy |= + cpu_to_le16(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif)); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC | + MAC_FILTER_IN_CONTROL_AND_MGMT | + MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.p2p_dev.is_disc_extended = + iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif); + + /* Override the filter flags to accept only probe requests */ + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_ap_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_AP); + + /* Fill the common data for all mac context types */ + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif, + &cmd.filter_flags, + MAC_CFG_FILTER_ACCEPT_PROBE_REQ, + MAC_CFG_FILTER_ACCEPT_BEACON); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctx_send(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action, bool force_assoc_off) +{ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + return iwl_mvm_mld_mac_ctxt_cmd_sta(mvm, vif, action, + force_assoc_off); + case NL80211_IFTYPE_AP: + return iwl_mvm_mld_mac_ctxt_cmd_ap_go(mvm, vif, action); + case NL80211_IFTYPE_MONITOR: + return iwl_mvm_mld_mac_ctxt_cmd_listener(mvm, vif, action); + case NL80211_IFTYPE_P2P_DEVICE: + return iwl_mvm_mld_mac_ctxt_cmd_p2p_device(mvm, vif, action); + case NL80211_IFTYPE_ADHOC: + return iwl_mvm_mld_mac_ctxt_cmd_ibss(mvm, vif, action); + default: + break; + } + + return 
-EOPNOTSUPP; +} + +int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, + true); + if (ret) + return ret; + + /* will only do anything at resume from D3 time */ + iwl_mvm_set_last_nonqos_seq(mvm, vif); + + mvmvif->uploaded = true; + return 0; +} + +int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool force_assoc_off) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + return iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, + force_assoc_off); +} + +int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_config_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .id_and_color = cpu_to_le32(mvmvif->id), + }; + int ret; + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); + if (ret) + return ret; + + mvmvif->uploaded = false; + + return 0; +} diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c new file mode 100644 index 000000000000..8b6c641772ee --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/mld-mac80211.c @@ -0,0 +1,1215 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022-2023 Intel Corporation + */ +#include "mvm.h" + +static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + + mvmvif->mvm = mvm; + + /* Not much to do here. The stack will not allow interface + * types or combinations that we didn't advertise, so we + * don't really have to check the types. 
+ */ + + /* make sure that beacon statistics don't go backwards with FW reset */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; + + /* Allocate resources for the MAC context, and add it to the fw */ + ret = iwl_mvm_mac_ctxt_init(mvm, vif); + if (ret) + goto out_unlock; + + rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); + + mvmvif->features |= hw->netdev_features; + + /* reset deflink MLO parameters */ + mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + mvmvif->deflink.active = 0; + /* the first link always points to the default one */ + mvmvif->link[0] = &mvmvif->deflink; + + ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + + /* beacon filtering */ + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_remove_mac; + + if (!mvm->bf_allowed_vif && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { + mvm->bf_allowed_vif = mvmvif; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + } + + /* + * P2P_DEVICE interface does not have a channel context assigned to it, + * so a dedicated PHY context is allocated to it and the corresponding + * MAC context is bound to it at this stage. + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) { + ret = -ENOSPC; + goto out_free_bf; + } + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_unref_phy; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); + if (ret) + goto out_remove_link; + + ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); + if (ret) + goto out_remove_link; + + /* Save a pointer to p2p device vif, so it can later be used to + * update the p2p device MAC when a GO is started/stopped + */ + mvm->p2p_device_vif = vif; + } else { + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; + } + + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + goto out_free_bf; + + iwl_mvm_tcm_add_vif(mvm, vif); + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mvm->monitor_on = true; + ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + } + + iwl_mvm_vif_dbgfs_register(mvm, vif); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !mvm->csme_vif && mvm->mei_registered) { + iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); + iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); + mvm->csme_vif = vif; + } + + goto out_unlock; + + out_remove_link: + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + out_unref_phy: + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + out_free_bf: + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + out_remove_mac: + mvmvif->deflink.phy_ctxt = NULL; + mvmvif->link[0] = NULL; + iwl_mvm_mld_mac_ctxt_remove(mvm, vif); + out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; + + iwl_mvm_prepare_mac_removal(mvm, vif); + + if (!(vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) + iwl_mvm_tcm_rm_vif(mvm, vif); + + mutex_lock(&mvm->mutex); + + if (vif == mvm->csme_vif) { + iwl_mei_set_netdev(NULL); + mvm->csme_vif = NULL; + } + + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + + if (vif->bss_conf.ftm_responder) + memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); + + iwl_mvm_vif_dbgfs_clean(mvm, vif); + + /* For AP/GO interface, the tear down of the resources allocated to the + * interface is be handled as part of the stop_ap flow. + */ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { +#ifdef CONFIG_NL80211_TESTMODE + if (vif == mvm->noa_vif) { + mvm->noa_vif = NULL; + mvm->noa_duration = 0; + } +#endif + } + + iwl_mvm_power_update_mac(mvm); + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvm->p2p_device_vif = NULL; + + /* P2P device uses only one link */ + iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf); + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } else { + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + } + + iwl_mvm_mld_mac_ctxt_remove(mvm, vif); + + RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); + + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvm->mutex)); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mvm->monitor_on = false; + __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); + } + + mutex_unlock(&mvm->mutex); +} + +static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif) +{ + unsigned int n_active = 0; + int i; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + struct ieee80211_bss_conf *link_conf; + + link_conf = link_conf_dereference_protected(vif, i); + if (link_conf && + rcu_access_pointer(link_conf->chanctx_conf)) + n_active++; + } + + return n_active; +} + +static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id, ret = 0; + + mvmvif->esr_active = true; + + /* Disable SMPS overrideing by user */ + vif->driver_flags |= IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, + IEEE80211_SMPS_OFF); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id]; + + if (!link->phy_ctxt) + continue; + + ret = iwl_mvm_phy_send_rlc(mvm, link->phy_ctxt, 2, 2); + if (ret) + break; + + link->phy_ctxt->rlc_disabled = true; + } + + return ret; +} + +static int +__iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + int ret; + + /* if the assigned one was not counted yet, count it now */ 
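(Editor's aside - illustrative only, not part of the patch.) The comment directly above introduces the step the following statements perform: count the links that already own a channel context, include the link being assigned if it was not counted yet, and refuse the assignment when the total exceeds what the device can drive (more than one active link only when eSR is supported). The standalone sketch below models just that bookkeeping; every name in it (sketch_vif, max_active_links, has_chanctx) is a made-up stand-in for the driver's iwl_mvm/mac80211 structures, not the real API.

/* Standalone sketch of "count active links, then check the cap".
 * All names are hypothetical stand-ins for the driver structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_MAX_LINKS 15	/* arbitrary bound for the sketch */

struct sketch_link {
	bool has_chanctx;	/* stands in for link_conf->chanctx_conf */
};

struct sketch_vif {
	struct sketch_link link[SKETCH_MAX_LINKS];
	unsigned int max_active_links;	/* 1, or more when eSR is usable */
};

static unsigned int count_active_links(const struct sketch_vif *vif)
{
	unsigned int n_active = 0;

	for (int i = 0; i < SKETCH_MAX_LINKS; i++)
		if (vif->link[i].has_chanctx)
			n_active++;
	return n_active;
}

/* Returns true if link @link_id may be given a channel context now. */
static bool may_assign_chanctx(const struct sketch_vif *vif, int link_id)
{
	unsigned int n_active = count_active_links(vif);

	/* if the assigned one was not counted yet, count it now */
	if (!vif->link[link_id].has_chanctx)
		n_active++;

	return n_active <= vif->max_active_links;
}

int main(void)
{
	struct sketch_vif vif = { .max_active_links = 1 };

	vif.link[0].has_chanctx = true;
	printf("assign link 1: %s\n",
	       may_assign_chanctx(&vif, 1) ? "ok" : "rejected");
	return 0;
}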
+ if (!rcu_access_pointer(link_conf->chanctx_conf)) + n_active++; + + if (n_active > iwl_mvm_max_active_links(mvm, vif)) + return -EOPNOTSUPP; + + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return -EINVAL; + + /* mac parameters such as HE support can change at this stage + * For sta, need first to configure correct state from drv_sta_state + * and only after that update mac config. + */ + if (vif->type == NL80211_IFTYPE_AP) { + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) { + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + return -EINVAL; + } + } + + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { + mvmvif->link[link_id]->listen_lmac = true; + ret = iwl_mvm_esr_mode_active(mvm, vif); + if (ret) { + IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret); + return ret; + } + } + + mvmvif->link[link_id]->phy_ctxt = phy_ctxt; + + if (switching_chanctx) { + /* reactivate if we turned this off during channel switch */ + if (vif->type == NL80211_IFTYPE_AP) + mvmvif->ap_ibss_active = true; + } + + /* send it first with phy context ID */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, 0, false); + if (ret) + goto out; + + /* Initialize rate control for the AP station, since we might be + * doing a link switch here - we cannot initialize it before since + * this needs the phy context assigned (and in FW?), and we cannot + * do it later because it needs to be initialized as soon as we're + * able to TX on the link, i.e. when active. + * + * Firmware restart isn't quite correct yet for MLO, but we don't + * need to do it in that case anyway since it will happen from the + * normal station state callback. + */ + if (mvmvif->ap_sta && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct ieee80211_link_sta *link_sta; + + rcu_read_lock(); + link_sta = rcu_dereference(mvmvif->ap_sta->link[link_id]); + + if (!WARN_ON_ONCE(!link_sta)) + iwl_mvm_rs_rate_init(mvm, vif, mvmvif->ap_sta, + link_conf, link_sta, + phy_ctxt->channel->band); + rcu_read_unlock(); + } + + /* then activate */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); + if (ret) + goto out; + + /* + * Power state must be updated before quotas, + * otherwise fw will complain. 
+ */ + iwl_mvm_power_update_mac(mvm); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + ret = iwl_mvm_mld_add_snif_sta(mvm, vif, link_conf); + if (ret) + goto deactivate; + } + + return 0; + +deactivate: + iwl_mvm_link_changed(mvm, vif, link_conf, LINK_CONTEXT_MODIFY_ACTIVE, + false); +out: + mvmvif->link[link_id]->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); + return ret; +} + +static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + int link_id, ret = 0; + + mvmvif->esr_active = false; + + vif->driver_flags &= ~IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, + IEEE80211_SMPS_AUTOMATIC); + + for_each_vif_active_link(vif, link_conf, link_id) { + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_phy_ctxt *phy_ctxt; + u8 static_chains, dynamic_chains; + + mvmvif->link[link_id]->listen_lmac = false; + + rcu_read_lock(); + + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); + phy_ctxt = mvmvif->link[link_id]->phy_ctxt; + + if (!chanctx_conf || !phy_ctxt) { + rcu_read_unlock(); + continue; + } + + phy_ctxt->rlc_disabled = false; + static_chains = chanctx_conf->rx_chains_static; + dynamic_chains = chanctx_conf->rx_chains_dynamic; + + rcu_read_unlock(); + + ret = iwl_mvm_phy_send_rlc(mvm, phy_ctxt, static_chains, + dynamic_chains); + if (ret) + break; + } + + return ret; +} + +static void +__iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) + +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + unsigned int link_id = link_conf->link_id; + + /* shouldn't happen, but verify link_id is valid before accessing */ + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return; + + if (vif->type == NL80211_IFTYPE_AP && switching_chanctx) { + mvmvif->csa_countdown = false; + + /* Set CS bit on all the stations */ + iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); + + /* Save blocked iface, the timeout is set on the next beacon */ + rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); + + mvmvif->ap_ibss_active = false; + } + + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { + int ret = iwl_mvm_esr_mode_inactive(mvm, vif); + + if (ret) + IWL_ERR(mvm, "failed to deactivate ESR mode (%d)\n", + ret); + } + + if (vif->type == NL80211_IFTYPE_MONITOR) + iwl_mvm_mld_rm_snif_sta(mvm, vif); + + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + + if (switching_chanctx) + return; + mvmvif->link[link_id]->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); +} + +static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + __iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, 
link_conf, ctx, false); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* the link should be already activated when assigning chan context */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ALL & + ~LINK_CONTEXT_MODIFY_ACTIVE, + true); + if (ret) + goto out_unlock; + + ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, link_conf); + if (ret) + goto out_rm_mcast; + + if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret)) + goto out_failed; + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + + iwl_mvm_bt_coex_vif_change(mvm); + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + iwl_mvm_ftm_restart_responder(mvm, vif, link_conf); + + goto out_unlock; + +out_failed: + iwl_mvm_power_update_mac(mvm); + mvmvif->ap_ibss_active = false; + iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); +out_rm_mcast: + iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); +out_unlock: + mutex_unlock(&mvm->mutex); + return ret; +} + +static int iwl_mvm_mld_start_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + return iwl_mvm_mld_start_ap_ibss(hw, vif, link_conf); +} + +static int iwl_mvm_mld_start_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return iwl_mvm_mld_start_ap_ibss(hw, vif, &vif->bss_conf); +} + +static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + iwl_mvm_stop_ap_ibss_common(mvm, vif); + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + + iwl_mvm_ftm_responder_clear(mvm, vif); + + iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); + iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); + + iwl_mvm_power_update_mac(mvm); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + iwl_mvm_mld_stop_ap_ibss(hw, vif, link_conf); +} + +static void iwl_mvm_mld_stop_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + iwl_mvm_mld_stop_ap_ibss(hw, vif, &vif->bss_conf); +} + +static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + static const struct iwl_mvm_sta_state_ops callbacks = { + .add_sta = iwl_mvm_mld_add_sta, + .update_sta = iwl_mvm_mld_update_sta, + .rm_sta = iwl_mvm_mld_rm_sta, + .mac_ctxt_changed = iwl_mvm_mld_mac_ctxt_changed, + }; + + return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, 
new_state, + &callbacks); +} + +static void +iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool has_he, has_eht; + u32 link_changes = 0; + int ret; + + if (WARN_ON_ONCE(!mvmvif->link[link_conf->link_id])) + return; + + has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax; + has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be; + + /* Update EDCA params */ + if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos) + link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS; + + if (changes & BSS_CHANGED_ERP_SLOT) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (vif->cfg.assoc && (has_he || has_eht)) { + IWL_DEBUG_MAC80211(mvm, "Associated in HE mode\n"); + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + } + + /* Update EHT Puncturing info */ + if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht) + link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; + + if (link_changes) { + ret = iwl_mvm_link_changed(mvm, vif, link_conf, link_changes, + true); + if (ret) + IWL_ERR(mvm, "failed to update link\n"); + } + + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, + ETH_ALEN); + + iwl_mvm_bss_info_changed_station_common(mvm, vif, link_conf, changes); +} + +static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif) +{ + int i; + + for_each_mvm_vif_valid_link(mvmvif, i) { + if (mvmvif->link[i]->ap_sta_id != IWL_MVM_INVALID_STA) + return true; + } + + return false; +} + +static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i, ret; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + for_each_mvm_vif_valid_link(mvmvif, i) { + struct iwl_mvm_vif_link_info *link = mvmvif->link[i]; + + if (!link) + continue; + + iwl_mvm_sec_key_remove_ap(mvm, vif, link, i); + ret = iwl_mvm_mld_rm_sta_id(mvm, link->ap_sta_id); + if (ret) + IWL_ERR(mvm, "failed to remove AP station\n"); + + link->ap_sta_id = IWL_MVM_INVALID_STA; + } +} + +static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + bool protect = false; + unsigned int i; + int ret; + + /* This might get called without active links during the + * chanctx switch, but we don't care about it anyway. 
+ */ + if (changes == BSS_CHANGED_IDLE) + return; + + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + mvmvif->associated = vif->cfg.assoc; + + if (!(changes & BSS_CHANGED_ASSOC)) + return; + + if (vif->cfg.assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + + for_each_mvm_vif_valid_link(mvmvif, i) { + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); + + if (vif->p2p) { + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC, i); + } + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[i]); + if (link_conf && !link_conf->dtim_period) + protect = true; + rcu_read_unlock(); + } + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + protect) { + /* If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0); + } + + iwl_mvm_sf_update(mvm, vif, false); + + /* FIXME: need to decide about misbehaving AP handling */ + iwl_mvm_power_vif_assoc(mvm, vif); + } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { + iwl_mvm_mei_host_disassociated(mvm); + + /* If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. + */ + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), + "Failed to update SF upon disassociation\n"); + + /* If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. 
+ */ + iwl_mvm_mld_vif_delete_all_stas(mvm, vif); + } + + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); +} + +static void +iwl_mvm_mld_link_info_changed_ap_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 link_changes = LINK_CONTEXT_MODIFY_PROTECT_FLAGS | + LINK_CONTEXT_MODIFY_QOS_PARAMS; + + /* Changes will be applied when the AP/IBSS is started */ + if (!mvmvif->ap_ibss_active) + return; + + if (link_conf->he_support) + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + + if (changes & BSS_CHANGED_ERP_SLOT) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS | + BSS_CHANGED_HE_BSS_COLOR) && + iwl_mvm_link_changed(mvm, vif, link_conf, + link_changes, true)) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + /* Need to send a new beacon template to the FW */ + if (changes & BSS_CHANGED_BEACON && + iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf)) + IWL_WARN(mvm, "Failed updating beacon data\n"); + + /* FIXME: need to decide if we need FTM responder per link */ + if (changes & BSS_CHANGED_FTM_RESPONDER) { + int ret = iwl_mvm_ftm_start_responder(mvm, vif, link_conf); + + if (ret) + IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", + ret); + } +} + +static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mvm_mld_link_info_changed_station(mvm, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + iwl_mvm_mld_link_info_changed_ap_ibss(mvm, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_MONITOR: + if (changes & BSS_CHANGED_MU_GROUPS) + iwl_mvm_update_mu_groups(mvm, vif); + break; + default: + /* shouldn't happen */ + WARN_ON_ONCE(1); + } + + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", + link_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower); + } + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes); + + mutex_unlock(&mvm->mutex); +} + +static int +iwl_mvm_mld_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + static const struct iwl_mvm_switch_vif_chanctx_ops ops = { + .__assign_vif_chanctx = __iwl_mvm_mld_assign_vif_chanctx, + .__unassign_vif_chanctx = __iwl_mvm_mld_unassign_vif_chanctx, + }; + + return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops); +} + +static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int filter_flags, + unsigned int changed_flags) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* We support only filter for probe requests */ + if (!(changed_flags & FIF_PROBE_REQ)) + return; + + 
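(Editor's aside - illustrative only, not part of the patch.) The iwl_mvm_mld_change_vif_links() handler added in the next hunk works on 16-bit link bitmaps: it derives removed = old_links & ~new_links and added = new_links & ~old_links, then walks the set bits to tear down or set up each link. The tiny standalone sketch below shows that bitmap arithmetic in isolation; the values are arbitrary examples.

/* Standalone illustration of the old_links/new_links bitmap arithmetic
 * used by the change_vif_links path; not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t old_links = 0x0003;	/* links 0 and 1 active before */
	uint16_t new_links = 0x0006;	/* links 1 and 2 requested now */
	uint16_t removed = old_links & ~new_links;	/* -> bit 0 */
	uint16_t added = new_links & ~old_links;	/* -> bit 2 */

	for (int i = 0; i < 16; i++) {
		if (removed & (1u << i))
			printf("tear down link %d\n", i);
		if (added & (1u << i))
			printf("set up link %d\n", i);
	}
	return 0;
}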
/* Supported only for p2p client interfaces */ + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc || + !vif->p2p) + return; + + mutex_lock(&mvm->mutex); + iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + mutex_unlock(&mvm->mutex); +} + +static int +iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id]; + + if (!mvm_link) + return -EINVAL; + + mvm_link->queue_params[ac] = *params; + + /* No need to update right away, we'll get BSS_CHANGED_QOS + * The exception is P2P_DEVICE interface which needs immediate update. + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_QOS_PARAMS, + true); + mutex_unlock(&mvm->mutex); + return ret; + } + return 0; +} + +static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + /* Inorder to change the phy_ctx of a link, the link needs to be + * inactive. Therefore, first deactivate the link, then change its + * phy_ctx, and then activate it again. + */ + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (WARN(ret, "Failed to deactivate link\n")) + return ret; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = new_phy_ctxt; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); + if (WARN(ret, "Failed to deactivate link\n")) + return ret; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, true); + WARN(ret, "Failed binding P2P_DEVICE\n"); + return ret; +} + +static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type) +{ + static const struct iwl_mvm_roc_ops ops = { + .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta, + .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx, + }; + + return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); +} + +static int +iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u16 removed = old_links & ~new_links; + u16 added = new_links & ~old_links; + int err, i; + + if (hweight16(new_links) > 1 && + n_active > iwl_mvm_max_active_links(mvm, vif)) + return -EOPNOTSUPP; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + int r; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + break; + + if (!(added & BIT(i))) + continue; + new_link[i] = kzalloc(sizeof(*new_link[i]), GFP_KERNEL); + if (!new_link[i]) { + err = -ENOMEM; + goto free; + } + + new_link[i]->bcast_sta.sta_id = IWL_MVM_INVALID_STA; + new_link[i]->mcast_sta.sta_id = IWL_MVM_INVALID_STA; + new_link[i]->ap_sta_id = IWL_MVM_INVALID_STA; + 
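(Editor's aside - illustrative only, not part of the patch.) The initializer just above, which continues below, fills every freshly allocated per-link struct with sentinel values (invalid station ids, invalid firmware link id, SMPS automatic) before it is published. The hypothetical sketch below shows why such sentinels matter: teardown code can then tell state that was never handed to the firmware apart from state holding real ids. INVALID_ID and the struct layout are invented for the sketch only.

/* Hypothetical sketch: sentinel-initialized per-link state lets teardown
 * skip ids that were never allocated in the firmware. Not part of the patch.
 */
#include <stdio.h>

#define INVALID_ID 0xff		/* sketch-only stand-in for an invalid id */

struct sketch_link_info {
	unsigned char ap_sta_id;
	unsigned char fw_link_id;
};

static void sketch_link_init(struct sketch_link_info *li)
{
	li->ap_sta_id = INVALID_ID;
	li->fw_link_id = INVALID_ID;
}

static void sketch_link_teardown(const struct sketch_link_info *li)
{
	/* only tell the firmware about ids that were really allocated */
	if (li->fw_link_id != INVALID_ID)
		printf("remove fw link %u\n", li->fw_link_id);
	if (li->ap_sta_id != INVALID_ID)
		printf("remove fw sta %u\n", li->ap_sta_id);
}

int main(void)
{
	struct sketch_link_info li;

	sketch_link_init(&li);
	sketch_link_teardown(&li);	/* prints nothing: both ids invalid */
	return 0;
}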
new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + + for (r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) + new_link[i]->smps_requests[r] = + IEEE80211_SMPS_AUTOMATIC; + } + + mutex_lock(&mvm->mutex); + + if (old_links == 0) { + err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + if (err) + goto out_err; + mvmvif->link[0] = NULL; + } + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (removed & BIT(i)) { + struct ieee80211_bss_conf *link_conf = old[i]; + + err = iwl_mvm_disable_link(mvm, vif, link_conf); + if (err) + goto out_err; + kfree(mvmvif->link[i]); + mvmvif->link[i] = NULL; + } else if (added & BIT(i)) { + struct ieee80211_bss_conf *link_conf; + + link_conf = link_conf_dereference_protected(vif, i); + if (WARN_ON(!link_conf)) + continue; + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) + mvmvif->link[i] = new_link[i]; + new_link[i] = NULL; + err = iwl_mvm_add_link(mvm, vif, link_conf); + if (err) + goto out_err; + } + } + + if (err) + goto out_err; + + err = 0; + if (new_links == 0) { + mvmvif->link[0] = &mvmvif->deflink; + err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + } + +out_err: + /* we really don't have a good way to roll back here ... */ + mutex_unlock(&mvm->mutex); + +free: + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) + kfree(new_link[i]); + return err; +} + +static int +iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); + mutex_unlock(&mvm->mutex); + + return ret; +} + +const struct ieee80211_ops iwl_mvm_mld_hw_ops = { + .tx = iwl_mvm_mac_tx, + .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, + .ampdu_action = iwl_mvm_mac_ampdu_action, + .get_antenna = iwl_mvm_op_get_antenna, + .start = iwl_mvm_mac_start, + .reconfig_complete = iwl_mvm_mac_reconfig_complete, + .stop = iwl_mvm_mac_stop, + .add_interface = iwl_mvm_mld_mac_add_interface, + .remove_interface = iwl_mvm_mld_mac_remove_interface, + .config = iwl_mvm_mac_config, + .prepare_multicast = iwl_mvm_prepare_multicast, + .configure_filter = iwl_mvm_configure_filter, + .config_iface_filter = iwl_mvm_mld_config_iface_filter, + .link_info_changed = iwl_mvm_mld_link_info_changed, + .vif_cfg_changed = iwl_mvm_mld_vif_cfg_changed, + .hw_scan = iwl_mvm_mac_hw_scan, + .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, + .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, + .sta_state = iwl_mvm_mld_mac_sta_state, + .sta_notify = iwl_mvm_mac_sta_notify, + .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, + .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, + .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, + .sta_rc_update = iwl_mvm_sta_rc_update, + .conf_tx = iwl_mvm_mld_mac_conf_tx, + .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, + .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, + .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, + .flush = iwl_mvm_mac_flush, + .flush_sta = iwl_mvm_mac_flush_sta, + .sched_scan_start = iwl_mvm_mac_sched_scan_start, + .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, + .set_key = iwl_mvm_mac_set_key, + .update_tkip_key = iwl_mvm_mac_update_tkip_key, + .remain_on_channel = iwl_mvm_mld_roc, + .cancel_remain_on_channel = iwl_mvm_cancel_roc, + .add_chanctx = iwl_mvm_add_chanctx, + .remove_chanctx = iwl_mvm_remove_chanctx, + .change_chanctx = iwl_mvm_change_chanctx, + 
.assign_vif_chanctx = iwl_mvm_mld_assign_vif_chanctx, + .unassign_vif_chanctx = iwl_mvm_mld_unassign_vif_chanctx, + .switch_vif_chanctx = iwl_mvm_mld_switch_vif_chanctx, + + .start_ap = iwl_mvm_mld_start_ap, + .stop_ap = iwl_mvm_mld_stop_ap, + .join_ibss = iwl_mvm_mld_start_ibss, + .leave_ibss = iwl_mvm_mld_stop_ibss, + + .tx_last_beacon = iwl_mvm_tx_last_beacon, + + .set_tim = iwl_mvm_set_tim, + + .channel_switch = iwl_mvm_channel_switch, + .pre_channel_switch = iwl_mvm_pre_channel_switch, + .post_channel_switch = iwl_mvm_post_channel_switch, + .abort_channel_switch = iwl_mvm_abort_channel_switch, + .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, + + .tdls_channel_switch = iwl_mvm_tdls_channel_switch, + .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, + .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, + + .event_callback = iwl_mvm_mac_event_callback, + + .sync_rx_queues = iwl_mvm_sync_rx_queues, + + CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) + +#ifdef CONFIG_PM_SLEEP + /* look at d3.c */ + .suspend = iwl_mvm_suspend, + .resume = iwl_mvm_resume, + .set_wakeup = iwl_mvm_set_wakeup, + .set_rekey_data = iwl_mvm_set_rekey_data, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = iwl_mvm_ipv6_addr_change, +#endif + .set_default_unicast_key = iwl_mvm_set_default_unicast_key, +#endif + .get_survey = iwl_mvm_mac_get_survey, + .sta_statistics = iwl_mvm_mac_sta_statistics, + .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, + .start_pmsr = iwl_mvm_start_pmsr, + .abort_pmsr = iwl_mvm_abort_pmsr, + +#ifdef CONFIG_IWLWIFI_DEBUGFS + .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, +#endif + .set_hw_timestamp = iwl_mvm_set_hw_timestamp, + + .change_vif_links = iwl_mvm_mld_change_vif_links, + .change_sta_links = iwl_mvm_mld_change_sta_links, +}; diff --git a/sys/contrib/dev/iwlwifi/mvm/mld-sta.c b/sys/contrib/dev/iwlwifi/mvm/mld-sta.c new file mode 100644 index 000000000000..524852cf5cd2 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/mld-sta.c @@ -0,0 +1,1181 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022-2023 Intel Corporation + */ +#include "mvm.h" +#include "time-sync.h" +#include "sta.h" + +u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int filter_link_id) +{ + struct iwl_mvm_sta *mvmsta; + unsigned int link_id; + u32 result = 0; + + if (!sta) + return 0; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* it's easy when the STA is not an MLD */ + if (!sta->valid_links) + return BIT(mvmsta->deflink.sta_id); + + /* but if it is an MLD, get the mask of all the FW STAs it has ... 
*/ + for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { + struct iwl_mvm_link_sta *link_sta; + + /* unless we have a specific link in mind */ + if (filter_link_id >= 0 && link_id != filter_link_id) + continue; + + link_sta = + rcu_dereference_check(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!link_sta) + continue; + + result |= BIT(link_sta->sta_id); + } + + return result; +} + +static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, + struct iwl_mvm_sta_cfg_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), + 0, sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); + return ret; +} + +/* + * Add an internal station to the FW table + */ +static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id) +{ + struct iwl_mvm_sta_cfg_cmd cmd; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + cmd.sta_id = cpu_to_le32((u8)sta->sta_id); + + cmd.link_id = cpu_to_le32(link_id); + + cmd.station_type = cpu_to_le32(sta->type); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) && + sta->type == STATION_TYPE_BCAST_MGMT) + cmd.mfp = cpu_to_le32(1); + + if (addr) { + memcpy(cmd.peer_mld_address, addr, ETH_ALEN); + memcpy(cmd.peer_link_address, addr, ETH_ALEN); + } + + return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); +} + +/* + * Remove a station from the FW table. Before sending the command to remove + * the station validate that the station is indeed known to the driver (sanity + * only). + */ +static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id) +{ + struct iwl_mvm_remove_sta_cmd rm_sta_cmd = { + .sta_id = cpu_to_le32(sta_id), + }; + int ret; + + /* Note: internal stations are marked as error values */ + if (!rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) { + IWL_ERR(mvm, "Invalid station id %d\n", sta_id); + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD), + 0, sizeof(rm_sta_cmd), &rm_sta_cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); + return ret; + } + + return 0; +} + +static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 lmac_id) +{ + int ret; + + struct iwl_mvm_aux_sta_cmd cmd = { + .sta_id = cpu_to_le32(sta->sta_id), + .lmac_id = cpu_to_le32(lmac_id), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send AUX_STA_CMD\n"); + return ret; +} + +/* + * Adds an internal sta to the FW table with its queues + */ +int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id, + u16 *queue, u8 tid, + unsigned int *_wdg_timeout) +{ + int ret, txq; + unsigned int wdg_timeout = _wdg_timeout ? 
*_wdg_timeout : + mvm->trans->trans_cfg->base_params->wd_timeout; + + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + return -ENOSPC; + + if (sta->type == STATION_TYPE_AUX) + ret = iwl_mvm_add_aux_sta_to_fw(mvm, sta, link_id); + else + ret = iwl_mvm_mld_add_int_sta_to_fw(mvm, sta, addr, link_id); + if (ret) + return ret; + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + txq = iwl_mvm_tvqm_enable_txq(mvm, NULL, sta->sta_id, tid, + wdg_timeout); + if (txq < 0) { + iwl_mvm_mld_rm_sta_from_fw(mvm, sta->sta_id); + return txq; + } + *queue = txq; + + return 0; +} + +/* + * Adds a new int sta: allocate it in the driver, add it to the FW table, + * and add its queues. + */ +static int iwl_mvm_mld_add_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *int_sta, u16 *queue, + enum nl80211_iftype iftype, + enum iwl_fw_sta_type sta_type, + int link_id, const u8 *addr, u8 tid, + unsigned int *wdg_timeout) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* qmask argument is not used in the new tx api, send a don't care */ + ret = iwl_mvm_allocate_int_sta(mvm, int_sta, 0, iftype, + sta_type); + if (ret) + return ret; + + ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, int_sta, addr, link_id, + queue, tid, wdg_timeout); + if (ret) { + iwl_mvm_dealloc_int_sta(mvm, int_sta); + return ret; + } + + return 0; +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each P2P mac should have its own broadcast station. + */ +int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + struct iwl_mvm_int_sta *bsta = &mvm_link->bcast_sta; + static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + const u8 *baddr = _baddr; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + u16 *queue; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_ADHOC) + baddr = link_conf->bssid; + + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + queue = &mvm_link->mgmt_queue; + } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + queue = &mvm->p2p_dev_queue; + } else { + WARN(1, "Missing required TXQ for adding bcast STA\n"); + return -EINVAL; + } + + return iwl_mvm_mld_add_int_sta(mvm, bsta, queue, + ieee80211_vif_type_p2p(vif), + STATION_TYPE_BCAST_MGMT, + mvm_link->fw_link_id, baddr, + IWL_MAX_TID_COUNT, &wdg_timeout); +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each AP/GO mac should have its own multicast station. 
+ */ +int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta; + static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; + const u8 *maddr = _maddr; + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return -EOPNOTSUPP; + + /* In IBSS, ieee80211_check_queues() sets the cab_queue to be + * invalid, so make sure we use the queue we want. + * Note that this is done here as we want to avoid making DQA + * changes in mac80211 layer. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + mvm_link->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + + return iwl_mvm_mld_add_int_sta(mvm, msta, &mvm_link->cab_queue, + vif->type, STATION_TYPE_MCAST, + mvm_link->fw_link_id, maddr, 0, + &timeout); +} + +/* Allocate a new station entry for the sniffer station to the given vif, + * and send it to the FW. + */ +int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_add_int_sta(mvm, &mvm->snif_sta, &mvm->snif_queue, + vif->type, STATION_TYPE_BCAST_MGMT, + mvm_link->fw_link_id, NULL, + IWL_MAX_TID_COUNT, NULL); +} + +int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) +{ + lockdep_assert_held(&mvm->mutex); + + /* In CDB NICs we need to specify which lmac to use for aux activity; + * use the link_id argument place to send lmac_id to the function. 
+ */ + return iwl_mvm_mld_add_int_sta(mvm, &mvm->aux_sta, &mvm->aux_queue, + NL80211_IFTYPE_UNSPECIFIED, + STATION_TYPE_AUX, lmac_id, NULL, + IWL_MAX_TID_COUNT, NULL); +} + +static int iwl_mvm_mld_disable_txq(struct iwl_mvm *mvm, u32 sta_mask, + u16 *queueptr, u8 tid) +{ + int queue = *queueptr; + int ret = 0; + + if (tid == IWL_MAX_TID_COUNT) + tid = IWL_MGMT_TID; + + if (mvm->sta_remove_requires_queue_remove) { + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD); + struct iwl_scd_queue_cfg_cmd remove_cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), + .u.remove.tid = cpu_to_le32(tid), + .u.remove.sta_mask = cpu_to_le32(sta_mask), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, + sizeof(remove_cmd), + &remove_cmd); + } + + iwl_trans_txq_free(mvm->trans, queue); + *queueptr = IWL_MVM_INVALID_QUEUE; + + return ret; +} + +/* Removes a sta from the FW table, disable its queues, and dealloc it + */ +static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *int_sta, + bool flush, u8 tid, u16 *queuptr) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + + if (flush) + iwl_mvm_flush_sta(mvm, int_sta, true); + + iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid); + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, int_sta->sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + + iwl_mvm_dealloc_int_sta(mvm, int_sta); + + return ret; +} + +int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id]; + u16 *queueptr; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(!link)) + return -EIO; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + queueptr = &link->mgmt_queue; + break; + case NL80211_IFTYPE_P2P_DEVICE: + queueptr = &mvm->p2p_dev_queue; + break; + default: + WARN(1, "Can't free bcast queue on vif type %d\n", + vif->type); + return -EINVAL; + } + + return iwl_mvm_mld_rm_int_sta(mvm, &link->bcast_sta, + true, IWL_MAX_TID_COUNT, queueptr); +} + +/* Send the FW a request to remove the station from it's internal data + * structures, and in addition remove it from the local data structure. 
+ */ +int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id]; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(!link)) + return -EIO; + + return iwl_mvm_mld_rm_int_sta(mvm, &link->mcast_sta, true, 0, + &link->cab_queue); +} + +int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_rm_int_sta(mvm, &mvm->snif_sta, false, + IWL_MAX_TID_COUNT, &mvm->snif_queue); +} + +int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_rm_int_sta(mvm, &mvm->aux_sta, false, + IWL_MAX_TID_COUNT, &mvm->aux_queue); +} + +/* send a cfg sta command to add/update a sta in firmware */ +static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_link_sta *mvm_link_sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = + mvm_vif->link[link_conf->link_id]; + struct iwl_mvm_sta_cfg_cmd cmd = { + .sta_id = cpu_to_le32(mvm_link_sta->sta_id), + .station_type = cpu_to_le32(mvm_sta->sta_type), + }; + u32 agg_size = 0, mpdu_dens = 0; + + /* when adding sta, link should exist in FW */ + if (WARN_ON(link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + + memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN); + memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN); + + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) + cmd.assoc_id = cpu_to_le32(sta->aid); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) && + (sta->mfp || mvm_sta->sta_state < IEEE80211_STA_AUTHORIZED)) + cmd.mfp = cpu_to_le32(1); + + switch (link_sta->rx_nss) { + case 1: + cmd.mimo = cpu_to_le32(0); + break; + case 2 ... 8: + cmd.mimo = cpu_to_le32(1); + break; + } + + switch (sta->deflink.smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + break; + case IEEE80211_SMPS_STATIC: + /* override NSS */ + cmd.mimo = cpu_to_le32(0); + break; + case IEEE80211_SMPS_DYNAMIC: + cmd.mimo_protection = cpu_to_le32(1); + break; + case IEEE80211_SMPS_OFF: + /* nothing */ + break; + } + + mpdu_dens = iwl_mvm_get_sta_ampdu_dens(link_sta, link_conf, &agg_size); + cmd.tx_ampdu_spacing = cpu_to_le32(mpdu_dens); + cmd.tx_ampdu_max_size = cpu_to_le32(agg_size); + + if (sta->wme) { + cmd.sp_length = + cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128); + cmd.uapsd_acs = cpu_to_le32(iwl_mvm_get_sta_uapsd_acs(sta)); + } + + if (link_sta->he_cap.has_he) { + cmd.trig_rnd_alloc = + cpu_to_le32(link_conf->uora_exists ? 
1 : 0); + + /* PPE Thresholds */ + iwl_mvm_set_sta_pkt_ext(mvm, link_sta, &cmd.pkt_ext); + + /* HTC flags */ + cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, link_sta); + + if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN) + cmd.ack_enabled = cpu_to_le32(1); + } + + return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); +} + +static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw) +{ + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], + is_in_fw ? ERR_PTR(-EINVAL) : NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL); + RCU_INIT_POINTER(mvm_sta->link[link_id], NULL); + + if (mvm_sta_link != &mvm_sta->deflink) + kfree_rcu(mvm_sta_link, rcu_head); +} + +static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta) +{ + unsigned int link_id; + + for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link) + continue; + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false); + } +} + +static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + unsigned int link_id) +{ + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_link_sta *link; + u32 sta_id = iwl_mvm_find_free_sta_id(mvm, + ieee80211_vif_type_p2p(vif)); + + if (sta_id == IWL_MVM_INVALID_STA) + return -ENOSPC; + + if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) { + link = &mvm_sta->deflink; + } else { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + } + + link->sta_id = sta_id; + rcu_assign_pointer(mvm_sta->link[link_id], link); + rcu_assign_pointer(mvm->fw_id_to_mac_id[link->sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[link->sta_id], + link_sta); + + return 0; +} + +/* allocate all the links of a sta, called when the station is first added */ +static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + if (!rcu_access_pointer(sta->link[link_id]) || + mvm_sta->link[link_id]) + continue; + + ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); + if (ret) + goto err; + } + + return 0; + +err: + iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta); + return ret; +} + +static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta, + struct iwl_mvm_vif_link_info *vif_link, + struct iwl_mvm_link_sta *sta_link) +{ + if (!sta->tdls) { + WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA); + vif_link->ap_sta_id = sta_link->sta_id; + } else { + WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA); + } +} + +/* FIXME: consider waiting for mac80211 to add the STA instead of allocating + * queues here + */ +static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int 
link_id; + /* no active link found */ + int ret = -EINVAL; + int sta_id; + + /* First add an empty station since allocating a queue requires + * a valid station. Since we need a link_id to allocate a station, + * pick up the first valid one. + */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_vif_link_info *mvm_link; + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link_conf) + continue; + + mvm_link = mvmvif->link[link_conf->link_id]; + + if (!mvm_link || !mvm_link_sta) + continue; + + sta_id = mvm_link_sta->sta_id; + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, + link_conf, mvm_link_sta); + if (ret) + return ret; + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], link_sta); + ret = 0; + } + + iwl_mvm_realloc_queues_after_restart(mvm, sta); + + return ret; +} + +int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned long link_sta_added_to_fw = 0; + struct ieee80211_link_sta *link_sta; + int ret = 0; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta); + if (ret) + return ret; + + spin_lock_init(&mvm_sta->lock); + + ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, + STATION_TYPE_PEER); + } else { + ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); + } + + if (ret) + goto err; + + /* at this stage sta link pointers are already allocated */ + ret = iwl_mvm_mld_update_sta(mvm, vif, sta); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!link_conf || !mvm_link_sta)) + goto err; + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_link_sta); + if (ret) + goto err; + + link_sta_added_to_fw |= BIT(link_id); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id], + mvm_link_sta); + } + + return 0; + +err: + /* remove all already allocated stations in FW */ + for_each_set_bit(link_id, &link_sta_added_to_fw, + IEEE80211_MLD_MAX_NUM_LINKS) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id); + } + + /* free all sta resources in the driver */ + iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta); + return ret; +} + +int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + int ret = -EINVAL; + + lockdep_assert_held(&mvm->mutex); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if 
(WARN_ON(!link_conf || !mvm_link_sta)) + return -EINVAL; + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_link_sta); + + if (ret) { + IWL_ERR(mvm, "Failed to update sta link %d\n", link_id); + break; + } + } + + return ret; +} + +static void iwl_mvm_mld_disable_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + u32 sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) + continue; + + iwl_mvm_mld_disable_txq(mvm, sta_mask, + &mvm_sta->tid_data[i].txq_id, i); + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; + } + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } +} + +int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* flush its queues here since we are freeing mvm_sta */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_link_sta)) + return -EINVAL; + + ret = iwl_mvm_flush_sta_tids(mvm, mvm_link_sta->sta_id, + 0xffff); + if (ret) + return ret; + } + + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + if (ret) + return ret; + + iwl_mvm_mld_disable_sta_queues(mvm, vif, sta); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + bool stay_in_fw; + + stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret); + if (ret) + break; + + if (!stay_in_fw) + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, + mvm_link_sta->sta_id); + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, + link_id, stay_in_fw); + } + + return ret; +} + +int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id) +{ + int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + + lockdep_assert_held(&mvm->mutex); + + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); + return ret; +} + +void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + bool disable) +{ + struct iwl_mvm_sta_disable_tx_cmd cmd; + int ret; + + cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id); + cmd.disable = cpu_to_le32(disable); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD), + CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, + "Failed to send STA_DISABLE_TX_CMD command (%d)\n", + ret); +} + +void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvm_sta->lock); + + if (mvm_sta->disable_tx == disable) { + spin_unlock_bh(&mvm_sta->lock); + return; + } + + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable); + + spin_unlock_bh(&mvm_sta->lock); +} + +void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable) +{ + struct 
ieee80211_sta *sta; + struct iwl_mvm_sta *mvm_sta; + int i; + + rcu_read_lock(); + + /* Block/unblock all the stations of the given mvmvif */ + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + if (mvm_sta->mac_id_n_color != + FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) + continue; + + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable); + } + + rcu_read_unlock(); +} + +static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_scd_queue_cfg_cmd cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), + .len[0] = sizeof(cmd), + .data[0] = &cmd + }; + int tid; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) { + struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid]; + int txq_id = tid_data->txq_id; + + if (txq_id == IWL_MVM_INVALID_QUEUE) + continue; + + if (tid == IWL_MAX_TID_COUNT) + cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID); + else + cmd.u.modify.tid = cpu_to_le32(tid); + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (ret) + return ret; + } + + return 0; +} + +static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY), + .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask), + .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask), + }; + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + int baid; + + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); + + for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) { + struct iwl_mvm_baid_data *data; + int ret; + + data = rcu_dereference_protected(mvm->baid_map[baid], + lockdep_is_held(&mvm->mutex)); + if (!data) + continue; + + if (!(data->sta_mask & old_sta_mask)) + continue; + + WARN_ONCE(data->sta_mask != old_sta_mask, + "BAID data for %d corrupted - expected 0x%x found 0x%x\n", + baid, old_sta_mask, data->sta_mask); + + cmd.modify.tid = cpu_to_le32(data->tid); + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + data->sta_mask = new_sta_mask; + if (ret) + return ret; + } + + return 0; +} + +static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + int ret; + + ret = iwl_mvm_mld_update_sta_queues(mvm, sta, + old_sta_mask, + new_sta_mask); + if (ret) + return ret; + + ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta, + old_sta_mask, + new_sta_mask); + if (ret) + return ret; + + return iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask, new_sta_mask); +} + +int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_link_sta *mvm_sta_link; + struct iwl_mvm_vif_link_info *mvm_vif_link; + unsigned long links_to_add = ~old_links & new_links; + unsigned long links_to_rem = old_links 
& ~new_links; + unsigned long old_links_long = old_links; + u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0; + unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0; + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for_each_set_bit(link_id, &old_links_long, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_sta_link)) { + ret = -EINVAL; + goto err; + } + + current_sta_mask |= BIT(mvm_sta_link->sta_id); + if (links_to_rem & BIT(link_id)) + sta_mask_to_rem |= BIT(mvm_sta_link->sta_id); + } + + if (sta_mask_to_rem) { + ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, + current_sta_mask, + current_sta_mask & + ~sta_mask_to_rem); + if (WARN_ON(ret)) + goto err; + + current_sta_mask &= ~sta_mask_to_rem; + } + + for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + mvm_vif_link = mvm_vif->link[link_id]; + + if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) { + ret = -EINVAL; + goto err; + } + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); + if (WARN_ON(ret)) + goto err; + + if (vif->type == NL80211_IFTYPE_STATION) + mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA; + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, + false); + } + + for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + mvm_vif_link = mvm_vif->link[link_id]; + + if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta || + mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + + ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); + if (WARN_ON(ret)) + goto err; + + link_sta->agg.max_rc_amsdu_len = 1; + ieee80211_sta_recalc_aggregates(sta); + + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_sta_link)) { + ret = -EINVAL; + goto err; + } + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link, + mvm_sta_link); + + link_sta_allocated |= BIT(link_id); + + sta_mask_added |= BIT(mvm_sta_link->sta_id); + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_sta_link); + if (WARN_ON(ret)) + goto err; + + link_sta_added_to_fw |= BIT(link_id); + + iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link); + } + + if (sta_mask_added) { + ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, + current_sta_mask, + current_sta_mask | + sta_mask_added); + if (WARN_ON(ret)) + goto err; + } + + return 0; + +err: + /* remove all already allocated stations in FW */ + for_each_set_bit(link_id, &link_sta_added_to_fw, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); + } + + /* remove all already allocated station links in driver */ + for_each_set_bit(link_id, &link_sta_allocated, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, + false); + } + + return ret; +} diff --git a/sys/contrib/dev/iwlwifi/mvm/mvm.h b/sys/contrib/dev/iwlwifi/mvm/mvm.h 
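[Editor's note, not part of the patch: the mld-sta.c hunk above (iwl_mvm_mld_add_sta() and iwl_mvm_mld_update_sta_links()) repeatedly uses the same idiom -- every per-link station successfully pushed to the firmware is recorded in a bitmask (link_sta_added_to_fw / link_sta_allocated), and the error path walks only that bitmask to undo exactly what was added. The sketch below is a minimal, self-contained userspace C illustration of that idiom under assumed names; demo_cfg_link(), demo_rm_link() and demo_add_links() are hypothetical stand-ins and do not exist in the driver.]

/*
 * Illustration only: record each link added, unwind only those on failure.
 * Mirrors the bookkeeping pattern of iwl_mvm_mld_add_sta(), not its code.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_LINKS 15	/* stands in for IEEE80211_MLD_MAX_NUM_LINKS */

static int demo_cfg_link(unsigned int link_id)
{
	/* pretend the third link fails, to exercise the unwind path */
	return link_id == 2 ? -1 : 0;
}

static void demo_rm_link(unsigned int link_id)
{
	printf("removing link %u from fw\n", link_id);
}

static int demo_add_links(uint16_t active_links)
{
	uint16_t added_to_fw = 0;
	unsigned int link_id;

	for (link_id = 0; link_id < DEMO_MAX_LINKS; link_id++) {
		if (!(active_links & (1u << link_id)))
			continue;

		if (demo_cfg_link(link_id))
			goto err;

		/* remember what actually reached the firmware */
		added_to_fw |= 1u << link_id;
	}

	return 0;

err:
	/* unwind only the links that were really added */
	for (link_id = 0; link_id < DEMO_MAX_LINKS; link_id++)
		if (added_to_fw & (1u << link_id))
			demo_rm_link(link_id);

	return -1;
}

int main(void)
{
	/* links 0..2 active; link 2 "fails", so links 0 and 1 are torn down */
	return demo_add_links(0x7) ? 1 : 0;
}

[End of editor's note; the patch continues with the mvm.h header below.]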
index 7e905deab8ab..c5687a9fba9e 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mvm.h +++ b/sys/contrib/dev/iwlwifi/mvm/mvm.h @@ -1,2235 +1,2738 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __IWL_MVM_H__ #define __IWL_MVM_H__ #include #include #ifdef CONFIG_IWLWIFI_LEDS #include #endif #include #ifdef CONFIG_THERMAL #include #endif +#include + #include #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" #include "iwl-eeprom-parse.h" #include "fw/file.h" #include "iwl-config.h" #include "sta.h" #include "fw-api.h" #include "constants.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" #include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include #if defined(__FreeBSD__) #include #endif #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 #define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) /* For GO, this value represents the number of TUs before CSA "beacon * 0" TBTT when the CSA time-event needs to be scheduled to start. It * must be big enough to ensure that we switch in time. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 /* For client, this value represents the number of TUs before CSA * "beacon 1" TBTT, instead. This is because we don't know when the * GO/AP will be in the new channel, so we switch early enough. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 /* * This value (in TUs) is used to fine tune the CSA NoA end time which should * be just before "beacon 0" TBTT. */ #define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 /* * Number of beacons to transmit on a new channel until we unblock tx to * the stations, even if we didn't identify them on a new channel */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 /* offchannel queue towards mac80211 */ #define IWL_MVM_OFFCHANNEL_QUEUE 0 +/* invalid value for FW link id */ +#define IWL_MVM_FW_LINK_ID_INVALID 0xff + extern const struct ieee80211_ops iwl_mvm_hw_ops; +extern const struct ieee80211_ops iwl_mvm_mld_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. * We will register to mac80211 to have testmode working. The NIC must not * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. 
* @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; u32 ref; enum nl80211_chan_width width; struct ieee80211_channel *channel; /* track for RLC config command */ u32 center_freq1; + bool rlc_disabled; }; struct iwl_mvm_time_event_data { struct ieee80211_vif *vif; struct list_head list; unsigned long end_jiffies; u32 duration; bool running; u32 uid; /* * The access to the 'id' field must be done when the * mvm->time_event_lock is held, as it value is used to indicate * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; }; /* Power management */ /** * enum iwl_power_scheme * @IWL_POWER_LEVEL_CAM - Continuously Active Mode * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) * @IWL_POWER_LEVEL_LP - Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, IWL_POWER_SCHEME_BPS, IWL_POWER_SCHEME_LP }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { u16 keep_alive_seconds; u32 rx_data_timeout; u32 tx_data_timeout; bool skip_over_dtim; u8 skip_dtim_periods; bool lprx_ena; u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; bool use_ps_poll; int mask; }; /* beacon filtering */ enum iwl_dbgfs_bf_mask { MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), }; struct iwl_dbgfs_bf { u32 bf_energy_delta; u32 bf_roaming_energy_delta; u32 bf_roaming_state; u32 bf_temp_threshold; u32 bf_temp_fast_filter; u32 bf_temp_slow_filter; u32 bf_enable_beacon_filter; u32 bf_debug_flag; u32 bf_escape_timer; u32 ba_escape_timer; u32 ba_enable_beacon_abort; int mask; }; #endif enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, IWL_MVM_SMPS_REQ_PROT, IWL_MVM_SMPS_REQ_FW, NUM_IWL_MVM_SMPS_REQ, }; enum iwl_bt_force_ant_mode { BT_FORCE_ANT_DIS = 0, BT_FORCE_ANT_AUTO, BT_FORCE_ANT_BT, BT_FORCE_ANT_WIFI, BT_FORCE_ANT_MAX, }; /** * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: for low latency on * @LOW_LATENCY_FORCE_OFF: for low latency off * @NUM_LOW_LATENCY_FORCE: max num of modes */ enum iwl_mvm_low_latency_force { LOW_LATENCY_FORCE_UNSET, LOW_LATENCY_FORCE_ON, LOW_LATENCY_FORCE_OFF, NUM_LOW_LATENCY_FORCE }; /** * struct iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low 
latency mode set because of vif type (ap) * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicate that force mode is enabled * the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs * set this with LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag * in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicated if beacon abort is enabled * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ struct iwl_mvm_vif_bf_data { bool bf_enabled; bool ba_enabled; int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; int bt_coex_max_thold; int last_bt_coex_event; }; /** * struct iwl_probe_resp_data - data for NoA/CSA updates * @rcu_head: used for freeing the data on update * @notif: notification data * @noa_len: length of NoA attribute, calculated from the notification */ struct iwl_probe_resp_data { struct rcu_head rcu_head; struct iwl_probe_resp_data_notif notif; int noa_len; }; +/** + * struct iwl_mvm_vif_link_info - per link data in Virtual Interface + * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA + * @fw_link_id: the id of the link according to the FW API + * @bssid: BSSID for this (client) interface + * @bcast_sta: station used for broadcast packets. Used by the following + * vifs: P2P_DEVICE, GO and AP. + * @beacon_stats: beacon statistics, containing the # of received beacons, + * # of received beacons accumulated over FW restart, and the current + * average signal of beacons retrieved from the firmware + * @smps_requests: the SMPS requests of different parts of the driver, + * combined on update to yield the overall request to mac80211. + * @probe_resp_data: data from FW notification to store NOA and CSA related + * data to be inserted into probe response. + * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked + * @queue_params: QoS params for this MAC + * @mgmt_queue: queue number for unbufferable management frames + * @igtk: the current IGTK programmed into the firmware + * @listen_lmac: indicates this link is allocated to the listen LMAC + */ +struct iwl_mvm_vif_link_info { + u8 bssid[ETH_ALEN]; + u8 ap_sta_id; + u8 fw_link_id; + + struct iwl_mvm_int_sta bcast_sta; + struct iwl_mvm_int_sta mcast_sta; + + struct { + u32 num_beacons, accu_num_beacons; + u8 avg_signal; + } beacon_stats; + + enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; + struct iwl_probe_resp_data __rcu *probe_resp_data; + + struct ieee80211_key_conf *igtk; + + bool he_ru_2mhz_block; + bool active; + bool listen_lmac; + + u16 cab_queue; + /* Assigned while mac80211 has the link in a channel context, + * or, for P2P Device, while it exists. 
+ */ + struct iwl_mvm_phy_ctxt *phy_ctxt; + /* QoS data from mac80211, need to store this here + * as mac80211 has a separate callback but we need + * to have the data for the MAC context + */ + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + + u16 mgmt_queue; +}; + /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal - * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA - * @bssid: BSSID for this (client) interface * @associated: indicates that we're currently associated, used only for * managing the firmware state in iwl_mvm_bss_info_changed_station() * @ap_assoc_sta_count: count of stations associated to us - valid only * if VIF type is AP * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. - * @pm_enabled - Indicate if MAC power management is allowed + * @pm_enabled - indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. * @low_latency_actual: boolean, indicates low latency is set, * as a result from low_latency bit flags and takes force into account. * @authorized: indicates the AP station was set to authorized * @ps_disabled: indicates that this interface requires PS to be disabled - * @queue_params: QoS params for this MAC - * @bcast_sta: station used for broadcast packets. Used by the following - * vifs: P2P_DEVICE, GO and AP. - * @beacon_skb: the skb used to hold the AP/GO beacon template - * @smps_requests: the SMPS requests of different parts of the driver, - * combined on update to yield the overall request to mac80211. - * @beacon_stats: beacon statistics, containing the # of received beacons, - * # of received beacons accumulated over FW restart, and the current - * average signal of beacons retrieved from the firmware + * @csa_countdown: indicates that CSA countdown may be started * @csa_failed: CSA failed to schedule time event, report an error later + * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel * @features: hw features active for this vif - * @probe_resp_data: data from FW notification to store NOA and CSA related - * data to be inserted into probe response. */ struct iwl_mvm_vif { struct iwl_mvm *mvm; u16 id; u16 color; - u8 ap_sta_id; - u8 bssid[ETH_ALEN]; bool associated; u8 ap_assoc_sta_count; - - u16 cab_queue; - bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; + bool esr_active; + u8 low_latency: 6; u8 low_latency_actual: 1; + u8 authorized:1; bool ps_disabled; - struct iwl_mvm_vif_bf_data bf_data; - - struct { - u32 num_beacons, accu_num_beacons; - u8 avg_signal; - } beacon_stats; u32 ap_beacon_time; - - enum iwl_tsf_id tsf_id; - - /* - * QoS data from mac80211, need to store this here - * as mac80211 has a separate callback but we need - * to have the data for the MAC context - */ - struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; - struct iwl_mvm_time_event_data time_event_data; - struct iwl_mvm_time_event_data hs_time_event_data; - - struct iwl_mvm_int_sta bcast_sta; - struct iwl_mvm_int_sta mcast_sta; - - /* - * Assigned while mac80211 has the interface in a channel context, - * or, for P2P Device, while it exists. 
- */ - struct iwl_mvm_phy_ctxt *phy_ctxt; + struct iwl_mvm_vif_bf_data bf_data; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_EXT_LEN]; u8 kek[NL80211_KEK_EXT_LEN]; size_t kek_len; size_t kck_len; u32 akm; __le64 replay_ctr; bool valid; } rekey_data; int tx_key_idx; bool seqno_valid; u16 seqno; #endif #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; int num_target_ipv6_addrs; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; #endif - enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; - /* FW identified misbehaving AP */ - u8 uapsd_misbehaving_bssid[ETH_ALEN]; - + u8 uapsd_misbehaving_ap_addr[ETH_ALEN] __aligned(2); struct delayed_work uapsd_nonagg_detected_wk; - /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; + bool csa_bcn_pending; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; struct delayed_work csa_work; - /* Indicates that we are waiting for a beacon on a new channel */ - bool csa_bcn_pending; + enum iwl_tsf_id tsf_id; + + struct iwl_mvm_time_event_data time_event_data; + struct iwl_mvm_time_event_data hs_time_event_data; /* TCP Checksum Offload */ netdev_features_t features; - struct iwl_probe_resp_data __rcu *probe_resp_data; + struct ieee80211_sta *ap_sta; /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; - /* 26-tone RU OFDMA transmissions should be blocked */ - bool he_ru_2mhz_block; - struct { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; + + struct iwl_mvm_vif_link_info deflink; + struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; +#define for_each_mvm_vif_valid_link(mvm_vif, link_id) \ + for (link_id = 0; \ + link_id < ARRAY_SIZE((mvm_vif)->link); \ + link_id++) \ + if ((mvm_vif)->link[link_id]) + static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { return (void *)vif->drv_priv; } extern const u8 tid_to_mac80211_ac[]; #define IWL_MVM_SCAN_STOPPING_SHIFT 8 enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_STOPPING_SCHED, IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, }; enum iwl_mvm_scan_type { IWL_SCAN_TYPE_NOT_SET, IWL_SCAN_TYPE_UNASSOC, IWL_SCAN_TYPE_WILD, IWL_SCAN_TYPE_MILD, IWL_SCAN_TYPE_FRAGMENTED, IWL_SCAN_TYPE_FAST_BALANCE, }; enum iwl_mvm_sched_scan_pass_all_states { SCHED_SCAN_PASS_ALL_DISABLED, SCHED_SCAN_PASS_ALL_ENABLED, SCHED_SCAN_PASS_ALL_FOUND, }; /** * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? * @tx_backoff: The current thremal throttling tx backoff in uSec. 
* @min_backoff: The minimal tx backoff due to power restrictions * @params: Parameters to configure the thermal throttling algorithm. * @throttle: Is thermal throttling is active? */ struct iwl_mvm_tt_mgmt { struct delayed_work ct_kill_exit; bool dynamic_smps; u32 tx_backoff; u32 min_backoff; struct iwl_tt_params params; bool throttle; }; #ifdef CONFIG_THERMAL /** *struct iwl_mvm_thermal_device - thermal zone related data * @temp_trips: temperature thresholds for report * @fw_trips_index: keep indexes to original array - temp_trips * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { - s16 temp_trips[IWL_MAX_DTS_TRIPS]; + struct thermal_trip trips[IWL_MAX_DTS_TRIPS]; u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; /* * struct iwl_mvm_cooling_device * @cur_state: current state * @cdev: struct thermal cooling device */ struct iwl_mvm_cooling_device { u32 cur_state; struct thermal_cooling_device *cdev; }; #endif #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { u32 legacy_frames; u32 ht_frames; u32 vht_frames; u32 bw_20_frames; u32 bw_40_frames; u32 bw_80_frames; u32 bw_160_frames; u32 sgi_frames; u32 ngi_frames; u32 siso_frames; u32 mimo2_frames; u32 agg_frames; u32 ampdu_count; u32 success_frames; u32 fail_frames; u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; int last_frame_idx; }; #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_IDLE = 0, IWL_MVM_TDLS_SW_REQ_SENT, IWL_MVM_TDLS_SW_RESP_RCVD, IWL_MVM_TDLS_SW_REQ_RCVD, IWL_MVM_TDLS_SW_ACTIVE, }; enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_LOW, IWL_MVM_TRAFFIC_MEDIUM, IWL_MVM_TRAFFIC_HIGH, }; DECLARE_EWMA(rate, 16, 16) struct iwl_mvm_tcm_mac { struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; } tx; struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; u32 last_ampdu_ref; } rx; struct { /* track AP's transfer in client mode */ u64 rx_bytes; struct ewma_rate rate; bool detected; } uapsd_nonagg_detect; bool opened_rx_ba_sessions; }; struct iwl_mvm_tcm { struct delayed_work work; spinlock_t lock; /* used when time elapsed */ unsigned long ts; /* timestamp when period ends */ unsigned long ll_ts; unsigned long uapsd_nonagg_ts; bool paused; struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; struct { u32 elapsed; /* milliseconds for this TCM period */ u32 airtime[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; } result; }; /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @reorder_timer: timer for frames are in the reorder buffer. 
For AMSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context * @consec_oldsn_drops: consecutive drops due to old SN * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track * when to apply old SN consecutive drop workaround * @consec_oldsn_prev_drop: track whether or not an MPDU * that was single/part of the previous A-MPDU was * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; struct timer_list reorder_timer; bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; unsigned int consec_oldsn_drops; u32 consec_oldsn_ampdu_gp2; unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored * @reorder_time: time the packet was stored in the reorder buffer */ struct _iwl_mvm_reorder_buf_entry { struct sk_buff_head frames; unsigned long reorder_time; }; /* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { struct _iwl_mvm_reorder_buf_entry e; } #if defined(__FreeBSD__) __aligned(roundup2(sizeof(struct _iwl_mvm_reorder_buf_entry), 32)) #elif !defined(__CHECKER__) /* sparse doesn't like this construct: "bad integer constant expression" */ /* clang on FreeBSD: error: 'aligned' attribute requires integer constant */ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) #endif ; /** * struct iwl_mvm_baid_data - BA session data - * @sta_id: station id + * @sta_mask: current station mask for the BAID * @tid: tid of the session * @baid baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue * @reorder_buf_data: data */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; - u8 sta_id; + u32 sta_mask; u8 tid; u8 baid; u16 timeout; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; struct iwl_mvm_reorder_buf_entry entries[]; }; static inline struct iwl_mvm_baid_data * iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf) { return (void *)((u8 *)buf - offsetof(struct iwl_mvm_baid_data, reorder_buf) - sizeof(*buf) * buf->queue); } /* * enum iwl_mvm_queue_status - queue status * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved * Basically, this means that this queue can be used for any purpose * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use * This is the state of a queue that has been dedicated for some RATID * (agg'd or not), but that hasn't yet gone through the actual enablement * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. * Note that in this state there is no requirement to already know what TID * should be used with this queue, it is just marked as a queue that will * be used, and shouldn't be allocated to anyone else. 
* @IWL_MVM_QUEUE_READY: queue is ready to be used * This is the state of a queue that has been fully configured (including * SCD pointers, etc), has a specific RA/TID assigned to it, and can be * used to send traffic. * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared * This is a state in which a single queue serves more than one TID, all of * which are not aggregated. Note that the queue is only associated to one * RA. */ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_INVALID_QUEUE 0xFFFF #define IWL_MVM_NUM_CIPHERS 10 struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; - bool stopped; +#define IWL_MVM_TXQ_STATE_STOP_FULL 0 +#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 +#define IWL_MVM_TXQ_STATE_READY 2 + unsigned long state; }; static inline struct iwl_mvm_txq * iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) { return (void *)txq->drv_priv; } static inline struct iwl_mvm_txq * iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) { if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; return (void *)sta->txq[tid]->drv_priv; } /** * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid * * @sta_id: sta id * @txq_tid: txq tid */ struct iwl_mvm_tvqm_txq_info { u8 sta_id; u8 txq_tid; }; struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u8 txq_tid; /* The TID "owner" of this queue*/ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ /* Timestamp for inactivation per TID of this queue */ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; enum iwl_mvm_queue_status status; }; +struct ptp_data { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + + struct delayed_work dwork; + + /* The last GP2 reading from the hw */ + u32 last_gp2; + + /* number of wraparounds since scale_update_adj_time_ns */ + u32 wrap_counter; + + /* GP2 time when the scale was last updated */ + u32 scale_update_gp2; + + /* Adjusted time when the scale was last updated in nanoseconds */ + u64 scale_update_adj_time_ns; + + /* clock frequency offset, scaled to 65536000000 */ + u64 scaled_freq; + + /* Delta between hardware clock and ptp clock in nanoseconds */ + s64 delta; +}; + +struct iwl_time_sync_data { + struct sk_buff_head frame_list; + u8 peer_addr[ETH_ALEN]; + bool active; +}; + +struct iwl_mei_scan_filter { + bool is_mei_limited_scan; + struct sk_buff_head scan_res; + struct work_struct scan_work; +}; + struct iwl_mvm { /* for logger access */ struct device *dev; struct iwl_trans *trans; const struct iwl_fw *fw; const struct iwl_cfg *cfg; struct iwl_phy_db *phy_db; struct ieee80211_hw *hw; /* for protecting access to iwl_mvm */ struct mutex mutex; struct list_head async_handlers_list; spinlock_t async_handlers_lock; struct work_struct async_handlers_wk; struct work_struct roc_done_wk; unsigned long init_status; unsigned long status; u32 queue_sync_cookie; unsigned long queue_sync_state; /* * for beacon filtering - * currently only one interface can be supported */ struct iwl_mvm_vif *bf_allowed_vif; bool hw_registered; bool rfkill_safe_init_done; u8 cca_40mhz_workaround; u32 ampdu_ref; bool ampdu_toggle; struct iwl_notif_wait_data notif_wait; union { struct mvm_statistics_rx_v3 rx_stats_v3; struct mvm_statistics_rx rx_stats; }; struct { u64 
rx_time; u64 tx_time; u64 on_time_rf; u64 on_time_scan; } radio_stats, accu_radio_stats; struct list_head add_stream_txqs; union { struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ + spinlock_t add_stream_lock; const char *nvm_file_name; struct iwl_nvm_data *nvm_data; struct iwl_mei_nvm *mei_nvm_data; struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; bool mei_rfkill_blocked; bool mei_registered; struct work_struct sap_connected_wk; /* * NVM built based on the SAP data but that we can't free even after * we get ownership because it contains the cfg80211's channel. */ struct iwl_nvm_data *temp_nvm_data; /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; + struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX]; + unsigned long fw_link_ids_map; u8 rx_ba_sessions; /* configured by mac80211 */ u32 rts_threshold; /* Scan status, cmd (pre-allocated) and auxiliary station */ unsigned int scan_status; + size_t scan_cmd_size; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type scan_type; enum iwl_mvm_scan_type hb_scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; /* the vif that requested the current scan */ struct iwl_mvm_vif *scan_vif; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; bool last_ebs_successful; u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; /* * Leave this pointer outside the ifdef below so that it can be * assigned without ifdef in the source code. */ struct dentry *debugfs_dir; #ifdef CONFIG_IWLWIFI_DEBUGFS u32 dbgfs_sram_offset, dbgfs_sram_len; u32 dbgfs_prph_reg_addr; bool disable_power_off; bool disable_power_off_d3; bool beacon_inject_active; bool scan_iter_notif_enabled; struct debugfs_blob_wrapper nvm_hw_blob; struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; u16 dbgfs_rx_phyinfo; #endif struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; /* * A bitmap indicating the index of the key in use. The firmware * can hold 16 keys at most. Reflect this fact. 
*/ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; - u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; + struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1]; + /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; u8 *error_recovery_buf; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; #endif struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; /* sched scan settings for net detect */ struct ieee80211_scan_ies nd_ies; struct cfg80211_match_set *nd_match_sets; int n_nd_match_sets; struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; u32 d3_test_pme_ptr; struct ieee80211_vif *keep_vif; u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */ #endif #endif wait_queue_head_t rx_sync_waitq; /* BT-Coex */ struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; /* Aux ROC */ struct list_head aux_roc_te_list; /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; #ifdef CONFIG_THERMAL struct iwl_mvm_thermal_device tz_device; struct iwl_mvm_cooling_device cooling_dev; #endif s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. This option makes the * driver think this is the actual NIC temperature, and ignore the * real temperature that is received from the fw */ bool temperature_test; /* Debug test temperature is enabled */ bool fw_static_smps_request; unsigned long bt_coex_last_tcm_ts; struct iwl_mvm_tcm tcm; u8 uapsd_noagg_bssid_write_idx; struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2); struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE u32 noa_duration; struct ieee80211_vif *noa_vif; #endif /* Tx queues */ u16 aux_queue; u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; /* This vif used by CSME to send / receive traffic */ struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; /* indicates that we transmitted the last beacon */ bool ibss_manager; bool lar_regdom_set; enum iwl_mcc_source mcc_src; /* TDLS channel switch data */ struct { struct delayed_work dwork; enum iwl_mvm_tdls_cs_state state; /* * Current cs sta - might be different from periodic cs peer * station. Value is meaningless when the cs-state is idle. 
*/ u8 cur_sta_id; /* TDLS periodic channel-switch peer */ struct { u8 sta_id; u8 op_class; bool initiator; /* are we the link initiator */ struct cfg80211_chan_def chandef; struct sk_buff *skb; /* ch sw template */ u32 ch_sw_tm_ie; /* timestamp of last ch-sw request sent (GP2 time) */ u32 sent_timestamp; } peer; } tdls_cs; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct cfg80211_ftm_responder_stats ftm_resp_stats; struct { struct cfg80211_pmsr_request *req; struct wireless_dev *req_wdev; struct list_head loc_list; int responses[IWL_MVM_TOF_MAX_APS]; struct { struct list_head resp; } smooth; struct list_head pasn_list; } ftm_initiator; struct list_head resp_pasn_list; + struct ptp_data ptp_data; + struct { - u8 d0i3_resp; u8 range_resp; } cmd_ver; struct ieee80211_vif *nan_vif; struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; /* * Drop beacons from other APs in AP mode when there are no connected * clients. */ bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; + /* + * primary channel position relative to he whole bandwidth, + * in steps of 80 MHz + */ + u8 monitor_p80; /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; +#ifdef CONFIG_ACPI + struct iwl_phy_specific_cfg phy_filters; +#endif + unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; + bool mld_api_is_used; + + bool pldr_sync; + + struct iwl_time_sync_data time_sync; + + struct iwl_mei_scan_filter mei_scan_filter; }; /* Extract MVM priv from op_mode and _hw */ #define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log * if this is set, when intentionally triggered * @IWL_MVM_STATUS_STARTING: starting mac, * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, IWL_MVM_STATUS_ROC_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, IWL_MVM_STATUS_STARTING, }; struct iwl_mvm_csme_conn_info { struct rcu_head rcu_head; struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, 
&mvm->status); } static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); } /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. */ static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct ieee80211_vif * iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) { if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) return NULL; if (rcu) return rcu_dereference(mvm->vif_id_to_mac[vif_id]); return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], lockdep_is_held(&mvm->mutex)); } +static inline struct ieee80211_bss_conf * +iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) +{ + if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + return NULL; + + if (rcu) + return rcu_dereference(mvm->link_id_to_link_conf[link_id]); + + return rcu_dereference_protected(mvm->link_id_to_link_conf[link_id], + lockdep_is_held(&mvm->mutex)); +} + static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL); } static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2); } static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP); } static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) { /* OCE should never be enabled for LMAC scan FWs */ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); } static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); } static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); } static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); } static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM */ if (mvm->cfg->nvm_type == IWL_NVM_EXT) return nvm_lar && tlv_lar; else return tlv_lar; } static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { return 
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && - !IWL_MVM_HW_CSUM_DISABLE; + !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && IWL_MVM_BT_COEX_MPLUT; } static inline bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } +static inline bool iwl_mvm_has_mld_api(const struct iwl_fw *fw) +{ + return fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT); +} + +static inline bool iwl_mvm_has_new_station_api(const struct iwl_fw *fw) +{ + return iwl_mvm_has_mld_api(fw) || + iwl_fw_lookup_cmd_ver(fw, ADD_STA, 0) >= 12; +} + static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ - return mvm->trans->trans_cfg->use_tfh; + return mvm->trans->trans_cfg->gen2; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: * The issue of how to determine CDB APIs and usage is still not fully * defined. * There is a compilation for CDB and non-CDB FW, but there may * be also runtime check. * For now there is a TLV for checking compilation mode, but a * runtime check will also have to be here - once defined. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) { /* * TODO: should this be the same as iwl_mvm_is_cdb_supported()? * but then there's a little bit of code in scan that won't make * any sense... 
*/ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG); } static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BAND_IN_RX_DATA); } static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_RX_STATS); } static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); } static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD); } static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) return &((struct iwl_mvm_tx_resp *)tx_resp)->status; else return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); } static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } +static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans) +{ + if ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) && + !CSR_HW_RFID_IS_CDB(trans->hw_rf_id)) + /* Step A doesn't support eSR */ + return CSR_HW_RFID_STEP(trans->hw_rf_id); + + return false; +} + +static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_trans *trans = mvm->fwrt.trans; + + if (vif->type == NL80211_IFTYPE_AP) + return mvm->fw->ucode_capa.num_beacons; + + if (iwl_mvm_is_esr_supported(trans) || + (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && + CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) + return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; + + return 1; +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; } struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. 
*/ }; void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** * MVM Methods ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm); /* Utils */ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); +bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif); static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { iwl_fwrt_dump_error_logs(&mvm->fwrt); } u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); -u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids); +/* Utils to extract sta related data */ +__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta); +u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta); +u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + u32 *_agg_size); +int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm, + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext); + void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd) { struct ieee80211_key_conf *keyconf = info->control.hw_key; tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); } static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { 
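	/* flush_work() blocks until any queued run of async_handlers_wk has finished, so every firmware notification already deferred to the async handlers is processed before the caller continues. */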
flush_work(&mvm->async_handlers_wk); } /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : mvm->fw->valid_tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : mvm->fw->valid_rx_ant; } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) { *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant); } static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN); u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; return mvm->fw->phy_config & phy_config; } int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ +struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm); int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt 
*ctxt); void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); +int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, u8 chains_dynamic); /* MAC (virtual interface) programming */ + +void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *cck_rates, __le32 *ofdm_rates); +void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *protection_flags, u32 ht_flag, + u32 tgg_flag); +void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct iwl_ac_qos *ac, __le32 *qos_flags); +bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm, + const struct iwl_mvm_vif_link_info *link_info, + struct iwl_he_backoff_conf *trig_based_txf); +void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le64 *dtim_tsf, __le32 *dtim_time, + __le32 *assoc_beacon_arrive_time); +__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + __le32 *filter_flags, + int accept_probe_req_flag, + int accept_beacon_flag); +int iwl_mvm_get_mac_type(struct ieee80211_vif *vif); +__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off); +int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len); -u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, +u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif); +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, struct ieee80211_vif *vif); u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx); void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct 
iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band); + +/* Links */ +int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u32 changes, bool active); +int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); + +/* AP and IBSS */ +bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int *ret); +void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + +/* BSS Info */ +/** + * struct iwl_mvm_bss_info_changed_ops - callbacks for the bss_info_changed() + * + * Since the only difference between both MLD and + * non-MLD versions of bss_info_changed() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_bss_info_changed_common(). + * + * @bss_info_changed_sta: pointer to the function that handles changes + * in bss_info in sta mode + * @bss_info_changed_ap_ibss: pointer to the function that handles changes + * in bss_info in ap and ibss modes + */ +struct iwl_mvm_bss_info_changed_ops { + void (*bss_info_changed_sta)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u64 changes); + void (*bss_info_changed_ap_ibss)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u64 changes); +}; + +void +iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + const struct iwl_mvm_bss_info_changed_ops *callbacks, + u64 changes); +void +iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes); +void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes); + +/* ROC */ +/** + * struct iwl_mvm_roc_ops - callbacks for the remain_on_channel() + * + * Since the only difference between both MLD and + * non-MLD versions of remain_on_channel() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_roc_common(). 
+ * + * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta + * for Hot Spot 2.0 + * @switch_phy_ctxt: pointer to the function that switches a vif from one + * phy_ctx to another + */ +struct iwl_mvm_roc_ops { + int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id); + int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt); +}; + +int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type, + const struct iwl_mvm_roc_ops *ops); +int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +/*Session Protection */ +void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 duration_override); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) { return iwl_mvm_has_quota_low_latency(mvm) ? sizeof(struct iwl_time_quota_cmd) : sizeof(struct iwl_time_quota_cmd_v1); } static inline struct iwl_time_quota_data *iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd, int i) { struct iwl_time_quota_data_v1 *quotas; if (iwl_mvm_has_quota_low_latency(mvm)) return &cmd->quotas[i]; quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas; return (struct iwl_time_quota_data *)&quotas[i]; } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, struct ieee80211_vif *disabled_vif); /* Scanning */ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); -int iwl_mvm_scan_size(struct iwl_mvm *mvm); +size_t iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void iwl_mvm_scan_timeout_wk(struct work_struct *work); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { } static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status
*rx_status); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { return 0; } static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) { } #endif /* D3 (WoWLAN, NetDetect) */ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int iwl_mvm_resume(struct ieee80211_hw *hw); void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data); void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev); void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; #ifdef CONFIG_PM void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band); u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd); #else static inline void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) {} #endif int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request); + enum ieee80211_smps_mode smps_request, + unsigned int link_id); +void +iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm, + 
struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request); bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); -void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif); +void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id); /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { /* * should this consider associated/active/... state? * * Normally low-latency should only be active on interfaces * that are active, but at least with debugfs it can also be * enabled on interfaces that aren't active. However, when * interface aren't active then they aren't added into the * binding, so this has no real impact. For now, just return * the current desired low-latency state. */ return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { u8 new_state; if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; /* * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are * allowed to actual mode. */ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) return; if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) /* * We enter force state */ new_state = !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE); else /* * Check if any other one set low latency */ new_state = !!(mvmvif->low_latency & ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | LOW_LATENCY_DEBUGFS_FORCE)); mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed. 
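* (The command queue carries host commands rather than frames, so it is excluded from TX flushes.)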
*/ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } void iwl_mvm_stop_device(struct iwl_mvm *mvm); /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); #if IS_ENABLED(CONFIG_IWLMEI) /* vendor commands */ void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); #else static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} #endif /* Location Aware Regulatory */ -struct iwl_mcc_update_resp * +struct iwl_mcc_update_resp_v8 * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* FTM responder */ -int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #if defined(__linux__) int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr); int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); #endif void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* FTM initiator */ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm); void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm); int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr); /* TDLS */ /* * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. 
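* (TIDs 4 and 5 map to the Video access category in the 802.11 UP-to-AC mapping, hence the (VI) note above.)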
* This TID is marked as used vs the AP and all connected TDLS peers. */ #define IWL_MVM_TDLS_FW_TID 4 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params); void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); #define MVM_TCM_PERIOD_MSEC 500 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) #define MVM_LL_PERIOD (10 * HZ) void iwl_mvm_tcm_work(struct work_struct *work); void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid); +void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter); +void iwl_mvm_ptp_init(struct iwl_mvm *mvm); +void iwl_mvm_ptp_remove(struct iwl_mvm *mvm); +u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS -void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir); +void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir); #endif +/* new MLD related APIs */ +int iwl_mvm_sec_key_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); +int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); +void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct 
iwl_mvm_vif_link_info *link, + unsigned int link_id); +int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask); +int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags, + struct ieee80211_key_conf *keyconf); +u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); + +bool iwl_rfi_supported(struct iwl_mvm *mvm); int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: return PHY_BAND_24; case NL80211_BAND_5GHZ: return PHY_BAND_5; case NL80211_BAND_6GHZ: return PHY_BAND_6; default: WARN_ONCE(1, "Unsupported band (%u)\n", band); return PHY_BAND_5; } } +/* Channel Switch */ +void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); +int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/* Channel Context */ +/** + * struct iwl_mvm_switch_vif_chanctx_ops - callbacks for switch_vif_chanctx() + * + * Since the only difference between both MLD and + * non-MLD versions of switch_vif_chanctx() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_switch_vif_chanctx_common(). + * + * @__assign_vif_chanctx: pointer to the function that assigns a chanctx to + * a given vif + * @__unassign_vif_chanctx: pointer to the function that unassigns a chanctx to + * a given vif + */ +struct iwl_mvm_switch_vif_chanctx_ops { + int (*__assign_vif_chanctx)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx); + void (*__unassign_vif_chanctx)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx); +}; + +int +iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode, + const struct iwl_mvm_switch_vif_chanctx_ops *ops); + /* Channel info utils */ static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); } static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci) { return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? sizeof(struct iwl_fw_channel_info) : sizeof(struct iwl_fw_channel_info_v1)); } static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) { return iwl_mvm_has_ultra_hb_channel(mvm) ? 
0 : sizeof(struct iwl_fw_channel_info) - sizeof(struct iwl_fw_channel_info_v1); } static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, u32 chan, u8 band, u8 width, u8 ctrl_pos) { if (iwl_mvm_has_ultra_hb_channel(mvm)) { ci->channel = cpu_to_le32(chan); ci->band = band; ci->width = width; ci->ctrl_pos = ctrl_pos; } else { struct iwl_fw_channel_info_v1 *ci_v1 = (struct iwl_fw_channel_info_v1 *)ci; ci_v1->channel = chan; ci_v1->band = band; ci_v1->width = width; ci_v1->ctrl_pos = ctrl_pos; } } static inline void iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, struct cfg80211_chan_def *chandef) { enum nl80211_band band = chandef->chan->band; iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, iwl_mvm_phy_band_from_nl80211(band), iwl_mvm_get_channel_width(chandef), iwl_mvm_get_ctrl_pos(chandef)); } static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw) { u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD, IWL_FW_CMD_VER_UNKNOWN); return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ? IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2; } static inline enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_CCMP: return IWL_LOCATION_CIPHER_CCMP_128; case WLAN_CIPHER_SUITE_GCMP: return IWL_LOCATION_CIPHER_GCMP_128; case WLAN_CIPHER_SUITE_GCMP_256: return IWL_LOCATION_CIPHER_GCMP_256; default: return IWL_LOCATION_CIPHER_INVALID; } } struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) { if (mvm->mei_registered) return iwl_mei_get_ownership(); return 0; } static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, struct sk_buff *skb, unsigned int ivlen) { if (mvm->mei_registered) iwl_mei_tx_copy_to_csme(skb, ivlen); } static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) { if (mvm->mei_registered) iwl_mei_host_disassociated(); } -static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) +static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up) { if (mvm->mei_registered) - iwl_mei_device_down(); + iwl_mei_device_state(up); } static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) { bool sw_rfkill = mvm->hw_registered ? 
rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; if (mvm->mei_registered) iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), sw_rfkill); } +static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (mvm->mei_scan_filter.is_mei_limited_scan && + (ieee80211_is_probe_resp(mgmt->frame_control) || + ieee80211_is_beacon(mgmt->frame_control))) { + skb_queue_tail(&mvm->mei_scan_filter.scan_res, skb); + schedule_work(&mvm->mei_scan_filter.scan_work); + return true; + } + + return false; +} + void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool forbidden); +bool iwl_mvm_is_vendor_in_approved_list(void); + +/* Callbacks for ieee80211_ops */ +void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb); +void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); + +int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); +int iwl_mvm_mac_start(struct ieee80211_hw *hw); +void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); +void iwl_mvm_mac_stop(struct ieee80211_hw *hw); +static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} +u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); + +void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req); +void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta); +void +iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); +void +iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); +int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value); +void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed); +void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); +void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); +void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); +void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); +int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct 
ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, u32 changed); +int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); +int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set); +void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event); +void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw); +int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len); +int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); +void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); +int +iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_ftm_responder_stats *stats); +int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); +void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); + +bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1, + struct iwl_mvm_vif *vif2); +bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif); +int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power); +int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_set_hw_timestamp *hwts); +int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #endif /* __IWL_MVM_H__ */ diff --git a/sys/contrib/dev/iwlwifi/mvm/nvm.c b/sys/contrib/dev/iwlwifi/mvm/nvm.c index 43f302450d3f..33bb9adf06fa 100644 --- a/sys/contrib/dev/iwlwifi/mvm/nvm.c +++ b/sys/contrib/dev/iwlwifi/mvm/nvm.c @@ -1,598 +1,643 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #if defined(__linux__) #include #endif #include "iwl-trans.h" #include "iwl-csr.h" #include "mvm.h" #include "iwl-eeprom-parse.h" #include "iwl-eeprom-read.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "fw/acpi.h" /* Default NVM size to read */ #define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024) #define NVM_WRITE_OPCODE 1 #define NVM_READ_OPCODE 0 /* load nvm chunk response */ enum { READ_NVM_CHUNK_SUCCEED = 0, READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 }; /* * prepare the NVM host command w/ the pointers to the nvm buffer * and 
send it to fw */ static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, const u8 *data) { struct iwl_nvm_access_cmd nvm_access_cmd = { .offset = cpu_to_le16(offset), .length = cpu_to_le16(length), .type = cpu_to_le16(section), .op_code = NVM_WRITE_OPCODE, }; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .len = { sizeof(struct iwl_nvm_access_cmd), length }, .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, data }, /* data may come from vmalloc, so use _DUP */ .dataflags = { 0, IWL_HCMD_DFL_DUP }, }; struct iwl_rx_packet *pkt; struct iwl_nvm_access_resp *nvm_resp; int ret; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; pkt = cmd.resp_pkt; /* Extract & check NVM write response */ nvm_resp = (void *)pkt->data; if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) { IWL_ERR(mvm, "NVM access write command failed for section %u (status = 0x%x)\n", section, le16_to_cpu(nvm_resp->status)); ret = -EIO; } iwl_free_resp(&cmd); return ret; } static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, u16 offset, u16 length, u8 *data) { struct iwl_nvm_access_cmd nvm_access_cmd = { .offset = cpu_to_le16(offset), .length = cpu_to_le16(length), .type = cpu_to_le16(section), .op_code = NVM_READ_OPCODE, }; struct iwl_nvm_access_resp *nvm_resp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = NVM_ACCESS_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &nvm_access_cmd, }, }; int ret, bytes_read, offset_read; u8 *resp_data; cmd.len[0] = sizeof(struct iwl_nvm_access_cmd); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; pkt = cmd.resp_pkt; /* Extract NVM response */ nvm_resp = (void *)pkt->data; ret = le16_to_cpu(nvm_resp->status); bytes_read = le16_to_cpu(nvm_resp->length); offset_read = le16_to_cpu(nvm_resp->offset); resp_data = nvm_resp->data; if (ret) { if ((offset != 0) && (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { /* * meaning of NOT_VALID_ADDRESS: * driver try to read chunk from address that is * multiple of 2K and got an error since addr is empty. * meaning of (offset != 0): driver already * read valid data from another chunk so this case * is not an error. */ IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", offset); ret = 0; } else { IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n", ret, mvm->trans->name); ret = -ENODATA; } goto exit; } if (offset_read != offset) { IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n", offset_read); ret = -EINVAL; goto exit; } /* Write data to NVM */ memcpy(data + offset, resp_data, bytes_read); ret = bytes_read; exit: iwl_free_resp(&cmd); return ret; } static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, const u8 *data, u16 length) { int offset = 0; /* copy data in chunks of 2k (and remainder if any) */ while (offset < length) { int chunk_size, ret; chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE, length - offset); ret = iwl_nvm_write_chunk(mvm, section, offset, chunk_size, data + offset); if (ret < 0) return ret; offset += chunk_size; } return 0; } /* * Reads an NVM section completely. * NICs prior to 7000 family doesn't have a real NVM, but just read * section 0 which is the EEPROM. Because the EEPROM reading is unlimited * by uCode, we need to manually check in this case that we don't * overflow and try to read more than the EEPROM size. 
* For 7000 family NICs, we supply the maximal size we can read, and * the uCode fills the response with as much data as we can, * without overflowing, so no check is needed. */ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, u8 *data, u32 size_read) { u16 length, offset = 0; int ret; /* Set nvm section read length */ length = IWL_NVM_DEFAULT_CHUNK_SIZE; ret = length; /* Read the NVM until exhausted (reading less than requested) */ while (ret == length) { /* Check no memory assumptions fail and cause an overflow */ if ((size_read + offset + length) > mvm->trans->trans_cfg->base_params->eeprom_size) { IWL_ERR(mvm, "EEPROM size is too small for NVM\n"); return -ENOBUFS; } ret = iwl_nvm_read_chunk(mvm, section, offset, length, data); if (ret < 0) { IWL_DEBUG_EEPROM(mvm->trans->dev, "Cannot read NVM from section %d offset %d, length %d\n", section, offset, length); return ret; } offset += ret; } iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset); IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); return offset; } static struct iwl_nvm_data * iwl_parse_nvm_sections(struct iwl_mvm *mvm) { struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; int regulatory_type; /* Checking for required sections */ if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); return NULL; } } else { if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP) regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP; else regulatory_type = NVM_SECTION_TYPE_REGULATORY; /* SW and REGULATORY sections are mandatory */ if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[regulatory_type].data) { IWL_ERR(mvm, "Can't parse empty family 8000 OTP/NVM sections\n"); return NULL; } /* MAC_OVERRIDE or at least HW section must exist */ if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data && !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) { IWL_ERR(mvm, "Can't parse mac_address, empty sections\n"); return NULL; } /* PHY_SKU section is mandatory in B0 */ if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT && !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { IWL_ERR(mvm, "Can't parse phy_sku in B0, empty sections\n"); return NULL; } } hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; mac_override = (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ? 
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) { int i, ret = 0; struct iwl_nvm_section *sections = mvm->nvm_sections; IWL_DEBUG_EEPROM(mvm->trans->dev, "'Write to NVM\n"); for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) { if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length) continue; ret = iwl_nvm_write_section(mvm, i, sections[i].data, sections[i].length); if (ret < 0) { IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret); break; } } return ret; } int iwl_nvm_init(struct iwl_mvm *mvm) { int ret, section; u32 size_read = 0; u8 *nvm_buffer, *temp; const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step; if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS)) return -EINVAL; /* load NVM values from nic */ /* Read From FW NVM */ IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size, GFP_KERNEL); if (!nvm_buffer) return -ENOMEM; for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { /* we override the constness for initial read */ ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read); if (ret == -ENODATA) { ret = 0; continue; } if (ret < 0) break; size_read += ret; temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); if (!temp) { ret = -ENOMEM; break; } iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret); mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; #ifdef CONFIG_IWLWIFI_DEBUGFS switch (section) { case NVM_SECTION_TYPE_SW: mvm->nvm_sw_blob.data = temp; mvm->nvm_sw_blob.size = ret; break; case NVM_SECTION_TYPE_CALIBRATION: mvm->nvm_calib_blob.data = temp; mvm->nvm_calib_blob.size = ret; break; case NVM_SECTION_TYPE_PRODUCTION: mvm->nvm_prod_blob.data = temp; mvm->nvm_prod_blob.size = ret; break; case NVM_SECTION_TYPE_PHY_SKU: mvm->nvm_phy_sku_blob.data = temp; mvm->nvm_phy_sku_blob.size = ret; break; case NVM_SECTION_TYPE_REGULATORY_SDP: case NVM_SECTION_TYPE_REGULATORY: mvm->nvm_reg_blob.data = temp; mvm->nvm_reg_blob.size = ret; break; default: if (section == mvm->cfg->nvm_hw_section_num) { mvm->nvm_hw_blob.data = temp; mvm->nvm_hw_blob.size = ret; break; } } #endif } if (!size_read) IWL_ERR(mvm, "OTP is blank\n"); kfree(nvm_buffer); /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { /* read External NVM file from the mod param */ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) { mvm->nvm_file_name = nvm_file_C; if ((ret == -EFAULT || ret == -ENOENT) && mvm->nvm_file_name) { /* in case nvm file was failed try again */ ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, mvm->nvm_sections); if (ret) return ret; } else { return ret; } } } /* parse the relevant nvm sections */ mvm->nvm_data = iwl_parse_nvm_sections(mvm); if (!mvm->nvm_data) return -ENODATA; IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version); return ret < 0 ? 
ret : 0; } -struct iwl_mcc_update_resp * +struct iwl_mcc_update_resp_v8 * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id) { struct iwl_mcc_update_cmd mcc_update_cmd = { .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), .source_id = (u8)src_id, }; - struct iwl_mcc_update_resp *resp_cp; + struct iwl_mcc_update_resp_v8 *resp_cp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &mcc_update_cmd }, }; - int ret; + int ret, resp_ver; u32 status; int resp_len, n_channels; u16 mcc; if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return ERR_PTR(-EOPNOTSUPP); cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", alpha2[0], alpha2[1], src_id); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ERR_PTR(ret); pkt = cmd.resp_pkt; + resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + MCC_UPDATE_CMD, 0); + /* Extract MCC response */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { - struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; - - n_channels = __le32_to_cpu(mcc_resp->n_channels); - resp_len = sizeof(struct iwl_mcc_update_resp) + - n_channels * sizeof(__le32); - resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); + if (resp_ver >= 8) { + struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (void *)pkt->data; + + n_channels = __le32_to_cpu(mcc_resp_v8->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v8, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); + resp_cp = kzalloc(resp_len, GFP_KERNEL); + if (!resp_cp) { + resp_cp = ERR_PTR(-ENOMEM); + goto exit; + } + resp_cp->status = mcc_resp_v8->status; + resp_cp->mcc = mcc_resp_v8->mcc; + resp_cp->cap = mcc_resp_v8->cap; + resp_cp->source_id = mcc_resp_v8->source_id; + resp_cp->time = mcc_resp_v8->time; + resp_cp->geo_info = mcc_resp_v8->geo_info; + resp_cp->n_channels = mcc_resp_v8->n_channels; + memcpy(resp_cp->channels, mcc_resp_v8->channels, + n_channels * sizeof(__le32)); + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { + struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (void *)pkt->data; + + n_channels = __le32_to_cpu(mcc_resp_v4->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v4, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); + resp_cp = kzalloc(resp_len, GFP_KERNEL); if (!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); goto exit; } + + resp_cp->status = mcc_resp_v4->status; + resp_cp->mcc = mcc_resp_v4->mcc; + resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap)); + resp_cp->source_id = mcc_resp_v4->source_id; + resp_cp->time = mcc_resp_v4->time; + resp_cp->geo_info = mcc_resp_v4->geo_info; + resp_cp->n_channels = mcc_resp_v4->n_channels; + memcpy(resp_cp->channels, mcc_resp_v4->channels, + n_channels * sizeof(__le32)); } else { struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; n_channels = __le32_to_cpu(mcc_resp_v3->n_channels); - resp_len = sizeof(struct iwl_mcc_update_resp) + - n_channels * sizeof(__le32); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v3, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); resp_cp = kzalloc(resp_len, GFP_KERNEL); if 
(!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); goto exit; } resp_cp->status = mcc_resp_v3->status; resp_cp->mcc = mcc_resp_v3->mcc; - resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap); + resp_cp->cap = cpu_to_le32(mcc_resp_v3->cap); resp_cp->source_id = mcc_resp_v3->source_id; resp_cp->time = mcc_resp_v3->time; resp_cp->geo_info = mcc_resp_v3->geo_info; resp_cp->n_channels = mcc_resp_v3->n_channels; memcpy(resp_cp->channels, mcc_resp_v3->channels, n_channels * sizeof(__le32)); } status = le32_to_cpu(resp_cp->status); mcc = le16_to_cpu(resp_cp->mcc); /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ if (mcc == 0) { mcc = 0x3030; /* "00" - world */ resp_cp->mcc = cpu_to_le16(mcc); } IWL_DEBUG_LAR(mvm, "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", status, mcc, mcc >> 8, mcc & 0xff, n_channels); exit: iwl_free_resp(&cmd); return resp_cp; } int iwl_mvm_init_mcc(struct iwl_mvm *mvm) { bool tlv_lar; bool nvm_lar; int retval; struct ieee80211_regdomain *regd; char mcc[3]; if (mvm->cfg->nvm_type == IWL_NVM_EXT) { tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); nvm_lar = mvm->nvm_data->lar_enabled; if (tlv_lar != nvm_lar) IWL_INFO(mvm, "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n", tlv_lar ? "enabled" : "disabled", nvm_lar ? "enabled" : "disabled"); } if (!iwl_mvm_is_lar_supported(mvm)) return 0; /* * try to replay the last set MCC to FW. If it doesn't exist, * queue an update to cfg80211 to retrieve the default alpha2 from FW. */ retval = iwl_mvm_init_fw_regd(mvm); if (retval != -ENOENT) return retval; /* * Driver regulatory hint for initial update, this also informs the * firmware we support wifi location updates. * Disallow scans that might crash the FW while the LAR regdomain * is not set. 
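* (The scan start paths check lar_regdom_set and refuse to start a scan until a regulatory domain has been installed.)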
*/ mvm->lar_regdom_set = false; regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (IS_ERR_OR_NULL(regd)) return -EIO; if (iwl_mvm_is_wifi_mcc_supported(mvm) && !iwl_acpi_get_mcc(mvm->dev, mcc)) { kfree(regd); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, MCC_SOURCE_BIOS, NULL); if (IS_ERR_OR_NULL(regd)) return -EIO; } retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); kfree(regd); return retval; } void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mcc_chub_notif *notif = (void *)pkt->data; enum iwl_mcc_source src; char mcc[3]; struct ieee80211_regdomain *regd; int wgds_tbl_idx; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) { IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n"); return; } if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) return; mcc[0] = le16_to_cpu(notif->mcc) >> 8; mcc[1] = le16_to_cpu(notif->mcc) & 0xff; mcc[2] = '\0'; src = notif->source_id; IWL_DEBUG_LAR(mvm, "RX: received chub update mcc cmd (mcc '%s' src %d)\n", mcc, src); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); if (IS_ERR_OR_NULL(regd)) return; wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (wgds_tbl_idx < 1) IWL_DEBUG_INFO(mvm, "SAR WGDS is disabled or error received (%d)\n", wgds_tbl_idx); else IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n", wgds_tbl_idx); regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } diff --git a/sys/contrib/dev/iwlwifi/mvm/offloading.c b/sys/contrib/dev/iwlwifi/mvm/offloading.c index c7dabc6b3765..dfb16ca5b438 100644 --- a/sys/contrib/dev/iwlwifi/mvm/offloading.c +++ b/sys/contrib/dev/iwlwifi/mvm/offloading.c @@ -1,206 +1,210 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2021-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ #include #include #include #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd) { int i; /* * For QoS counters, we store the one to use next, so subtract 0x10 * since the uCode will add 0x10 *before* using the value while we * increment after using the value (i.e. store the next value to use). */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_ap_sta->tid_data[i].seq_number; seq -= 0x10; cmd->qos_seq[i] = cpu_to_le16(seq); } } int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags) { union { struct iwl_proto_offload_cmd_v1 v1; struct iwl_proto_offload_cmd_v2 v2; struct iwl_proto_offload_cmd_v3_small v3s; struct iwl_proto_offload_cmd_v4 v4; } cmd = {}; struct iwl_host_cmd hcmd = { .id = PROT_OFFLOAD_CONFIG_CMD, .flags = cmd_flags, .data[0] = &cmd, .dataflags[0] = IWL_HCMD_DFL_DUP, }; struct iwl_proto_offload_cmd_common *common; u32 enabled = 0, size; u32 capa_flags = mvm->fw->ucode_capa.flags; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0); #if IS_ENABLED(CONFIG_IPV6) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int i; /* * Skip tentative address when ns offload is enabled to avoid * violating RFC4862. * Keep tentative address when ns offload is disabled so the NS packets * will not be filtered out and will wake up the host. 
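 * skip_tentative therefore simply mirrors offload_ns: tentative addresses
 * are only programmed into the firmware when NS offload is disabled.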
*/ bool skip_tentative = offload_ns; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL || capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { struct iwl_ns_config *nsc; struct iwl_targ_addr *addrs; int n_nsc, n_addrs; int c; int num_skipped = 0; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { nsc = cmd.v3s.ns_config; n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S; addrs = cmd.v3s.targ_addrs; n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S; } else { nsc = cmd.v4.ns_config; n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L; addrs = cmd.v4.targ_addrs; n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; } /* * For each address we have (and that will fit) fill a target * address struct and combine for NS offload structs with the * solicited node addresses. */ for (i = 0, c = 0; i < mvmvif->num_target_ipv6_addrs && i < n_addrs && c < n_nsc; i++) { struct in6_addr solicited_addr; int j; if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) { num_skipped++; continue; } addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i], &solicited_addr); for (j = 0; j < c; j++) if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, &solicited_addr) == 0) break; if (j == c) c++; addrs[i].addr = mvmvif->target_ipv6_addrs[i]; addrs[i].config_num = cpu_to_le32(j); nsc[j].dest_ipv6_addr = solicited_addr; memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); } if (mvmvif->num_target_ipv6_addrs - num_skipped) enabled |= IWL_D3_PROTO_IPV6_VALID; if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped); else cmd.v4.num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { bool found = false; BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) { if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) continue; memcpy(cmd.v2.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v2.target_ipv6_addr[i])); found = true; } if (found) { enabled |= IWL_D3_PROTO_IPV6_VALID; memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN); } } else { bool found = false; BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) != sizeof(mvmvif->target_ipv6_addrs[0])); for (i = 0; i < min(mvmvif->num_target_ipv6_addrs, IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) { if (skip_tentative && test_bit(i, mvmvif->tentative_addrs)) continue; memcpy(cmd.v1.target_ipv6_addr[i], &mvmvif->target_ipv6_addrs[i], sizeof(cmd.v1.target_ipv6_addr[i])); found = true; } if (found) { enabled |= IWL_D3_PROTO_IPV6_VALID; memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN); } } if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID)) enabled |= IWL_D3_PROTO_OFFLOAD_NS; #endif if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) { common = &cmd.v3s.common; size = sizeof(cmd.v3s); } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) { common = &cmd.v4.common; size = sizeof(cmd.v4); if (ver < 4) { /* * This basically uses iwl_proto_offload_cmd_v3_large * which doesn't have the sta_id parameter before the * common part. 
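 * Dropping sta_id shrinks the command by that one field and points the
 * host command payload at 'common', which is the v3_large layout that
 * older firmware expects.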
*/ size -= sizeof(cmd.v4.sta_id); hcmd.data[0] = common; } } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) { common = &cmd.v2.common; size = sizeof(cmd.v2); } else { common = &cmd.v1.common; size = sizeof(cmd.v1); } - if (vif->bss_conf.arp_addr_cnt) { + if (vif->cfg.arp_addr_cnt) { enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID; - common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; + common->host_ipv4_addr = vif->cfg.arp_addr_list[0]; memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT)) + enabled |= IWL_D3_PROTO_OFFLOAD_BTM; + if (!disable_offloading) common->enabled = cpu_to_le32(enabled); hcmd.len[0] = size; return iwl_mvm_send_cmd(mvm, &hcmd); } diff --git a/sys/contrib/dev/iwlwifi/mvm/ops.c b/sys/contrib/dev/iwlwifi/mvm/ops.c index 0978504b11cd..e6de9995e09b 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ops.c +++ b/sys/contrib/dev/iwlwifi/mvm/ops.c @@ -1,1993 +1,2062 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX iwlwifi_mvm_ #endif #include -#if defined(__linux__) #include -#endif #include #include #include "fw/notif-wait.h" #include "iwl-trans.h" #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" #include "iwl-drv.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-phy-db.h" #include "iwl-eeprom-parse.h" #include "iwl-csr.h" #include "iwl-io.h" #include "iwl-prph.h" #include "rs.h" #include "fw/api/scan.h" #include "fw/api/rfi.h" #include "time-event.h" #include "fw-api.h" #include "fw/acpi.h" #include "fw/uefi.h" +#include "time-sync.h" #if defined(__linux__) #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_LICENSE("GPL"); #elif defined(__FreeBSD__) #define DRV_DESCRIPTION "The new Intel(R) wireless AGN/AC/AX based driver for FreeBSD" MODULE_LICENSE("BSD"); #endif MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_IMPORT_NS(IWLWIFI); static const struct iwl_op_mode_ops iwl_mvm_ops; static const struct iwl_op_mode_ops iwl_mvm_ops_mq; struct iwl_mvm_mod_params iwlmvm_mod_params = { #if defined(__FreeBSD__) .power_scheme = IWL_POWER_SCHEME_CAM, /* disable default PS */ #else .power_scheme = IWL_POWER_SCHEME_BPS, #endif /* rest of fields are 0 by default */ }; module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444); MODULE_PARM_DESC(init_dbg, "set to true to debug an ASSERT in INIT fw (default: false"); module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); /* * module init and exit functions */ static int __init iwl_mvm_init(void) { int ret; ret = iwl_mvm_rate_control_register(); if (ret) { pr_err("Unable to register rate control algorithm: %d\n", ret); return ret; } ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops); if (ret) pr_err("Unable to register MVM op_mode: %d\n", ret); return ret; } #if defined(__linux__) module_init(iwl_mvm_init); #elif defined(__FreeBSD__) module_init_order(iwl_mvm_init, SI_ORDER_SECOND); #endif static void __exit iwl_mvm_exit(void) { iwl_opmode_deregister("iwlmvm"); iwl_mvm_rate_control_unregister(); } module_exit(iwl_mvm_exit); static void iwl_mvm_nic_config(struct 
iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; u32 reg_val; u32 phy_config = iwl_mvm_get_phy_config(mvm); radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> FW_PHY_CFG_RADIO_TYPE_POS; radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >> FW_PHY_CFG_RADIO_STEP_POS; radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS; + IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, + radio_cfg_step, radio_cfg_dash); + + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return; + /* SKU control */ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); /* radio configuration */ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) & ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE); /* * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC * sampling, and shouldn't be set to any non-zero value. * The same is supposed to be true of the other HW, but unsetting * them (such as the 7260) causes automatic tests to fail on seemingly * unrelated errors. Need to further investigate this, but for now * we'll separate cases. */ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI; if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI | CSR_HW_IF_CONFIG_REG_D3_DEBUG, reg_val); - IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, - radio_cfg_step, radio_cfg_dash); - /* * W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ if (!mvm->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_vif *vif; if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) return; vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); if (!vif || vif->type != NL80211_IFTYPE_STATION) return; if (!vif->bss_conf.chandef.chan || vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ || vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; /* this shouldn't happen *again*, ignore it */ if (mvm->cca_40mhz_workaround) return; /* * We'll decrement this on disconnect - so set to 2 since we'll * still have to disconnect from the current AP first. */ mvm->cca_40mhz_workaround = 2; /* * This capability manipulation isn't really ideal, but it's the * easiest choice - otherwise we'd have to do some major changes * in mac80211 to support this, which isn't worth it. 
This does * mean that userspace may have outdated information, but that's * actually not an issue at all. */ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; WARN_ON(!sband->ht_cap.ht_supported); WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; WARN_ON(!he->has_he); WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)); he->he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } ieee80211_disconnect(vif, true); } -void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif) +void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; + if (!link_conf) + return; + if (mvm->fw_static_smps_request && - vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 && - vif->bss_conf.he_support) + link_conf->chandef.width == NL80211_CHAN_WIDTH_160 && + link_conf->he_support) mode = IEEE80211_SMPS_STATIC; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode, + link_conf->link_id); } static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, struct ieee80211_vif *vif) { - iwl_mvm_apply_fw_smps_request(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + rcu_read_lock(); + + for_each_vif_active_link(vif, link_conf, link_id) + iwl_mvm_update_link_smps(vif, link_conf); + + rcu_read_unlock(); } static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; /* * We could pass it to the iterator data, but also need to remember * it for new interfaces that are added while in this state. */ mvm->fw_static_smps_request = req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, iwl_mvm_intf_dual_chain_req, NULL); } /** * enum iwl_rx_handler_context context for Rx handler * @RX_HANDLER_SYNC : this means that it will be called in the Rx path * which can't acquire mvm->mutex. * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex * (and only in this case!), it should be set as ASYNC. In that case, * it will be called from a worker with mvm->mutex held. * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the * mutex itself, it will be called from a worker without mvm->mutex held. 
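 * For example, a handler registered with RX_HANDLER(..., RX_HANDLER_ASYNC_LOCKED, ...)
 * is queued on mvm->async_handlers_list and later invoked from
 * iwl_mvm_async_handlers_wk() with mvm->mutex held.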
*/ enum iwl_rx_handler_context { RX_HANDLER_SYNC, RX_HANDLER_ASYNC_LOCKED, RX_HANDLER_ASYNC_UNLOCKED, }; /** * struct iwl_rx_handlers handler for FW notification * @cmd_id: command id * @min_size: minimum size to expect for the notification * @context: see &iwl_rx_handler_context * @fn: the function is called when notification is received */ struct iwl_rx_handlers { u16 cmd_id, min_size; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; #define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \ { .cmd_id = _cmd_id, .fn = _fn, .context = _context, } #define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, } #define RX_HANDLER(_cmd_id, _fn, _context, _struct) \ { .cmd_id = _cmd_id, .fn = _fn, \ .context = _context, .min_size = sizeof(_struct), } #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \ .context = _context, .min_size = sizeof(_struct), } /* * Handlers for fw notifications * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME * This list should be in order of frequency for performance purposes. * * The handler can be one from three contexts, see &iwl_rx_handler_context */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC, struct iwl_mvm_tx_resp), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC, struct iwl_mvm_ba_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC, struct iwl_tlc_update_notif), RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif), RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC, struct iwl_ba_window_status_notif), RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, RX_HANDLER_SYNC, struct iwl_time_event_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC, struct iwl_mvm_session_prot_notif), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif), RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC, struct iwl_mvm_eosp_notification), RX_HANDLER(SCAN_ITERATION_COMPLETE, iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_lmac_scan_complete_notif), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, iwl_mvm_rx_lmac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete), RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC, struct iwl_error_resp), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC, struct iwl_uapsd_misbehaving_ap_notif), 
RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC, struct ct_kill_notif), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_tdls_channel_switch_notif), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1), RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS, iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED, struct iwl_ftm_responder_stats), RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF, iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC, struct iwl_mfu_assert_dump_notif), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC, struct iwl_stored_beacon_notif_v2), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC, struct iwl_mu_group_mgmt_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC, struct iwl_mvm_pm_state_notification), RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, iwl_mvm_probe_resp_data_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_probe_resp_data_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, iwl_mvm_channel_switch_start_notif, RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif), RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, iwl_mvm_channel_switch_error_notif, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_channel_switch_error_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST, iwl_mvm_rx_thermal_dual_chain_req, RX_HANDLER_ASYNC_LOCKED, struct iwl_thermal_dual_chain_request), RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF, iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_rfi_deactivate_notif), + + RX_HANDLER_GRP(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION, + iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC, + struct iwl_time_msmt_notify), + RX_HANDLER_GRP(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, + iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, + struct iwl_time_msmt_cfm_notify), }; #undef RX_HANDLER #undef RX_HANDLER_GRP /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(UCODE_ALIVE_NTFY), HCMD_NAME(REPLY_ERROR), HCMD_NAME(ECHO_CMD), HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(DBG_CFG), HCMD_NAME(SCAN_CFG_CMD), HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), - HCMD_NAME(FW_GET_ITEM_CMD), HCMD_NAME(TX_CMD), HCMD_NAME(SCD_QUEUE_CFG), HCMD_NAME(TXPATH_FLUSH), HCMD_NAME(MGMT_MCAST_KEY), HCMD_NAME(WEP_KEY), HCMD_NAME(SHARED_MEM_CFG), HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD), HCMD_NAME(MAC_CONTEXT_CMD), HCMD_NAME(TIME_EVENT_CMD), HCMD_NAME(TIME_EVENT_NOTIFICATION), HCMD_NAME(BINDING_CONTEXT_CMD), HCMD_NAME(TIME_QUOTA_CMD), HCMD_NAME(NON_QOS_TX_COUNTER_CMD), HCMD_NAME(LEDS_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), HCMD_NAME(HOT_SPOT_CMD), HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), HCMD_NAME(PHY_DB_CMD), HCMD_NAME(SCAN_OFFLOAD_COMPLETE), HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), HCMD_NAME(POWER_TABLE_CMD), HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), HCMD_NAME(NVM_ACCESS_CMD), HCMD_NAME(BEACON_NOTIFICATION), HCMD_NAME(BEACON_TEMPLATE_CMD), HCMD_NAME(TX_ANT_CONFIGURATION_CMD), HCMD_NAME(BT_CONFIG), HCMD_NAME(STATISTICS_CMD), HCMD_NAME(STATISTICS_NOTIFICATION), HCMD_NAME(EOSP_NOTIFICATION), HCMD_NAME(REDUCE_TX_POWER_CMD), HCMD_NAME(MISSED_BEACONS_NOTIFICATION), HCMD_NAME(TDLS_CONFIG_CMD), HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), HCMD_NAME(BAR_FRAME_RELEASE), HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), HCMD_NAME(MCC_CHUB_UPDATE_CMD), HCMD_NAME(MARKER_CMD), HCMD_NAME(BT_PROFILE_NOTIFICATION), HCMD_NAME(MCAST_FILTER_CMD), HCMD_NAME(REPLY_SF_CFG_CMD), HCMD_NAME(REPLY_BEACON_FILTERING_CMD), HCMD_NAME(D3_CONFIG_CMD), HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), - HCMD_NAME(OFFLOADS_QUERY_CMD), HCMD_NAME(MATCH_FOUND_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), HCMD_NAME(WOWLAN_PATTERNS), HCMD_NAME(WOWLAN_CONFIGURATION), HCMD_NAME(WOWLAN_TSC_RSC_PARAM), HCMD_NAME(WOWLAN_TKIP_PARAM), HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), HCMD_NAME(WOWLAN_GET_STATUSES), HCMD_NAME(SCAN_ITERATION_COMPLETE), HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), HCMD_NAME(LDBG_CONFIG_CMD), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), HCMD_NAME(RFI_CONFIG_CMD), HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), + HCMD_NAME(MAC_CONFIG_CMD), + HCMD_NAME(LINK_CONFIG_CMD), + HCMD_NAME(STA_CONFIG_CMD), + HCMD_NAME(AUX_STA_CMD), + HCMD_NAME(STA_REMOVE_CMD), + HCMD_NAME(STA_DISABLE_TX_CMD), HCMD_NAME(SESSION_PROTECTION_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(SCD_QUEUE_CONFIG_CMD), + HCMD_NAME(SEC_KEY_CMD), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { + HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), +}; + /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_location_names[] = { HCMD_NAME(TOF_RANGE_REQ_CMD), HCMD_NAME(TOF_CONFIG_CMD), HCMD_NAME(TOF_RANGE_ABORT_CMD), HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), HCMD_NAME(TOF_LC_NOTIF), HCMD_NAME(TOF_RESPONDER_STATS), HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), }; /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { + HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION), + HCMD_NAME(WOWLAN_INFO_NOTIFICATION), + HCMD_NAME(D3_END_NOTIFICATION), HCMD_NAME(STORED_BEACON_NTF), }; /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = { HCMD_NAME(NVM_ACCESS_COMPLETE), HCMD_NAME(NVM_GET_INFO), HCMD_NAME(TAS_CONFIG), }; static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names), [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), + [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names), [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), }; /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm) { const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; u64 dflt_pwr_limit; if (!backoff) return 0; dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev); while (backoff->pwr) { if (dflt_pwr_limit >= backoff->pwr) return backoff->backoff; backoff++; } return 0; } static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) { struct iwl_mvm *mvm = container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); struct ieee80211_vif *tx_blocked_vif; struct iwl_mvm_vif *mvmvif; mutex_lock(&mvm->mutex); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (!tx_blocked_vif) goto unlock; mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_start(void *ctx) { struct iwl_mvm *mvm = ctx; mutex_lock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_end(void *ctx) { struct iwl_mvm *mvm = ctx; mutex_unlock(&mvm->mutex); } static bool iwl_mvm_fwrt_fw_running(void *ctx) { return iwl_mvm_firmware_running(ctx); } static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) { struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, host_cmd); mutex_unlock(&mvm->mutex); return ret; } static bool iwl_mvm_d3_debug_enable(void *ctx) { return IWL_MVM_D3_DEBUG; } static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, .fw_running = iwl_mvm_fwrt_fw_running, .send_hcmd = iwl_mvm_fwrt_send_hcmd, .d3_debug_enable = iwl_mvm_d3_debug_enable, }; static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; int ret; if (trans->csme_own) { if (WARN(!mvm->mei_registered, "csme is owner, but we aren't registered to iwlmei\n")) goto get_nvm_from_fw; mvm->mei_nvm_data = iwl_mei_get_nvm(); if (mvm->mei_nvm_data) { /* * mvm->mei_nvm_data is set and because of that, * we'll load the NVM from the FW when we'll get * ownership. 
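 * Parsing the CSME-provided image here is enough to register mac80211;
 * the device itself is not started on this path.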
*/ mvm->nvm_data = iwl_parse_mei_nvm_data(trans, trans->cfg, mvm->mei_nvm_data, mvm->fw); return 0; } IWL_ERR(mvm, "Got a NULL NVM from CSME, trying to get it from the device\n"); } get_nvm_from_fw: rtnl_lock(); wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); if (ret) { mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); return ret; } ret = iwl_run_init_mvm_ucode(mvm); if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!ret && iwl_mvm_is_lar_supported(mvm)) { mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; ret = iwl_mvm_init_mcc(mvm); } if (!iwlmvm_mod_params.init_dbg || !ret) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); return ret; } static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) { struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; int ret; iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); ret = iwl_mvm_mac_setup_register(mvm); if (ret) return ret; mvm->hw_registered = true; iwl_mvm_dbgfs_register(mvm); wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, mvm->mei_rfkill_blocked, RFKILL_HARD_BLOCK_NOT_OWNER); iwl_mvm_mei_set_sw_rfkill_state(mvm); return 0; } struct iwl_mvm_frob_txf_data { u8 *buf; size_t buflen; }; static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct iwl_mvm_frob_txf_data *txf = data; u8 keylen, match, matchend; u8 *keydata; size_t i; switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: keydata = key->key; keylen = key->keylen; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: /* * WEP has short keys which might show up in the payload, * and then you can deduce the key, so in this case just * remove all FIFO data. * For TKIP, we don't know the phase 2 keys here, so same. 
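 * The whole snapshot is blanked with a 0xBB pattern here, while the
 * targeted scrub below stamps located key bytes with 0xAA.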
*/ memset(txf->buf, 0xBB, txf->buflen); return; default: return; } /* scan for key material and clear it out */ match = 0; for (i = 0; i < txf->buflen; i++) { if (txf->buf[i] != keydata[match]) { match = 0; continue; } match++; if (match == keylen) { memset(txf->buf + i - keylen, 0xAA, keylen); match = 0; } } /* we're dealing with a FIFO, so check wrapped around data */ matchend = match; for (i = 0; match && i < keylen - match; i++) { if (txf->buf[i] != keydata[match]) break; match++; if (match == keylen) { memset(txf->buf, 0xAA, i + 1); memset(txf->buf + txf->buflen - matchend, 0xAA, matchend); break; } } } static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen) { struct iwl_mvm_frob_txf_data txf = { .buf = buf, .buflen = buflen, }; struct iwl_mvm *mvm = ctx; /* embedded key material exists only on old API */ if (iwl_mvm_has_new_tx_api(mvm)) return; rcu_read_lock(); ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); rcu_read_unlock(); } static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len) { /* we only use wide headers for commands */ struct iwl_cmd_header_wide *hdr = hcmd; unsigned int frob_start = sizeof(*hdr), frob_end = 0; if (len < sizeof(hdr)) return; /* all the commands we care about are in LONG_GROUP */ if (hdr->group_id != LONG_GROUP) return; switch (hdr->cmd) { case WEP_KEY: case WOWLAN_TKIP_PARAM: case WOWLAN_KEK_KCK_MATERIAL: case ADD_STA_KEY: /* * blank out everything here, easier than dealing * with the various versions of the command */ frob_end = INT_MAX; break; case MGMT_MCAST_KEY: frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) != offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk); BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) < offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk)); break; } if (frob_start >= frob_end) return; if (frob_end > len) frob_end = len; memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); } static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen) { const struct iwl_dump_exclude *excl; struct iwl_mvm *mvm = ctx; int i; switch (mvm->fwrt.cur_fw_img) { case IWL_UCODE_INIT: default: /* not relevant */ return; case IWL_UCODE_REGULAR: case IWL_UCODE_REGULAR_USNIFFER: excl = mvm->fw->dump_excl; break; case IWL_UCODE_WOWLAN: excl = mvm->fw->dump_excl_wowlan; break; } BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != sizeof(mvm->fw->dump_excl_wowlan)); for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { u32 start, end; if (!excl[i].addr || !excl[i].size) continue; start = excl[i].addr; end = start + excl[i].size; if (end <= mem_addr || start >= mem_addr + buflen) continue; if (start < mem_addr) start = mem_addr; if (end > mem_addr + buflen) end = mem_addr + buflen; memset((u8 *)mem + start - mem_addr, 0xAA, end - start); } } static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { .frob_txf = iwl_mvm_frob_txf, .frob_hcmd = iwl_mvm_frob_hcmd, .frob_mem = iwl_mvm_frob_mem, }; #if IS_ENABLED(CONFIG_IWLMEI) static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) { struct iwl_mvm *mvm = priv; struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; /* * This is protected by the guarantee that this function will not be * called twice on two different threads */ prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); curr_conn_info = kzalloc(sizeof(*curr_conn_info), 
GFP_KERNEL); if (!curr_conn_info) return; curr_conn_info->conn_info = *conn_info; rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); if (prev_conn_info) kfree_rcu(prev_conn_info, rcu_head); } -static void iwl_mvm_mei_rfkill(void *priv, bool blocked) +static void iwl_mvm_mei_rfkill(void *priv, bool blocked, + bool csme_taking_ownership) { struct iwl_mvm *mvm = priv; + if (blocked && !csme_taking_ownership) + return; + mvm->mei_rfkill_blocked = blocked; if (!mvm->hw_registered) return; wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, mvm->mei_rfkill_blocked, RFKILL_HARD_BLOCK_NOT_OWNER); } static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) { struct iwl_mvm *mvm = priv; if (!mvm->hw_registered || !mvm->csme_vif) return; iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); } #endif static void iwl_mvm_sap_connected_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sap_connected_wk); int ret; ret = iwl_mvm_start_get_nvm(mvm); if (ret) goto out_free; ret = iwl_mvm_start_post_nvm(mvm); if (ret) goto out_free; return; out_free: IWL_ERR(mvm, "Couldn't get started...\n"); iwl_mei_start_unregister(); iwl_mei_unregister_complete(); iwl_fw_flush_dumps(&mvm->fwrt); iwl_mvm_thermal_exit(mvm); iwl_fw_runtime_free(&mvm->fwrt); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(mvm->trans); kfree(mvm->nvm_data); kfree(mvm->mei_nvm_data); ieee80211_free_hw(mvm->hw); } #if IS_ENABLED(CONFIG_IWLMEI) static void iwl_mvm_mei_sap_connected(void *priv) { struct iwl_mvm *mvm = priv; if (!mvm->hw_registered) schedule_work(&mvm->sap_connected_wk); } static void iwl_mvm_mei_nic_stolen(void *priv) { struct iwl_mvm *mvm = priv; rtnl_lock(); cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); rtnl_unlock(); } static const struct iwl_mei_ops mei_ops = { .me_conn_status = iwl_mvm_me_conn_status, .rfkill = iwl_mvm_mei_rfkill, .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, .sap_connected = iwl_mvm_mei_sap_connected, .nic_stolen = iwl_mvm_mei_nic_stolen, }; #endif static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) { struct ieee80211_hw *hw; struct iwl_op_mode *op_mode; struct iwl_mvm *mvm; struct iwl_trans_config trans_cfg = {}; static const u8 no_reclaim_cmds[] = { TX_CMD, }; - int scan_size; + u32 max_agg; + size_t scan_size; u32 min_backoff; struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station * index all over the driver - check that its value corresponds to the * array size. */ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT_MAX); /******************************** * 1. Allocating and configuring HW data ********************************/ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + sizeof(struct iwl_mvm), + iwl_mvm_has_mld_api(fw) ? 
&iwl_mvm_mld_hw_ops : &iwl_mvm_hw_ops); if (!hw) return NULL; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + max_agg = IEEE80211_MAX_AMPDU_BUF_EHT; + else + max_agg = IEEE80211_MAX_AMPDU_BUF_HE; + + hw->max_rx_aggregation_subframes = max_agg; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + hw->max_tx_aggregation_subframes = max_agg; op_mode = hw->priv; mvm = IWL_OP_MODE_GET_MVM(op_mode); mvm->dev = trans->dev; mvm->trans = trans; mvm->cfg = cfg; mvm->fw = fw; mvm->hw = hw; iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); iwl_mvm_get_acpi_tables(mvm); iwl_uefi_get_sgom_table(trans, &mvm->fwrt); + iwl_uefi_get_step_table(trans); mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; trans->rx_mpdu_cmd_hdr_size = (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ? sizeof(struct iwl_rx_mpdu_desc) : IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); if (WARN_ON(trans->num_rx_queues > 1)) goto out_free; } mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; if (iwl_mvm_has_new_tx_api(mvm)) { /* * If we have the new TX/queue allocation API initialize them * all to invalid numbers. We'll rewrite the ones that we need * later, but that doesn't happen for all of them all of the * time (e.g. P2P Device is optional), and if a dynamic queue * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then * iwl_mvm_is_static_queue() erroneously returns true, and we * might have things getting stuck. 
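 * Devices without the new TX API instead keep the fixed DQA queue
 * numbers assigned in the else branch below.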
*/ mvm->aux_queue = IWL_MVM_INVALID_QUEUE; mvm->snif_queue = IWL_MVM_INVALID_QUEUE; mvm->probe_queue = IWL_MVM_INVALID_QUEUE; mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; } else { mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; } mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); spin_lock_init(&mvm->async_handlers_lock); INIT_LIST_HEAD(&mvm->time_event_list); INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); INIT_LIST_HEAD(&mvm->resp_pasn_list); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); + spin_lock_init(&mvm->add_stream_lock); init_waitqueue_head(&mvm->rx_sync_waitq); mvm->queue_sync_state = 0; SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); spin_lock_init(&mvm->tcm.lock); INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); mvm->tcm.ts = jiffies; mvm->tcm.ll_ts = jiffies; mvm->tcm.uapsd_nonagg_ts = jiffies; INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); - mvm->cmd_ver.d0i3_resp = - iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD, - 0); - /* we only support version 1 */ - if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1)) - goto out_free; - mvm->cmd_ver.range_resp = iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 5); /* we only support up to version 9 */ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) goto out_free; /* * Populate the state variables that the transport layer needs * to know about. 
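 * This covers the no-reclaim command list, the Rx buffer size derived
 * from the amsdu_size module parameter, the command group tables and the
 * command queue/FIFO assignment.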
*/ trans_cfg.op_mode = op_mode; trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_8K: trans_cfg.rx_buf_size = IWL_AMSDU_8K; break; case IWL_AMSDU_12K: trans_cfg.rx_buf_size = IWL_AMSDU_12K; break; default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); trans_cfg.rx_buf_size = IWL_AMSDU_4K; } trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); /* Set a short watchdog for the command queue */ trans_cfg.cmd_q_wdg_timeout = iwl_mvm_get_wd_timeout(mvm, NULL, false, true); snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), "%s", fw->fw_version); trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); trans_cfg.queue_alloc_cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), 0); mvm->sta_remove_requires_queue_remove = trans_cfg.queue_alloc_cmd_ver > 0; + mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); + /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg.conf_tlv)); trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); /* Init phy db */ mvm->phy_db = iwl_phy_db_init(trans); if (!mvm->phy_db) { IWL_ERR(mvm, "Cannot init phy_db\n"); goto out_free; } IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->trans->name, mvm->trans->hw_rev); if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); if (!mvm->scan_cmd) goto out_free; + mvm->scan_cmd_size = scan_size; /* invalidate ids to prevent accidental removal of sta_id 0 */ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; /* Set EBS as successful as long as not stated otherwise by the FW. 
*/ mvm->last_ebs_successful = true; min_backoff = iwl_mvm_min_backoff(mvm); iwl_mvm_thermal_initialize(mvm, min_backoff); if (!iwl_mvm_has_new_rx_stats_api(mvm)) memset(&mvm->rx_stats_v3, 0, sizeof(struct mvm_statistics_rx_v3)); else memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); + iwl_mvm_ftm_initiator_smooth_config(mvm); + + iwl_mvm_init_time_sync(&mvm->time_sync); + mvm->debugfs_dir = dbgfs_dir; #if IS_ENABLED(CONFIG_IWLMEI) mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); #else mvm->mei_registered = false; #endif + iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); + if (iwl_mvm_start_get_nvm(mvm)) { /* * Getting NVM failed while CSME is the owner, but we are * registered to MEI, we'll get the NVM later when it'll be * possible to get it from CSME. */ if (trans->csme_own && mvm->mei_registered) return op_mode; goto out_thermal_exit; } if (iwl_mvm_start_post_nvm(mvm)) goto out_thermal_exit; return op_mode; out_thermal_exit: iwl_mvm_thermal_exit(mvm); if (mvm->mei_registered) { iwl_mei_start_unregister(); iwl_mei_unregister_complete(); } out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(trans); ieee80211_free_hw(mvm->hw); return NULL; } void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_fw_cancel_timestamp(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_fw_dbg_stop_sync(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); - iwl_mvm_mei_device_down(mvm); + iwl_mvm_mei_device_state(mvm, false); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; if (mvm->mei_registered) { rtnl_lock(); iwl_mei_set_netdev(NULL); rtnl_unlock(); iwl_mei_start_unregister(); } /* * After we unregister from mei, the worker can't be scheduled * anymore. */ cancel_work_sync(&mvm->sap_connected_wk); iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); /* * If we couldn't get ownership on the device and we couldn't * get the NVM from CSME, we haven't registered to mac80211. * In that case, we didn't fail op_mode_start, because we are * waiting for CSME to allow us to get the NVM to register to * mac80211. If that didn't happen, we haven't registered to * mac80211, hence the if below. 
*/ if (mvm->hw_registered) ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = NULL; kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; + iwl_mvm_ptp_remove(mvm); + iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); mvm->phy_db = NULL; kfree(mvm->nvm_data); kfree(mvm->mei_nvm_data); kfree(rcu_access_pointer(mvm->csme_conn_info)); kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); cancel_delayed_work_sync(&mvm->tcm.work); iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); if (mvm->mei_registered) iwl_mei_unregister_complete(); ieee80211_free_hw(mvm->hw); } struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) { struct iwl_async_handler_entry *entry, *tmp; spin_lock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { iwl_free_rxb(&entry->rxb); list_del(&entry->list); kfree(entry); } spin_unlock_bh(&mvm->async_handlers_lock); } static void iwl_mvm_async_handlers_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, async_handlers_wk); struct iwl_async_handler_entry *entry, *tmp; LIST_HEAD(local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ /* * Sync with Rx path with a lock. Remove all the entries from this list, * add them to a local one (lock free), and then handle them. */ spin_lock_bh(&mvm->async_handlers_lock); list_splice_init(&mvm->async_handlers_list, &local_list); spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); if (entry->context == RX_HANDLER_ASYNC_LOCKED) mutex_unlock(&mvm->mutex); kfree(entry); } } static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_cmd *cmds_trig; int i; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_FW_NOTIF); if (!trig) return; cmds_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { /* don't collect on CMD 0 */ if (!cmds_trig->cmds[i].cmd_id) break; if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "CMD 0x%02x.%02x received", pkt->hdr.group_id, pkt->hdr.cmd); break; } } static void iwl_mvm_rx_common(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_packet *pkt) { unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); int i; union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data); iwl_mvm_rx_check_trigger(mvm, pkt); /* * Do the notification wait before RX handlers so * even if the RX handler consumes the RXB we have * access to it in the notification wait entry. 
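 * Once the waiters have been notified, the packet is matched against
 * iwl_mvm_rx_handlers by WIDE_ID(group, cmd); SYNC handlers run inline,
 * ASYNC ones are queued on async_handlers_list for the worker.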
*/ iwl_notification_wait_notify(&mvm->notif_wait, pkt); for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) { const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; struct iwl_async_handler_entry *entry; if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (unlikely(pkt_len < rx_h->min_size)) + if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size, + "unexpected notification 0x%04x size %d, need %d\n", + rx_h->cmd_id, pkt_len, rx_h->min_size)) return; if (rx_h->context == RX_HANDLER_SYNC) { rx_h->fn(mvm, rxb); return; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* we can't do much... */ if (!entry) return; entry->rxb._page = rxb_steal_page(rxb); entry->rxb._offset = rxb->_offset; entry->rxb._rx_page_order = rxb->_rx_page_order; entry->fn = rx_h->fn; entry->context = rx_h->context; spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); break; } } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD)) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else iwl_mvm_rx_common(mvm, rxb, pkt); } void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0); else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); /* * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA * commands that need to block the Tx queues. */ iwl_trans_block_txq_ptrs(mvm->trans, false); } static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) { return queue == mvm->aux_queue || queue == mvm->probe_queue || queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; } static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, int hw_queue, bool start) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_sta *sta; struct ieee80211_txq *txq; struct iwl_mvm_txq *mvmtxq; int i; unsigned long tid_bitmap; struct iwl_mvm_sta *mvmsta; u8 sta_id; sta_id = iwl_mvm_has_new_tx_api(mvm) ? 
mvm->tvqm_info[hw_queue].sta_id : mvm->queue_info[hw_queue].ra_sta_id; if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (iwl_mvm_is_static_queue(mvm, hw_queue)) { if (!start) ieee80211_stop_queues(mvm->hw); else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) ieee80211_wake_queues(mvm->hw); goto out; } if (iwl_mvm_has_new_tx_api(mvm)) { int tid = mvm->tvqm_info[hw_queue].txq_tid; tid_bitmap = BIT(tid); } else { tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; } for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { int tid = i; if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; txq = sta->txq[tid]; mvmtxq = iwl_mvm_txq_from_mac80211(txq); - mvmtxq->stopped = !start; + if (start) + clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); + else + set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); - if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) + if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) { + local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); + local_bh_enable(); + } } out: rcu_read_unlock(); } static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, false); } static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { iwl_mvm_queue_state_change(op_mode, hw_queue, true); } static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) { bool state = iwl_mvm_is_radio_killed(mvm); if (state) wake_up(&mvm->rx_sync_waitq); wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); } void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) { if (state) set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); } struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) { return rcu_dereference_protected(mvm->csme_conn_info, lockdep_is_held(&mvm->mutex)); } static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); bool unified = iwl_mvm_has_unified_ucode(mvm); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); else clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); iwl_mvm_set_rfkill_state(mvm); /* iwl_run_init_mvm_ucode is waiting for results, abort it. */ if (rfkill_safe_init_done) iwl_abort_notification_waits(&mvm->notif_wait); /* * Don't ask the transport to stop the firmware. We'll do it * after cfg80211 takes us down. */ if (unified) return false; /* * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. 
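 * Returning true asks the transport to stop the firmware; unified-ucode
 * devices already returned false above since cfg80211 takes the device
 * down instead.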
*/ return state && rfkill_safe_init_done; } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info; info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); ieee80211_free_txskb(mvm->hw, skb); } struct iwl_mvm_reprobe { struct device *dev; struct work_struct work; }; static void iwl_mvm_reprobe_wk(struct work_struct *wk) { struct iwl_mvm_reprobe *reprobe; reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); iwl_dbg_tlv_del_timers(mvm->trans); /* * This is a bit racy, but worst case we tell mac80211 about * a stopped/aborted scan when that was already done which * is not a problem. It is necessary to abort any os scan * here because mac80211 requires having the scan cleared * before restarting. * We'll reset the scan_status to NONE in restart cleanup in * the next start() call from mac80211. If restart isn't called * (no fw restart) scan status will stay busy. */ iwl_mvm_report_scan_aborted(mvm); /* * If we're restarting already, don't cycle restarts. * If INIT fw asserted, it will likely fail again. * If WoWLAN fw asserted, don't restart either, mac80211 * can't recover this since we're already half suspended. */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); } else if (test_bit(IWL_MVM_STATUS_STARTING, &mvm->status)) { IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; IWL_ERR(mvm, "Firmware error during reconfiguration - reprobe!\n"); /* * get a module reference to avoid doing this while unloading * anyway and to avoid scheduling a work with code that's * being removed. */ if (!try_module_get(THIS_MODULE)) { IWL_ERR(mvm, "Module is being unloaded - abort\n"); return; } reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC); if (!reprobe) { module_put(THIS_MODULE); return; } reprobe->dev = get_device(mvm->trans->dev); INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { IWL_ERR(mvm, "HW restart already requested, but not started\n"); } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { /* This should be first thing before trying to collect any * data to avoid endless loops if any HW error happens while * collecting debug data. 
*/ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC); if (recover_buf) { mvm->error_recovery_buf = recover_buf; iwl_trans_read_mem_bytes(mvm->trans, src_addr, recover_buf, src_size); } } iwl_fw_error_collect(&mvm->fwrt, false); if (fw_error && mvm->fw_restart > 0) { mvm->fw_restart--; ieee80211_restart_hw(mvm->hw); } else if (mvm->fwrt.trans->dbg.restart_required) { IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); mvm->fwrt.trans->dbg.restart_required = false; ieee80211_restart_hw(mvm->hw); } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { ieee80211_restart_hw(mvm->hw); } } } static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + if (mvm->pldr_sync) + return; + if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status)) iwl_mvm_dump_nic_error_log(mvm); if (sync) { iwl_fw_error_collect(&mvm->fwrt, true); /* * Currently, the only case for sync=true is during * shutdown, so just stop in this case. If/when that * changes, we need to be a bit smarter here. */ return; } /* * If the firmware crashes while we're already considering it * to be dead then don't ask for a restart, that cannot do * anything useful anyway. */ if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) return; iwl_mvm_nic_restart(mvm, false); } static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); WARN_ON(1); iwl_mvm_nic_restart(mvm, true); } static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode, enum iwl_fw_ini_time_point tp_id, union iwl_dbg_tlv_tp_data *tp_data) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); } #define IWL_MVM_COMMON_OPS \ /* these could be differentiated */ \ .async_cb = iwl_mvm_async_cb, \ .queue_full = iwl_mvm_stop_sw_queue, \ .queue_not_full = iwl_mvm_wake_sw_queue, \ .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \ .free_skb = iwl_mvm_free_skb, \ .nic_error = iwl_mvm_nic_error, \ .cmd_queue_full = iwl_mvm_cmd_queue_full, \ .nic_config = iwl_mvm_nic_config, \ /* as we only register one, these MUST be common!
*/ \ .start = iwl_op_mode_mvm_start, \ .stop = iwl_op_mode_mvm_stop, \ .time_point = iwl_op_mode_mvm_time_point static const struct iwl_op_mode_ops iwl_mvm_ops = { IWL_MVM_COMMON_OPS, .rx = iwl_mvm_rx, }; static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, unsigned int queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); if (unlikely(queue >= mvm->trans->num_rx_queues)) return; if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, RX_QUEUES_NOTIFICATION))) iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } static const struct iwl_op_mode_ops iwl_mvm_ops_mq = { IWL_MVM_COMMON_OPS, .rx = iwl_mvm_rx_mq, .rx_rss = iwl_mvm_rx_mq_rss, }; diff --git a/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c index a3cefbc43e80..a5b432bc9e2f 100644 --- a/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c +++ b/sys/contrib/dev/iwlwifi/mvm/phy-ctxt.c @@ -1,403 +1,406 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ #include #include "fw-api.h" #include "mvm.h" /* Maps the driver specific channel width definition to the fw values */ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: - return PHY_VHT_CHANNEL_MODE20; + return IWL_PHY_CHANNEL_MODE20; case NL80211_CHAN_WIDTH_40: - return PHY_VHT_CHANNEL_MODE40; + return IWL_PHY_CHANNEL_MODE40; case NL80211_CHAN_WIDTH_80: - return PHY_VHT_CHANNEL_MODE80; + return IWL_PHY_CHANNEL_MODE80; case NL80211_CHAN_WIDTH_160: - return PHY_VHT_CHANNEL_MODE160; + return IWL_PHY_CHANNEL_MODE160; + case NL80211_CHAN_WIDTH_320: + return IWL_PHY_CHANNEL_MODE320; default: WARN(1, "Invalid channel width=%u", chandef->width); - return PHY_VHT_CHANNEL_MODE20; + return IWL_PHY_CHANNEL_MODE20; } } /* * Maps the driver specific control channel position (relative to the center * freq) definitions to the the fw values */ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) { - switch (chandef->chan->center_freq - chandef->center_freq1) { - case -70: - return PHY_VHT_CTRL_POS_4_BELOW; - case -50: - return PHY_VHT_CTRL_POS_3_BELOW; - case -30: - return PHY_VHT_CTRL_POS_2_BELOW; - case -10: - return PHY_VHT_CTRL_POS_1_BELOW; - case 10: - return PHY_VHT_CTRL_POS_1_ABOVE; - case 30: - return PHY_VHT_CTRL_POS_2_ABOVE; - case 50: - return PHY_VHT_CTRL_POS_3_ABOVE; - case 70: - return PHY_VHT_CTRL_POS_4_ABOVE; - default: - WARN(1, "Invalid channel definition"); - fallthrough; - case 0: + int offs = chandef->chan->center_freq - chandef->center_freq1; + int abs_offs = abs(offs); + u8 ret; + + if (offs == 0) { /* * The FW is expected to check the control channel position only * when in HT/VHT and the channel width is not 20MHz. Return * this value as the default one. */ - return PHY_VHT_CTRL_POS_1_BELOW; + return 0; } + + /* this results in a value 0-7, i.e. 
fitting into 0b0111 */ + ret = (abs_offs - 10) / 20; + /* + * But we need the value to be in 0b1011 because 0b0100 is + * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in + * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000) + */ + ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) | + ((ret & BIT(2)) << 1); + /* and add the above bit */ + ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE; + + return ret; } /* * Construct the generic fields of the PHY context command */ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd *cmd, u32 action) { cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, ctxt->color)); cmd->action = cpu_to_le32(action); } static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, __le32 *rxchain_info, u8 chains_static, u8 chains_dynamic) { u8 active_cnt, idle_cnt; /* Set rx the chains */ idle_cnt = chains_static; active_cnt = chains_dynamic; /* In scenarios where we only ever use a single-stream rates, * i.e. legacy 11b/g/a associations, single-stream APs or even * static SMPS, enable both chains to get diversity, improving * the case where we're far enough from the AP that attenuation * between the two antennas is sufficiently different to impact * performance. */ if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) { idle_cnt = 2; active_cnt = 2; } /* * If the firmware requested it, then we know that it supports * getting zero for the values to indicate "use one, but pick * which one yourself", which means it can dynamically pick one * that e.g. has better RSSI. */ if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) { idle_cnt = 0; active_cnt = 0; } *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); *rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mvm->dbgfs_rx_phyinfo)) *rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); #endif } /* * Add the phy configuration to the PHY context command */ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd_v1 *cmd, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { struct iwl_phy_context_cmd_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci); /* Set the channel info data */ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info, chains_static, chains_dynamic); tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } /* * Add the phy configuration to the PHY context command */ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct iwl_phy_context_cmd *cmd, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw, + cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm, chandef->chan->band)); /* Set the channel info data */ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); /* we only support RLC command version 2 */ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2) iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, chains_static, chains_dynamic); } -static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt, - u8 chains_static, u8 chains_dynamic) +int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, 
u8 chains_dynamic) { struct iwl_rlc_config_cmd cmd = { .phy_id = cpu_to_le32(ctxt->id), }; - if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2) + if (ctxt->rlc_disabled) + return 0; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, + RLC_CONFIG_CMD), 0) < 2) return 0; BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE != PHY_RX_CHAIN_DRIVER_FORCE_MSK); BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID != PHY_RX_CHAIN_VALID_MSK); BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE != PHY_RX_CHAIN_FORCE_SEL_MSK); BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO != PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK); BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK); BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT != PHY_RX_CHAIN_MIMO_CNT_MSK); iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info, chains_static, chains_dynamic); return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD, DATA_PATH_GROUP, 2), 0, sizeof(cmd), &cmd); } /* * Send a command to apply the current phy configuration. The command is send * only if something in the configuration changed: in case that this is the * first time that the phy configuration is applied or in case that the phy * configuration changed from the previous apply. */ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic, u32 action) { int ret; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1); if (ver == 3 || ver == 4) { struct iwl_phy_context_cmd cmd = {}; /* Set the command header fields */ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action); /* Set the command data */ iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef, chains_static, chains_dynamic); ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd); } else if (ver < 3) { struct iwl_phy_context_cmd_v1 cmd = {}; u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); /* Set the command header fields */ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, (struct iwl_phy_context_cmd *)&cmd, action); /* Set the command data */ iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef, chains_static, chains_dynamic); ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd); } else { IWL_ERR(mvm, "PHY ctxt cmd error ver %d not supported\n", ver); return -EOPNOTSUPP; } if (ret) { IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); return ret; } if (action != FW_CTXT_ACTION_REMOVE) return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, chains_dynamic); return 0; } /* * Send a command to add a PHY context based on the current HW configuration. */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && ctxt->ref); lockdep_assert_held(&mvm->mutex); ctxt->channel = chandef->chan; ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_ADD); } /* * Update the number of references to the given PHY context. This is valid only * in case the PHY context was already created, i.e., its reference count > 0. */ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); ctxt->ref++; } /* * Send a command to modify the PHY context based on the current HW * configuration. Note that the function does not check that the configuration * changed. 
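/*
 * Worked, user-space sketch of the control-channel position encoding in
 * iwl_mvm_get_ctrl_pos() earlier in this file (illustrative only; the mask
 * values IWL_PHY_CTRL_POS_OFFS_MSK = 0x3, IWL_PHY_CTRL_POS_ABOVE = 0x4 and
 * IWL_PHY_CTRL_POS_OFFS_EXT = 0x8 are assumed from the comments there, not
 * copied from the firmware API header).
 */
#include <stdio.h>
#include <stdlib.h>

#define POS_OFFS_MSK 0x3	/* low two bits of the scaled offset */
#define POS_ABOVE    0x4	/* control channel above center_freq1 */
#define POS_OFFS_EXT 0x8	/* bit 2 of the scaled offset, moved up */

static unsigned int ctrl_pos(int offs_mhz)
{
	int abs_offs = abs(offs_mhz);
	unsigned int ret;

	if (offs_mhz == 0)
		return 0;

	ret = (abs_offs - 10) / 20;			/* 10 -> 0, 30 -> 1, ... 150 -> 7 */
	ret = (ret & POS_OFFS_MSK) | ((ret & 0x4) << 1);/* keep bit 2 clear of POS_ABOVE */
	if (offs_mhz > 0)
		ret |= POS_ABOVE;
	return ret;
}

int main(void)
{
	static const int offs[] = { -150, -70, -30, -10, 10, 30, 70, 150 };

	for (unsigned int i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("offset %4d MHz -> 0x%x\n", offs[i], ctrl_pos(offs[i]));
	/* e.g. -10 MHz -> 0x0, +10 MHz -> 0x4, -150 MHz -> 0xb, +150 MHz -> 0xf */
	return 0;
}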
*/ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 && ctxt->channel == chandef->chan && ctxt->width == chandef->width && ctxt->center_freq1 == chandef->center_freq1) return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, chains_dynamic); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && ctxt->channel->band != chandef->chan->band) { int ret; /* ... remove it here ...*/ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, FW_CTXT_ACTION_REMOVE); if (ret) return ret; /* ... and proceed to add it again */ action = FW_CTXT_ACTION_ADD; } ctxt->channel = chandef->chan; ctxt->width = chandef->width; ctxt->center_freq1 = chandef->center_freq1; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action); } void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!ctxt)) return; ctxt->ref--; /* * Move unused phy's to a default channel. When the phy is moved the, * fw will cleanup immediate quiet bit if it was previously set, * otherwise we might not be able to reuse this phy. */ if (ctxt->ref == 0) { struct ieee80211_channel *chan = NULL; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband; enum nl80211_band band; int channel; for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = mvm->hw->wiphy->bands[band]; if (!sband) continue; for (channel = 0; channel < sband->n_channels; channel++) if (!(sband->channels[channel].flags & IEEE80211_CHAN_DISABLED)) { chan = &sband->channels[channel]; break; } if (chan) break; } if (WARN_ON(!chan)) return; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); } } static void iwl_mvm_binding_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { unsigned long *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) - __set_bit(mvmvif->phy_ctxt->id, data); + __set_bit(mvmvif->deflink.phy_ctxt->id, data); } int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) { unsigned long phy_ctxt_counter = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_binding_iterator, &phy_ctxt_counter); return hweight8(phy_ctxt_counter); } diff --git a/sys/contrib/dev/iwlwifi/mvm/power.c b/sys/contrib/dev/iwlwifi/mvm/power.c index b2ea2fca5376..9131b5f1bc76 100644 --- a/sys/contrib/dev/iwlwifi/mvm/power.c +++ b/sys/contrib/dev/iwlwifi/mvm/power.c @@ -1,956 +1,980 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" #include "fw/api/power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, struct iwl_beacon_filter_cmd *cmd, u32 flags) { u16 len; 
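/*
 * The debug prints below dump every beacon-filter threshold, and the command
 * length is then chosen by firmware capability: with
 * IWL_UCODE_TLV_API_BEACON_FILTER_V4 the full structure is sent, otherwise
 * the command is truncated at offsetof(struct iwl_beacon_filter_cmd,
 * bf_threshold_absolute_low) so that older firmware never sees the absolute
 * threshold fields.
 */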
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", le32_to_cpu(cmd->ba_enable_beacon_abort)); IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", le32_to_cpu(cmd->ba_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n", le32_to_cpu(cmd->bf_debug_flag)); IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n", le32_to_cpu(cmd->bf_enable_beacon_filter)); IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n", le32_to_cpu(cmd->bf_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n", le32_to_cpu(cmd->bf_escape_timer)); IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n", le32_to_cpu(cmd->bf_roaming_energy_delta)); IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n", le32_to_cpu(cmd->bf_roaming_state)); IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n", le32_to_cpu(cmd->bf_temp_threshold)); IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n", le32_to_cpu(cmd->bf_temp_fast_filter)); IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter)); IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_low is: %d, %d\n", le32_to_cpu(cmd->bf_threshold_absolute_low[0]), le32_to_cpu(cmd->bf_threshold_absolute_low[1])); IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_high is: %d, %d\n", le32_to_cpu(cmd->bf_threshold_absolute_high[0]), le32_to_cpu(cmd->bf_threshold_absolute_high[1])); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BEACON_FILTER_V4)) len = sizeof(struct iwl_beacon_filter_cmd); else len = offsetof(struct iwl_beacon_filter_cmd, bf_threshold_absolute_low); return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, len, cmd); } static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->bss_conf.cqm_rssi_thold) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ cmd->bf_roaming_state = cpu_to_le32(-vif->bss_conf.cqm_rssi_thold); } cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled); } static void iwl_mvm_power_log(struct iwl_mvm *mvm, struct iwl_mac_power_cmd *cmd) { IWL_DEBUG_POWER(mvm, "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n", cmd->id_and_color, iwlmvm_mod_params.power_scheme, le16_to_cpu(cmd->flags)); IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", le16_to_cpu(cmd->keep_alive_seconds)); if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) { IWL_DEBUG_POWER(mvm, "Disable power management\n"); return; } IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n", le32_to_cpu(cmd->rx_data_timeout)); IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n", le32_to_cpu(cmd->tx_data_timeout)); if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n", cmd->skip_dtim_periods); if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n", cmd->lprx_rssi_threshold); if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) { IWL_DEBUG_POWER(mvm, "uAPSD enabled\n"); IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->rx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n", le32_to_cpu(cmd->tx_data_timeout_uapsd)); IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid); IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags); IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp); } } static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, struct 
ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); enum ieee80211_ac_numbers ac; bool tid_found = false; if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } else { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT); cmd->tx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT); } #ifdef CONFIG_IWLWIFI_DEBUGFS /* set advanced pm flag with no uapsd ACs to enable ps-poll */ if (mvmvif->dbgfs_pm.use_ps_poll) { cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); return; } #endif for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { - if (!mvmvif->queue_params[ac].uapsd) + if (!mvmvif->deflink.queue_params[ac].uapsd) continue; if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); cmd->uapsd_ac_flags |= BIT(ac); /* QNDP TID - the highest TID with no admission control */ - if (!tid_found && !mvmvif->queue_params[ac].acm) { + if (!tid_found && !mvmvif->deflink.queue_params[ac].acm) { tid_found = true; switch (ac) { case IEEE80211_AC_VO: cmd->qndp_tid = 6; break; case IEEE80211_AC_VI: cmd->qndp_tid = 5; break; case IEEE80211_AC_BE: cmd->qndp_tid = 0; break; case IEEE80211_AC_BK: cmd->qndp_tid = 1; break; } } } cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | BIT(IEEE80211_AC_VI) | BIT(IEEE80211_AC_BE) | BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); cmd->snooze_window = test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ? cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->heavy_tx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS; cmd->heavy_rx_thld_packets = IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS; } else { cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS; cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS; } cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT; cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { bool *is_p2p_standalone = _data; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: *is_p2p_standalone = false; break; case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) *is_p2p_standalone = false; break; default: break; } } static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN)) + if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr)) return false; /* * Avoid using uAPSD if P2P client is associated to GO that uses * opportunistic power save. This is due to current FW limitation. 
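/*
 * User-space sketch of the QNDP TID selection done in
 * iwl_mvm_power_configure_uapsd() above: the first uAPSD-enabled access
 * category, walking VO..BK, that is not under admission control wins.
 * Illustrative only; the AC ordering and per-AC TID mapping mirror the
 * switch statement above.
 */
#include <stdio.h>
#include <stdbool.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

static const int qndp_tid[AC_NUM] = { 6, 5, 0, 1 };	/* per-AC QNDP TID */

static int pick_qndp_tid(const bool uapsd[AC_NUM], const bool acm[AC_NUM])
{
	for (int ac = AC_VO; ac <= AC_BK; ac++) {
		if (!uapsd[ac])
			continue;		/* AC not trigger/delivery enabled */
		if (!acm[ac])
			return qndp_tid[ac];	/* no admission control: use it */
	}
	return -1;				/* no suitable AC */
}

int main(void)
{
	bool uapsd[AC_NUM] = { true, true, true, true };
	bool acm[AC_NUM]   = { true, false, false, false };	/* VO needs admission */

	printf("QNDP TID = %d\n", pick_qndp_tid(uapsd, acm));	/* prints 5 (AC_VI) */
	return 0;
}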
*/ if (vif->p2p && (vif->bss_conf.p2p_noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)) return false; /* * Avoid using uAPSD if client is in DCM - * low latency issue in Miracast */ if (iwl_mvm_phy_ctx_count(mvm) >= 2) return false; if (vif->p2p) { /* Allow U-APSD only if p2p is stand alone */ bool is_p2p_standalone = true; if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) return false; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_p2p_standalone_iterator, &is_p2p_standalone); if (!is_p2p_standalone) return false; } return true; } static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_channel *chan; + struct ieee80211_bss_conf *link_conf; bool radar_detect = false; + unsigned int link_id; rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - WARN_ON(!chanctx_conf); - if (chanctx_conf) { - chan = chanctx_conf->def.chan; - radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + for_each_vif_active_link(vif, link_conf, link_id) { + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); + /* this happens on link switching, just ignore inactive ones */ + if (!chanctx_conf) + continue; + + radar_detect = !!(chanctx_conf->def.chan->flags & + IEEE80211_CHAN_RADAR); + if (radar_detect) + goto out; } - rcu_read_unlock(); +out: + rcu_read_unlock(); return radar_detect; } static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { int dtimper = vif->bss_conf.dtim_period ?: 1; int skip; /* disable, in case we're supposed to override */ cmd->skip_dtim_periods = 0; cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); if (iwl_mvm_power_is_radar(vif)) return; if (dtimper >= 10) return; if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) return; skip = 2; } else { int dtimper_tu = dtimper * vif->bss_conf.beacon_int; if (WARN_ON(!dtimper_tu)) return; - /* configure skip over dtim up to 306TU - 314 msec */ - skip = max_t(u8, 1, 306 / dtimper_tu); + /* configure skip over dtim up to 900 TU DTIM interval */ + skip = max_t(u8, 1, 900 / dtimper_tu); } - /* the firmware really expects "look at every X DTIMs", so add 1 */ - cmd->skip_dtim_periods = 1 + skip; + cmd->skip_dtim_periods = skip; cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { int dtimper, bi; int keep_alive; struct iwl_mvm_vif *mvmvif __maybe_unused = iwl_mvm_vif_from_mac80211(vif); cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); dtimper = vif->bss_conf.dtim_period; bi = vif->bss_conf.beacon_int; /* * Regardless of power management state the driver must set * keep alive period. FW will use it for sending keep alive NDPs * immediately after association. 
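/*
 * The skip-over-DTIM decision above and the keep-alive value computed just
 * below are plain integer formulas; a user-space sketch of the D3 case
 * (assuming the usual 1 TU = 1024 usec and the 25 second keep-alive floor):
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TU_TO_USEC(tu)		((unsigned long long)(tu) * 1024)
#define USEC_PER_SEC		1000000ULL
#define KEEP_ALIVE_MIN_SEC	25
#define MAX_SKIP_OVER_TU	900	/* skip over DTIM up to ~900 TU in D3 */

int main(void)
{
	unsigned int dtim_period = 3, beacon_int = 100;		/* both in TU */
	unsigned int dtim_tu = dtim_period * beacon_int;

	/* D3 branch of iwl_mvm_power_config_skip_dtim(): whole DTIM
	 * intervals, never fewer than one */
	unsigned int skip = MAX_SKIP_OVER_TU / dtim_tu;
	if (skip < 1)
		skip = 1;

	/* keep-alive from iwl_mvm_power_build_cmd(): at least three DTIM
	 * intervals, never below the floor */
	unsigned long long ka =
		DIV_ROUND_UP(TU_TO_USEC(3 * dtim_tu), USEC_PER_SEC);
	if (ka < KEEP_ALIVE_MIN_SEC)
		ka = KEEP_ALIVE_MIN_SEC;

	printf("skip_dtim_periods = %u, keep_alive = %llu s\n", skip, ka);
	/* prints: skip_dtim_periods = 3, keep_alive = 25 s */
	return 0;
}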
Check that keep alive period * is at least 3 * DTIM */ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), USEC_PER_SEC); keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); cmd->keep_alive_seconds = cpu_to_le16(keep_alive); if (mvm->ps_disabled) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - if (!vif->bss_conf.ps || !mvmvif->pm_enabled) + if (!vif->cfg.ps || !mvmvif->pm_enabled) return; if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) || !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE)) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); if (vif->bss_conf.beacon_rate && (vif->bss_conf.beacon_rate->bitrate == 10 || vif->bss_conf.beacon_rate->bitrate == 60)) { cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; } iwl_mvm_power_config_skip_dtim(mvm, vif, cmd); if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT); } else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) { cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT); cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT); } else { cmd->rx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT); cmd->tx_data_timeout = cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT); } if (iwl_mvm_power_allow_uapsd(mvm, vif)) iwl_mvm_power_configure_uapsd(mvm, vif, cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE) cmd->keep_alive_seconds = cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) { if (mvmvif->dbgfs_pm.skip_over_dtim) cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT) cmd->rx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT) cmd->tx_data_timeout = cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout); if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS) cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) { if (mvmvif->dbgfs_pm.lprx_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD) cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold; if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) { if (mvmvif->dbgfs_pm.snooze_ena) cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); else cmd->flags &= cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK); } if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) { u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK; if (mvmvif->dbgfs_pm.uapsd_misbehaving) cmd->flags |= cpu_to_le16(flag); else cmd->flags &= cpu_to_le16(flag); } #endif /* CONFIG_IWLWIFI_DEBUGFS */ } static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mac_power_cmd cmd = {}; iwl_mvm_power_build_cmd(mvm, vif, &cmd); iwl_mvm_power_log(mvm, &cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); #endif return 
iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0, sizeof(cmd), &cmd); } int iwl_mvm_power_update_device(struct iwl_mvm *mvm) { struct iwl_device_power_cmd cmd = { .flags = 0, }; if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) mvm->ps_disabled = true; if (!mvm->ps_disabled) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ? mvm->disable_power_off_d3 : mvm->disable_power_off) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif if (mvm->ext_clock_valid) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK); IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags); return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd), &cmd); } void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, - ETH_ALEN)) - eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); + if (!ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr)) + eth_zero_addr(mvmvif->uapsd_misbehaving_ap_addr); } static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { u8 *ap_sta_id = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; - /* The ap_sta_id is not expected to change during current association - * so no explicit protection is needed - */ - if (mvmvif->ap_sta_id == *ap_sta_id) - memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN); + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + + /* The ap_sta_id is not expected to change during current + * association so no explicit protection is needed + */ + if (link_info->ap_sta_id == *ap_sta_id) { + ether_addr_copy(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr); + break; + } + } + rcu_read_unlock(); } void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; u8 ap_sta_id = le32_to_cpu(notif->sta_id); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); } struct iwl_power_vifs { struct iwl_mvm *mvm; struct ieee80211_vif *bss_vif; struct ieee80211_vif *p2p_vif; struct ieee80211_vif *ap_vif; struct ieee80211_vif *monitor_vif; bool p2p_active; bool bss_active; bool ap_active; bool monitor_active; }; static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->pm_enabled = false; } static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *disable_ps = _data; - if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) + if (iwl_mvm_vif_is_active(mvmvif)) *disable_ps |= mvmvif->ps_disabled; } static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_power_vifs *power_iterator = _data; - bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; + bool active; + + if 
(!mvmvif->uploaded) + return; + + active = iwl_mvm_vif_is_active(mvmvif); switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: break; case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_AP: /* only a single MAC of the same type */ WARN_ON(power_iterator->ap_vif); power_iterator->ap_vif = vif; if (active) power_iterator->ap_active = true; break; case NL80211_IFTYPE_MONITOR: /* only a single MAC of the same type */ WARN_ON(power_iterator->monitor_vif); power_iterator->monitor_vif = vif; if (active) power_iterator->monitor_active = true; break; case NL80211_IFTYPE_P2P_CLIENT: /* only a single MAC of the same type */ WARN_ON(power_iterator->p2p_vif); power_iterator->p2p_vif = vif; if (active) power_iterator->p2p_active = true; break; case NL80211_IFTYPE_STATION: power_iterator->bss_vif = vif; if (active) power_iterator->bss_active = true; break; default: break; } } static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *bss_mvmvif = NULL; struct iwl_mvm_vif *p2p_mvmvif = NULL; struct iwl_mvm_vif *ap_mvmvif = NULL; bool client_same_channel = false; bool ap_same_channel = false; lockdep_assert_held(&mvm->mutex); /* set pm_enable to false */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_disable_pm_iterator, NULL); if (vifs->bss_vif) bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); if (vifs->p2p_vif) p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif); if (vifs->ap_vif) ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); /* don't allow PM if any TDLS stations exist */ if (iwl_mvm_tdls_sta_count(mvm, NULL)) return; /* enable PM on bss if bss stand alone */ - if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { + if (bss_mvmvif && vifs->bss_active && !vifs->p2p_active && + !vifs->ap_active) { bss_mvmvif->pm_enabled = true; return; } /* enable PM on p2p if p2p stand alone */ - if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { + if (p2p_mvmvif && vifs->p2p_active && !vifs->bss_active && + !vifs->ap_active) { p2p_mvmvif->pm_enabled = true; return; } - if (vifs->bss_active && vifs->p2p_active) - client_same_channel = (bss_mvmvif->phy_ctxt->id == - p2p_mvmvif->phy_ctxt->id); - if (vifs->bss_active && vifs->ap_active) - ap_same_channel = (bss_mvmvif->phy_ctxt->id == - ap_mvmvif->phy_ctxt->id); + if (p2p_mvmvif && bss_mvmvif && vifs->bss_active && vifs->p2p_active) + client_same_channel = + iwl_mvm_have_links_same_channel(bss_mvmvif, p2p_mvmvif); + + if (bss_mvmvif && ap_mvmvif && vifs->bss_active && vifs->ap_active) + ap_same_channel = + iwl_mvm_have_links_same_channel(bss_mvmvif, ap_mvmvif); /* clients are not stand alone: enable PM if DCM */ if (!(client_same_channel || ap_same_channel)) { - if (vifs->bss_active) + if (bss_mvmvif && vifs->bss_active) bss_mvmvif->pm_enabled = true; - if (vifs->p2p_active) + if (p2p_mvmvif && vifs->p2p_active) p2p_mvmvif->pm_enabled = true; return; } /* * There is only one channel in the system and there are only * bss and p2p clients that share it */ if (client_same_channel && !vifs->ap_active) { /* share same channel*/ bss_mvmvif->pm_enabled = true; p2p_mvmvif->pm_enabled = true; } } #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_power_cmd cmd = {}; int pos = 0; mutex_lock(&mvm->mutex); memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd)); mutex_unlock(&mvm->mutex); 
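/*
 * The command was snapshotted under the mutex above, so everything printed
 * below works on a consistent copy without holding the lock.
 */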
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n", iwlmvm_mod_params.power_scheme); pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n", le16_to_cpu(cmd.flags)); pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n", le16_to_cpu(cmd.keep_alive_seconds)); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n", (cmd.flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0); pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n", cmd.skip_dtim_periods); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n", le32_to_cpu(cmd.rx_data_timeout)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n", le32_to_cpu(cmd.tx_data_timeout)); } if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK)) pos += scnprintf(buf+pos, bufsz-pos, "lprx_rssi_threshold = %d\n", cmd.lprx_rssi_threshold); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.rx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n", le32_to_cpu(cmd.tx_data_timeout_uapsd)); pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n", cmd.uapsd_ac_flags); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n", cmd.uapsd_max_sp); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n", cmd.heavy_tx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n", cmd.heavy_rx_thld_packets); pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n", cmd.heavy_tx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n", cmd.heavy_rx_thld_percentage); pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n", (cmd.flags & cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ? 
1 : 0); if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK))) return pos; pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n", cmd.snooze_interval); pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n", cmd.snooze_window); return pos; } void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA) cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA) cmd->bf_roaming_energy_delta = cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE) cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD) cmd->bf_temp_threshold = cpu_to_le32(dbgfs_bf->bf_temp_threshold); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER) cmd->bf_temp_fast_filter = cpu_to_le32(dbgfs_bf->bf_temp_fast_filter); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER) cmd->bf_temp_slow_filter = cpu_to_le32(dbgfs_bf->bf_temp_slow_filter); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG) cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag); if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER) cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER) cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer); if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT) cmd->ba_enable_beacon_abort = cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort); } #endif static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd, u32 cmd_flags) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period || vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); if (!ret) mvmvif->bf_data.bf_enabled = true; return ret; } int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), }; return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags); } static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); if (!ret) mvmvif->bf_data.bf_enabled = false; return ret; } int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags) { return _iwl_mvm_disable_beacon_filter(mvm, vif, flags); } static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) { bool disable_ps; int ret; /* disable PS if CAM */ disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); /* ...or if any of the vifs require PS to be off */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_ps_disabled_iterator, &disable_ps); /* update device power state if it has changed */ if (mvm->ps_disabled != disable_ps) { bool old_ps_disabled = mvm->ps_disabled; 
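/* keep the old value so it can be restored if the device power
 * command below fails */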
mvm->ps_disabled = disable_ps; ret = iwl_mvm_power_update_device(mvm); if (ret) { mvm->ps_disabled = old_ps_disabled; return ret; } } return 0; } static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_beacon_filter_cmd cmd = { IWL_BF_CMD_CONFIG_DEFAULTS, .bf_enable_beacon_filter = cpu_to_le32(1), }; if (!mvmvif->bf_data.bf_enabled) return 0; if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vif->bss_conf.ps || + !vif->cfg.ps || iwl_mvm_vif_low_latency(mvmvif)); return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) { struct iwl_power_vifs vifs = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); /* get vifs info */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_get_vifs_iterator, &vifs); ret = iwl_mvm_power_set_ps(mvm); if (ret) return ret; if (vifs.bss_vif) return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); return 0; } int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) { struct iwl_power_vifs vifs = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); /* get vifs info */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_get_vifs_iterator, &vifs); iwl_mvm_power_set_pm(mvm, &vifs); ret = iwl_mvm_power_set_ps(mvm); if (ret) return ret; if (vifs.bss_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); if (ret) return ret; } if (vifs.p2p_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif); if (ret) return ret; } if (vifs.bss_vif) return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); return 0; } diff --git a/sys/contrib/dev/iwlwifi/mvm/ptp.c b/sys/contrib/dev/iwlwifi/mvm/ptp.c new file mode 100644 index 000000000000..e89259de6f4c --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/ptp.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2021 - 2023 Intel Corporation + */ + +#include "mvm.h" +#include "iwl-debug.h" +#include +#include + +#define IWL_PTP_GP2_WRAP 0x100000000ULL +#define IWL_PTP_WRAP_TIME (3600 * HZ) + +/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional + * part, which means that a value of 1 in one of those fields actually means + * 2^-16 ppm, and 2^16=65536 is 1 ppm. + */ +#define SCALE_FACTOR 65536000000ULL +#define IWL_PTP_WRAP_THRESHOLD_USEC (5000) + +#define IWL_PTP_GET_CROSS_TS_NUM 5 + +static void iwl_mvm_ptp_update_new_read(struct iwl_mvm *mvm, u32 gp2) +{ + /* If the difference is above the threshold, assume it's a wraparound. + * Otherwise assume it's an old read and ignore it. 
+ */ + if (gp2 < mvm->ptp_data.last_gp2 && + mvm->ptp_data.last_gp2 - gp2 < IWL_PTP_WRAP_THRESHOLD_USEC) { + IWL_DEBUG_INFO(mvm, + "PTP: ignore old read (gp2=%u, last_gp2=%u)\n", + gp2, mvm->ptp_data.last_gp2); + return; + } + + if (gp2 < mvm->ptp_data.last_gp2) { + mvm->ptp_data.wrap_counter++; + IWL_DEBUG_INFO(mvm, + "PTP: wraparound detected (new counter=%u)\n", + mvm->ptp_data.wrap_counter); + } + + mvm->ptp_data.last_gp2 = gp2; + schedule_delayed_work(&mvm->ptp_data.dwork, IWL_PTP_WRAP_TIME); +} + +u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time_ns) +{ + struct ptp_data *data = &mvm->ptp_data; + u64 last_gp2_ns = mvm->ptp_data.scale_update_gp2 * NSEC_PER_USEC; + u64 res; + u64 diff; + + iwl_mvm_ptp_update_new_read(mvm, + div64_u64(base_time_ns, NSEC_PER_USEC)); + + IWL_DEBUG_INFO(mvm, "base_time_ns=%llu, wrap_counter=%u\n", + (unsigned long long)base_time_ns, data->wrap_counter); + + base_time_ns = base_time_ns + + (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC); + + /* It is possible that a GP2 timestamp was received from fw before the + * last scale update. Since we don't know how to scale - ignore it. + */ + if (base_time_ns < last_gp2_ns) { + IWL_DEBUG_INFO(mvm, "Time before scale update - ignore\n"); + return 0; + } + + diff = base_time_ns - last_gp2_ns; + IWL_DEBUG_INFO(mvm, "diff ns=%llu\n", (unsigned long long)diff); + + diff = mul_u64_u64_div_u64(diff, data->scaled_freq, + SCALE_FACTOR); + IWL_DEBUG_INFO(mvm, "scaled diff ns=%llu\n", (unsigned long long)diff); + + res = data->scale_update_adj_time_ns + data->delta + diff; + + IWL_DEBUG_INFO(mvm, "base=%llu delta=%lld adj=%llu\n", + (unsigned long long)base_time_ns, (long long)data->delta, + (unsigned long long)res); + return res; +} + +static int +iwl_mvm_get_crosstimestamp_fw(struct iwl_mvm *mvm, u32 *gp2, u64 *sys_time) +{ + struct iwl_synced_time_cmd synced_time_cmd = { + .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH) + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD), + .flags = CMD_WANT_SKB, + .data[0] = &synced_time_cmd, + .len[0] = sizeof(synced_time_cmd), + }; + struct iwl_synced_time_rsp *resp; + struct iwl_rx_packet *pkt; + int ret; + u64 gp2_10ns; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + + if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) { + IWL_ERR(mvm, "PTP: Invalid command response\n"); + iwl_free_resp(&cmd); + return -EIO; + } + + resp = (void *)pkt->data; + + gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 | + le32_to_cpu(resp->gp2_timestamp_lo); + *gp2 = div_u64(gp2_10ns, 100); + + *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 | + le32_to_cpu(resp->platform_timestamp_lo); + + return ret; +} + +static void iwl_mvm_phc_get_crosstimestamp_loop(struct iwl_mvm *mvm, + ktime_t *sys_time, u32 *gp2) +{ + u64 diff = 0, new_diff; + u64 tmp_sys_time; + u32 tmp_gp2; + int i; + + for (i = 0; i < IWL_PTP_GET_CROSS_TS_NUM; i++) { + iwl_mvm_get_sync_time(mvm, CLOCK_REALTIME, &tmp_gp2, NULL, + &tmp_sys_time); + new_diff = tmp_sys_time - ((u64)tmp_gp2 * NSEC_PER_USEC); + if (!diff || new_diff < diff) { + *sys_time = tmp_sys_time; + *gp2 = tmp_gp2; + diff = new_diff; + IWL_DEBUG_INFO(mvm, "PTP: new times: gp2=%u sys=%lld\n", + *gp2, *sys_time); + } + } +} + +static int +iwl_mvm_phc_get_crosstimestamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + 
int ret = 0; + /* Raw value read from GP2 register in usec */ + u32 gp2; + /* GP2 value in ns*/ + s64 gp2_ns; + /* System (wall) time */ + ktime_t sys_time; + + memset(xtstamp, 0, sizeof(struct system_device_crosststamp)); + + if (!mvm->ptp_data.ptp_clock) { + IWL_ERR(mvm, "No PHC clock registered\n"); + return -ENODEV; + } + + mutex_lock(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SYNCED_TIME)) { + ret = iwl_mvm_get_crosstimestamp_fw(mvm, &gp2, &sys_time); + + if (ret) + goto out; + } else { + iwl_mvm_phc_get_crosstimestamp_loop(mvm, &sys_time, &gp2); + } + + gp2_ns = iwl_mvm_ptp_get_adj_time(mvm, (u64)gp2 * NSEC_PER_USEC); + + IWL_INFO(mvm, "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n", + gp2, mvm->ptp_data.last_gp2, gp2_ns, (s64)sys_time); + + /* System monotonic raw time is not used */ + xtstamp->device = (ktime_t)gp2_ns; + xtstamp->sys_realtime = sys_time; + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_ptp_work(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, + ptp_data.dwork.work); + u32 gp2; + + mutex_lock(&mvm->mutex); + gp2 = iwl_mvm_get_systime(mvm); + iwl_mvm_ptp_update_new_read(mvm, gp2); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + u64 gp2; + u64 ns; + + mutex_lock(&mvm->mutex); + gp2 = iwl_mvm_get_systime(mvm); + ns = iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC); + mutex_unlock(&mvm->mutex); + + *ts = ns_to_timespec64(ns); + return 0; +} + +static int iwl_mvm_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + struct ptp_data *data = container_of(ptp, struct ptp_data, + ptp_clock_info); + + mutex_lock(&mvm->mutex); + data->delta += delta; + IWL_DEBUG_INFO(mvm, "delta=%lld, new delta=%lld\n", (long long)delta, + (long long)data->delta); + mutex_unlock(&mvm->mutex); + return 0; +} + +static int iwl_mvm_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mvm->ptp_data; + u32 gp2; + + mutex_lock(&mvm->mutex); + + /* Must call _iwl_mvm_ptp_get_adj_time() before updating + * data->scale_update_gp2 or data->scaled_freq since + * scale_update_adj_time_ns should reflect the previous scaled_freq. + */ + gp2 = iwl_mvm_get_systime(mvm); + data->scale_update_adj_time_ns = + iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC); + data->scale_update_gp2 = gp2; + data->wrap_counter = 0; + data->delta = 0; + + data->scaled_freq = SCALE_FACTOR + scaled_ppm; + IWL_DEBUG_INFO(mvm, "adjfine: scaled_ppm=%ld new=%llu\n", + scaled_ppm, (unsigned long long)data->scaled_freq); + + mutex_unlock(&mvm->mutex); + return 0; +} + +/* iwl_mvm_ptp_init - initialize PTP for devices which support it. + * @mvm: internal mvm structure, see &struct iwl_mvm. + * + * Performs the required steps for enabling PTP support. 
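/*
 * The frequency scaling and GP2 wrap handling above reduce to two pieces of
 * integer arithmetic; a user-space sketch (values chosen small enough that
 * plain 64-bit multiplication cannot overflow; the driver itself uses
 * mul_u64_u64_div_u64() for the general case):
 */
#include <stdio.h>
#include <stdint.h>

#define SCALE_FACTOR	65536000000ULL	/* 1 ppm == 65536 scaled_ppm units */
#define GP2_WRAP_USEC	0x100000000ULL	/* GP2 is a 32-bit usec counter */
#define NSEC_PER_USEC	1000ULL

int main(void)
{
	long scaled_ppm = 6553600;			/* +100 ppm */
	uint64_t scaled_freq = SCALE_FACTOR + scaled_ppm;

	/* scale a nanosecond delta as iwl_mvm_ptp_get_adj_time() does */
	uint64_t diff_ns = 1000000;			/* 1 ms since last adjfine */
	uint64_t scaled = diff_ns * scaled_freq / SCALE_FACTOR;

	/* each GP2 wraparound adds a full 32-bit usec period */
	unsigned int wrap_counter = 1;
	uint64_t wrap_ns = (uint64_t)wrap_counter * GP2_WRAP_USEC * NSEC_PER_USEC;

	printf("scaled diff = %llu ns\n", (unsigned long long)scaled);
	/* prints 1000100 ns, i.e. the 1 ms delta stretched by +100 ppm */
	printf("wrap offset = %llu ns (one wrap is ~71.6 minutes)\n",
	       (unsigned long long)wrap_ns);
	return 0;
}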
+ */ +void iwl_mvm_ptp_init(struct iwl_mvm *mvm) +{ + /* Warn if the interface already has a ptp_clock defined */ + if (WARN_ON(mvm->ptp_data.ptp_clock)) + return; + + mvm->ptp_data.ptp_clock_info.owner = THIS_MODULE; + mvm->ptp_data.ptp_clock_info.max_adj = 0x7fffffff; + mvm->ptp_data.ptp_clock_info.getcrosststamp = + iwl_mvm_phc_get_crosstimestamp; + mvm->ptp_data.ptp_clock_info.adjfine = iwl_mvm_ptp_adjfine; + mvm->ptp_data.ptp_clock_info.adjtime = iwl_mvm_ptp_adjtime; + mvm->ptp_data.ptp_clock_info.gettime64 = iwl_mvm_ptp_gettime; + mvm->ptp_data.scaled_freq = SCALE_FACTOR; + + /* Give a short 'friendly name' to identify the PHC clock */ + snprintf(mvm->ptp_data.ptp_clock_info.name, + sizeof(mvm->ptp_data.ptp_clock_info.name), + "%s", "iwlwifi-PTP"); + + INIT_DELAYED_WORK(&mvm->ptp_data.dwork, iwl_mvm_ptp_work); + + mvm->ptp_data.ptp_clock = + ptp_clock_register(&mvm->ptp_data.ptp_clock_info, mvm->dev); + + if (IS_ERR(mvm->ptp_data.ptp_clock)) { + IWL_ERR(mvm, "Failed to register PHC clock (%ld)\n", + PTR_ERR(mvm->ptp_data.ptp_clock)); + mvm->ptp_data.ptp_clock = NULL; + } else if (mvm->ptp_data.ptp_clock) { + IWL_INFO(mvm, "Registered PHC clock: %s, with index: %d\n", + mvm->ptp_data.ptp_clock_info.name, + ptp_clock_index(mvm->ptp_data.ptp_clock)); + } +} + +/* iwl_mvm_ptp_remove - disable PTP device. + * @mvm: internal mvm structure, see &struct iwl_mvm. + * + * Disable PTP support. + */ +void iwl_mvm_ptp_remove(struct iwl_mvm *mvm) +{ + if (mvm->ptp_data.ptp_clock) { + IWL_INFO(mvm, "Unregistering PHC clock: %s, with index: %d\n", + mvm->ptp_data.ptp_clock_info.name, + ptp_clock_index(mvm->ptp_data.ptp_clock)); + + ptp_clock_unregister(mvm->ptp_data.ptp_clock); + mvm->ptp_data.ptp_clock = NULL; + memset(&mvm->ptp_data.ptp_clock_info, 0, + sizeof(mvm->ptp_data.ptp_clock_info)); + mvm->ptp_data.last_gp2 = 0; + cancel_delayed_work_sync(&mvm->ptp_data.dwork); + } +} diff --git a/sys/contrib/dev/iwlwifi/mvm/quota.c b/sys/contrib/dev/iwlwifi/mvm/quota.c index c862bd243b55..aad2614af9ad 100644 --- a/sys/contrib/dev/iwlwifi/mvm/quota.c +++ b/sys/contrib/dev/iwlwifi/mvm/quota.c @@ -1,298 +1,299 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include "fw-api.h" #include "mvm.h" #define QUOTA_100 IWL_MVM_MAX_QUOTA #define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100) struct iwl_mvm_quota_iterator_data { int n_interfaces[MAX_BINDINGS]; int colors[MAX_BINDINGS]; int low_latency[MAX_BINDINGS]; #ifdef CONFIG_IWLWIFI_DEBUGFS int dbgfs_min[MAX_BINDINGS]; #endif int n_low_latency_bindings; struct ieee80211_vif *disabled_vif; }; static void iwl_mvm_quota_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_quota_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 id; /* skip disabled interfaces here immediately */ if (vif == data->disabled_vif) return; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; /* currently, PHY ID == binding ID */ - id = mvmvif->phy_ctxt->id; + id = mvmvif->deflink.phy_ctxt->id; /* need at least one binding per PHY */ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); if (WARN_ON_ONCE(id >= MAX_BINDINGS)) return; switch (vif->type) { case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) break; return; case 
NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: if (mvmvif->ap_ibss_active) break; return; case NL80211_IFTYPE_MONITOR: if (mvmvif->monitor_active) break; return; case NL80211_IFTYPE_P2P_DEVICE: return; default: WARN_ON_ONCE(1); return; } if (data->colors[id] < 0) - data->colors[id] = mvmvif->phy_ctxt->color; + data->colors[id] = mvmvif->deflink.phy_ctxt->color; else - WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); + WARN_ON_ONCE(data->colors[id] != + mvmvif->deflink.phy_ctxt->color); data->n_interfaces[id]++; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->dbgfs_quota_min) data->dbgfs_min[id] = max(data->dbgfs_min[id], mvmvif->dbgfs_quota_min); #endif if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { data->n_low_latency_bindings++; data->low_latency[id] = true; } } static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd) { #ifdef CONFIG_NL80211_TESTMODE struct iwl_mvm_vif *mvmvif; int i, phy_id = -1, beacon_int = 0; if (!mvm->noa_duration || !mvm->noa_vif) return; mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif); if (!mvmvif->ap_ibss_active) return; - phy_id = mvmvif->phy_ctxt->id; + phy_id = mvmvif->deflink.phy_ctxt->id; beacon_int = mvm->noa_vif->bss_conf.beacon_int; for (i = 0; i < MAX_BINDINGS; i++) { struct iwl_time_quota_data *data = iwl_mvm_quota_cmd_get_quota(mvm, cmd, i); u32 id_n_c = le32_to_cpu(data->id_and_color); u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS; u32 quota = le32_to_cpu(data->quota); if (id != phy_id) continue; quota *= (beacon_int - mvm->noa_duration); quota /= beacon_int; IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", le32_to_cpu(data->quota), quota); data->quota = cpu_to_le32(quota); } #endif } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_update, struct ieee80211_vif *disabled_vif) { struct iwl_time_quota_cmd cmd = {}; int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat; struct iwl_mvm_quota_iterator_data data = { .n_interfaces = {}, .colors = { -1, -1, -1, -1 }, .disabled_vif = disabled_vif, }; struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd; struct iwl_time_quota_data *qdata, *last_data; bool send = false; lockdep_assert_held(&mvm->mutex); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) return 0; /* update all upon completion */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return 0; /* iterator data above must match */ BUILD_BUG_ON(MAX_BINDINGS != 4); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_quota_iterator, &data); /* * The FW's scheduling session consists of * IWL_MVM_MAX_QUOTA fragments. Divide these fragments * equally between all the bindings that require quota */ num_active_macs = 0; for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID); num_active_macs += data.n_interfaces[i]; } n_non_lowlat = num_active_macs; if (data.n_low_latency_bindings == 1) { for (i = 0; i < MAX_BINDINGS; i++) { if (data.low_latency[i]) { n_non_lowlat -= data.n_interfaces[i]; break; } } } if (data.n_low_latency_bindings == 1 && n_non_lowlat) { /* * Reserve quota for the low latency binding in case that * there are several data bindings but only a single * low latency one. Split the rest of the quota equally * between the other data interfaces. 
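As a quick worked example of the split described above (the numbers are made up for illustration and are not the driver's real constants):

/* QUOTA_100 = 100, QUOTA_LOWLAT_MIN = 25, one low-latency binding,
 * n_non_lowlat = 3 interfaces on the other bindings:
 *   quota     = (100 - 25) / 3 = 25 fragments per non-low-latency interface
 *   quota_rem = 100 - 3 * 25 - 25 = 0 fragments left over
 * A binding carrying two of those interfaces ends up with 2 * 25 = 50,
 * the low-latency binding keeps its reserved 25, and any non-zero
 * remainder is later added to the first binding that got quota.
 */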
*/ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN; IWL_DEBUG_QUOTA(mvm, "quota: low-latency binding active, remaining quota per other binding: %d\n", quota); } else if (num_active_macs) { /* * There are 0 or more than 1 low latency bindings, or all the * data interfaces belong to the single low latency binding. * Split the quota equally between the data interfaces. */ quota = QUOTA_100 / num_active_macs; quota_rem = QUOTA_100 % num_active_macs; IWL_DEBUG_QUOTA(mvm, "quota: splitting evenly per binding: %d\n", quota); } else { /* values don't really matter - won't be used */ quota = 0; quota_rem = 0; } for (idx = 0, i = 0; i < MAX_BINDINGS; i++) { if (data.colors[i] < 0) continue; qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx); qdata->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i])); if (data.n_interfaces[i] <= 0) qdata->quota = cpu_to_le32(0); #ifdef CONFIG_IWLWIFI_DEBUGFS else if (data.dbgfs_min[i]) qdata->quota = cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); #endif else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i]) /* * There is more than one binding, but only one of the * bindings is in low latency. For this case, allocate * the minimal required quota for the low latency * binding. */ qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN); else qdata->quota = cpu_to_le32(quota * data.n_interfaces[i]); WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100, "Binding=%d, quota=%u > max=%u\n", idx, le32_to_cpu(qdata->quota), QUOTA_100); qdata->max_duration = cpu_to_le32(0); idx++; } /* Give the remainder of the session to the first data binding */ for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); if (le32_to_cpu(qdata->quota) != 0) { le32_add_cpu(&qdata->quota, quota_rem); IWL_DEBUG_QUOTA(mvm, "quota: giving remainder of %d to binding %d\n", quota_rem, i); break; } } iwl_mvm_adjust_quota_for_noa(mvm, &cmd); /* check that we have non-zero quota for all valid bindings */ for (i = 0; i < MAX_BINDINGS; i++) { qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i); last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i); if (qdata->id_and_color != last_data->id_and_color) send = true; if (qdata->max_duration != last_data->max_duration) send = true; if (abs((int)le32_to_cpu(qdata->quota) - (int)le32_to_cpu(last_data->quota)) > IWL_MVM_QUOTA_THRESHOLD) send = true; if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID)) continue; WARN_ONCE(qdata->quota == 0, "zero quota on binding %d\n", i); } if (!send && !force_update) { /* don't send a practically unchanged command, the firmware has * to re-initialize a lot of state and that can have an adverse * impact on it */ return 0; } err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, iwl_mvm_quota_cmd_size(mvm), &cmd); if (err) IWL_ERR(mvm, "Failed to send quota: %d\n", err); else mvm->last_quota_cmd = cmd; return err; } diff --git a/sys/contrib/dev/iwlwifi/mvm/rfi.c b/sys/contrib/dev/iwlwifi/mvm/rfi.c index bb77bc9aa821..2ecd32bed752 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rfi.c +++ b/sys/contrib/dev/iwlwifi/mvm/rfi.c @@ -1,143 +1,153 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 - 2021 Intel Corporation + * Copyright (C) 2020 - 2022 Intel Corporation */ #include "mvm.h" #include "fw/api/commands.h" #include "fw/api/phy-ctxt.h" /* * DDR needs frequency in units of 16.666MHz, so provide FW with the * frequency values in the adjusted format. 
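To make that conversion concrete, simple arithmetic on the first entry of the table that follows (approximate, since 16.666 MHz is really 50/3 MHz):

/* e.g. a 2667 MHz DDR clock expressed in 16.666 MHz units:
 *   2667 / 16.666 ~= 160  ->  the cpu_to_le16(160) key of the first
 *   iwl_rfi_table entry below; the channel and band arrays beside it
 *   list the channels associated with that DDR frequency.
 */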
*/ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { /* frequency 2667MHz */ {cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 2933MHz */ {cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169, 171, 173, 175}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 3200MHz */ {cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 3733MHz */ {cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 4000MHz */ {cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 4267MHz */ {cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 4400MHz */ {cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 5200MHz */ {cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 5600MHz */ {cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122}, {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 6000MHz */ {cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 6400MHz */ {cpu_to_le16(384), {79, 83, 85, 87, 89, 91, 93,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, }; +bool iwl_rfi_supported(struct iwl_mvm *mvm) +{ + /* The feature depends on a platform bugfix, so for now + * it's always disabled. + * When the platform support detection is implemented we should + * check FW TLV and platform support instead. 
+ */ + return false; +} + int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table) { int ret; struct iwl_rfi_config_cmd cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(SYSTEM_GROUP, RFI_CONFIG_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return -EOPNOTSUPP; lockdep_assert_held(&mvm->mutex); /* in case no table is passed, use the default one */ if (!rfi_table) { memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table)); } else { memcpy(cmd.table, rfi_table, sizeof(cmd.table)); /* notify FW the table is not the default one */ cmd.oem = 1; } ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret); return ret; } struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) { struct iwl_rfi_freq_table_resp_cmd *resp; int resp_size = sizeof(*resp); int ret; struct iwl_host_cmd cmd = { .id = WIDE_ID(SYSTEM_GROUP, RFI_GET_FREQ_TABLE_CMD), .flags = CMD_WANT_SKB, }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &cmd); mutex_unlock(&mvm->mutex); if (ret) return ERR_PTR(ret); if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size)) return ERR_PTR(-EIO); resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL); if (!resp) return ERR_PTR(-ENOMEM); iwl_free_resp(&cmd); return resp; } void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data; IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason); } diff --git a/sys/contrib/dev/iwlwifi/mvm/rs-fw.c b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c index d8c3d7ff4f44..dbd6c9acda37 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rs-fw.c +++ b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c @@ -1,533 +1,751 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "rs.h" #include "fw-api.h" #include "sta.h" #include "iwl-op-mode.h" #include "mvm.h" -static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) +static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta) { - switch (sta->deflink.bandwidth) { + switch (link_sta->bandwidth) { + case IEEE80211_STA_RX_BW_320: + return IWL_TLC_MNG_CH_WIDTH_320MHZ; case IEEE80211_STA_RX_BW_160: return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: return IWL_TLC_MNG_CH_WIDTH_80MHZ; case IEEE80211_STA_RX_BW_40: return IWL_TLC_MNG_CH_WIDTH_40MHZ; case IEEE80211_STA_RX_BW_20: default: return IWL_TLC_MNG_CH_WIDTH_20MHZ; } } static u8 rs_fw_set_active_chains(u8 chains) { u8 fw_chains = 0; if (chains & ANT_A) fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; return fw_chains; } -static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) +static u8 rs_fw_sgi_cw_support(struct ieee80211_link_sta *link_sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + 
struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; u8 supp = 0; if (he_cap->has_he) return 0; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); return supp; } static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct ieee80211_supported_band *sband) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; bool vht_ena = vht_cap->vht_supported; u16 flags = 0; /* get STBC flags */ if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } if (mvm->cfg->ht_params->ldpc && ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; /* consider LDPC support in case of HE */ if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - if (sband->iftype_data && sband->iftype_data->he_cap.has_he && - !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + if (sband_he_cap && + !(sband_he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK && - sband->iftype_data && - sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) + sband_he_cap && + sband_he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; return flags; } static int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1))); rx_mcs >>= (2 * (nss - 1)); switch (rx_mcs) { case IEEE80211_VHT_MCS_SUPPORT_0_7: return IWL_TLC_MNG_HT_RATE_MCS7; case IEEE80211_VHT_MCS_SUPPORT_0_8: return IWL_TLC_MNG_HT_RATE_MCS8; case IEEE80211_VHT_MCS_SUPPORT_0_9: return IWL_TLC_MNG_HT_RATE_MCS9; default: WARN_ON_ONCE(1); break; } return 0; } static void -rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, +rs_fw_vht_set_enabled_rates(const struct ieee80211_link_sta *link_sta, const struct ieee80211_sta_vht_cap *vht_cap, struct iwl_tlc_config_cmd_v4 *cmd) { u16 supp; int i, highest_mcs; - u8 max_nss = sta->deflink.rx_nss; + u8 max_nss = link_sta->rx_nss; struct ieee80211_vht_cap ieee_vht_cap = { 
.vht_cap_info = cpu_to_le32(vht_cap->cap), .supp_mcs = vht_cap->vht_mcs, }; /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) max_nss = 1; for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) { int nss = i + 1; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss); if (!highest_mcs) continue; supp = BIT(highest_mcs + 1) - 1; - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); /* * Check if VHT extended NSS indicates that the bandwidth/NSS * configuration is supported - only for MCS 0 since we already * decoded the MCS bits anyway ourselves. */ - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160 && + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 && ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80]; } } static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) { switch (mcs) { case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1; case IEEE80211_HE_MCS_NOT_SUPPORTED: return 0; } WARN(1, "invalid HE MCS %d\n", mcs); return 0; } static void -rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, - struct ieee80211_supported_band *sband, +rs_fw_he_set_enabled_rates(const struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap, struct iwl_tlc_config_cmd_v4 *cmd) { - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); - u16 tx_mcs_80 = - le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80); - u16 tx_mcs_160 = - le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); + u16 tx_mcs_80 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_80); + u16 tx_mcs_160 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_160); int i; - u8 nss = sta->deflink.rx_nss; + u8 nss = link_sta->rx_nss; /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) nss = 1; for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3; /* If one side doesn't support - mark both as not supporting */ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; 
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } -static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, +static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss) +{ + u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX); + u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX); + /* the max nss that can be used, + * is the min with our tx capa and the peer rx capa. + */ + return min(tx, rx); +} + +#define MAX_NSS_MCS(mcs_num, rx, tx) \ + rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \ + (tx)->rx_tx_mcs ##mcs_num## _max_nss) + +static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3], + enum IWL_TLC_MCS_PER_BW bw, + u8 max_nss, u16 mcs_msk) +{ + if (max_nss >= 2) + ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk); + + if (max_nss >= 1) + ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk); +} + +static const +struct ieee80211_eht_mcs_nss_supp_bw * +rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw, + const struct ieee80211_eht_mcs_nss_supp *eht_mcs) +{ + switch (bw) { + case IWL_TLC_MCS_PER_BW_80: + return &eht_mcs->bw._80; + case IWL_TLC_MCS_PER_BW_160: + return &eht_mcs->bw._160; + case IWL_TLC_MCS_PER_BW_320: + return &eht_mcs->bw._320; + default: + return NULL; + } +} + +static void +rs_fw_eht_set_enabled_rates(struct ieee80211_vif *vif, + const struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap, + const struct ieee80211_sta_eht_cap *sband_eht_cap, + struct iwl_tlc_config_cmd_v4 *cmd) +{ + /* peer RX mcs capa */ + const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs = + &link_sta->eht_cap.eht_mcs_nss_supp; + /* our TX mcs capa */ + const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs = + &sband_eht_cap->eht_mcs_nss_supp; + + enum IWL_TLC_MCS_PER_BW bw; + struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20; + struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20; + + /* peer is 20Mhz only */ + if (vif->type == NL80211_IFTYPE_AP && + !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + mcs_rx_20 = eht_rx_mcs->only_20mhz; + } else { + mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss; + mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss; + } + + /* nic is 20Mhz only */ + if (!(sband_he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + mcs_tx_20 = eht_tx_mcs->only_20mhz; + } else { + mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss; + mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss; + } + + /* rates for 20/40/80 bw */ + bw = IWL_TLC_MCS_PER_BW_80; + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12)); + + /* rate for 160/320 bw */ + for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) { + const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx = + rs_fw_rs_mcs2eht_mcs(bw, 
eht_rx_mcs); + const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx = + rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs); + + /* got unsupported index for bw */ + if (!mcs_rx || !mcs_tx) + continue; + + /* break out if we don't support the bandwidth */ + if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ)) + break; + + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12)); + } + + /* the station support only a single receive chain */ + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC || + link_sta->rx_nss < 2) + memset(cmd->ht_rates[IWL_TLC_NSS_2], 0, + sizeof(cmd->ht_rates[IWL_TLC_NSS_2])); +} + +static void rs_fw_set_supp_rates(struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, struct ieee80211_supported_band *sband, + const struct ieee80211_sta_he_cap *sband_he_cap, + const struct ieee80211_sta_eht_cap *sband_eht_cap, struct iwl_tlc_config_cmd_v4 *cmd) { int i; u16 supp = 0; unsigned long tmp; /* must be unsigned long for for_each_set_bit */ - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; /* non HT rates */ - tmp = sta->deflink.supp_rates[sband->band]; + tmp = link_sta->supp_rates[sband->band]; for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; /* HT/VHT rates */ - if (he_cap->has_he) { + if (link_sta->eht_cap.has_eht && sband_he_cap && sband_eht_cap) { + cmd->mode = IWL_TLC_MNG_MODE_EHT; + rs_fw_eht_set_enabled_rates(vif, link_sta, sband_he_cap, + sband_eht_cap, cmd); + } else if (he_cap->has_he && sband_he_cap) { cmd->mode = IWL_TLC_MNG_MODE_HE; - rs_fw_he_set_enabled_rates(sta, sband, cmd); + rs_fw_he_set_enabled_rates(link_sta, sband_he_cap, cmd); } else if (vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; - rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); + rs_fw_vht_set_enabled_rates(link_sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = 0; else cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; struct ieee80211_sta *sta; + struct ieee80211_link_sta *link_sta; struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_link_sta *mvm_link_sta; struct iwl_lq_sta_rs_fw *lq_sta; u32 flags; rcu_read_lock(); notif = (void *)pkt->data; + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[notif->sta_id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); - if (IS_ERR_OR_NULL(sta)) { + if (IS_ERR_OR_NULL(sta) || !link_sta) { /* can happen in remove station flow where 
mvm removed internally * the station before removing from FW */ IWL_DEBUG_RATE(mvm, "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n", notif->sta_id); goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); goto out; } flags = le32_to_cpu(notif->flags); - lq_sta = &mvmsta->lq_sta.rs_fw; + mvm_link_sta = rcu_dereference(mvmsta->link[link_sta->link_id]); + if (!mvm_link_sta) { + IWL_DEBUG_RATE(mvm, + "Invalid mvmsta RCU pointer for link (%d) of sta id (%d) in TLC notification\n", + link_sta->link_id, notif->sta_id); + goto out; + } + lq_sta = &mvm_link_sta->lq_sta.rs_fw; if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, 0) < 3) { rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate), le32_to_cpu(notif->rate)); IWL_DEBUG_RATE(mvm, "Got rate in old format. Rate: %s. Converting.\n", pretty_rate); lq_sta->last_rate_n_flags = iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); } else { lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); } rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); } - if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) { + if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) { u16 size = le32_to_cpu(notif->amsdu_size); int i; - if (sta->max_amsdu_len < size) { + if (link_sta->agg.max_amsdu_len < size) { /* - * In debug sta->max_amsdu_len < size + * In debug link_sta->agg.max_amsdu_len < size * so also check with orig_amsdu_len which holds the * original data before debugfs changed the value */ - WARN_ON(mvmsta->orig_amsdu_len < size); + WARN_ON(mvm_link_sta->orig_amsdu_len < size); goto out; } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; - sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { if (mvmsta->amsdu_enabled & BIT(i)) - sta->max_tid_amsdu_len[i] = + link_sta->agg.max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i); else /* * Not so elegant, but this will effectively * prevent AMSDU on this TID */ - sta->max_tid_amsdu_len[i] = 1; + link_sta->agg.max_tid_amsdu_len[i] = 1; } IWL_DEBUG_RATE(mvm, "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", le32_to_cpu(notif->amsdu_size), size, mvmsta->amsdu_enabled); } out: rcu_read_unlock(); } -u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) +u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta) { +#if defined(__linux__) struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; +#endif + const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap; + + if (WARN_ON_ONCE(!link_conf->chandef.chan)) + return IEEE80211_MAX_MPDU_LEN_VHT_3895; - if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - switch (le16_get_bits(sta->deflink.he_6ghz_capa.capa, + if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + switch (le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: return IEEE80211_MAX_MPDU_LEN_VHT_7991; default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } - } else - if (vht_cap->vht_supported) { + } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ && + eht_cap->has_eht) { + switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0], + IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) { + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454: + return IEEE80211_MAX_MPDU_LEN_VHT_11454; + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991: + return IEEE80211_MAX_MPDU_LEN_VHT_7991; + default: + return IEEE80211_MAX_MPDU_LEN_VHT_3895; + } + } else if (vht_cap->vht_supported) { switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: return IEEE80211_MAX_MPDU_LEN_VHT_7991; default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } } else if (ht_cap->ht_supported) { if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU) /* * agg is offloaded so we need to assume that agg * are enabled and max mpdu in ampdu is 4095 * (spec 802.11-2016 9.3.2.1) */ return IEEE80211_MAX_MPDU_LEN_HT_BA; else return IEEE80211_MAX_MPDU_LEN_HT_3839; } /* in legacy mode no amsdu is enabled so return zero */ return 0; } -void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool update) +void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + enum nl80211_band band) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; - u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); + u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta, link_conf, link_sta); + const struct ieee80211_sta_he_cap *sband_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, vif); + const struct ieee80211_sta_eht_cap *sband_eht_cap = + ieee80211_get_eht_iftype_cap_vif(sband, vif); + struct iwl_mvm_link_sta *mvm_link_sta; + struct iwl_lq_sta_rs_fw *lq_sta; struct 
iwl_tlc_config_cmd_v4 cfg_cmd = { - .sta_id = mvmsta->sta_id, - .max_ch_width = update ? - rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, - .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), + .max_ch_width = mvmsta->authorized ? + rs_fw_bw_from_sta_bw(link_sta) : IWL_TLC_MNG_CH_WIDTH_20MHZ, + .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, vif, link_sta, + sband_he_cap)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), - .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + .sgi_ch_width_supp = rs_fw_sgi_cw_support(link_sta), .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ? cpu_to_le16(max_amsdu_len) : 0, }; - int ret; + unsigned int link_id = link_conf->link_id; int cmd_ver; + int ret; + + /* Enable external EHT LTF only for GL device and if there's + * mutual support by AP and client + */ + if (CSR_HW_REV_TYPE(mvm->trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + sband_eht_cap && + sband_eht_cap->eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF && + link_sta->eht_cap.has_eht && + link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) { + IWL_DEBUG_RATE(mvm, "Set support for Extra EHT LTF\n"); + cfg_cmd.flags |= + cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK); + } + + rcu_read_lock(); + mvm_link_sta = rcu_dereference(mvmsta->link[link_id]); + if (WARN_ON_ONCE(!mvm_link_sta)) { + rcu_read_unlock(); + return; + } + cfg_cmd.sta_id = mvm_link_sta->sta_id; + + lq_sta = &mvm_link_sta->lq_sta.rs_fw; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); + rcu_read_unlock(); + #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_fw_set_supp_rates(sta, sband, &cfg_cmd); + rs_fw_set_supp_rates(vif, link_sta, sband, + sband_he_cap, sband_eht_cap, + &cfg_cmd); /* * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ - sta->max_amsdu_len = max_amsdu_len; + sta->deflink.agg.max_amsdu_len = max_amsdu_len; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD), 0); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n", cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n", cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n", cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n", cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n", cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]); if (cmd_ver == 4) { ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd), &cfg_cmd); } else if (cmd_ver < 4) { struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = { .sta_id = cfg_cmd.sta_id, .max_ch_width = cfg_cmd.max_ch_width, .mode = cfg_cmd.mode, .chains = cfg_cmd.chains, .amsdu = !!cfg_cmd.max_mpdu_len, .flags = cfg_cmd.flags, .non_ht_rates = cfg_cmd.non_ht_rates, .ht_rates[0][0] = cfg_cmd.ht_rates[0][0], .ht_rates[0][1] = cfg_cmd.ht_rates[0][1], .ht_rates[1][0] = cfg_cmd.ht_rates[1][0], .ht_rates[1][1] = cfg_cmd.ht_rates[1][1], .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp, .max_mpdu_len = cfg_cmd.max_mpdu_len, }; u16 cmd_size = 
sizeof(cfg_cmd_v3); /* In old versions of the API the struct is 4 bytes smaller */ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD), 0) < 3) cmd_size -= 4; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, &cfg_cmd_v3); } else { ret = -EINVAL; } if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) { /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n"); return 0; } -void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) +void iwl_mvm_rs_add_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *link_sta) { - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_lq_sta_rs_fw *lq_sta; - IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + lq_sta = &link_sta->lq_sta.rs_fw; lq_sta->pers.drv = mvm; - lq_sta->pers.sta_id = mvmsta->sta_id; + lq_sta->pers.sta_id = link_sta->sta_id; lq_sta->pers.chains = 0; - memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + memset(lq_sta->pers.chain_signal, 0, + sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; lq_sta->last_rate_n_flags = 0; #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->pers.dbg_fixed_rate = 0; #endif } + +void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) +{ + unsigned int link_id; + + IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + + for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!link) + continue; + + iwl_mvm_rs_add_sta_link(mvm, link); + } +} diff --git a/sys/contrib/dev/iwlwifi/mvm/rs.c b/sys/contrib/dev/iwlwifi/mvm/rs.c index fb9c7210122c..981686e66920 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rs.c +++ b/sys/contrib/dev/iwlwifi/mvm/rs.c @@ -1,105 +1,107 @@ /*- * Copyright (c) 2020-2021 The FreeBSD Foundation * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * XXX-BZ: * This file is left as a wrapper to make mvm compile and we will only * deal with it on a need basis. 
Most newer chipsets do this in firmware. */ #include #include "mvm.h" #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) { } void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm) { } #endif int iwl_mvm_rate_control_register(void) { return (0); } int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *sta, bool enable) { return (0); } void iwl_mvm_rate_control_unregister(void) { } void -iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool t) +iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, struct ieee80211_link_sta *link_sta, + enum nl80211_band band) { } void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *ba_info, bool t) { } void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status) { /* * Assumption based on mvm/sta.h is that this should update * mvmsta->lq_sta.rs_drv but so far we only saw a iwl_lq_cmd (lq) * access in that struct so nowhere to put rssi information. * So the only thing would be if this is required internally * to functions in this file. */ } int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate) { return (0); } diff --git a/sys/contrib/dev/iwlwifi/mvm/rs.h b/sys/contrib/dev/iwlwifi/mvm/rs.h index 9f99f89bd744..a9b10b841f95 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rs.h +++ b/sys/contrib/dev/iwlwifi/mvm/rs.h @@ -1,97 +1,105 @@ /*- * Copyright (c) 2020-2021 The FreeBSD Foundation * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * XXX-BZ: * This file is left as a wrapper to make mvm compile and we will only * deal with it on a need basis. Most newer chipsets do this in firmware. 
*/ #ifndef _IWLWIFI_MVM_RS_H #define _IWLWIFI_MVM_RS_H #include #include "iwl-trans.h" #include "fw-api.h" #define RS_NAME "XXX_unknown" #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (64-1) #define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (256-1) struct iwl_mvm; struct iwl_lq_sta_rs_fw { int last_rate_n_flags; struct { struct iwl_mvm *drv; uint8_t sta_id; uint8_t chains; uint8_t chain_signal[IEEE80211_MAX_CHAINS]; uint8_t last_rssi; #ifdef CONFIG_MAC80211_DEBUGFS uint32_t dbg_fixed_rate; uint32_t dbg_agg_frame_count_lim; #endif } pers; }; struct iwl_lq_sta { struct iwl_lq_cmd lq; struct { spinlock_t lock; + uint16_t max_agg_bufsize; } pers; }; #define RS_DRV_DATA_PACK(_c, _f) ((void *)(uintptr_t)(_c | (uintptr_t)(_f) << sizeof(_c))) /* XXX TODO | ? */ struct iwl_mvm_sta; +struct iwl_mvm_link_sta; #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_reset_frame_stats(struct iwl_mvm *); #endif void iwl_mvm_rs_add_sta(struct iwl_mvm *, struct iwl_mvm_sta *); void iwl_mvm_tlc_update_notif(struct iwl_mvm *, struct iwl_rx_cmd_buffer *); -u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *); +u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *, + struct ieee80211_bss_conf *, struct ieee80211_link_sta *); void rs_fw_rate_init(struct iwl_mvm *, struct ieee80211_sta *, enum nl80211_band, bool); int rs_fw_tx_protection(struct iwl_mvm *, struct iwl_mvm_sta *, bool); int iwl_mvm_tx_protection(struct iwl_mvm *, struct iwl_mvm_sta *, bool); int iwl_mvm_rate_control_register(void); void iwl_mvm_rate_control_unregister(void); -void iwl_mvm_rs_rate_init(struct iwl_mvm *, struct ieee80211_sta *, - enum nl80211_band, bool); +void iwl_mvm_rs_rate_init(struct iwl_mvm *, struct ieee80211_vif *, + struct ieee80211_sta *, struct ieee80211_bss_conf *, + struct ieee80211_link_sta *, enum nl80211_band); +void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *, struct ieee80211_vif *, + struct ieee80211_sta *, struct ieee80211_bss_conf *, + struct ieee80211_link_sta *, enum nl80211_band); void iwl_mvm_rs_tx_status(struct iwl_mvm *, struct ieee80211_sta *, int, struct ieee80211_tx_info *, bool); +void iwl_mvm_rs_add_sta_link(struct iwl_mvm *, struct iwl_mvm_link_sta *); #endif /* _IWLWIFI_MVM_RS_H */ diff --git a/sys/contrib/dev/iwlwifi/mvm/rx.c b/sys/contrib/dev/iwlwifi/mvm/rx.c index ee9249a0c5b0..e279f42a7810 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rx.c +++ b/sys/contrib/dev/iwlwifi/mvm/rx.c @@ -1,1064 +1,1086 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler * * Copies the phy information in mvm->last_phy_info, it will be used when the * actual data will come from the fw in the next packet. 
*/ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); if (unlikely(pkt_len < sizeof(mvm->last_phy_info))) return; memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info)); mvm->ampdu_ref++; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { spin_lock(&mvm->drv_stats_lock); mvm->drv_rx_stats.ampdu_count++; spin_unlock(&mvm->drv_stats_lock); } #endif } /* * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211 * * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); unsigned int fraglen; /* * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len, * but those are all multiples of 4 long) all goes away, but we * want the *end* of it, which is going to be the start of the IP * header, to be aligned when it gets pulled in. * The beginning of the skb->data is aligned on at least a 4-byte * boundary after allocation. Everything here is aligned at least * on a 2-byte boundary so we can just take hdrlen & 3 and pad by * the result. */ skb_reserve(skb, hdrlen & 3); /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that * splice() or TCP coalesce are more efficient. * * Since, in addition, ieee80211_data_to_8023() always pull in at * least 8 bytes (possibly more for mesh) we can do the same here * to save the cost of doing it later. That still doesn't pull in * the actual IP header since the typical case has a SNAP header. * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; skb_put_data(skb, hdr, hdrlen); fraglen = len - hdrlen; if (fraglen) { int offset = (u8 *)hdr + hdrlen - (u8 *)rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } ieee80211_rx_napi(mvm->hw, sta, skb, napi); } /* * iwl_mvm_get_signal_strength - use new rx PHY INFO API * values are reported by the fw as positive values - need to negate * to obtain their dBM. Account for missing antennas by replacing 0 * values by -256dBm: practically 0 power and a non-feasible 8 bit value. */ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_phy_info *phy_info, struct ieee80211_rx_status *rx_status) { int energy_a, energy_b, max_energy; u32 val; val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS; energy_b = energy_b ? 
-energy_b : S8_MIN; max_energy = max(energy_a, energy_b); IWL_DEBUG_STATS(mvm, "energy In A %d B %d , and max %d\n", energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = (le16_to_cpu(phy_info->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA) >> RX_RES_PHY_FLAGS_ANTENNA_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; } /* * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format * @mvm: the mvm object * @hdr: 80211 header * @stats: status in mac80211's format * @rx_pkt_status: status coming from fw * * returns non 0 value if the packet should be dropped */ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, u32 rx_pkt_status, u8 *crypt_len) { if (!ieee80211_has_protected(hdr->frame_control) || (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_NO_ENC) return 0; /* packet was encrypted with unknown alg */ if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_ENC_ERR) return 0; switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) { case RX_MPDU_RES_STATUS_SEC_CCM_ENC: /* alg is CCM: check MIC only */ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_TKIP_ENC: /* Don't drop the frame and decrypt it in SW */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) && !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; fallthrough; case RX_MPDU_RES_STATUS_SEC_WEP_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) == RX_MPDU_RES_STATUS_SEC_WEP_ENC) *crypt_len = IEEE80211_WEP_IV_LEN; return 0; case RX_MPDU_RES_STATUS_SEC_EXT_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; return 0; default: /* Expected in monitor (not having the keys) */ #if defined(__linux__) if (!mvm->monitor_on) - IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); + IWL_WARN(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); #elif defined(__FreeBSD__) if (!mvm->monitor_on && net_ratelimit()) - IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n", - __func__, rx_pkt_status); + IWL_WARN(mvm, "%s: Unhandled alg: 0x%x\n", __func__, rx_pkt_status); #endif } return 0; } static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, u32 len, struct iwl_rx_phy_info *phy_info, u32 rate_n_flags) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_tcm_mac *mdata; int mac; int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */ struct iwl_mvm_vif *mvmvif; /* expected throughput in 100Kbps, single stream, 20 MHz */ static const u8 thresh_tpt[] = { 9, 18, 30, 42, 60, 78, 90, 96, 120, 135, }; u16 thr; - if (ieee80211_is_data_qos(hdr->frame_control)) - ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 tid = ieee80211_get_tid(hdr); + + if (tid < IWL_MAX_TID_COUNT) + ac = tid_to_mac80211_ac[tid]; + } mvmsta = iwl_mvm_sta_from_mac80211(sta); mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata = &mvm->tcm.data[mac]; mdata->rx.pkts[ac]++; /* count the airtime only once for each ampdu */ if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) { mdata->rx.last_ampdu_ref = 
mvm->ampdu_ref; mdata->rx.airtime += le16_to_cpu(phy_info->frame_time); } if (!(rate_n_flags & (RATE_MCS_HT_MSK_V1 | RATE_MCS_VHT_MSK_V1))) return; mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (mdata->opened_rx_ba_sessions || mdata->uapsd_nonagg_detect.detected || - (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && - !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) || - mvmsta->sta_id != mvmvif->ap_sta_id) + (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd) || + mvmsta->deflink.sta_id != mvmvif->deflink.ap_sta_id) return; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1]; thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1); } else { if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= ARRAY_SIZE(thresh_tpt))) return; thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK]; - thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS); + thr *= 1 + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags); } thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >> RATE_MCS_CHAN_WIDTH_POS); mdata->uapsd_nonagg_detect.rx_bytes += len; ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr); } static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, struct sk_buff *skb, u32 status) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (mvmvif->features & NETIF_F_RXCSUM && status & RX_MPDU_RES_STATUS_CSUM_DONE && status & RX_MPDU_RES_STATUS_CSUM_OK) skb->ip_summed = CHECKSUM_UNNECESSARY; } /* * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler * * Handles the actual data of the Rx packet from the fw */ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *hdr; struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_phy_info *phy_info; struct iwl_rx_mpdu_res_start *rx_res; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u32 len, pkt_len = iwl_rx_packet_payload_len(pkt); u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; if (unlikely(pkt_len < sizeof(*rx_res))) { IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n"); return; } phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); if (unlikely(len + sizeof(*rx_res) + sizeof(__le32) > pkt_len)) { IWL_DEBUG_DROP(mvm, "FW lied about packet len\n"); return; } rx_pkt_status = get_unaligned_le32((__le32 *) (pkt->data + sizeof(*rx_res) + len)); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } rx_status = IEEE80211_SKB_RXCB(skb); - /* - * drop the packet if it has failed being decrypted by HW - */ - if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, - &crypt_len)) { - IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", - rx_pkt_status); - kfree_skb(skb); - return; - } - /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. 
*/ if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) || !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* This will be used in several places later */ rate_n_flags = le32_to_cpu(phy_info->rate_n_flags); /* rx_status carries information about the packet to mac80211 */ rx_status->mactime = le64_to_cpu(phy_info->timestamp); rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp); rx_status->band = (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal, (unsigned long long)rx_status->mactime); rcu_read_lock(); if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) { u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK; id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT; if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* This is fine since we prevent two stations with the same * address from being added. */ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *vif = mvmsta->vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* + * Don't even try to decrypt a MCAST frame that was received + * before the managed vif is authorized, we'd fail anyway. + */ + if (is_multicast_ether_addr(hdr->addr1) && + vif->type == NL80211_IFTYPE_STATION && + !mvmvif->authorized && + ieee80211_has_protected(hdr->frame_control)) { + IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n"); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + } + + /* + * drop the packet if it has failed being decrypted by HW + */ + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, + &crypt_len)) { + IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", + rx_pkt_status); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_vif *vif = mvmsta->vif; /* We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. 
*/ if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); if (mvmvif->csa_target_freq == rx_status->freq) iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); } rs_update_last_rssi(mvm, mvmsta, rx_status); trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI); if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; s32 rssi; rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, #if defined(__linux__) NULL); #elif defined(__FreeBSD__) ""); #endif } if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, rate_n_flags); if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, rx_pkt_status); } rcu_read_unlock(); /* set the preamble flag if appropriate */ if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE)) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { /* * We know which subframes of an A-MPDU belong * together since we get a single PHY response * from the firmware for all of them */ rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->ampdu_reference = mvm->ampdu_ref; } /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) && rate_n_flags & RATE_MCS_SGI_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_HT_MCS_GF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; if (rate_n_flags & RATE_MCS_LDPC_MSK_V1) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); return; } rx_status->rate_idx = rate; } #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif if (unlikely((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; if (unlikely(ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boottime_ns(); 
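	/* rx_status is fully populated at this point; the call below copies
	 * the 802.11 header (plus crypt_len bytes of crypto header) into the
	 * skb and delivers the frame to mac80211.
	 */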
iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, crypt_len, rxb); } struct iwl_mvm_stat_data { struct iwl_mvm *mvm; __le32 flags; __le32 mac_id; u8 beacon_filter_average_energy; __le32 *beacon_counter; u8 *beacon_average_energy; }; struct iwl_mvm_stat_data_all_macs { struct iwl_mvm *mvm; __le32 flags; struct iwl_statistics_ntfy_per_mac *per_mac_stats; }; static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; int thold = vif->bss_conf.cqm_rssi_thold; int hyst = vif->bss_conf.cqm_rssi_hyst; int last_event; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); return; } mvmvif->bf_data.ave_beacon_signal = sig; /* BT Coex */ if (mvmvif->bf_data.bt_coex_min_thold != mvmvif->bf_data.bt_coex_max_thold) { last_event = mvmvif->bf_data.last_bt_coex_event; if (sig > mvmvif->bf_data.bt_coex_max_thold && (last_event <= mvmvif->bf_data.bt_coex_min_thold || last_event == 0)) { mvmvif->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH); } else if (sig < mvmvif->bf_data.bt_coex_min_thold && (last_event >= mvmvif->bf_data.bt_coex_max_thold || last_event == 0)) { mvmvif->bf_data.last_bt_coex_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n", sig); iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW); } } if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) return; /* CQM Notification */ last_event = mvmvif->bf_data.last_cqm_event; if (thold && sig < thold && (last_event == 0 || sig < last_event - hyst)) { mvmvif->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig); ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, sig, GFP_KERNEL); } else if (sig > thold && (last_event == 0 || sig > last_event + hyst)) { mvmvif->bf_data.last_cqm_event = sig; IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig); ieee80211_cqm_rssi_notify( vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, sig, GFP_KERNEL); } } static void iwl_mvm_stat_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data *data = _data; int sig = -data->beacon_filter_average_energy; u16 id = le32_to_cpu(data->mac_id); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 vif_id = mvmvif->id; /* This doesn't need the MAC ID check since it's not taking the * data copied into the "data" struct, but rather the data from * the notification directly. 
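 * (beacon_counter and beacon_average_energy are per-MAC arrays in the
 * notification, indexed by vif_id below).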
*/ - mvmvif->beacon_stats.num_beacons = + mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(data->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = + mvmvif->deflink.beacon_stats.avg_signal = -data->beacon_average_energy[vif_id]; if (mvmvif->id != id) return; if (vif->type != NL80211_IFTYPE_STATION) return; /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics */ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; iwl_mvm_update_vif_sig(vif, sig); } static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_stat_data_all_macs *data = _data; struct iwl_statistics_ntfy_per_mac *mac_stats; int sig; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 vif_id = mvmvif->id; if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id)) return; if (vif->type != NL80211_IFTYPE_STATION) return; mac_stats = &data->per_mac_stats[vif_id]; - mvmvif->beacon_stats.num_beacons = + mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(mac_stats->beacon_counter); - mvmvif->beacon_stats.avg_signal = + mvmvif->deflink.beacon_stats.avg_signal = -le32_to_cpu(mac_stats->beacon_average_energy); /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics */ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); iwl_mvm_update_vif_sig(vif, sig); } static inline void iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_stats *trig_stats; u32 trig_offset, trig_thold; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS); if (!trig) return; trig_stats = (void *)trig->data; trig_offset = le32_to_cpu(trig_stats->stop_offset); trig_thold = le32_to_cpu(trig_stats->stop_threshold); if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt))) return; if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) return; #if defined(__linux__) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); #elif defined(__FreeBSD__) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, ""); #endif } static void iwl_mvm_stats_energy_iter(void *_data, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); u8 *energy = _data; - u32 sta_id = mvmsta->sta_id; + u32 sta_id = mvmsta->deflink.sta_id; if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d", sta_id, IWL_MVM_STATION_COUNT_MAX)) return; if (energy[sta_id]) - mvmsta->avg_energy = energy[sta_id]; + mvmsta->deflink.avg_energy = energy[sta_id]; } static void iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le, __le32 *rx_bytes_le) { int i; spin_lock(&mvm->tcm.lock); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i]; u32 rx_bytes = le32_to_cpu(rx_bytes_le[i]); u32 airtime = le32_to_cpu(air_time_le[i]); mdata->rx.airtime += airtime; mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes; if (airtime) { /* re-init every time to store rate from FW */ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 
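			/* airtime is reported by the firmware in usec, so
			 * rx_bytes * 8 / airtime below approximates the
			 * average rate in Mbps (bits per usec).
			 */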
ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, rx_bytes * 8 / airtime); } } spin_unlock(&mvm->tcm.lock); } static void iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, struct iwl_statistics_operational_ntfy *stats) { struct iwl_mvm_stat_data_all_macs data = { .mvm = mvm, .flags = stats->flags, .per_mac_stats = stats->per_mac_stats, }; ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator_all_macs, &data); } static void iwl_mvm_stats_ver_14(struct iwl_mvm *mvm, struct iwl_statistics_operational_ntfy_ver_14 *stats) { struct iwl_mvm_stat_data data = { .mvm = mvm, }; u8 beacon_average_energy[MAC_INDEX_AUX]; __le32 flags; int i; flags = stats->flags; data.mac_id = stats->mac_id; data.beacon_filter_average_energy = le32_to_cpu(stats->beacon_filter_average_energy); data.flags = flags; data.beacon_counter = stats->beacon_counter; for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++) beacon_average_energy[i] = le32_to_cpu(stats->beacon_average_energy[i]); data.beacon_average_energy = beacon_average_energy; ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); } static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt, u32 expected_size) { struct iwl_statistics_ntfy_hdr *hdr; if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size, "received invalid statistics size (%d)!, expected_size: %d\n", iwl_rx_packet_payload_len(pkt), expected_size)) return false; hdr = (void *)&pkt->data; if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL || hdr->version != iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0), "received unsupported hdr type %d, version %d\n", hdr->type, hdr->version)) return false; if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size, "received invalid statistics size in header (%d)!, expected_size: %d\n", le16_to_cpu(hdr->size), expected_size)) return false; return true; } static void iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; __le32 air_time[MAC_INDEX_AUX]; __le32 rx_bytes[MAC_INDEX_AUX]; __le32 flags = 0; int i; u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0); if (WARN_ONCE(notif_ver > 15, "invalid statistics version id: %d\n", notif_ver)) return; if (notif_ver == 14) { struct iwl_statistics_operational_ntfy_ver_14 *stats = (void *)pkt->data; if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) return; iwl_mvm_stats_ver_14(mvm, stats); flags = stats->flags; mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan); for (i = 0; i < ARRAY_SIZE(average_energy); i++) average_energy[i] = le32_to_cpu(stats->average_energy[i]); for (i = 0; i < ARRAY_SIZE(air_time); i++) { air_time[i] = stats->air_time[i]; rx_bytes[i] = stats->rx_bytes[i]; } } if (notif_ver == 15) { struct iwl_statistics_operational_ntfy *stats = (void *)pkt->data; if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) return; iwl_mvm_stats_ver_15(mvm, stats); flags = stats->flags; mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan); for (i = 0; i < 
ARRAY_SIZE(average_energy); i++) average_energy[i] = le32_to_cpu(stats->per_sta_stats[i].average_energy); for (i = 0; i < ARRAY_SIZE(air_time); i++) { air_time[i] = stats->per_mac_stats[i].air_time; rx_bytes[i] = stats->per_mac_stats[i].rx_bytes; } } iwl_mvm_rx_stats_check_trigger(mvm, pkt); ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, average_energy); /* * Don't update in case the statistics are not cleared, since * we will end up counting twice the same airtime, once in TCM * request and once in statistics notification. */ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_stat_data data = { .mvm = mvm, }; __le32 *bytes, *air_time, flags; int expected_size; u8 *energy; /* From ver 14 and up we use TLV statistics format */ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0) >= 14) return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { if (iwl_mvm_has_new_rx_api(mvm)) expected_size = sizeof(struct iwl_notif_statistics_v11); else expected_size = sizeof(struct iwl_notif_statistics_v10); } else { expected_size = sizeof(struct iwl_notif_statistics); } if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size, "received invalid statistics size (%d)!\n", iwl_rx_packet_payload_len(pkt))) return; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy; mvm->rx_stats_v3 = stats->rx; mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan); data.beacon_counter = stats->general.beacon_counter; data.beacon_average_energy = stats->general.beacon_average_energy; flags = stats->flag; } else { struct iwl_notif_statistics *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = stats->general.common.beacon_filter_average_energy; mvm->rx_stats = stats->rx; mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time); mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time); mvm->radio_stats.on_time_rf = le64_to_cpu(stats->general.common.on_time_rf); mvm->radio_stats.on_time_scan = le64_to_cpu(stats->general.common.on_time_scan); data.beacon_counter = stats->general.beacon_counter; data.beacon_average_energy = stats->general.beacon_average_energy; flags = stats->flag; } data.flags = flags; iwl_mvm_rx_stats_check_trigger(mvm, pkt); ieee80211_iterate_active_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); if (!iwl_mvm_has_new_rx_api(mvm)) return; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data; energy = (void *)&v11->load_stats.avg_energy; bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { struct iwl_notif_statistics *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; bytes = (void *)&stats->load_stats.byte_count; air_time = (void *)&stats->load_stats.air_time; } ieee80211_iterate_stations_atomic(mvm->hw, 
iwl_mvm_stats_energy_iter, energy); /* * Don't update in case the statistics are not cleared, since * we will end up counting twice the same airtime, once in TCM * request and once in statistics notification. */ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) iwl_mvm_update_tcm_from_stats(mvm, air_time, bytes); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); } void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_ba_window_status_notif *notif = (void *)pkt->data; int i; BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX); BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX); BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX); BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX); rcu_read_lock(); for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) { struct ieee80211_sta *sta; u8 sta_id, tid; u64 bitmap; u32 ssn; u16 ratid; u16 received_mpdu; ratid = le16_to_cpu(notif->ra_tid[i]); /* check that this TID is valid */ if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) continue; received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]); if (received_mpdu == 0) continue; tid = ratid & BA_WINDOW_STATUS_TID_MSK; /* get the station */ sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) >> BA_WINDOW_STATUS_STA_ID_POS; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) continue; bitmap = le64_to_cpu(notif->bitmap[i]); ssn = le32_to_cpu(notif->start_seq_num[i]); /* update mac80211 with the bitmap for the reordering buffer */ ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, received_mpdu); } rcu_read_unlock(); } diff --git a/sys/contrib/dev/iwlwifi/mvm/rxmq.c b/sys/contrib/dev/iwlwifi/mvm/rxmq.c index 872a45ea15ca..985fbfa31195 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rxmq.c +++ b/sys/contrib/dev/iwlwifi/mvm/rxmq.c @@ -1,2275 +1,2833 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" - -static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb) -{ - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - u8 *data = skb->data; - - /* Alignment concerns */ - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4); - - if (rx_status->flag & RX_FLAG_RADIOTAP_HE) - data += sizeof(struct ieee80211_radiotap_he); - if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) - data += sizeof(struct ieee80211_radiotap_he_mu); - if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG) - data += sizeof(struct ieee80211_radiotap_lsig); - if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { - struct ieee80211_vendor_radiotap *radiotap = (void *)data; - - data += sizeof(*radiotap) + radiotap->len + radiotap->pad; - } - - return data; -} +#include "time-sync.h" static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta; - struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); + 
struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; /* do PN checking */ /* multicast and non-data only arrives on default queue */ if (!ieee80211_is_data(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return 0; /* do not check PN for open AP */ if (!(stats->flag & RX_FLAG_DECRYPTED)) return 0; /* * avoid checking for default queue - we don't want to replicate * all the logic that's necessary for checking the PN on fragmented * frames, leave that to mac80211 */ if (queue == 0) return 0; /* if we are here - this for sure is either CCMP or GCMP */ if (IS_ERR_OR_NULL(sta)) { IWL_DEBUG_DROP(mvm, "expected hw-decrypted unicast frame for station\n"); return -1; } mvmsta = iwl_mvm_sta_from_mac80211(sta); extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); keyidx = extiv[3] >> 6; ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]); if (!ptk_pn) return -1; if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); else tid = 0; /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */ if (tid >= IWL_MAX_TID_COUNT) return -1; /* load pn */ pn[0] = extiv[7]; pn[1] = extiv[6]; pn[2] = extiv[5]; pn[3] = extiv[4]; pn[4] = extiv[1]; pn[5] = extiv[0]; res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); if (res < 0) return -1; if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) return -1; memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); stats->flag |= RX_FLAG_PN_VALIDATED; return 0; } /* iwl_mvm_create_skb Adds the rxb to a new skb */ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); u8 mic_crc_len = u8_get_bits(desc->mac_flags1, IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } /* * For non monitor interface strip the bytes the RADA might not have - * removed. As monitor interface cannot exist with other interfaces - * this removal is safe. + * removed (it might be disabled, e.g. for mgmt frames). As a monitor + * interface cannot exist with other interfaces, this removal is safe + * and sufficient, in monitor mode there's no decryption being done. */ - if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { - u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); - - /* - * If RADA was not enabled then decryption was not performed so - * the MIC cannot be removed. - */ - if (!(pkt_flags & FH_RSCSR_RADA_EN)) { - if (WARN_ON(crypt_len > mic_crc_len)) - return -EINVAL; - - mic_crc_len -= crypt_len; - } - - if (WARN_ON(mic_crc_len > len)) - return -EINVAL; - + if (len > mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) len -= mic_crc_len; - } /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that * splice() or TCP coalesce are more efficient. * * Since, in addition, ieee80211_data_to_8023() always pull in at * least 8 bytes (possibly more for mesh) we can do the same here * to save the cost of doing it later. 
That still doesn't pull in * the actual IP header since the typical case has a SNAP header. * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; /* The firmware may align the packet to DWORD. * The padding is inserted after the IV. * After copying the header + IV skip the padding if * present before copying packet data. */ hdrlen += crypt_len; if (unlikely(headlen < hdrlen)) return -EINVAL; + /* Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would be put + */ + skb_set_mac_header(skb, skb->len); skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); /* * If we did CHECKSUM_COMPLETE, the hardware only does it right for * certain cases and starts the checksum after the SNAP. Check if * this is the case - it's easier to just bail out to CHECKSUM_NONE * in the cases the hardware didn't handle, since it's rare to see * such packets, even though the hardware did calculate the checksum * in this case, just starting after the MAC header instead. * * Starting from Bz hardware, it calculates starting directly after * the MAC header, so that matches mac80211's expectation. */ - if (skb->ip_summed == CHECKSUM_COMPLETE && - mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) { + if (skb->ip_summed == CHECKSUM_COMPLETE) { struct { u8 hdr[6]; __be16 type; } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len); if (unlikely(headlen - hdrlen < sizeof(*shdr) || !ether_addr_equal(shdr->hdr, rfc1042_header) || (shdr->type != htons(ETH_P_IP) && shdr->type != htons(ETH_P_ARP) && shdr->type != htons(ETH_P_IPV6) && shdr->type != htons(ETH_P_8021Q) && shdr->type != htons(ETH_P_PAE) && shdr->type != htons(ETH_P_TDLS)))) skb->ip_summed = CHECKSUM_NONE; - else + else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) /* mac80211 assumes full CSUM including SNAP header */ skb_postpush_rcsum(skb, shdr, sizeof(*shdr)); } fraglen = len - headlen; if (fraglen) { int offset = (u8 *)hdr + headlen + pad_len - (u8 *)rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); } return 0; } +/* put a TLV on the skb and return data pointer + * + * Also pad to 4 the len and zero out all data part + */ +static void * +iwl_mvm_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len) +{ + struct ieee80211_radiotap_tlv *tlv; + + tlv = skb_put(skb, sizeof(*tlv)); + tlv->type = cpu_to_le16(type); + tlv->len = cpu_to_le16(len); + return skb_put_zero(skb, ALIGN(len, 4)); +} + static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_vendor_radiotap *radiotap; - const int size = sizeof(*radiotap) + sizeof(__le16); + struct ieee80211_radiotap_vendor_content *radiotap; + const u16 vendor_data_len = sizeof(mvm->cur_aid); if (!mvm->cur_aid) return; - /* ensure alignment */ - BUILD_BUG_ON((size + 2) % 4); + radiotap = iwl_mvm_radiotap_put_tlv(skb, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE, + sizeof(*radiotap) + vendor_data_len); - radiotap = skb_put(skb, size + 2); - radiotap->align = 1; /* Intel OUI */ radiotap->oui[0] = 0xf6; radiotap->oui[1] = 0x54; radiotap->oui[2] = 0x25; /* radiotap sniffer config sub-namespace */ - radiotap->subns = 1; - radiotap->present = 0x1; - radiotap->len = 
size - sizeof(*radiotap); - radiotap->pad = 2; + radiotap->oui_subtype = 1; + radiotap->vendor_type = 0; /* fill the data now */ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid)); - /* and clear the padding */ - memset(radiotap->data + sizeof(__le16), 0, radiotap->pad); - rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; } /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta) { - if (iwl_mvm_check_pn(mvm, skb, queue, sta)) + if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) { kfree_skb(skb); - else - ieee80211_rx_napi(mvm->hw, sta, skb, napi); + return; + } + + if (sta && sta->valid_links && link_sta) { + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + rx_status->link_valid = 1; + rx_status->link_id = link_sta->link_id; + } + + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct ieee80211_rx_status *rx_status, u32 rate_n_flags, int energy_a, int energy_b) { int max_energy; u32 rate_flags = rate_n_flags; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = energy_b ? -energy_b : S8_MIN; max_energy = max(energy_a, energy_b); IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = (rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS; rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; } static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc, - u32 status) + u32 status, + struct ieee80211_rx_status *stats) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; u8 keyid; struct ieee80211_key_conf *key; u32 len = le16_to_cpu(desc->mpdu_len); const u8 *frame = (void *)hdr; if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) return 0; /* * For non-beacon, we don't really care. But beacons may * be filtered out, and we thus need the firmware's replay * detection, otherwise beacons the firmware previously * filtered could be replayed, or something like that, and * it can filter a lot - though usually only if nothing has * changed. 
*/ if (!ieee80211_is_beacon(hdr->frame_control)) return 0; /* key mismatch - will also report !MIC_OK but we shouldn't count it */ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) return -1; /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && - !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) + !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) { + stats->flag |= RX_FLAG_DECRYPTED; return 0; + } if (!sta) return -1; mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); /* * both keys will have the same cipher and MIC length, use * whichever one is available */ key = rcu_dereference(mvmvif->bcn_prot.keys[0]); if (!key) { key = rcu_dereference(mvmvif->bcn_prot.keys[1]); if (!key) return -1; } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) return -1; /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; /* and if that's the other key, look it up */ if (keyid != key->keyidx) { /* * shouldn't happen since firmware checked, but be safe * in case the MIC length is wrong too, for example */ if (keyid != 6 && keyid != 7) return -1; key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); if (!key) return -1; } /* Report status to mac80211 */ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) ieee80211_key_mic_failure(key); else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR) ieee80211_key_replay(key); return -1; } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, struct ieee80211_rx_status *stats, u16 phy_info, struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, int queue, u8 *crypt_len) { u32 status = le32_to_cpu(desc->status); /* * Drop UNKNOWN frames in aggregation, unless in monitor mode * (where we don't have the keys). * We limit this to aggregation because in TKIP this is a valid * scenario, since we may not have the (correct) TTAK (phase 1 * key) in the firmware. */ if (phy_info & IWL_RX_MPDU_PHY_AMPDU && (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) return -1; if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_has_protected(hdr->frame_control))) - return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status); + return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats); if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) return 0; /* TODO: handle packets encrypted with unknown alg */ #if defined(__FreeBSD__) /* XXX-BZ do similar to rx.c for now as these are plenty. 
*/ if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_ENC_ERR) return (0); #endif switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) { case IWL_RX_MPDU_STATUS_SEC_CCM: case IWL_RX_MPDU_STATUS_SEC_GCM: BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); /* alg is CCM: check MIC only */ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) return -1; - stats->flag |= RX_FLAG_DECRYPTED; - if (pkt_flags & FH_RSCSR_RADA_EN) - stats->flag |= RX_FLAG_MIC_STRIPPED; + stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: /* Don't drop the frame and decrypt it in SW */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_DEPRECATE_TTAK) && !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK)) return 0; if (mvm->trans->trans_cfg->gen2 && !(status & RX_MPDU_RES_STATUS_MIC_OK)) stats->flag |= RX_FLAG_MMIC_ERROR; *crypt_len = IEEE80211_TKIP_IV_LEN; fallthrough; case IWL_RX_MPDU_STATUS_SEC_WEP: if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_WEP) *crypt_len = IEEE80211_WEP_IV_LEN; if (pkt_flags & FH_RSCSR_RADA_EN) { stats->flag |= RX_FLAG_ICV_STRIPPED; if (mvm->trans->trans_cfg->gen2) stats->flag |= RX_FLAG_MMIC_STRIPPED; } return 0; case IWL_RX_MPDU_STATUS_SEC_EXT_ENC: if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) return -1; stats->flag |= RX_FLAG_DECRYPTED; return 0; case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: break; default: /* * Sometimes we can get frames that were not decrypted * because the firmware didn't have the keys yet. This can * happen after connection where we can get multicast frames * before the GTK is installed. * Silently drop those frames. * Also drop un-decrypted frames in monitor mode. */ if (!is_multicast_ether_addr(hdr->addr1) && !mvm->monitor_on && net_ratelimit()) #if defined(__linux__) - IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); + IWL_WARN(mvm, "Unhandled alg: 0x%x\n", status); #elif defined(__FreeBSD__) - IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n", - __func__, status); + IWL_WARN(mvm, "%s: Unhandled alg: 0x%x\n", __func__, status); #endif } return 0; } static void iwl_mvm_rx_csum(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct sk_buff *skb, struct iwl_rx_packet *pkt) { struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) { u16 hwsum = be16_to_cpu(desc->v3.raw_xsum); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold(~(__force __sum16)hwsum); } } else { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif; u16 flags = le16_to_cpu(desc->l3l4_flags); u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >> IWL_RX_L3_PROTO_POS); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); if (mvmvif->features & NETIF_F_RXCSUM && flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK && (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK || l3_prot == IWL_RX_L3_TYPE_IPV6 || l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG)) skb->ip_summed = CHECKSUM_UNNECESSARY; } } /* - * returns true if a packet is a duplicate and should be dropped. + * returns true if a packet is a duplicate or invalid tid and should be dropped. 
* Updates AMSDU PN tracking info */ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, struct ieee80211_rx_status *rx_status, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); #if defined(__FreeBSD__) if (WARN_ON(mvm_sta->dup_data == NULL)) return false; #endif dup_data = &mvm_sta->dup_data[queue]; /* * Drop duplicate 802.11 retransmissions * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") */ if (ieee80211_is_ctl(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } - if (ieee80211_is_data_qos(hdr->frame_control)) + if (ieee80211_is_data_qos(hdr->frame_control)) { /* frame has qos control */ tid = ieee80211_get_tid(hdr); - else + if (tid >= IWL_MAX_TID_COUNT) + return true; + } else { tid = IWL_MAX_TID_COUNT; + } /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; if (unlikely(ieee80211_has_retry(hdr->frame_control) && dup_data->last_seq[tid] == hdr->seq_ctrl && dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; /* Allow same PN as the first subframe for following sub frames */ if (dup_data->last_seq[tid] == hdr->seq_ctrl && sub_frame_idx > dup_data->last_sub_frame[tid] && desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. * We fully trust NSSN unless it is behind us due to reorder timeout. * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. */ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) { return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size); } static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { if (IWL_MVM_USE_NSSN_SYNC) { struct iwl_mvm_nssn_sync_data notif = { .baid = baid, .nssn = nssn, }; iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, ¬if, sizeof(notif)); } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) enum iwl_mvm_release_flags { IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), }; static void iwl_mvm_release_frames(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_reorder_buffer *reorder_buf, u16 nssn, u32 flags) { struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[reorder_buf->queue * baid_data->entries_per_queue]; u16 ssn = reorder_buf->head_sn; lockdep_assert_held(&reorder_buf->lock); /* * We keep the NSSN not too far behind, if we are sync'ing it and it * is more than 2048 ahead of us, it must be behind us. Discard it. * This can happen if the queue that hit the 0 / 2048 seqno was lagging * behind and this queue already processed packets. The next if * would have caught cases where this queue would have processed less * than 64 packets, but it may have processed more than 64 packets. 
*/ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && ieee80211_sn_less(nssn, ssn)) goto set_timer; /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) goto set_timer; while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; struct sk_buff_head *skb_list = &entries[index].e.frames; struct sk_buff *skb; ssn = ieee80211_sn_inc(ssn); if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && (ssn == 2048 || ssn == 0)) iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); /* * Empty the list. Will have more than one frame for A-MSDU. * Empty list is valid as well since nssn indicates frames were * received. */ while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta); + sta, NULL /* FIXME */); reorder_buf->num_stored--; } } reorder_buf->head_sn = nssn; set_timer: if (reorder_buf->num_stored && !reorder_buf->removed) { u16 index = reorder_buf->head_sn % reorder_buf->buf_size; while (skb_queue_empty(&entries[index].e.frames)) index = (index + 1) % reorder_buf->buf_size; /* modify timer to match next frame's expiration time */ mod_timer(&reorder_buf->reorder_timer, entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } else { del_timer(&reorder_buf->reorder_timer); } } void iwl_mvm_reorder_timer_expired(struct timer_list *t) { struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer); struct iwl_mvm_baid_data *baid_data = iwl_mvm_baid_data_from_reorder_buf(buf); struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[buf->queue * baid_data->entries_per_queue]; int i; u16 sn = 0, index = 0; bool expired = false; bool cont = false; spin_lock(&buf->lock); if (!buf->num_stored || buf->removed) { spin_unlock(&buf->lock); return; } for (i = 0; i < buf->buf_size ; i++) { index = (buf->head_sn + i) % buf->buf_size; if (skb_queue_empty(&entries[index].e.frames)) { /* * If there is a hole and the next frame didn't expire * we want to break and not advance SN */ cont = false; continue; } if (!cont && !time_after(jiffies, entries[index].e.reorder_time + RX_REORDER_BUF_TIMEOUT_MQ)) break; expired = true; /* continue until next hole after this expired frames */ cont = true; sn = ieee80211_sn_add(buf->head_sn, i + 1); } if (expired) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; - u8 sta_id = baid_data->sta_id; + u8 sta_id = ffs(baid_data->sta_mask) - 1; rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); /* SN is set to the last expired frame + 1 */ IWL_DEBUG_HT(buf->mvm, "Releasing expired frames for sta %u, sn %d\n", sta_id, sn); iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, sta, baid_data->tid); iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); rcu_read_unlock(); } else { /* * If no frame expired and there are stored frames, index is now * pointing to the first unexpired frame - modify timer * accordingly to this frame. 
*/ mod_timer(&buf->reorder_timer, entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } + +out: spin_unlock(&buf->lock); } static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, struct iwl_mvm_delba_data *data) { struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; u8 baid = data->baid; + u32 sta_id; if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid)) return; rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); if (WARN_ON_ONCE(!ba_data)) goto out; - sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; reorder_buf = &ba_data->reorder_buf[queue]; /* release all frames that are in the reorder buffer to the stack */ spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, reorder_buf->buf_size), 0); spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); out: rcu_read_unlock(); } static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, struct napi_struct *napi, u8 baid, u16 nssn, int queue, u32 flags) { struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; struct iwl_mvm_baid_data *ba_data; + u32 sta_id; IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", baid, nssn); if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || baid >= ARRAY_SIZE(mvm->baid_map))) return; rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); if (!ba_data) { WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC), "BAID %d not found in map\n", baid); goto out; } - sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; reorder_buf = &ba_data->reorder_buf[queue]; spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf, nssn, flags); spin_unlock_bh(&reorder_buf->lock); out: rcu_read_unlock(); } static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm, struct napi_struct *napi, int queue, const struct iwl_mvm_nssn_sync_data *data) { iwl_mvm_release_frames_from_notif(mvm, napi, data->baid, data->nssn, queue, IWL_MVM_RELEASE_FROM_RSS_SYNC); } void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rxq_sync_notification *notif; struct iwl_mvm_internal_rxq_notif *internal_notif; u32 len = iwl_rx_packet_payload_len(pkt); notif = (void *)pkt->data; internal_notif = (void *)notif->payload; if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif), "invalid notification size %d (%d)", len, (int)(sizeof(*notif) + sizeof(*internal_notif)))) return; len -= sizeof(*notif) + sizeof(*internal_notif); if (internal_notif->sync && mvm->queue_sync_cookie != internal_notif->cookie) { WARN_ONCE(1, "Received expired RX queue sync message\n"); return; } switch (internal_notif->type) { case IWL_MVM_RXQ_EMPTY: WARN_ONCE(len, "invalid empty notification size %d", len); break; case IWL_MVM_RXQ_NOTIF_DEL_BA: if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data), "invalid delba notification size %d (%d)", len, (int)sizeof(struct iwl_mvm_delba_data))) break; 
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; case IWL_MVM_RXQ_NSSN_SYNC: if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data), "invalid nssn sync notification size %d (%d)", len, (int)sizeof(struct iwl_mvm_nssn_sync_data))) break; iwl_mvm_nssn_sync(mvm, napi, queue, (void *)internal_notif->data); break; default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } if (internal_notif->sync) { WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state), "queue sync: queue %d responded a second time!\n", queue); if (READ_ONCE(mvm->queue_sync_state) == 0) wake_up(&mvm->rx_sync_waitq); } } static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct iwl_mvm_reorder_buffer *buffer, u32 reorder, u32 gp2, int queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (gp2 != buffer->consec_oldsn_ampdu_gp2) { /* we have a new (A-)MPDU ... */ /* * reset counter to 0 if we didn't have any oldsn in * the last A-MPDU (as detected by GP2 being identical) */ if (!buffer->consec_oldsn_prev_drop) buffer->consec_oldsn_drops = 0; /* either way, update our tracking state */ buffer->consec_oldsn_ampdu_gp2 = gp2; } else if (buffer->consec_oldsn_prev_drop) { /* * tracking state didn't change, and we had an old SN * indication before - do nothing in this case, we * already noted this one down and are waiting for the * next A-MPDU (by GP2) */ return; } /* return unless this MPDU has old SN */ if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)) return; /* update state */ buffer->consec_oldsn_prev_drop = 1; buffer->consec_oldsn_drops++; /* if limit is reached, send del BA and reset state */ if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) { IWL_WARN(mvm, "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n", IWL_MVM_AMPDU_CONSEC_DROPS_DELBA, sta->addr, queue, tid); ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr); buffer->consec_oldsn_prev_drop = 0; buffer->consec_oldsn_drops = 0; } } /* * Returns true if the MPDU was buffered\dropped, false if it should be passed * to upper layer. */ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct napi_struct *napi, int queue, struct ieee80211_sta *sta, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); - struct iwl_mvm_sta *mvm_sta; + struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; struct sk_buff *tail; u32 reorder = le32_to_cpu(desc->reorder_data); bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; #if defined(__linux__) u8 tid = ieee80211_get_tid(hdr); #elif defined(__FreeBSD__) u8 tid; #endif u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; + u32 sta_mask; int index; u16 nssn, sn; u8 baid; baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT; /* * This also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that * then sends a delBA action frame. * This also covers pure monitor mode, in which case we won't * have any BA sessions. 
*/ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) return false; /* no sta yet */ if (WARN_ONCE(IS_ERR_OR_NULL(sta), "Got valid BAID without a valid station assigned\n")) return false; - mvm_sta = iwl_mvm_sta_from_mac80211(sta); - /* not a data packet or a bar */ if (!ieee80211_is_back_req(hdr->frame_control) && (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1))) return false; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) return false; baid_data = rcu_dereference(mvm->baid_map[baid]); if (!baid_data) { IWL_DEBUG_RX(mvm, "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", baid, reorder); return false; } #if defined(__FreeBSD__) tid = ieee80211_get_tid(hdr); #endif - if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, - "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", - baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, - tid)) + rcu_read_lock(); + sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + rcu_read_unlock(); + + if (IWL_FW_CHECK(mvm, + tid != baid_data->tid || + !(sta_mask & baid_data->sta_mask), + "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, + sta_mask, tid)) return false; nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >> IWL_RX_MPDU_REORDER_SN_SHIFT; buffer = &baid_data->reorder_buf[queue]; entries = &baid_data->entries[queue * baid_data->entries_per_queue]; spin_lock_bh(&buffer->lock); if (!buffer->valid) { if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) { spin_unlock_bh(&buffer->lock); return false; } buffer->valid = true; } if (ieee80211_is_back_req(hdr->frame_control)) { iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn, 0); goto drop; } /* * If there was a significant jump in the nssn - adjust. * If the SN is smaller than the NSSN it might need to first go into * the reorder buffer, in which case we just release up to it and the * rest of the function will take care of storing it and releasing up to * the nssn. * This should not happen. This queue has been lagging and it should * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice * and update the other queues. */ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, buffer->buf_size) || !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); } iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder, rx_status->device_timestamp, queue); /* drop any oudated packets */ if (ieee80211_sn_less(sn, buffer->head_sn)) goto drop; /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) && (!amsdu || last_subframe)) { /* * If we crossed the 2048 or 0 SN, notify all the * queues. This is done in order to avoid having a * head_sn that lags behind for too long. When that * happens, we can get to a situation where the head_sn * is within the interval [nssn - buf_size : nssn] * which will make us think that the nssn is a packet * that we already freed because of the reordering * buffer and we will ignore it. So maintain the * head_sn somewhat updated across all the queues: * when it crosses 0 and 2048. 
*/ if (sn == 2048 || sn == 0) iwl_mvm_sync_nssn(mvm, baid, sn); buffer->head_sn = nssn; } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } /* * release immediately if there are no stored frames, and the sn is * equal to the head. * This can happen due to reorder timer, where NSSN is behind head_sn. * When we released everything, and we got the next frame in the * sequence, according to the NSSN we can't release immediately, * while technically there is no hole and we can move forward. */ if (!buffer->num_stored && sn == buffer->head_sn) { if (!amsdu || last_subframe) { if (sn == 2048 || sn == 0) iwl_mvm_sync_nssn(mvm, baid, sn); buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); } /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); return false; } index = sn % buffer->buf_size; /* * Check if we already stored this frame * As AMSDU is either received or not as whole, logic is simple: * If we have frames in that position in the buffer and the last frame * originated from AMSDU had a different SN then it is a retransmission. * If it is the same SN then if the subframe index is incrementing it * is the same AMSDU - otherwise it is a retransmission. */ tail = skb_peek_tail(&entries[index].e.frames); if (tail && !amsdu) goto drop; else if (tail && (sn != buffer->last_amsdu || buffer->last_sub_index >= sub_frame_idx)) goto drop; /* put in reorder buffer */ __skb_queue_tail(&entries[index].e.frames, skb); buffer->num_stored++; entries[index].e.reorder_time = jiffies; if (amsdu) { buffer->last_amsdu = sn; buffer->last_sub_index = sub_frame_idx; } /* * We cannot trust NSSN for AMSDU sub-frames that are not the last. * The reason is that NSSN advances on the first sub-frame, and may * cause the reorder buffer to advance before all the sub-frames arrive. * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with * SN 1. NSSN for first sub frame will be 3 with the result of driver * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is * already ahead and it will be dropped. * If the last sub-frame is not on this queue - we will get frame * release notification with up to date NSSN. */ if (!amsdu || last_subframe) iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn, IWL_MVM_RELEASE_SEND_RSS_SYNC); spin_unlock_bh(&buffer->lock); return true; drop: kfree_skb(skb); spin_unlock_bh(&buffer->lock); return true; } static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u32 reorder_data, u8 baid) { unsigned long now = jiffies; unsigned long timeout; struct iwl_mvm_baid_data *data; rcu_read_lock(); data = rcu_dereference(mvm->baid_map[baid]); if (!data) { IWL_DEBUG_RX(mvm, "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", baid, reorder_data); goto out; } if (!data->timeout) goto out; timeout = data->timeout; /* * Do not update last rx all the time to avoid cache bouncing * between the rx queues. * Update it every timeout. Worst case is the session will * expire after ~ 2 * timeout, which doesn't matter that much. 
*/ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now)) /* Update is atomic */ data->last_rx = now; out: rcu_read_unlock(); } static void iwl_mvm_flip_address(u8 *addr) { int i; u8 mac_addr[ETH_ALEN]; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = addr[ETH_ALEN - i - 1]; ether_addr_copy(addr, mac_addr); } struct iwl_mvm_rx_phy_data { enum iwl_rx_phy_info_type info_type; - __le32 d0, d1, d2, d3; + __le32 d0, d1, d2, d3, eht_d4, d5; __le16 d4; + bool with_data; + bool first_subframe; + __le32 rx_vec[4]; + + u32 rate_n_flags; + u32 gp2_on_air_rise; + u16 phy_info; + u8 energy_a, energy_b; + u8 channel; }; static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, struct ieee80211_radiotap_he_mu *he_mu) { u32 phy_data2 = le32_to_cpu(phy_data->d2); u32 phy_data3 = le32_to_cpu(phy_data->d3); u16 phy_data4 = le16_to_cpu(phy_data->d4); + u32 rate_n_flags = phy_data->rate_n_flags; if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN); he_mu->flags1 |= le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU, phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0, phy_data2); he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1, phy_data3); he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2, phy_data2); he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3, phy_data3); } if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) && (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); he_mu->flags2 |= le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU, phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0, phy_data2); he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1, phy_data3); he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2, phy_data2); he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3, phy_data3); } } static void iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status) { /* * Unfortunately, we have to leave the mac80211 data * incorrect for the case that we receive an HE-MU * transmission and *don't* have the HE phy data (due * to the bits being used for TSF). This shouldn't * happen though as management frames where we need * the TSF/timers are not be transmitted in HE-MU. */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); + u32 rate_n_flags = phy_data->rate_n_flags; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); switch (ru) { case 0 ... 36: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; offs = ru; break; case 37 ... 52: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; offs = ru - 37; break; case 53 ... 60: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; offs = ru - 53; break; case 61 ... 64: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; offs = ru - 61; break; case 65 ... 
66: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; offs = ru - 65; break; case 67: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; case 68: rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; } he->data2 |= le16_encode_bits(offs, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80)) he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); #define CHECK_BW(bw) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) CHECK_BW(20); CHECK_BW(40); CHECK_BW(80); CHECK_BW(160); if (he_mu) he_mu->flags2 |= le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, struct iwl_mvm_rx_phy_data *phy_data, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status, - u32 rate_n_flags, int queue) + int queue) { switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_NONE: case IWL_RX_PHY_INFO_TYPE_CCK: case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY: case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: return; case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: /* HE common */ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { he->data1 |= 
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_UPLINK), IEEE80211_RADIOTAP_HE_DATA3_UL_DL); } he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), IEEE80211_RADIOTAP_HE_DATA6_TXOP); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_DOPPLER), IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); break; default: /* nothing here */ break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); he_mu->flags2 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); - iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); + iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_TB: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: - iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, - he, he_mu, rx_status); + iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status); break; case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BEAM_CHNG), IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); break; default: /* nothing */ break; } } +#define LE32_DEC_ENC(value, dec_bits, enc_bits) \ + le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) + +#define IWL_MVM_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \ + typeof(enc_bits) _enc_bits = enc_bits; \ + typeof(usig) _usig = usig; \ + (_usig)->mask |= cpu_to_le32(_enc_bits); \ + (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \ +} while (0) + +#define __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + eht->data[(rt_data)] |= \ + (cpu_to_le32 \ + (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \ + LE32_DEC_ENC(data ## fw_data, \ + IWL_RX_PHY_DATA ## fw_data 
## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \ + IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru)) + +#define _IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) + +#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4 + +#define IWL_RX_RU_DATA_A1 2 +#define IWL_RX_RU_DATA_A2 2 +#define IWL_RX_RU_DATA_B1 2 +#define IWL_RX_RU_DATA_B2 3 +#define IWL_RX_RU_DATA_C1 3 +#define IWL_RX_RU_DATA_C2 3 +#define IWL_RX_RU_DATA_D1 4 +#define IWL_RX_RU_DATA_D2 4 + +#define IWL_MVM_ENC_EHT_RU(rt_ru, fw_ru) \ + _IWL_MVM_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \ + rt_ru, \ + IWL_RX_RU_DATA_ ## fw_ru, \ + fw_ru) + +static void iwl_mvm_decode_eht_ext_mu(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data1 = phy_data->d1; + __le32 data2 = phy_data->d2; + __le32 data3 = phy_data->d3; + __le32 data4 = phy_data->eht_d4; + __le32 data5 = phy_data->d5; + u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data4, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MVM_ENC_USIG_VALUE_MASK + (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) | + LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M); + eht->data[7] |= LE32_DEC_ENC + (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + + /* + * Hardware labels the content channels/RU allocation values + * as follows: + * Content Channel 1 Content Channel 2 + * 20 MHz: A1 + * 40 MHz: A1 B1 + * 80 MHz: A1 C1 B1 D1 + * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2 + * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4 + * + * However firmware can only give us A1-D2, so the higher + * frequencies are missing. 
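Hand expansion for readability (mine, not emitted by the patch): with the defines above, IEEE80211_RADIOTAP_RU_DATA_1_2_1 is 3 and IWL_RX_RU_DATA_A2 is 2, so the 160 MHz case's IWL_MVM_ENC_EHT_RU(1_2_1, A2) below resolves to approximately:

eht->data[3] |=
	cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN) |
	LE32_DEC_ENC(data2, IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2,
		     IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1);

i.e. the A2 allocation that firmware reports in phy data word 2 is copied into radiotap EHT data[3] as a content-channel-1 RU allocation field, together with its _KNOWN bit.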
+ */ + + switch (phy_bw) { + case RATE_MCS_CHAN_WIDTH_320: + /* additional values are missing in RX metadata */ + case RATE_MCS_CHAN_WIDTH_160: + /* content channel 1 */ + IWL_MVM_ENC_EHT_RU(1_2_1, A2); + IWL_MVM_ENC_EHT_RU(1_2_2, C2); + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_2_1, B2); + IWL_MVM_ENC_EHT_RU(2_2_2, D2); + fallthrough; + case RATE_MCS_CHAN_WIDTH_80: + /* content channel 1 */ + IWL_MVM_ENC_EHT_RU(1_1_2, C1); + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_1_2, D1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_40: + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_1_1, B1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_20: + IWL_MVM_ENC_EHT_RU(1_1_1, A1); + break; + } + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_SIG_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MVM_ENC_USIG_VALUE_MASK + (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC); + } +} + +static void iwl_mvm_decode_eht_ext_tb(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data5 = phy_data->d5; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD, + 
IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC); + } +} + +static void iwl_mvm_decode_eht_ru(struct iwl_mvm *mvm, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) +{ + u32 ru = le32_get_bits(eht->data[8], + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + enum nl80211_eht_ru_alloc nl_ru; + + /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields + * in an EHT variant User Info field + */ + + switch (ru) { + case 0 ... 36: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26; + break; + case 37 ... 52: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52; + break; + case 53 ... 60: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106; + break; + case 61 ... 64: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242; + break; + case 65 ... 66: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484; + break; + case 67: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996; + break; + case 68: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996; + break; + case 69: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996; + break; + case 70 ... 81: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26; + break; + case 82 ... 89: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26; + break; + case 90 ... 93: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242; + break; + case 94 ... 95: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484; + break; + case 96 ... 99: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242; + break; + case 100 ... 103: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484; + break; + case 104: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996; + break; + case 105 ... 106: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484; + break; + default: + return; + } + + rx_status->bw = RATE_INFO_BW_EHT_RU; + rx_status->eht.ru = nl_ru; +} + +static void iwl_mvm_decode_eht_phy_data(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) + +{ + __le32 data0 = phy_data->d0; + __le32 data1 = phy_data->d1; + __le32 usig_a1 = phy_data->rx_vec[0]; + u8 info_type = phy_data->info_type; + + /* Not in EHT range */ + if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU || + info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT) + return; + + usig->common |= cpu_to_le32 + (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN); + if (phy_data->with_data) { + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_UPLINK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } else { + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_UL_FLAG, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT)) { + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED); + usig->common |= + LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK); + } + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE); + eht->data[0] |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* All RU 
allocating size/index is in TB format */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT); + eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + + iwl_mvm_decode_eht_ru(mvm, rx_status, eht); + + /* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + * which is on only in case of monitor mode so no need to check monitor + * mode + */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80); + eht->data[1] |= + le32_encode_bits(mvm->monitor_p80, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN); + if (phy_data->with_data) + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + else + usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */ + + if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK)) + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN); + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER); + + /* + * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE, + * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS + */ + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF); + eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB) + iwl_mvm_decode_eht_ext_tb(mvm, phy_data, rx_status, eht, usig); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU) + iwl_mvm_decode_eht_ext_mu(mvm, phy_data, rx_status, eht, usig); +} + +static void iwl_mvm_rx_eht(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_mvm_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_eht_usig *usig; + size_t eht_len = sizeof(*eht); + + u32 rate_n_flags = phy_data->rate_n_flags; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + /* EHT and HE have the same valus for LTF */ + u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; + u16 phy_info = phy_data->phy_info; + u32 bw; + + /* u32 for 1 user_info */ + if (phy_data->with_data) + eht_len += sizeof(u32); + + eht = iwl_mvm_radiotap_put_tlv(skb, 
IEEE80211_RADIOTAP_EHT, eht_len); + + usig = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, + sizeof(*usig)); + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN); + + /* specific handling for 320MHz */ + bw = FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags); + if (bw == RATE_MCS_CHAN_WIDTH_320_VAL) + bw += FIELD_GET(IWL_RX_PHY_DATA0_EHT_BW320_SLOT, + le32_to_cpu(phy_data->d0)); + + usig->common |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw)); + + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && + (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + iwl_mvm_decode_eht_phy_data(mvm, phy_data, rx_status, eht, usig); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + switch (FIELD_GET(RATE_MCS_HE_GI_LTF_MSK, rate_n_flags)) { + case 0: + if (he_type == RATE_MCS_HE_TYPE_TRIG) { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; + } else { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + } + break; + case 1: + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + break; + case 2: + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + else + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + break; + case 3: + if (he_type != RATE_MCS_HE_TYPE_TRIG) { + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + } + break; + default: + /* nothing here */ + break; + } + + if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); + eht->data[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, + ltf) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, + rx_status->eht.gi)); + } + + + if (!phy_data->with_data) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S); + eht->data[7] |= + le32_encode_bits(le32_get_bits(phy_data->rx_vec[2], + RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK), + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->data[7] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + } else { + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER); + + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->user_info[0] |= + 
cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); + + eht->user_info[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS, + FIELD_GET(RATE_VHT_MCS_RATE_CODE_MSK, + rate_n_flags)) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O, + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags))); + } +} + static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, u16 phy_info, int queue) + int queue) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; + u32 rate_n_flags = phy_data->rate_n_flags; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; - u8 stbc, ltf; + u8 ltf; static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), }; static const struct ieee80211_radiotap_he_mu mu_known = { .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; + u16 phy_info = phy_data->phy_info; he = skb_put_data(skb, &known, sizeof(known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE; if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } /* report the AMPDU-EOF bit on single frames */ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status, - rate_n_flags, queue); + queue); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && - (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; - - /* toggle is switched whenever new aggregation starts */ - if (toggle_bit != mvm->ampdu_toggle) { - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } + (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (he_type == RATE_MCS_HE_TYPE_EXT_SU && rate_n_flags & RATE_MCS_HE_106T_MSK) { rx_status->bw = RATE_INFO_BW_HE_RU; rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; } /* actually data is filled in mac80211 */ if (he_type == RATE_MCS_HE_TYPE_SU || he_type == RATE_MCS_HE_TYPE_EXT_SU) he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); - stbc = (rate_n_flags & 
RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_HE; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - - rx_status->he_dcm = - !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); - #define CHECK_TYPE(F) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) CHECK_TYPE(SU); CHECK_TYPE(EXT_SU); CHECK_TYPE(MU); CHECK_TYPE(TRIG); he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); if (rate_n_flags & RATE_MCS_BF_MSK) he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> RATE_MCS_HE_GI_LTF_POS) { case 0: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; if (he_type == RATE_MCS_HE_TYPE_MU) ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; else ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; break; case 1: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; break; case 2: if (he_type == RATE_MCS_HE_TYPE_TRIG) { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; } else { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; } break; case 3: rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; case 4: rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; default: ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; } he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); } static void iwl_mvm_decode_lsig(struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_lsig *lsig; switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: lsig = skb_put(skb, sizeof(*lsig)); lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_LSIG_LEN_MASK), IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; break; default: break; } } static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band) { switch (phy_band) { case PHY_BAND_24: return NL80211_BAND_2GHZ; case PHY_BAND_5: return NL80211_BAND_5GHZ; case PHY_BAND_6: return NL80211_BAND_6GHZ; default: WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); return NL80211_BAND_5GHZ; } } struct iwl_rx_sta_csa { bool all_sta_unblocked; struct ieee80211_vif *vif; }; static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_rx_sta_csa *rx_sta_csa = data; if (mvmsta->vif != rx_sta_csa->vif) return; if 
(mvmsta->disable_tx) rx_sta_csa->all_sta_unblocked = false; } +/* + * Note: requires also rx_status->band to be prefilled, as well + * as phy_data (apart from phy_data->info_type) + */ +static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, + struct sk_buff *skb, + struct iwl_mvm_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u32 rate_n_flags = phy_data->rate_n_flags; + u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + bool is_sgi; + + phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE; + + if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + phy_data->info_type = + le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + + /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->bw = RATE_INFO_BW_160; + break; + case RATE_MCS_CHAN_WIDTH_320: + rx_status->bw = RATE_INFO_BW_320; + break; + } + + /* must be before L-SIG data */ + if (format == RATE_MCS_HE_MSK) + iwl_mvm_rx_he(mvm, skb, phy_data, queue); + + iwl_mvm_decode_lsig(skb, phy_data); + + rx_status->device_timestamp = phy_data->gp2_on_air_rise; + rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel, + rx_status->band); + iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, + phy_data->energy_a, phy_data->energy_b); + + /* using TLV format and must be after all fixed len fields */ + if (format == RATE_MCS_EHT_MSK) + iwl_mvm_rx_eht(mvm, skb, phy_data, queue); + + if (unlikely(mvm->monitor_on)) + iwl_mvm_add_rtap_sniffer_config(mvm, skb); + + is_sgi = format == RATE_MCS_HE_MSK ? 
+ iwl_he_is_sgi(rate_n_flags) : + rate_n_flags & RATE_MCS_SGI_MSK; + + if (!(format == RATE_MCS_CCK_MSK) && is_sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; + + switch (format) { + case RATE_MCS_VHT_MSK: + rx_status->encoding = RX_ENC_VHT; + break; + case RATE_MCS_HE_MSK: + rx_status->encoding = RX_ENC_HE; + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + break; + case RATE_MCS_EHT_MSK: + rx_status->encoding = RX_ENC_EHT; + break; + } + + switch (format) { + case RATE_MCS_HT_MSK: + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + case RATE_MCS_VHT_MSK: + case RATE_MCS_HE_MSK: + case RATE_MCS_EHT_MSK: + rx_status->nss = + u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + default: { + int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + rx_status->band); + + rx_status->rate_idx = rate; + + if ((rate < 0 || rate > 0xFF)) { + rx_status->rate_idx = 0; + if (net_ratelimit()) + IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band); + } + + break; + } + } +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; struct ieee80211_hdr *hdr; u32 len; u32 pkt_len = iwl_rx_packet_payload_len(pkt); - u32 rate_n_flags, gp2_on_air_rise; - u16 phy_info; struct ieee80211_sta *sta = NULL; + struct ieee80211_link_sta *link_sta = NULL; struct sk_buff *skb; - u8 crypt_len = 0, channel, energy_a, energy_b; + u8 crypt_len = 0; size_t desc_size; - struct iwl_mvm_rx_phy_data phy_data = { - .info_type = IWL_RX_PHY_INFO_TYPE_NONE, - }; + struct iwl_mvm_rx_phy_data phy_data = {}; u32 format; - bool is_sgi; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) desc_size = sizeof(*desc); else desc_size = IWL_RX_DESC_SIZE_V1; if (unlikely(pkt_len < desc_size)) { IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n"); return; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); - channel = desc->v3.channel; - gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); - energy_a = desc->v3.energy_a; - energy_b = desc->v3.energy_b; + phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + phy_data.channel = desc->v3.channel; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); + phy_data.energy_a = desc->v3.energy_a; + phy_data.energy_b = desc->v3.energy_b; phy_data.d0 = desc->v3.phy_data0; phy_data.d1 = desc->v3.phy_data1; phy_data.d2 = desc->v3.phy_data2; phy_data.d3 = desc->v3.phy_data3; + phy_data.eht_d4 = desc->phy_eht_data4; + phy_data.d5 = desc->v3.phy_data5; } else { - rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); - channel = desc->v1.channel; - gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); - energy_a = desc->v1.energy_a; - energy_b = desc->v1.energy_b; + phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); + phy_data.channel = desc->v1.channel; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); + phy_data.energy_a = 
desc->v1.energy_a; + phy_data.energy_b = desc->v1.energy_b; phy_data.d0 = desc->v1.phy_data0; phy_data.d1 = desc->v1.phy_data1; phy_data.d2 = desc->v1.phy_data2; phy_data.d3 = desc->v1.phy_data3; } + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, REPLY_RX_MPDU_CMD, 0) < 4) { - rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n", - rate_n_flags); + phy_data.rate_n_flags); } - format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + + format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; len = le16_to_cpu(desc->mpdu_len); if (unlikely(len + desc_size > pkt_len)) { IWL_DEBUG_DROP(mvm, "FW lied about packet len\n"); return; } - phy_info = le16_to_cpu(desc->phy_info); + phy_data.with_data = true; + phy_data.phy_info = le16_to_cpu(desc->phy_info); phy_data.d4 = desc->phy_data4; - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) - phy_data.info_type = - le32_get_bits(phy_data.d1, - IWL_RX_PHY_DATA1_INFO_TYPE_MASK); - hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { /* * If the device inserted padding it means that (it thought) * the 802.11 header wasn't a multiple of 4 bytes long. In * this case, reserve two bytes at the start of the SKB to * align the payload properly in case we end up copying it. */ skb_reserve(skb, 2); } rx_status = IEEE80211_SKB_RXCB(skb); - /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->bw = RATE_INFO_BW_40; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->bw = RATE_INFO_BW_80; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->bw = RATE_INFO_BW_160; - break; - } - - if (format == RATE_MCS_HE_MSK) - iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, - phy_info, queue); - - iwl_mvm_decode_lsig(skb, &phy_data); - /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. */ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) || !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } + /* set the preamble flag if appropriate */ if (format == RATE_MCS_CCK_MSK && - phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; - if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { + if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); else tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } - rx_status->device_timestamp = gp2_on_air_rise; if (iwl_mvm_is_band_in_rx_supported(mvm)) { u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx); rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band); } else { - rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : + rx_status->band = phy_data.channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; } - rx_status->freq = ieee80211_channel_to_frequency(channel, - rx_status->band); - iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, - energy_b); /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + bool toggle_bit; + toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; /* * Toggle is switched whenever new aggregation starts. Make * sure ampdu_reference is never 0 so we can later use it to * see if the frame was really part of an A-MPDU or not. */ if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; if (mvm->ampdu_ref == 0) mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; + phy_data.first_subframe = true; } rx_status->ampdu_reference = mvm->ampdu_ref; } - if (unlikely(mvm->monitor_on)) - iwl_mvm_add_rtap_sniffer_config(mvm, skb); - rcu_read_lock(); if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]); } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* * This is fine since we prevent two stations with the same * address from being added. */ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } - if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc, + if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); goto out; } + iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue); + if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_vif *vif = mvmsta->vif; if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && ieee80211_is_data(hdr->frame_control) && time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); /* * We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. 
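Illustrative aside (plain C, hypothetical types; not part of the patch): the A-MPDU bookkeeping used earlier in this function - the PHY flips a toggle bit whenever a new aggregation starts, and the driver turns that into a never-zero reference - follows this pattern:

#include <stdbool.h>
#include <stdint.h>

struct ampdu_state {
	bool toggle;		/* last toggle bit seen from the PHY */
	uint32_t ref;		/* current A-MPDU reference, 0 means "none yet" */
	bool first_subframe;
};

static uint32_t ampdu_track(struct ampdu_state *st, bool toggle_bit)
{
	st->first_subframe = false;
	if (toggle_bit != st->toggle) {		/* a new aggregation started */
		st->ref++;
		if (st->ref == 0)		/* keep 0 reserved for "not an A-MPDU" */
			st->ref++;
		st->toggle = toggle_bit;
		st->first_subframe = true;
	}
	return st->ref;
}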
*/ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); struct iwl_rx_sta_csa rx_sta_csa = { .all_sta_unblocked = true, .vif = tx_blocked_vif, }; if (mvmvif->csa_target_freq == rx_status->freq) iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_rx_get_sta_block_tx, &rx_sta_csa); if (rx_sta_csa.all_sta_unblocked) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); /* Unblock BCAST / MCAST station */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); - cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); + cancel_delayed_work(&mvm->cs_tx_unblock_dwork); } } rs_update_last_rssi(mvm, mvmsta, rx_status); trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI); if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; s32 rssi; rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, #if defined(__linux__) NULL); #elif defined(__FreeBSD__) ""); #endif } if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(mvm, sta, skb, pkt); if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } /* * Our hardware de-aggregates AMSDUs but copies the mac header * as it to the de-aggregated MPDUs. We need to turn off the * AMSDU bit in the QoS control ourselves. * In addition, HW reverses addr3 and addr4 - reverse it back. */ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { u8 *qc = ieee80211_get_qos_ctl(hdr); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { iwl_mvm_flip_address(hdr->addr3); if (ieee80211_has_a4(hdr->frame_control)) iwl_mvm_flip_address(hdr->addr4); } } if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { u32 reorder_data = le32_to_cpu(desc->reorder_data); iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } } - is_sgi = format == RATE_MCS_HE_MSK ? 
- iwl_he_is_sgi(rate_n_flags) : - rate_n_flags & RATE_MCS_SGI_MSK; - - if (!(format == RATE_MCS_CCK_MSK) && is_sgi) - rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (format == RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (format == RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_VHT; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (!(format == RATE_MCS_HE_MSK)) { - int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, - rx_status->band); - - if (WARN(rate < 0 || rate > 0xFF, - "Invalid rate flags 0x%x, band %d,\n", - rate_n_flags, rx_status->band)) { - kfree_skb(skb); - goto out; - } - rx_status->rate_idx = rate; - } - /* management stuff on default queue */ if (!queue) { if (unlikely((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; if (unlikely(ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boottime_ns(); } if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { kfree_skb(skb); goto out; } - if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, - sta); + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) && + likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) && + likely(!iwl_mvm_mei_filter_scan(mvm, skb))) + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, + link_sta); out: rcu_read_unlock(); } void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_no_data *desc = (void *)pkt->data; - u32 rate_n_flags = le32_to_cpu(desc->rate); - u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); - u32 rssi = le32_to_cpu(desc->rssi); - u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; - u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data; + u32 rssi; + u32 info_type; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; - u8 channel, energy_a, energy_b; + struct iwl_mvm_rx_phy_data phy_data; u32 format; - struct iwl_mvm_rx_phy_data phy_data = { - .info_type = le32_get_bits(desc->phy_info[1], - IWL_RX_PHY_DATA1_INFO_TYPE_MASK), - .d0 = desc->phy_info[0], - .d1 = desc->phy_info[1], - }; - bool is_sgi; + + if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) + return; + + if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(struct iwl_rx_no_data))) + return; + + rssi = le32_to_cpu(desc->rssi); + info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; + phy_data.d0 = desc->phy_info[0]; + phy_data.d1 = desc->phy_info[1]; + phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + phy_data.gp2_on_air_rise = 
le32_to_cpu(desc->on_air_rise_time); + phy_data.rate_n_flags = le32_to_cpu(desc->rate); + phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK); + phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK); + phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK); + phy_data.with_data = false; + phy_data.rx_vec[0] = desc->rx_vec[0]; + phy_data.rx_vec[1] = desc->rx_vec[1]; if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, RX_NO_DATA_NOTIF, 0) < 2) { IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n", - rate_n_flags); - rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + phy_data.rate_n_flags); + phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n", - rate_n_flags); + phy_data.rate_n_flags); } - format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - - if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc))) - return; - if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) - return; + format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; - energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; - channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + RX_NO_DATA_NOTIF, 0) >= 3) { + if (unlikely(iwl_rx_packet_payload_len(pkt) < + sizeof(struct iwl_rx_no_data_ver_3))) + /* invalid len for ver 3 */ + return; + phy_data.rx_vec[2] = desc->rx_vec[2]; + phy_data.rx_vec[3] = desc->rx_vec[3]; + } else { + if (format == RATE_MCS_EHT_MSK) + /* no support for EHT before version 3 API */ + return; + } /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } rx_status = IEEE80211_SKB_RXCB(skb); /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; switch (info_type) { case RX_NO_DATA_INFO_TYPE_NDP: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; break; case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: - case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED: + case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; break; default: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; break; } - /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->bw = RATE_INFO_BW_40; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->bw = RATE_INFO_BW_80; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->bw = RATE_INFO_BW_160; - break; - } - - if (format == RATE_MCS_HE_MSK) - iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, - phy_info, queue); - - iwl_mvm_decode_lsig(skb, &phy_data); - - rx_status->device_timestamp = gp2_on_air_rise; - rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : + rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; - rx_status->freq = ieee80211_channel_to_frequency(channel, - rx_status->band); - iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, - energy_b); - rcu_read_lock(); + iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue); - is_sgi = format == RATE_MCS_HE_MSK ? - iwl_he_is_sgi(rate_n_flags) : - rate_n_flags & RATE_MCS_SGI_MSK; + /* no more radio tap info should be put after this point. 
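Illustrative aside (made-up mask values, plain C; not part of the patch): energy_a, energy_b and the channel all travel in the single rssi word of the no-data notification and are unpacked with field masks, along these lines:

#include <stdint.h>

#define NO_DATA_CHAIN_A_MSK	0x000000ffu	/* illustrative values only */
#define NO_DATA_CHAIN_B_MSK	0x0000ff00u
#define NO_DATA_CHANNEL_MSK	0x00ff0000u

/* Same idea as the kernel's u32_get_bits(): mask, then shift down by the
 * mask's lowest set bit. */
static uint32_t get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) / (mask & -mask);
}

/* e.g. rssi = 0x00244452 -> channel 0x24 (36), energy_b 0x44, energy_a 0x52 */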
+ * + * We mark it as mac header, for upper layers to know where + * all radio tap header ends. + */ + skb_reset_mac_header(skb); - if (!(format == RATE_MCS_CCK_MSK) && is_sgi) - rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (format == RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (format == RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_VHT; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - /* - * take the nss from the rx_vec since the rate_n_flags has - * only 2 bits for the nss which gives a max of 4 ss but - * there may be up to 8 spatial streams - */ + /* + * Override the nss from the rx_vec since the rate_n_flags has + * only 2 bits for the nss which gives a max of 4 ss but there + * may be up to 8 spatial streams. + */ + switch (format) { + case RATE_MCS_VHT_MSK: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; - } else if (format == RATE_MCS_HE_MSK) { + break; + case RATE_MCS_HE_MSK: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; - } else { - int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, - rx_status->band); - - if (WARN(rate < 0 || rate > 0xFF, - "Invalid rate flags 0x%x, band %d,\n", - rate_n_flags, rx_status->band)) { - kfree_skb(skb); - goto out; - } - rx_status->rate_idx = rate; + break; + case RATE_MCS_EHT_MSK: + rx_status->nss = + le32_get_bits(desc->rx_vec[2], + RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1; } + rcu_read_lock(); ieee80211_rx_napi(mvm->hw, sta, skb, napi); -out: rcu_read_unlock(); } void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_frame_release *release = (void *)pkt->data; if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release))) return; iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, le16_to_cpu(release->nssn), queue, 0); } void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bar_frame_release *release = (void *)pkt->data; unsigned int baid = le32_get_bits(release->ba_info, IWL_BAR_FRAME_RELEASE_BAID_MASK); unsigned int nssn = le32_get_bits(release->ba_info, IWL_BAR_FRAME_RELEASE_NSSN_MASK); unsigned int sta_id = le32_get_bits(release->sta_tid, IWL_BAR_FRAME_RELEASE_STA_MASK); unsigned int tid = le32_get_bits(release->sta_tid, IWL_BAR_FRAME_RELEASE_TID_MASK); struct iwl_mvm_baid_data *baid_data; if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release))) return; if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || baid >= ARRAY_SIZE(mvm->baid_map))) return; rcu_read_lock(); baid_data = rcu_dereference(mvm->baid_map[baid]); if (!baid_data) { IWL_DEBUG_RX(mvm, "Got valid BAID %d but not allocated, invalid BAR release!\n", baid); goto out; } - if (WARN(tid != baid_data->tid || sta_id != baid_data->sta_id, - "baid 0x%x is mapped to sta:%d tid:%d, but BAR release received for sta:%d tid:%d\n", 
- baid, baid_data->sta_id, baid_data->tid, sta_id, + if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX || + !(baid_data->sta_mask & BIT(sta_id)), + "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, sta_id, tid)) goto out; iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0); out: rcu_read_unlock(); } diff --git a/sys/contrib/dev/iwlwifi/mvm/scan.c b/sys/contrib/dev/iwlwifi/mvm/scan.c index 8c5d6860914b..7b6e7c377c55 100644 --- a/sys/contrib/dev/iwlwifi/mvm/scan.c +++ b/sys/contrib/dev/iwlwifi/mvm/scan.c @@ -1,3259 +1,3446 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include "mvm.h" #include "fw/api/scan.h" #include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 #define IWL_SCAN_DWELL_ACTIVE 10 #define IWL_SCAN_DWELL_PASSIVE 110 #define IWL_SCAN_DWELL_FRAGMENTED 44 #define IWL_SCAN_DWELL_EXTENDED 90 #define IWL_SCAN_NUM_OF_FRAGS 3 /* adaptive dwell max budget time [TU] for full scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 /* adaptive dwell max budget time [TU] for directed scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 /* adaptive dwell default high band APs number */ #define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8 /* adaptive dwell default low band APs number */ #define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2 /* adaptive dwell default APs number in social channels (1, 6, 11) */ #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 /* number of scan channels */ #define IWL_SCAN_NUM_CHANNELS 112 /* adaptive dwell number of APs override mask for p2p friendly GO */ #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20) /* adaptive dwell number of APs override mask for social channels */ #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21) /* adaptive dwell number of APs override for p2p friendly GO channels */ #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10 /* adaptive dwell number of APs override for social channels */ #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 /* minimal number of 2GHz and 5GHz channels in the regular scan request */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 +/* Number of iterations on the channel for mei filtered scan */ +#define IWL_MEI_SCAN_NUM_ITER 5U + struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; }; static struct iwl_mvm_scan_timing_params scan_timing[] = { [IWL_SCAN_TYPE_UNASSOC] = { .suspend_time = 0, .max_out_time = 0, }, [IWL_SCAN_TYPE_WILD] = { .suspend_time = 30, .max_out_time = 120, }, [IWL_SCAN_TYPE_MILD] = { .suspend_time = 120, .max_out_time = 120, }, [IWL_SCAN_TYPE_FRAGMENTED] = { .suspend_time = 95, .max_out_time = 44, }, [IWL_SCAN_TYPE_FAST_BALANCE] = { .suspend_time = 30, .max_out_time = 37, }, }; struct iwl_mvm_scan_params { /* For CDB this is low band scan type, for non-CDB - type. 
*/ enum iwl_mvm_scan_type type; enum iwl_mvm_scan_type hb_type; u32 n_channels; u16 delay; int n_ssids; struct cfg80211_ssid *ssids; struct ieee80211_channel **channels; u32 flags; u8 *mac_addr; u8 *mac_addr_mask; bool no_cck; bool pass_all; int n_match_sets; struct iwl_scan_probe_req preq; struct cfg80211_match_set *match_sets; int n_scan_plans; struct cfg80211_sched_scan_plan *scan_plans; bool iter_notif; struct cfg80211_scan_6ghz_params *scan_6ghz_params; u32 n_6ghz_params; bool scan_6ghz; bool enable_6ghz_passive; bool respect_p2p_go, respect_p2p_go_hb; + u8 bssid[ETH_ALEN] __aligned(2); }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) return (void *)&cmd->v8.data; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return (void *)&cmd->v7.data; if (iwl_mvm_cdb_scan_api(mvm)) return (void *)&cmd->v6.data; return (void *)&cmd->v1.data; } static inline struct iwl_scan_umac_chan_param * iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) return &cmd->v8.channel; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return &cmd->v7.channel; if (iwl_mvm_cdb_scan_api(mvm)) return &cmd->v6.channel; return &cmd->v1.channel; } static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) return mvm->scan_rx_ant; return iwl_mvm_get_valid_rx_ant(mvm); } static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) { u16 rx_chain; u8 rx_ant; rx_ant = iwl_mvm_scan_rx_ant(mvm); rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; return cpu_to_le16(rx_chain); } static inline __le32 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, bool no_cck) { u32 tx_ant; iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; if (band == NL80211_BAND_2GHZ && !no_cck) return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 | tx_ant); else return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) { return mvm->tcm.result.global_load; } static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) { return mvm->tcm.result.band_load[band]; } struct iwl_mvm_scan_iter_data { u32 global_cnt; struct ieee80211_vif *current_vif; bool is_dcm_with_p2p_go; }; static void iwl_mvm_scan_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_scan_iter_data *data = _data; struct iwl_mvm_vif *curr_mvmvif; - if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < NUM_PHY_CTX) + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && + mvmvif->deflink.phy_ctxt && + mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX) data->global_cnt += 1; if (!data->current_vif || vif == data->current_vif) return; curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) + mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt && + mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) data->is_dcm_with_p2p_go = 
true; } static enum iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_traffic_load load, bool low_latency) { struct iwl_mvm_scan_iter_data data = { .current_vif = vif, .is_dcm_with_p2p_go = false, .global_cnt = 0, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_iterator, &data); if (!data.global_cnt) return IWL_SCAN_TYPE_UNASSOC; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) return IWL_SCAN_TYPE_FRAGMENTED; /* * in case of DCM with GO where BSS DTIM interval < 220msec * set all scan requests as fast-balance scan */ if (vif && vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.dtim_period < 220 && - data.is_dcm_with_p2p_go) + data.is_dcm_with_p2p_go && + ((vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period) < 220)) return IWL_SCAN_TYPE_FAST_BALANCE; } if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) return IWL_SCAN_TYPE_MILD; return IWL_SCAN_TYPE_WILD; } static enum iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { enum iwl_mvm_traffic_load load; bool low_latency; load = iwl_mvm_get_traffic_load(mvm); low_latency = iwl_mvm_low_latency(mvm); return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static enum iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band) { enum iwl_mvm_traffic_load load; bool low_latency; load = iwl_mvm_get_traffic_load_band(mvm, band); low_latency = iwl_mvm_low_latency_band(mvm, band); return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); } static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) { int max_probe_len; max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; /* we create the 802.11 header and SSID element */ max_probe_len -= 24 + 2; /* DS parameter set element is added on 2.4GHZ band if required */ if (iwl_mvm_rrm_scan_needed(mvm)) max_probe_len -= 3; return max_probe_len; } int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) { int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm); /* TODO: [BUG] This function should return the maximum allowed size of * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs * in the same command. So the correct implementation of this function * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan * command has only 512 bytes and it would leave us with about 240 * bytes for scan IEs, which is clearly not enough. So meanwhile * we will report an incorrect value. This may result in a failure to * issue a scan in unified_scan_lmac and unified_sched_scan_lmac * functions with -ENOBUFS, if a large enough probe will be provided. 
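To make the reworked fast-balance condition in the hunk above concrete, here is a small worked example with illustrative (not patch-derived) beacon parameters; the point of the change is that the 220 threshold now applies to the actual DTIM interval in TUs rather than to the DTIM period count:

/* Illustrative values only, not taken from the patch. */
u16 beacon_int = 100;	/* beacon interval in TUs, a common default */
u8 dtim_period = 2;	/* DTIM every second beacon */

/*
 * Old condition: dtim_period < 220 -- true for virtually any real
 * configuration, so DCM-with-P2P-GO almost always forced FAST_BALANCE.
 * New condition: the DTIM interval itself must be short:
 *   100 TU * 2 = 200 < 220  -> IWL_SCAN_TYPE_FAST_BALANCE
 *   100 TU * 3 = 300 >= 220 -> fall through to MILD/WILD selection
 */
bool use_fast_balance = (beacon_int * dtim_period) < 220;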
*/ return max_ie_len; } void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; IWL_DEBUG_SCAN(mvm, "Scan offload iteration complete: status=0x%x scanned channels=%d\n", notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); ieee80211_sched_scan_results(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; } } void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); } static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) { switch (status) { case IWL_SCAN_EBS_SUCCESS: return "successful"; case IWL_SCAN_EBS_INACTIVE: return "inactive"; case IWL_SCAN_EBS_FAILED: case IWL_SCAN_EBS_CHAN_NOT_FOUND: default: return "failed"; } } void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); /* If this happens, the firmware has mistakenly sent an LMAC * notification during UMAC scans -- warn and ignore it. */ if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))) return; /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); /* We first check if we were stopping a scan, in which case we * just clear the stopping flag. Then we check if it was a * firmware initiated stop, in which case we need to inform * mac80211. * Note that we can have a stopping and a running scan * simultaneously, but we can't have two different types of * scans stopping or running at the same time (since LMAC * doesn't support it). */ if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time after last iteration %d\n", scan_notif->last_schedule_line, scan_notif->last_schedule_iteration, __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", scan_notif->last_schedule_line, scan_notif->last_schedule_iteration, __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = aborted, }; IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? 
"aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; ieee80211_scan_completed(mvm->hw, &info); cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else { IWL_ERR(mvm, "got scan complete notification but no scan is running\n"); } mvm->last_ebs_successful = scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; } static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) { int i; for (i = 0; i < PROBE_OPTION_MAX; i++) { if (!ssid_list[i].len) break; if (ssid_list[i].len == ssid_len && #if defined(__linux__) !memcmp(ssid_list->ssid, ssid, ssid_len)) #elif defined(__FreeBSD__) !memcmp(ssid_list[i].ssid, ssid, ssid_len)) #endif return i; } return -1; } /* We insert the SSIDs in an inverted order, because the FW will * invert it back. */ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, struct iwl_ssid_ie *ssids, u32 *ssid_bitmap) { int i, j; int index; u32 tmp_bitmap = 0; /* * copy SSIDs from match list. * iwl_config_sched_scan_profiles() uses the order of these ssids to * config match list. */ for (i = 0, j = params->n_match_sets - 1; j >= 0 && i < PROBE_OPTION_MAX; i++, j--) { /* skip empty SSID matchsets */ if (!params->match_sets[j].ssid.ssid_len) continue; ssids[i].id = WLAN_EID_SSID; ssids[i].len = params->match_sets[j].ssid.ssid_len; memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid, ssids[i].len); } /* add SSIDs from scan SSID list */ for (j = params->n_ssids - 1; j >= 0 && i < PROBE_OPTION_MAX; i++, j--) { index = iwl_ssid_exist(params->ssids[j].ssid, params->ssids[j].ssid_len, ssids); if (index < 0) { ssids[i].id = WLAN_EID_SSID; ssids[i].len = params->ssids[j].ssid_len; memcpy(ssids[i].ssid, params->ssids[j].ssid, ssids[i].len); tmp_bitmap |= BIT(i); } else { tmp_bitmap |= BIT(index); } } if (ssid_bitmap) *ssid_bitmap = tmp_bitmap; } static int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { struct iwl_scan_offload_profile *profile; struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1; struct iwl_scan_offload_blocklist *blocklist; struct iwl_scan_offload_profile_cfg_data *data; int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); int profile_cfg_size = sizeof(*data) + sizeof(*profile) * max_profiles; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, .len[1] = profile_cfg_size, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, .dataflags[1] = IWL_HCMD_DFL_NOCOPY, }; int blocklist_len; int i; int ret; if (WARN_ON(req->n_match_sets > max_profiles)) return -EIO; if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; else blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL); if (!blocklist) return -ENOMEM; profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL); if (!profile_cfg_v1) { ret = -ENOMEM; goto free_blocklist; } cmd.data[0] = blocklist; cmd.len[0] = sizeof(*blocklist) * blocklist_len; cmd.data[1] = profile_cfg_v1; /* if max_profile is MAX_PROFILES_V2, we have the new API */ if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) { struct iwl_scan_offload_profile_cfg *profile_cfg = (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1; data = &profile_cfg->data; } else { data = &profile_cfg_v1->data; } /* No blocklist configuration */ data->num_profiles = req->n_match_sets; data->active_clients = SCAN_CLIENT_SCHED_SCAN; data->pass_match = 
SCAN_CLIENT_SCHED_SCAN; data->match_notify = SCAN_CLIENT_SCHED_SCAN; if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; for (i = 0; i < req->n_match_sets; i++) { profile = &profile_cfg_v1->profiles[i]; profile->ssid_index = i; /* Support any cipher and auth algorithm */ profile->unicast_cipher = 0xff; profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED | IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X | IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE; profile->network_type = IWL_NETWORK_TYPE_ANY; profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; } IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); kfree(profile_cfg_v1); free_blocklist: kfree(blocklist); return ret; } static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) { if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { IWL_DEBUG_SCAN(mvm, "Sending scheduled scan with filtering, n_match_sets %d\n", req->n_match_sets); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; return false; } IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; return true; } static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) { int ret; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_ABORT_CMD, }; u32 status = CAN_ABORT_STATUS; ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); if (ret) return ret; if (status != CAN_ABORT_STATUS) { /* * The scan abort will return 1 for success or * 2 for "failure". A failure condition can be * due to simply not being in an active scan which * can occur if we send the scan abort before the * microcode has notified us that a scan is completed. 
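The filtering decision in iwl_mvm_scan_pass_all() above boils down to a single test; the helper below is a minimal sketch of that same check (hypothetical function, for illustration only, not part of the patch):

/*
 * Sketch: results are filtered by the FW only when the first match set
 * carries a real SSID; any other request passes all results to the host.
 */
static bool iwl_scan_wants_filtering(const struct cfg80211_sched_scan_request *req)
{
	return req->n_match_sets && req->match_sets[0].ssid.ssid_len;
}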
*/ IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); ret = -ENOENT; } return ret; } static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, struct iwl_scan_req_tx_cmd *tx_cmd, bool no_cck) { tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, NL80211_BAND_2GHZ, no_cck); - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { tx_cmd[0].sta_id = mvm->aux_sta.sta_id; tx_cmd[1].sta_id = mvm->aux_sta.sta_id; /* * Fw doesn't use this sta anymore, pending deprecation via HOST API * change */ } else { tx_cmd[0].sta_id = 0xff; tx_cmd[1].sta_id = 0xff; } tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_BT_DIS); tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, NL80211_BAND_5GHZ, no_cck); } static void iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 ssid_bitmap, struct iwl_scan_req_lmac *cmd) { struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data; int i; for (i = 0; i < n_channels; i++) { channel_cfg[i].channel_num = cpu_to_le16(channels[i]->hw_value); channel_cfg[i].iter_count = cpu_to_le16(1); channel_cfg[i].iter_interval = 0; channel_cfg[i].flags = cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL | ssid_bitmap); } } static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, size_t len, u8 *const pos) { static const u8 before_ds_params[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES, WLAN_EID_REQUEST, WLAN_EID_EXT_SUPP_RATES, }; size_t offs; u8 *newpos = pos; if (!iwl_mvm_rrm_scan_needed(mvm)) { memcpy(newpos, ies, len); return newpos + len; } offs = ieee80211_ie_split(ies, len, before_ds_params, ARRAY_SIZE(before_ds_params), 0); memcpy(newpos, ies, offs); newpos += offs; /* Add a placeholder for DS Parameter Set element */ *newpos++ = WLAN_EID_DS_PARAMS; *newpos++ = 1; *newpos++ = 0; memcpy(newpos, ies + offs, len - offs); newpos += len - offs; return newpos; } #define WFA_TPC_IE_LEN 9 static void iwl_mvm_add_tpc_report_ie(u8 *pos) { pos[0] = WLAN_EID_VENDOR_SPECIFIC; pos[1] = WFA_TPC_IE_LEN - 2; pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff; pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff; pos[4] = WLAN_OUI_MICROSOFT & 0xff; pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC; pos[6] = 0; /* pos[7] - tx power will be inserted by the FW */ pos[7] = 0; pos[8] = 0; } static void iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_ies *ies, struct iwl_mvm_scan_params *params) { struct ieee80211_mgmt *frame = (void *)params->preq.buf; u8 *pos, *newpos; const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? params->mac_addr : NULL; /* * Unfortunately, right now the offload scan doesn't support randomising * within the firmware, so until the firmware API is ready we implement * it in the driver. This means that the scan iterations won't really be * random, only when it's restarted, but at least that helps a bit. 
*/ if (mac_addr) get_random_mask_addr(frame->sa, mac_addr, params->mac_addr_mask); else memcpy(frame->sa, vif->addr, ETH_ALEN); frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(frame->da); - eth_broadcast_addr(frame->bssid); + ether_addr_copy(frame->bssid, params->bssid); frame->seq_ctrl = 0; pos = frame->u.probe_req.variable; *pos++ = WLAN_EID_SSID; *pos++ = 0; params->preq.mac_header.offset = 0; params->preq.mac_header.len = cpu_to_le16(24 + 2); /* Insert ds parameter set element on 2.4 GHz band */ newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, ies->ies[NL80211_BAND_2GHZ], ies->len[NL80211_BAND_2GHZ], pos); params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[0].len = cpu_to_le16(newpos - pos); pos = newpos; memcpy(pos, ies->ies[NL80211_BAND_5GHZ], ies->len[NL80211_BAND_5GHZ]); params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[1].len = cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); pos += ies->len[NL80211_BAND_5GHZ]; memcpy(pos, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf); params->preq.band_data[2].len = cpu_to_le16(ies->len[NL80211_BAND_6GHZ]); pos += ies->len[NL80211_BAND_6GHZ]; memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); if (iwl_mvm_rrm_scan_needed(mvm) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) { iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len); params->preq.common_data.len = cpu_to_le16(ies->common_ie_len + WFA_TPC_IE_LEN); } else { params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); } } static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, struct ieee80211_scan_ies *ies, int n_channels) { return ((n_ssids <= PROBE_OPTION_MAX) && (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] <= iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); } static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; bool low_latency; if (iwl_mvm_is_cdb_supported(mvm)) low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ); else low_latency = iwl_mvm_low_latency(mvm); /* We can only use EBS if: * 1. the feature is supported; * 2. the last EBS was successful; * 3. if only single scan, the single scan EBS API is supported; * 4. it's not a p2p find operation. * 5. 
we are not in low latency mode, * or if fragmented ebs is supported by the FW */ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE && (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); } static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) { return params->n_scan_plans == 1 && params->scan_plans[0].iterations == 1; } static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type) { return (type == IWL_SCAN_TYPE_FRAGMENTED || type == IWL_SCAN_TYPE_FAST_BALANCE); } static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { int flags = 0; if (params->n_ssids == 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; if (params->pass_all) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; else flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->scan_iter_notif_enabled) flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; #endif if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && !iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; return flags; } static void iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req, struct iwl_scan_probe_req *src_p_req) { int i; p_req->mac_header = src_p_req->mac_header; for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++) p_req->band_data[i] = src_p_req->band_data[i]; p_req->common_data = src_p_req->common_data; memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf)); } static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params) { struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; struct iwl_scan_probe_req_v1 *preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels); u32 ssid_bitmap = 0; int i; u8 band; if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) return -EINVAL; iwl_mvm_scan_lmac_dwell(mvm, cmd, params); cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->iter_num = cpu_to_le32(1); cmd->n_channels = (u8)params->n_channels; cmd->delay = cpu_to_le32(params->delay); cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params, vif)); band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band); cmd->flags = cpu_to_le32(band); cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | MAC_FILTER_IN_BEACON); iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck); iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap); /* this API uses bits 1-20 instead of 0-19 */ ssid_bitmap <<= 1; for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = ¶ms->scan_plans[i]; cmd->schedule[i].delay = cpu_to_le16(scan_plan->interval); cmd->schedule[i].iterations = scan_plan->iterations; cmd->schedule[i].full_scan_mul = 1; } /* * If the number of iterations of the last scan plan is set to * zero, it should run infinitely. However, this is not always the case. 
* For example, when regular scan is requested the driver sets one scan * plan with one iteration. */ if (!cmd->schedule[i - 1].iterations) cmd->schedule[i - 1].iterations = 0xff; if (iwl_mvm_scan_use_ebs(mvm, vif)) { cmd->channel_opt[0].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); cmd->channel_opt[0].non_ebs_ratio = cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); cmd->channel_opt[1].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); cmd->channel_opt[1].non_ebs_ratio = cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); } iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd); iwl_mvm_scan_set_legacy_probe_req(preq, ¶ms->preq); return 0; } static int rate_to_scan_rate_flag(unsigned int rate) { static const int rate_to_scan_rate[IWL_RATE_COUNT] = { [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, }; return rate_to_scan_rate[rate]; } static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) { struct ieee80211_supported_band *band; unsigned int rates = 0; int i; band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_bitrates; i++) rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); /* Set both basic rates and supported rates */ rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); return cpu_to_le32(rates); } static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, struct iwl_scan_dwell *dwell) { dwell->active = IWL_SCAN_DWELL_ACTIVE; dwell->passive = IWL_SCAN_DWELL_PASSIVE; dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED; dwell->extended = IWL_SCAN_DWELL_EXTENDED; } static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels, u32 max_channels) { struct ieee80211_supported_band *band; int i, j = 0; band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; for (i = 0; i < band->n_channels && j < max_channels; i++, j++) channels[j] = band->channels[i].hw_value; } static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags, u32 max_channels) { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); struct iwl_scan_config_v1 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); + 
WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw)); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags, u32 max_channels) { struct iwl_scan_config_v2 *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); if (iwl_mvm_is_cdb_supported(mvm)) { enum iwl_mvm_scan_type lb_type, hb_type; lb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[lb_type].max_out_time); cfg->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[lb_type].suspend_time); cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(scan_timing[hb_type].max_out_time); cfg->suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(scan_timing[hb_type].suspend_time); } else { enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].max_out_time); cfg->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].suspend_time); } iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); + WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw)); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); } static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) { void *cfg; int ret, cmd_size; struct iwl_host_cmd cmd = { .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD), }; enum iwl_mvm_scan_type type; enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; int num_channels = mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; u32 flags; u8 channel_flags; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) num_channels = mvm->fw->ucode_capa.n_scan_channels; if (iwl_mvm_is_cdb_supported(mvm)) { type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_2GHZ); hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, NL80211_BAND_5GHZ); if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) return 0; } else { type = iwl_mvm_get_scan_type(mvm, NULL); if (type == mvm->scan_type) return 0; } if (iwl_mvm_cdb_scan_api(mvm)) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) return -ENOMEM; flags = SCAN_CONFIG_FLAG_ACTIVATE | SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | SCAN_CONFIG_FLAG_SET_RX_CHAINS | SCAN_CONFIG_FLAG_SET_AUX_STA_ID | SCAN_CONFIG_FLAG_SET_ALL_TIMES | SCAN_CONFIG_FLAG_SET_LEGACY_RATES | SCAN_CONFIG_FLAG_SET_MAC_ADDR | SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | SCAN_CONFIG_N_CHANNELS(num_channels) | (iwl_mvm_is_scan_fragmented(type) ? 
SCAN_CONFIG_FLAG_SET_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); channel_flags = IWL_CHANNEL_FLAG_EBS | IWL_CHANNEL_FLAG_ACCURATE_EBS | IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; /* * Check for fragmented scan on LMAC2 - high band. * LMAC1 - low band is checked above. */ if (iwl_mvm_cdb_scan_api(mvm)) { if (iwl_mvm_is_cdb_supported(mvm)) flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags, num_channels); } else { iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags, num_channels); } cmd.data[0] = cfg; cmd.len[0] = cmd_size; cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); if (!ret) { mvm->scan_type = type; mvm->hb_scan_type = hb_type; } kfree(cfg); return ret; } int iwl_mvm_config_scan(struct iwl_mvm *mvm) { struct iwl_scan_config cfg; struct iwl_host_cmd cmd = { .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD), .len[0] = sizeof(cfg), .data[0] = &cfg, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; if (!iwl_mvm_is_reduced_config_scan_supported(mvm)) return iwl_mvm_legacy_config_scan(mvm); memset(&cfg, 0, sizeof(cfg)); - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { cfg.bcast_sta_id = mvm->aux_sta.sta_id; } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) { /* * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD * version 5. */ cfg.bcast_sta_id = 0xff; } cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) { int i; for (i = 0; i < mvm->max_scans; i++) if (mvm->scan_uid_status[i] == status) return i; return -ENOENT; } static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { struct iwl_mvm_scan_timing_params *timing, *hb_timing; u8 active_dwell, passive_dwell; timing = &scan_timing[params->type]; active_dwell = IWL_SCAN_DWELL_ACTIVE; passive_dwell = IWL_SCAN_DWELL_PASSIVE; if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { cmd->v7.adwell_default_n_aps_social = IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; cmd->v7.adwell_default_n_aps = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm)) cmd->v9.adwell_default_hb_n_aps = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; /* if custom max budget was configured with debugfs */ if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); else if (params->ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { hb_timing = &scan_timing[params->hb_type]; cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->suspend_time); } if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { cmd->v7.active_dwell = active_dwell; 
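The adaptive-dwell budget selection used above follows one small rule built from the constants defined at the top of this file: a debugfs override wins, otherwise a directed scan gets the smaller budget. A sketch of that rule as a standalone helper (hypothetical, same semantics as the in-line code):

/* Sketch: pick the adaptive dwell max budget in TUs. */
static u16 iwl_scan_adwell_budget(const struct iwl_mvm_scan_params *params)
{
	if (IWL_MVM_ADWELL_MAX_BUDGET)		/* debugfs override */
		return IWL_MVM_ADWELL_MAX_BUDGET;
	if (params->ssids && params->ssids[0].ssid_len)
		return IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN;	/* 100 TU */
	return IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN;			/* 300 TU */
}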
cmd->v7.passive_dwell = passive_dwell; cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; } else { cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; if (iwl_mvm_is_cdb_supported(mvm)) { cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] = active_dwell; cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell; } } } else { cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; cmd->v1.active_dwell = active_dwell; cmd->v1.passive_dwell = passive_dwell; cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm)) { hb_timing = &scan_timing[params->hb_type]; cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->suspend_time); } if (iwl_mvm_cdb_scan_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } else { cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v1.max_out_time = cpu_to_le32(timing->max_out_time); cmd->v1.suspend_time = cpu_to_le32(timing->suspend_time); } } if (iwl_mvm_is_regular_scan(params)) cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); else cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) { return iwl_mvm_is_regular_scan(params) ? IWL_SCAN_PRIORITY_EXT_6 : IWL_SCAN_PRIORITY_EXT_2; } static void iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, struct iwl_scan_general_params_v11 *general_params, struct iwl_mvm_scan_params *params) { struct iwl_mvm_scan_timing_params *timing, *hb_timing; u8 active_dwell, passive_dwell; timing = &scan_timing[params->type]; active_dwell = IWL_SCAN_DWELL_ACTIVE; passive_dwell = IWL_SCAN_DWELL_PASSIVE; general_params->adwell_default_social_chn = IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; /* if custom max budget was configured with debugfs */ if (IWL_MVM_ADWELL_MAX_BUDGET) general_params->adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); else if (params->ssids && params->ssids[0].ssid_len) general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); general_params->max_out_of_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); general_params->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); hb_timing = &scan_timing[params->hb_type]; general_params->max_out_of_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); general_params->suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->suspend_time); general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell; general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell; } struct iwl_mvm_scan_channel_segment { u8 start_idx; u8 end_idx; u8 first_channel_id; u8 last_channel_id; u8 channel_spacing_shift; u8 band; }; static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = { { .start_idx = 0, .end_idx = 13, .first_channel_id = 1, .last_channel_id = 
14, .channel_spacing_shift = 0, .band = PHY_BAND_24 }, { .start_idx = 14, .end_idx = 41, .first_channel_id = 36, .last_channel_id = 144, .channel_spacing_shift = 2, .band = PHY_BAND_5 }, { .start_idx = 42, .end_idx = 50, .first_channel_id = 149, .last_channel_id = 181, .channel_spacing_shift = 2, .band = PHY_BAND_5 }, { .start_idx = 51, .end_idx = 111, .first_channel_id = 1, .last_channel_id = 241, .channel_spacing_shift = 2, .band = PHY_BAND_6 }, }; static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band) { int i, index; if (!channel_id) return -EINVAL; for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) { const struct iwl_mvm_scan_channel_segment *ch_segment = &scan_channel_segments[i]; u32 ch_offset; if (ch_segment->band != band || ch_segment->first_channel_id > channel_id || ch_segment->last_channel_id < channel_id) continue; ch_offset = (channel_id - ch_segment->first_channel_id) >> ch_segment->channel_spacing_shift; index = scan_channel_segments[i].start_idx + ch_offset; if (index < IWL_SCAN_NUM_CHANNELS) return index; break; } return -EINVAL; } static const u8 p2p_go_friendly_chs[] = { 36, 40, 44, 48, 149, 153, 157, 161, 165, }; static const u8 social_chs[] = { 1, 6, 11 }; static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type, u8 ch_id, u8 band, u8 *ch_bitmap, size_t bitmap_n_entries) { int i; if (vif_type != NL80211_IFTYPE_P2P_DEVICE) return; for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) { if (p2p_go_friendly_chs[i] == ch_id) { int ch_idx, bitmap_idx; ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band); if (ch_idx < 0) return; bitmap_idx = ch_idx / 8; if (bitmap_idx >= bitmap_n_entries) return; ch_idx = ch_idx % 8; ch_bitmap[bitmap_idx] |= BIT(ch_idx); return; } } } static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id) { int i; u32 flags = 0; if (vif_type != NL80211_IFTYPE_P2P_DEVICE) goto out; for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) { if (p2p_go_friendly_chs[i] == ch_id) { flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT; break; } } if (flags) goto out; for (i = 0; i < ARRAY_SIZE(social_chs); i++) { if (social_chs[i] == ch_id) { flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT; break; } } out: return flags; } static void iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, struct ieee80211_channel **channels, int n_channels, u32 flags, struct iwl_scan_channel_cfg_umac *channel_cfg) { int i; for (i = 0; i < n_channels; i++) { channel_cfg[i].flags = cpu_to_le32(flags); channel_cfg[i].v1.channel_num = channels[i]->hw_value; if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { enum nl80211_band band = channels[i]->band; channel_cfg[i].v2.band = iwl_mvm_phy_band_from_nl80211(band); channel_cfg[i].v2.iter_count = 1; channel_cfg[i].v2.iter_interval = 0; } else { channel_cfg[i].v1.iter_count = 1; channel_cfg[i].v1.iter_interval = 0; } } } static void iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm, struct ieee80211_channel **channels, struct iwl_scan_channel_params_v4 *cp, int n_channels, u32 flags, enum nl80211_iftype vif_type) { u8 *bitmap = cp->adwell_ch_override_bitmap; size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap); int i; for (i = 0; i < n_channels; i++) { enum nl80211_band band = channels[i]->band; struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; cfg->flags = cpu_to_le32(flags); cfg->v2.channel_num = channels[i]->hw_value; cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; iwl_mvm_scan_ch_add_n_aps_override(vif_type, 
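The channel-to-index mapping implemented by iwl_mvm_scan_ch_and_band_to_idx() above is an offset-and-shift over the scan_channel_segments[] table; a few worked examples, with values that follow directly from the table entries shown:

/*
 * index = segment.start_idx +
 *         ((channel_id - segment.first_channel_id) >> channel_spacing_shift)
 *
 * 2.4 GHz channel 6:    0 + ((6   - 1)   >> 0) = 5
 * 5 GHz   channel 36:  14 + ((36  - 36)  >> 2) = 14
 * 5 GHz   channel 149: 42 + ((149 - 149) >> 2) = 42
 * 6 GHz   channel 37:  51 + ((37  - 1)   >> 2) = 60
 *
 * All of these stay below IWL_SCAN_NUM_CHANNELS (112), so they are valid
 * indices into the adwell channel override bitmap.
 */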
cfg->v2.channel_num, cfg->v2.band, bitmap, bitmap_n_entries); } } static void -iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, +iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm, struct ieee80211_channel **channels, - struct iwl_scan_channel_params_v6 *cp, + struct iwl_scan_channel_params_v7 *cp, int n_channels, u32 flags, - enum nl80211_iftype vif_type) + enum nl80211_iftype vif_type, u32 version) { int i; for (i = 0; i < n_channels; i++) { enum nl80211_band band = channels[i]->band; struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; u32 n_aps_flag = iwl_mvm_scan_ch_n_aps_flag(vif_type, channels[i]->hw_value); + u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band); cfg->flags = cpu_to_le32(flags | n_aps_flag); cfg->v2.channel_num = channels[i]->hw_value; - cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); if (cfg80211_channel_is_psc(channels[i])) cfg->flags = 0; cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; + if (version < 17) + cfg->v2.band = iwl_band; + else + cfg->flags |= cpu_to_le32((iwl_band << + IWL_CHAN_CFG_FLAGS_BAND_POS)); } } static void iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct iwl_scan_probe_params_v4 *pp) { int j, idex_s = 0, idex_b = 0; struct cfg80211_scan_6ghz_params *scan_6ghz_params = params->scan_6ghz_params; bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN); for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE; j++) { if (!params->ssids[j].ssid_len) continue; pp->short_ssid[idex_s] = cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, params->ssids[j].ssid_len)); if (hidden_supported) { pp->direct_scan[idex_s].id = WLAN_EID_SSID; pp->direct_scan[idex_s].len = params->ssids[j].ssid_len; memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid, params->ssids[j].ssid_len); } idex_s++; } /* * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz * collocated parameters. This might not be optimal, as this processing * does not (yet) correspond to the actual channels, so it is possible * that some entries would be left out. * * TODO: improve this logic. 
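The "short SSID" values filled in by iwl_mvm_umac_scan_fill_6g_chan_list() above are the bit-inverted CRC-32 used for 6 GHz collocated-AP discovery; a minimal sketch of the same computation outside the command-building path (assuming crc32_le() from lib/crc32, as used in the hunk):

#include <linux/crc32.h>

/* Sketch: short SSID exactly as computed in the loop above. */
static __le32 iwl_short_ssid(const u8 *ssid, u8 ssid_len)
{
	/* Inverted CRC-32 over the SSID bytes, little-endian on the wire. */
	return cpu_to_le32(~crc32_le(~0, ssid, ssid_len));
}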
*/ for (j = 0; j < params->n_6ghz_params; j++) { int k; /* First, try to place the short SSID */ if (scan_6ghz_params[j].short_ssid_valid) { for (k = 0; k < idex_s; k++) { if (pp->short_ssid[k] == cpu_to_le32(scan_6ghz_params[j].short_ssid)) break; } if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) { pp->short_ssid[idex_s++] = cpu_to_le32(scan_6ghz_params[j].short_ssid); } } /* try to place BSSID for the same entry */ for (k = 0; k < idex_b; k++) { if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) break; } if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); } } pp->short_ssid_num = idex_s; pp->bssid_num = idex_b; } -/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */ +/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */ static u32 -iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, +iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, u32 n_channels, struct iwl_scan_probe_params_v4 *pp, - struct iwl_scan_channel_params_v6 *cp, - enum nl80211_iftype vif_type) + struct iwl_scan_channel_params_v7 *cp, + enum nl80211_iftype vif_type, + u32 version) { int i; struct cfg80211_scan_6ghz_params *scan_6ghz_params = params->scan_6ghz_params; u32 ch_cnt; for (i = 0, ch_cnt = 0; i < params->n_channels; i++) { struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[ch_cnt]; u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; bool force_passive, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; + s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; /* * Avoid performing passive scan on non PSC channels unless the * scan is specifically a passive scan, i.e., no SSIDs * configured in the scan command. */ if (!cfg80211_channel_is_psc(params->channels[i]) && !params->n_6ghz_params && params->n_ssids) continue; cfg->v1.channel_num = params->channels[i]->hw_value; - cfg->v2.band = 2; - cfg->v2.iter_count = 1; - cfg->v2.iter_interval = 0; + if (version < 17) + cfg->v2.band = PHY_BAND_6; + else + cfg->flags |= cpu_to_le32(PHY_BAND_6 << + IWL_CHAN_CFG_FLAGS_BAND_POS); + + cfg->v5.iter_count = 1; + cfg->v5.iter_interval = 0; /* * The optimize the scan time, i.e., reduce the scan dwell time * on each channel, the below logic tries to set 3 direct BSSID * probe requests for each broadcast probe request with a short * SSID. * TODO: improve this logic */ n_used_bssid_entries = 3; for (j = 0; j < params->n_6ghz_params; j++) { + s8 tmp_psd_20; + if (!(scan_6ghz_params[j].channel_idx == i)) continue; + /* Use the highest PSD value allowed as advertised by + * APs for this channel + */ + tmp_psd_20 = scan_6ghz_params[j].psd_20; + if (tmp_psd_20 != + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED && + (psd_20 == + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED || + psd_20 < tmp_psd_20)) + psd_20 = tmp_psd_20; + found = false; unsolicited_probe_on_chan |= scan_6ghz_params[j].unsolicited_probe; psc_no_listen |= scan_6ghz_params[j].psc_no_listen; for (k = 0; k < pp->short_ssid_num; k++) { if (!scan_6ghz_params[j].unsolicited_probe && le32_to_cpu(pp->short_ssid[k]) == scan_6ghz_params[j].short_ssid) { /* Relevant short SSID bit set */ if (s_ssid_bitmap & BIT(k)) { found = true; break; } /* * Use short SSID only to create a new * iteration during channel dwell or in * case that the short SSID has a * matching SSID, i.e., scan for hidden * APs. 
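The PSD handling added in this hunk keeps the highest 20 MHz PSD advertised across all RNR entries for the channel, treating the reserved value as "no information". A standalone sketch of that accumulation (hypothetical helper with the same semantics as the in-loop code above):

/* Sketch: fold one RNR-reported PSD value into the running maximum. */
static s8 iwl_update_psd_20(s8 cur, s8 reported)
{
	if (reported == IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED)
		return cur;		/* this AP advertised nothing */
	if (cur == IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED || cur < reported)
		return reported;	/* keep the highest PSD seen so far */
	return cur;
}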
*/ if (n_used_bssid_entries >= 3) { s_ssid_bitmap |= BIT(k); s_max++; n_used_bssid_entries -= 3; found = true; break; } else if (pp->direct_scan[k].len) { s_ssid_bitmap |= BIT(k); s_max++; found = true; allow_passive = false; break; } } } if (found) continue; for (k = 0; k < pp->bssid_num; k++) { if (!memcmp(&pp->bssid_array[k], scan_6ghz_params[j].bssid, ETH_ALEN)) { if (!(bssid_bitmap & BIT(k))) { bssid_bitmap |= BIT(k); b_max++; n_used_bssid_entries++; } break; } } } if (cfg80211_channel_is_psc(params->channels[i]) && psc_no_listen) flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN; if (unsolicited_probe_on_chan) flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES; /* * In the following cases apply passive scan: * 1. Non fragmented scan: * - PSC channel with NO_LISTEN_FLAG on should be treated * like non PSC channel * - Non PSC channel with more than 3 short SSIDs or more * than 9 BSSIDs. * - Non PSC Channel with unsolicited probe response and * more than 2 short SSIDs or more than 6 BSSIDs. * - PSC channel with more than 2 short SSIDs or more than * 6 BSSIDs. * 3. Fragmented scan: * - PSC channel with more than 1 SSID or 3 BSSIDs. * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. * - Non PSC channel with unsolicited probe response and * more than 1 SSID or more than 3 BSSIDs. */ if (!iwl_mvm_is_scan_fragmented(params->type)) { if (!cfg80211_channel_is_psc(params->channels[i]) || flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) { force_passive = (s_max > 3 || b_max > 9); force_passive |= (unsolicited_probe_on_chan && (s_max > 2 || b_max > 6)); } else { force_passive = (s_max > 2 || b_max > 6); } } else if (cfg80211_channel_is_psc(params->channels[i])) { force_passive = (s_max > 1 || b_max > 3); } else { force_passive = (s_max > 2 || b_max > 6); force_passive |= (unsolicited_probe_on_chan && (s_max > 1 || b_max > 3)); } if ((allow_passive && force_passive) || (!(bssid_bitmap | s_ssid_bitmap) && !cfg80211_channel_is_psc(params->channels[i]))) flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE; else flags |= bssid_bitmap | (s_ssid_bitmap << 16); cfg->flags |= cpu_to_le32(flags); + if (version >= 17) + cfg->v5.psd_20 = psd_20; + ch_cnt++; } if (params->n_channels > ch_cnt) IWL_DEBUG_SCAN(mvm, "6GHz: reducing number channels: (%u->%u)\n", params->n_channels, ch_cnt); return ch_cnt; } static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { u8 flags = 0; flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER; if (iwl_mvm_scan_use_ebs(mvm, vif)) flags |= IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ if ((!iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->type)) || (iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->hb_type))) flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; /* * force EBS in case the scan is a fragmented and there is a need to take P2P * GO operation into consideration during scan operation. */ if ((!iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) || (iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->hb_type) && params->respect_p2p_go_hb)) { IWL_DEBUG_SCAN(mvm, "Respect P2P GO. 
Force EBS\n"); flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS; } return flags; } static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { struct ieee80211_supported_band *sband = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; u32 n_disabled, i; params->enable_6ghz_passive = false; if (params->scan_6ghz) return; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: Not supported by FW\n"); return; } /* 6GHz passive scan allowed only on station interface */ if (vif->type != NL80211_IFTYPE_STATION) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: not station interface\n"); return; } /* * 6GHz passive scan is allowed in a defined time interval following HW * reset or resume flow, or while not associated and a large interval * has passed since the last 6GHz passive scan. */ - if ((vif->bss_conf.assoc || + if ((vif->cfg.assoc || time_after(mvm->last_6ghz_passive_scan_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) && (time_before(mvm->last_reset_or_resume_time_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), jiffies))) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n", - vif->bss_conf.assoc ? "associated" : + vif->cfg.assoc ? "associated" : "timeout did not expire"); return; } /* not enough channels in the regular scan request */ if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: not enough channels\n"); return; } for (i = 0; i < params->n_ssids; i++) { if (!params->ssids[i].ssid_len) break; } /* not a wildcard scan, so cannot enable passive 6GHz scan */ if (i == params->n_ssids) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: no wildcard SSID\n"); return; } if (!sband || !sband->n_channels) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: no 6GHz channels\n"); return; } for (i = 0, n_disabled = 0; i < sband->n_channels; i++) { if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED)) n_disabled++; } /* * Not all the 6GHz channels are disabled, so no need for 6GHz passive * scan */ if (n_disabled != sband->n_channels) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: 6GHz channels enabled\n"); return; } /* all conditions to enable 6ghz passive scan are satisfied */ IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n"); params->enable_6ghz_passive = true; } static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, int type) { u16 flags = 0; /* * If no direct SSIDs are provided perform a passive scan. Otherwise, * if there is a single SSID which is not the broadcast SSID, assume * that the scan is intended for roaming purposes and thus enable Rx on * all chains to improve chances of hearing the beacons/probe responses. 
*/ if (params->n_ssids == 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; else if (params->n_ssids == 1 && params->ssids[0].ssid_len) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1; if (iwl_mvm_is_scan_fragmented(params->hb_type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2; if (params->pass_all) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL; else flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH; if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC; if (params->iter_notif || mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; if (IWL_MVM_ADWELL_ENABLE) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL; if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE; if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) && params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN; if (params->enable_6ghz_passive) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; if (iwl_mvm_is_oce_supported(mvm) && (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP | NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE | NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE; return flags; } static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, int type) { u8 flags = 0; if (iwl_mvm_is_cdb_supported(mvm)) { if (params->respect_p2p_go) flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB; if (params->respect_p2p_go_hb) flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; } else { if (params->respect_p2p_go) flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB | IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; } + if (params->scan_6ghz && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT)) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT; + return flags; } static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { u16 flags = 0; if (params->n_ssids == 0) flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; if (iwl_mvm_is_scan_fragmented(params->type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm) && iwl_mvm_is_scan_fragmented(params->hb_type)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; if (params->pass_all) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; else flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; if (!iwl_mvm_is_regular_scan(params)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; if (params->iter_notif) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->scan_iter_notif_enabled) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #endif if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE) flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL; /* * Extended dwell is relevant only for low band to start with, as it is * being used for social channles only (1, 
6, 11), so we can check * only scan type on low band also for CDB. */ if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && !iwl_mvm_is_scan_fragmented(params->type) && !iwl_mvm_is_adaptive_dwell_supported(mvm) && !iwl_mvm_is_oce_supported(mvm)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; if (iwl_mvm_is_oce_supported(mvm)) { if ((params->flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE; /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share * the same bit, we need to make sure that we use this bit here * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be * used. */ if ((params->flags & NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm))) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP; if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)) flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME; } return flags; } static int iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params, struct iwl_scan_umac_schedule *schedule, __le16 *delay) { int i; if (WARN_ON(!params->n_scan_plans || params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) return -EINVAL; for (i = 0; i < params->n_scan_plans; i++) { struct cfg80211_sched_scan_plan *scan_plan = &params->scan_plans[i]; schedule[i].iter_count = scan_plan->iterations; schedule[i].interval = cpu_to_le16(scan_plan->interval); } /* * If the number of iterations of the last scan plan is set to * zero, it should run infinitely. However, this is not always the case. * For example, when regular scan is requested the driver sets one scan * plan with one iteration. */ if (!schedule[params->n_scan_plans - 1].iter_count) schedule[params->n_scan_plans - 1].iter_count = 0xff; *delay = cpu_to_le16(params->delay); return 0; } static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_umac_chan_param *chan_param; void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; struct iwl_scan_req_umac_tail_v2 *tail_v2 = (struct iwl_scan_req_umac_tail_v2 *)sec_part; struct iwl_scan_req_umac_tail_v1 *tail_v1; struct iwl_ssid_ie *direct_scan; int ret = 0; u32 ssid_bitmap = 0; u8 channel_flags = 0; u16 gen_flags; struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); chan_param = iwl_mvm_get_scan_req_umac_channel(mvm); iwl_mvm_scan_umac_dwell(mvm, cmd, params); mvm->scan_uid_status[uid] = type; cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); cmd->general_flags = cpu_to_le16(gen_flags); if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; cmd->v8.general_flags2 = IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER; } cmd->scan_start_mac_id = scan_vif->id; if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) { channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set
fragmented ebs for fragmented scan on HB channels */ if (iwl_mvm_is_frag_ebs_supported(mvm)) { if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED || (!iwl_mvm_is_cdb_supported(mvm) && gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)) channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; } } chan_param->flags = channel_flags; chan_param->count = params->n_channels; ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule, &tail_v2->delay); if (ret) { mvm->scan_uid_status[uid] = 0; return ret; } if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { tail_v2->preq = params->preq; direct_scan = tail_v2->direct_scan; } else { tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part; iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq, ¶ms->preq); direct_scan = tail_v1->direct_scan; } iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap); iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd_data); return 0; } static void -iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, struct iwl_scan_general_params_v11 *gp, - u16 gen_flags, u8 gen_flags2) + u16 gen_flags, u8 gen_flags2, + u32 version) { struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_scan_umac_dwell_v11(mvm, gp, params); - IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n", + IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n", gen_flags, gen_flags2); gp->flags = cpu_to_le16(gen_flags); gp->flags2 = gen_flags2; if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; - gp->scan_start_mac_id = scan_vif->id; + if (version < 12) { + gp->scan_start_mac_or_link_id = scan_vif->id; + } else { + struct iwl_mvm_vif_link_info *link_info; + u8 link_id = 0; + + /* Use one of the active link (if any). In the future it would + * be possible that the link ID would be part of the scan + * request coming from upper layers so we would need to use it. 
+ */ + if (vif->active_links) + link_id = ffs(vif->active_links) - 1; + + link_info = scan_vif->link[link_id]; + if (!WARN_ON(!link_info)) + gp->scan_start_mac_or_link_id = link_info->fw_link_id; + } } static void iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params, struct iwl_scan_probe_params_v3 *pp) { pp->preq = params->preq; pp->ssid_num = params->n_ssids; iwl_scan_build_ssids(params, pp->direct_scan, NULL); } static void iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params, struct iwl_scan_probe_params_v4 *pp, u32 *bitmap_ssid) { pp->preq = params->preq; iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid); } static void iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, struct iwl_scan_channel_params_v4 *cp, u32 channel_cfg_flags) { cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->count = params->n_channels; cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp, params->n_channels, channel_cfg_flags, vif->type); } static void -iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, - struct iwl_scan_channel_params_v6 *cp, - u32 channel_cfg_flags) + struct iwl_scan_channel_params_v7 *cp, + u32 channel_cfg_flags, + u32 version) { cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->count = params->n_channels; cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; - iwl_mvm_umac_scan_cfg_channels_v6(mvm, params->channels, cp, + iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp, params->n_channels, channel_cfg_flags, - vif->type); + vif->type, version); if (params->enable_6ghz_passive) { struct ieee80211_supported_band *sband = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; u32 i; for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *channel = &sband->channels[i]; struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[cp->count]; if (!cfg80211_channel_is_psc(channel)) continue; - cfg->flags = 0; - cfg->v2.channel_num = channel->hw_value; - cfg->v2.band = PHY_BAND_6; - cfg->v2.iter_count = 1; - cfg->v2.iter_interval = 0; + cfg->v5.channel_num = channel->hw_value; + cfg->v5.iter_count = 1; + cfg->v5.iter_interval = 0; + + if (version < 17) { + cfg->flags = 0; + cfg->v2.band = PHY_BAND_6; + } else { + cfg->flags = cpu_to_le32(PHY_BAND_6 << + IWL_CHAN_CFG_FLAGS_BAND_POS); + cfg->v5.psd_20 = + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; + } cp->count++; } } } static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid) { struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd; struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params; int ret; u16 gen_flags; mvm->scan_uid_status[uid] = type; cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); - iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif, &scan_p->general_params, - gen_flags, 0); + gen_flags, 0, 12); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, &scan_p->periodic_params.delay); if (ret) return ret; iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params); 
iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif, &scan_p->channel_params, 0); return 0; } static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid, u32 version) { - struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd; - struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params; - struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params; + struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd; + struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params; + struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params; struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params; int ret; u16 gen_flags; u8 gen_flags2; u32 bitmap_ssid = 0; mvm->scan_uid_status[uid] = type; cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); if (version >= 15) gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type); else gen_flags2 = 0; - iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif, &scan_p->general_params, - gen_flags, gen_flags2); + gen_flags, gen_flags2, version); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, &scan_p->periodic_params.delay); if (ret) return ret; if (!params->scan_6ghz) { - iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, - &bitmap_ssid); - iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif, - &scan_p->channel_params, bitmap_ssid); - + iwl_mvm_scan_umac_fill_probe_p_v4(params, + &scan_p->probe_params, + &bitmap_ssid); + iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif, + &scan_p->channel_params, + bitmap_ssid, + version); return 0; } else { pb->preq = params->preq; } cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); - cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params, + cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params, params->n_channels, - pb, cp, vif->type); + pb, cp, vif->type, + version); if (!cp->count) { mvm->scan_uid_status[uid] = 0; return -EINVAL; } if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER; return 0; } static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid) { return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14); } static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid) { return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15); } +static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16); +} + +static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17); +} + static int iwl_mvm_num_scans(struct iwl_mvm *mvm) { return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); } static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) { bool unified_image = 
fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); /* This looks a bit arbitrary, but the idea is that if we run * out of possible simultaneous scans and the userspace is * trying to run a scan type that is already running, we * return -EBUSY. But if the userspace wants to start a * different type of scan, we stop the opposite type to make * space for the new request. The reason is backwards * compatibility with old wpa_supplicant that wouldn't stop a * scheduled scan before starting a normal scan. */ /* FW supports only a single periodic scan */ if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) && mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT)) return -EBUSY; if (iwl_mvm_num_scans(mvm) < mvm->max_scans) return 0; /* Use a switch, even though this is a bitmask, so that more * than one bit set will fall in default and we will warn. */ switch (type) { case IWL_MVM_SCAN_REGULAR: if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return -EBUSY; return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: /* For non-unified images, there's no need to stop * anything for net-detect since the firmware is * restarted anyway. This way, any sched scans that * were running will be restarted when we resume. */ if (!unified_image) return 0; /* If this is a unified image and we ran out of scans, * we need to stop something. Prefer stopping regular * scans, because the results are useless at this * point, and we should be able to keep running * another scheduled scan while suspended. */ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); /* Something is wrong if no scan was running but we * ran out of scans.
*/ fallthrough; default: WARN_ON(1); break; } return -EIO; } #define SCAN_TIMEOUT 30000 void iwl_mvm_scan_timeout_wk(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, scan_timeout_dwork); IWL_ERR(mvm, "regular scan timed out\n"); iwl_force_nmi(mvm->trans); } static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { if (iwl_mvm_is_cdb_supported(mvm)) { params->type = iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_2GHZ); params->hb_type = iwl_mvm_get_scan_type_band(mvm, vif, NL80211_BAND_5GHZ); } else { params->type = iwl_mvm_get_scan_type(mvm, vif); } } struct iwl_scan_umac_handler { u8 version; int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_scan_params *params, int type, int uid); }; #define IWL_SCAN_UMAC_HANDLER(_ver) { \ .version = _ver, \ .handler = iwl_mvm_scan_umac_v##_ver, \ } static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { /* set the newest version first to shorten the list traverse time */ + IWL_SCAN_UMAC_HANDLER(17), + IWL_SCAN_UMAC_HANDLER(16), IWL_SCAN_UMAC_HANDLER(15), IWL_SCAN_UMAC_HANDLER(14), IWL_SCAN_UMAC_HANDLER(12), }; +static void iwl_mvm_mei_scan_work(struct work_struct *wk) +{ + struct iwl_mei_scan_filter *scan_filter = + container_of(wk, struct iwl_mei_scan_filter, scan_work); + struct iwl_mvm *mvm = + container_of(scan_filter, struct iwl_mvm, mei_scan_filter); + struct iwl_mvm_csme_conn_info *info; + struct sk_buff *skb; + u8 bssid[ETH_ALEN]; + + mutex_lock(&mvm->mutex); + info = iwl_mvm_get_csme_conn_info(mvm); + memcpy(bssid, info->conn_info.bssid, ETH_ALEN); + mutex_unlock(&mvm->mutex); + + while ((skb = skb_dequeue(&scan_filter->scan_res))) { + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!memcmp(mgmt->bssid, bssid, ETH_ALEN)) + ieee80211_rx_irqsafe(mvm->hw, skb); + else + kfree_skb(skb); + } +} + +void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter) +{ + skb_queue_head_init(&mei_scan_filter->scan_res); + INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work); +} + +/* In case CSME is connected and has link protection set, this function will + * override the scan request to scan only the associated channel and only for + * the associated SSID. + */ +static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params) +{ + struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm); + struct iwl_mei_conn_info *conn_info; + struct ieee80211_channel *chan; + int scan_iters, i; + + if (!info) { + IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n"); + return; + } + + conn_info = &info->conn_info; + if (!info->conn_info.lp_state || !info->conn_info.ssid_len) + return; + + if (!params->n_channels || !params->n_ssids) + return; + + mvm->mei_scan_filter.is_mei_limited_scan = true; + + chan = ieee80211_get_channel(mvm->hw->wiphy, + ieee80211_channel_to_frequency(conn_info->channel, + conn_info->band)); + if (!chan) { + IWL_DEBUG_SCAN(mvm, + "Failed to get CSME channel (chan=%u band=%u)\n", + conn_info->channel, conn_info->band); + return; + } + + /* The mei filtered scan must find the AP, otherwise CSME will + * take the NIC ownership. Add several iterations on the channel to + * make the scan more robust. 
+ */ + scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels); + params->n_channels = scan_iters; + for (i = 0; i < scan_iters; i++) + params->channels[i] = chan; + + IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters); + + params->n_ssids = 1; + params->ssids[0].ssid_len = conn_info->ssid_len; + memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len); +} + static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_host_cmd *hcmd, struct iwl_mvm_scan_params *params, int type) { int uid, i, err; u8 scan_ver; lockdep_assert_held(&mvm->mutex); - memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd)); + memset(mvm->scan_cmd, 0, mvm->scan_cmd_size); + + iwl_mvm_mei_limited_scan(mvm, params); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd->id = SCAN_OFFLOAD_REQUEST_CMD; return iwl_mvm_scan_lmac(mvm, vif, params); } uid = iwl_mvm_scan_uid_by_status(mvm, 0); if (uid < 0) return uid; hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC); scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN); for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) { const struct iwl_scan_umac_handler *ver_handler = &iwl_scan_umac_handlers[i]; if (ver_handler->version != scan_ver) continue; return ver_handler->handler(mvm, vif, params, type, uid); } err = iwl_mvm_scan_umac(mvm, vif, params, type, uid); if (err) return err; return uid; } struct iwl_mvm_scan_respect_p2p_go_iter_data { struct ieee80211_vif *current_vif; bool p2p_go; enum nl80211_band band; }; static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* exclude the given vif */ if (vif == data->current_vif) return; - if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - mvmvif->phy_ctxt->id < NUM_PHY_CTX && - (data->band == NUM_NL80211_BANDS || - mvmvif->phy_ctxt->channel->band == data->band)) - data->p2p_go = true; + if (vif->type == NL80211_IFTYPE_AP && vif->p2p) { + u32 link_id; + + for (link_id = 0; + link_id < ARRAY_SIZE(mvmvif->link); + link_id++) { + struct iwl_mvm_vif_link_info *link = + mvmvif->link[link_id]; + + if (link && link->phy_ctxt->id < NUM_PHY_CTX && + (data->band == NUM_NL80211_BANDS || + link->phy_ctxt->channel->band == data->band)) { + data->p2p_go = true; + break; + } + } + } } static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum nl80211_band band) { struct iwl_mvm_scan_respect_p2p_go_iter_data data = { .current_vif = vif, .p2p_go = false, .band = band, }; if (!low_latency) return false; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_respect_p2p_go_iter, &data); return data.p2p_go; } static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band) { bool low_latency = iwl_mvm_low_latency_band(mvm, band); return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band); } static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { bool low_latency = iwl_mvm_low_latency(mvm); return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, NUM_NL80211_BANDS); } static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif) { if (iwl_mvm_is_cdb_supported(mvm)) { params->respect_p2p_go = iwl_mvm_get_respect_p2p_go_band(mvm, vif, 
NL80211_BAND_2GHZ); params->respect_p2p_go_hb = iwl_mvm_get_respect_p2p_go_band(mvm, vif, NL80211_BAND_5GHZ); } else { params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif); } } int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, .data = { mvm->scan_cmd, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_mvm_scan_params params = {}; int ret, uid; struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 }; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); return -EBUSY; } ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); if (ret) return ret; /* we should have failed registration if scan_cmd was NULL */ if (WARN_ON(!mvm->scan_cmd)) return -ENOMEM; if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) return -ENOBUFS; params.n_ssids = req->n_ssids; params.flags = req->flags; params.n_channels = req->n_channels; params.delay = 0; params.ssids = req->ssids; params.channels = req->channels; params.mac_addr = req->mac_addr; params.mac_addr_mask = req->mac_addr_mask; params.no_cck = req->no_cck; params.pass_all = true; params.n_match_sets = 0; params.match_sets = NULL; + ether_addr_copy(params.bssid, req->bssid); params.scan_plans = &scan_plan; params.n_scan_plans = 1; params.n_6ghz_params = req->n_6ghz_params; params.scan_6ghz_params = req->scan_6ghz_params; params.scan_6ghz = req->scan_6ghz; iwl_mvm_fill_scan_type(mvm, &params, vif); iwl_mvm_fill_respect_p2p_go(mvm, &params, vif); if (req->duration) params.iter_notif = true; iwl_mvm_build_scan_probe(mvm, vif, ies, &params); iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif); uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, IWL_MVM_SCAN_REGULAR); if (uid < 0) return uid; iwl_mvm_pause_tcm(mvm, false); ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { /* If the scan failed, it usually means that the FW was unable * to allocate the time events. Warn on it, but maybe we * should try to send the command again with different params. */ IWL_ERR(mvm, "Scan failed!
ret %d\n", ret); iwl_mvm_resume_tcm(mvm); mvm->scan_uid_status[uid] = 0; return ret; } IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); mvm->scan_status |= IWL_MVM_SCAN_REGULAR; mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); if (params.enable_6ghz_passive) mvm->last_6ghz_passive_scan_jiffies = jiffies; schedule_delayed_work(&mvm->scan_timeout_dwork, msecs_to_jiffies(SCAN_TIMEOUT)); return 0; } int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type) { struct iwl_host_cmd hcmd = { .len = { iwl_mvm_scan_size(mvm), }, .data = { mvm->scan_cmd, }, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_mvm_scan_params params = {}; int ret, uid; int i, j; bool non_psc_included = false; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); return -EBUSY; } ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; /* we should have failed registration if scan_cmd was NULL */ if (WARN_ON(!mvm->scan_cmd)) return -ENOMEM; params.n_ssids = req->n_ssids; params.flags = req->flags; params.n_channels = req->n_channels; params.ssids = req->ssids; params.channels = req->channels; params.mac_addr = req->mac_addr; params.mac_addr_mask = req->mac_addr_mask; params.no_cck = false; params.pass_all = iwl_mvm_scan_pass_all(mvm, req); params.n_match_sets = req->n_match_sets; params.match_sets = req->match_sets; + eth_broadcast_addr(params.bssid); if (!req->n_scan_plans) return -EINVAL; params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; iwl_mvm_fill_scan_type(mvm, &params, vif); iwl_mvm_fill_respect_p2p_go(mvm, &params, vif); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly * and to keep it aligned with UMAC scans (which only support * 16-bit delays), trim it down to 16-bits. */ if (req->delay > U16_MAX) { IWL_DEBUG_SCAN(mvm, "delay value is > 16-bits, set to max possible\n"); params.delay = U16_MAX; } else { params.delay = req->delay; } ret = iwl_mvm_config_sched_scan_profiles(mvm, req); if (ret) return ret; iwl_mvm_build_scan_probe(mvm, vif, ies, &params); /* for 6 GHZ band only PSC channels need to be added */ for (i = 0; i < params.n_channels; i++) { struct ieee80211_channel *channel = params.channels[i]; if (channel->band == NL80211_BAND_6GHZ && !cfg80211_channel_is_psc(channel)) { non_psc_included = true; break; } } if (non_psc_included) { params.channels = kmemdup(params.channels, sizeof(params.channels[0]) * params.n_channels, GFP_KERNEL); if (!params.channels) return -ENOMEM; for (i = j = 0; i < params.n_channels; i++) { if (params.channels[i]->band == NL80211_BAND_6GHZ && !cfg80211_channel_is_psc(params.channels[i])) continue; params.channels[j++] = params.channels[i]; } params.n_channels = j; } if (non_psc_included && !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { kfree(params.channels); return -ENOBUFS; } uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type); if (non_psc_included) kfree(params.channels); if (uid < 0) return uid; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { IWL_DEBUG_SCAN(mvm, "Sched scan request was sent successfully\n"); mvm->scan_status |= type; } else { /* If the scan failed, it usually means that the FW was unable * to allocate the time events. Warn on it, but maybe we * should try to send the command again with different params.
*/ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret); mvm->scan_uid_status[uid] = 0; mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; } void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + mvm->mei_scan_filter.is_mei_limited_scan = false; + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return; /* if the scan is already stopping, we don't need to notify mac80211 */ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = aborted, .scan_start_tsf = mvm->scan_start, }; - memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); + memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; cancel_delayed_work(&mvm->scan_timeout_dwork); iwl_mvm_resume_tcm(mvm); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; IWL_DEBUG_SCAN(mvm, "Scan completed, uid %u type %u, status %s, EBS status %s\n", uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", iwl_mvm_ebs_status_str(notif->ebs_status)); IWL_DEBUG_SCAN(mvm, "Last line %d, Last iteration %d, Time from last iteration %d\n", notif->last_schedule, notif->last_iter, __le32_to_cpu(notif->time_from_last_iter)); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; mvm->scan_start = le64_to_cpu(notif->start_tsf); IWL_DEBUG_SCAN(mvm, "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", notif->status, notif->scanned_channels); if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); ieee80211_sched_scan_results(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; } IWL_DEBUG_SCAN(mvm, "UMAC Scan iteration complete: scan started at %llu (TSF)\n", mvm->scan_start); } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { struct iwl_umac_scan_abort cmd = {}; int uid, ret; lockdep_assert_held(&mvm->mutex); /* We should always get a valid index here, because we already * checked that this type of scan was running in the generic * code. 
*/ uid = iwl_mvm_scan_uid_by_status(mvm, type); if (WARN_ON_ONCE(uid < 0)) return uid; cmd.uid = cpu_to_le32(uid); IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), 0, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; return ret; } static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), NULL, NULL); IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) ret = iwl_mvm_umac_scan_abort(mvm, type); else ret = iwl_mvm_lmac_scan_abort(mvm); if (ret) { IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; } return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); } -static int iwl_scan_req_umac_get_size(u8 scan_ver) +static size_t iwl_scan_req_umac_get_size(u8 scan_ver) { switch (scan_ver) { case 12: return sizeof(struct iwl_scan_req_umac_v12); case 14: case 15: - return sizeof(struct iwl_scan_req_umac_v15); + case 16: + case 17: + return sizeof(struct iwl_scan_req_umac_v17); } return 0; } -int iwl_mvm_scan_size(struct iwl_mvm *mvm) +size_t iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size, tail_size; u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN); base_size = iwl_scan_req_umac_get_size(scan_ver); if (base_size) return base_size; if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; else if (iwl_mvm_cdb_scan_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; else base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { if (iwl_mvm_is_scan_ext_chan_supported(mvm)) tail_size = sizeof(struct iwl_scan_req_umac_tail_v2); else tail_size = sizeof(struct iwl_scan_req_umac_tail_v1); return base_size + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + tail_size; } return sizeof(struct iwl_scan_req_lmac) + sizeof(struct iwl_scan_channel_cfg_lmac) * mvm->fw->ucode_capa.n_scan_channels + sizeof(struct iwl_scan_probe_req_v1); } /* * This function is used in nic restart flow, to inform mac80211 about scans * that was aborted by restart flow or by an assert. */ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int uid, i; uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); if (uid >= 0) { struct cfg80211_scan_info info = { .aborted = true, }; cancel_delayed_work(&mvm->scan_timeout_dwork); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_uid_status[uid] = 0; } uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); if (uid >= 0) { /* Sched scan will be restarted by mac80211 in * restart_hw, so do not report if FW is about to be * restarted. 
*/ if (!mvm->fw_restart) ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_STOPPING_REGULAR); if (uid >= 0) mvm->scan_uid_status[uid] = 0; uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_STOPPING_SCHED); if (uid >= 0) mvm->scan_uid_status[uid] = 0; /* We shouldn't have any UIDs still set. Loop over all the * UIDs to make sure there's nothing left there and warn if * any is found. */ for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } else { if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { struct cfg80211_scan_info info = { .aborted = true, }; cancel_delayed_work(&mvm->scan_timeout_dwork); ieee80211_scan_completed(mvm->hw, &info); } /* Sched scan will be restarted by mac80211 in * restart_hw, so do not report if FW is about to be * restarted. */ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->fw_restart) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } } } int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { int ret; if (!(mvm->scan_status & type)) return 0; if (iwl_mvm_is_radio_killed(mvm)) { ret = 0; goto out; } ret = iwl_mvm_scan_stop_wait(mvm, type); if (!ret) mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; out: /* Clear the scan status so the next scan requests will * succeed and mark the scan as stopping, so that the Rx * handler doesn't do anything, as the scan was stopped from * above. */ mvm->scan_status &= ~type; if (type == IWL_MVM_SCAN_REGULAR) { cancel_delayed_work(&mvm->scan_timeout_dwork); if (notify) { struct cfg80211_scan_info info = { .aborted = true, }; ieee80211_scan_completed(mvm->hw, &info); } } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; } diff --git a/sys/contrib/dev/iwlwifi/mvm/sf.c b/sys/contrib/dev/iwlwifi/mvm/sf.c index 693752d8f65b..30d4233595e8 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sf.c +++ b/sys/contrib/dev/iwlwifi/mvm/sf.c @@ -1,279 +1,285 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #include "mvm.h" /* For counting bound interfaces */ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; - u8 sta_vif_ap_sta_id; + struct ieee80211_sta *sta_vif_ap_sta; enum iwl_sf_state sta_vif_state; u32 num_active_macs; }; /* * Count bound interfaces which are not p2p, besides data->ignore_vif. * data->station_vif will point to one bound vif of type station, if exists. 
*/ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_active_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (vif == data->ignore_vif || !mvmvif->phy_ctxt || + if (vif == data->ignore_vif || !mvmvif->deflink.phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE) return; data->num_active_macs++; if (vif->type == NL80211_IFTYPE_STATION) { - data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; - if (vif->bss_conf.assoc) + data->sta_vif_ap_sta = mvmvif->ap_sta; + if (vif->cfg.assoc) data->sta_vif_state = SF_FULL_ON; else data->sta_vif_state = SF_INIT_OFF; } } /* * Aging and idle timeouts for the different possible scenarios * in default configuration */ static const __le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_BA_AGING_TIMER_DEF), cpu_to_le32(SF_BA_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) }, }; /* * Aging and idle timeouts for the different possible scenarios * in single BSS MAC configuration. */ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_MCAST_AGING_TIMER), cpu_to_le32(SF_MCAST_IDLE_TIMER) }, { cpu_to_le32(SF_BA_AGING_TIMER), cpu_to_le32(SF_BA_IDLE_TIMER) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER), cpu_to_le32(SF_TX_RE_IDLE_TIMER) }, }; static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, struct iwl_sf_cfg_cmd *sf_cmd, struct ieee80211_sta *sta) { int i, j, watermark; + u8 max_rx_nss = 0; + bool is_legacy = true; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); /* * If we are in association flow - check antenna configuration * capabilities of the AP station, and choose the watermark accordingly. */ if (sta) { - if (sta->deflink.ht_cap.ht_supported || - sta->deflink.vht_cap.vht_supported || - sta->deflink.he_cap.has_he) { - switch (sta->deflink.rx_nss) { + /* find the maximal NSS number among all links (if relevant) */ + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + link_sta = rcu_dereference(sta->link[link_id]); + if (!link_sta) + continue; + + if (link_sta->ht_cap.ht_supported || + link_sta->vht_cap.vht_supported || + link_sta->eht_cap.has_eht || + link_sta->he_cap.has_he) { + is_legacy = false; + max_rx_nss = max(max_rx_nss, link_sta->rx_nss); + } + } + rcu_read_unlock(); + + if (!is_legacy) { + switch (max_rx_nss) { case 1: watermark = SF_W_MARK_SISO; break; case 2: watermark = SF_W_MARK_MIMO2; break; default: watermark = SF_W_MARK_MIMO3; break; } } else { watermark = SF_W_MARK_LEGACY; } /* default watermark value for unassociated mode. 
*/ } else { watermark = SF_W_MARK_MIMO2; } sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark); for (i = 0; i < SF_NUM_SCENARIO; i++) { for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) { sf_cmd->long_delay_timeouts[i][j] = cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); } } if (sta) { BUILD_BUG_ON(sizeof(sf_full_timeout) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, sizeof(sf_full_timeout)); } else { BUILD_BUG_ON(sizeof(sf_full_timeout_def) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def)); } - } -static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, +static int iwl_mvm_sf_config(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { .state = cpu_to_le32(new_state), }; - struct ieee80211_sta *sta; int ret = 0; - if (mvm->cfg->disable_dummy_notification) - sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); - /* * If an associated AP sta changed its antenna configuration, the state * will remain FULL_ON but SF parameters need to be reconsidered. */ if (new_state != SF_FULL_ON && mvm->sf_state == new_state) return 0; switch (new_state) { case SF_UNINIT: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: - if (sta_id == IWL_MVM_INVALID_STA) { + if (!sta) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; } - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid station id\n"); - rcu_read_unlock(); - return -EINVAL; - } iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); - rcu_read_unlock(); break; case SF_INIT_OFF: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; default: WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", new_state); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, sizeof(sf_cmd), &sf_cmd); if (!ret) mvm->sf_state = new_state; return ret; } /* * Update Smart fifo: * Count bound interfaces that are not to be removed, ignoring p2p devices, * and set new state accordingly. */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; - u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, - .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; + struct ieee80211_sta *sta = NULL; /* * Ignore the call if we are in HW Restart flow, or if the handled * vif is a p2p device. 
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE)) return 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bound_iface_iterator, &data); /* If changed_vif exists and is not to be removed, add to the count */ if (changed_vif && !remove_vif) data.num_active_macs++; switch (data.num_active_macs) { case 0: /* If there are no active macs - change state to SF_INIT_OFF */ new_state = SF_INIT_OFF; break; case 1: if (remove_vif) { /* The one active mac left is of type station * and we filled the relevant data during iteration */ new_state = data.sta_vif_state; - sta_id = data.sta_vif_ap_sta_id; + sta = data.sta_vif_ap_sta; } else { if (WARN_ON(!changed_vif)) return -EINVAL; if (changed_vif->type != NL80211_IFTYPE_STATION) { new_state = SF_UNINIT; - } else if (changed_vif->bss_conf.assoc && + } else if (changed_vif->cfg.assoc && changed_vif->bss_conf.dtim_period) { mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); - sta_id = mvmvif->ap_sta_id; + sta = mvmvif->ap_sta; new_state = SF_FULL_ON; } else { new_state = SF_INIT_OFF; } } break; default: /* If there are multiple active macs - change to SF_UNINIT */ new_state = SF_UNINIT; } - return iwl_mvm_sf_config(mvm, sta_id, new_state); + + return iwl_mvm_sf_config(mvm, sta, new_state); } diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.c b/sys/contrib/dev/iwlwifi/mvm/sta.c index 71d9f2a2988a..705b9c9e967b 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sta.c +++ b/sys/contrib/dev/iwlwifi/mvm/sta.c @@ -1,4177 +1,4395 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2015, 2018-2022 Intel Corporation + * Copyright (C) 2012-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #if defined(__FreeBSD__) #include #endif #include "mvm.h" #include "sta.h" #include "rs.h" /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to * support both API versions. */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { if (iwl_mvm_has_new_rx_api(mvm) || fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return sizeof(struct iwl_mvm_add_sta_cmd); else return sizeof(struct iwl_mvm_add_sta_cmd_v7); } -static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, - enum nl80211_iftype iftype) +int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { int sta_id; u32 reserved_ids = 0; BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. 
*/ if (iftype != NL80211_IFTYPE_STATION) reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) { if (BIT(sta_id) & reserved_ids) continue; if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex))) return sta_id; } return IWL_MVM_INVALID_STA; } +/* Calculate the ampdu density and max size */ +u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + u32 *_agg_size) +{ + u32 agg_size = 0, mpdu_dens = 0; + + if (WARN_ON(!link_sta)) + return 0; + + /* Note that we always use only legacy & highest supported PPDUs, so + * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating + * the maximum A-MPDU size of various PPDU types in different bands, + * we only need to worry about the highest supported PPDU type here. + */ + + if (link_sta->ht_cap.ht_supported) { + agg_size = link_sta->ht_cap.ampdu_factor; + mpdu_dens = link_sta->ht_cap.ampdu_density; + } + + if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + /* overwrite HT values on 6 GHz */ + mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (link_sta->vht_cap.vht_supported) { + /* if VHT supported overwrite HT value */ + agg_size = u32_get_bits(link_sta->vht_cap.cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + } + + /* D6.0 10.12.2 A-MPDU length limit rules + * A STA indicates the maximum length of the A-MPDU preEOF padding + * that it can receive in an HE PPDU in the Maximum A-MPDU Length + * Exponent field in its HT Capabilities, VHT Capabilities, + * and HE 6 GHz Band Capabilities elements (if present) and the + * Maximum AMPDU Length Exponent Extension field in its HE + * Capabilities element + */ + if (link_sta->he_cap.has_he) + agg_size += + u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); + + if (link_sta->eht_cap.has_eht) + agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1], + IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK); + + /* Limit to max A-MPDU supported by FW */ + agg_size = min_t(u32, agg_size, + STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT); + + *_agg_size = agg_size; + return mpdu_dens; +} + +u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta) +{ + u8 uapsd_acs = 0; + + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd_acs |= BIT(AC_BK); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd_acs |= BIT(AC_BE); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd_acs |= BIT(AC_VI); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd_acs |= BIT(AC_VO); + + return uapsd_acs | uapsd_acs << 4; +} + /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .add_modify = update ? 
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT), .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), }; int ret; u32 status; u32 agg_size = 0, mpdu_dens = 0; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) add_sta_cmd.station_type = mvm_sta->sta_type; if (!update || (flags & STA_MODIFY_QUEUES)) { memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); if (!iwl_mvm_has_new_tx_api(mvm)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); if (flags & STA_MODIFY_QUEUES) add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } else { WARN_ON(flags & STA_MODIFY_QUEUES); } } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); fallthrough; case IEEE80211_STA_RX_BW_80: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); fallthrough; case IEEE80211_STA_RX_BW_40: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); fallthrough; case IEEE80211_STA_RX_BW_20: if (sta->deflink.ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } switch (sta->deflink.rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case 2: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); break; case 3 ... 8: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); break; } - switch (sta->smps_mode) { + switch (sta->deflink.smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); break; case IEEE80211_SMPS_STATIC: /* override NSS */ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case IEEE80211_SMPS_DYNAMIC: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); break; case IEEE80211_SMPS_OFF: /* nothing */ break; } - if (sta->deflink.ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported || + mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = sta->deflink.ht_cap.ampdu_density; - } - - if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - add_sta_cmd.station_flags_msk |= - cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | - STA_FLG_AGG_MPDU_DENS_MSK); - - mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); - agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - } else if (sta->deflink.vht_cap.vht_supported) { - agg_size = sta->deflink.vht_cap.cap & - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - agg_size >>= - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - } else if (sta->deflink.ht_cap.ht_supported) { - agg_size = sta->deflink.ht_cap.ampdu_factor; - } - - /* D6.0 10.12.2 A-MPDU length limit rules - * A STA indicates the maximum length of the A-MPDU preEOF padding - * that it can receive in an HE PPDU in the Maximum A-MPDU Length - * Exponent field in its HT Capabilities, VHT Capabilities, - * and HE 6 GHz Band Capabilities elements (if present) and the - * Maximum AMPDU Length Exponent Extension field in its HE - * Capabilities element - */ - if (sta->deflink.he_cap.has_he) - agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3], - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); - - /* Limit to max A-MPDU supported by FW */ - if (agg_size > 
(STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT)) - agg_size = (STA_FLG_MAX_AGG_SIZE_4M >> - STA_FLG_MAX_AGG_SIZE_SHIFT); - + mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink, + &mvm_sta->vif->bss_conf, + &agg_size); add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; - - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) - add_sta_cmd.uapsd_acs |= BIT(AC_BK); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) - add_sta_cmd.uapsd_acs |= BIT(AC_BE); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) - add_sta_cmd.uapsd_acs |= BIT(AC_VI); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - add_sta_cmd.uapsd_acs |= BIT(AC_VO); - add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; + add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta); add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; default: ret = -EIO; IWL_ERR(mvm, "ADD_STA failed\n"); break; } return ret; } static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) { struct iwl_mvm_baid_data *data = from_timer(data, t, session_timer); struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr; struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; unsigned long timeout; + unsigned int sta_id; rcu_read_lock(); ba_data = rcu_dereference(*rcu_ptr); if (WARN_ON(!ba_data)) goto unlock; if (!ba_data->timeout) goto unlock; timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); if (time_is_after_jiffies(timeout)) { mod_timer(&ba_data->session_timer, timeout); goto unlock; } /* Timer expired */ - sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); + sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */ + sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]); /* * sta should be valid unless the following happens: * The firmware asserts which triggers a reconfig flow, but * the reconfig fails before we set the pointer to sta into * the fw_id_to_mac_id pointer table. Mac80211 can't stop * A-MDPU and hence the timer continues to run. Then, the * timer expires and sta is NULL. 
*/ - if (!sta) + if (IS_ERR_OR_NULL(sta)) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid); unlock: rcu_read_unlock(); } /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, bool remove_queue) { struct iwl_mvm_add_sta_cmd cmd = {}; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u32 status; u8 sta_id; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return -EINVAL; } mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_disable_agg |= disable_agg_tids; cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_QUEUES; if (disable_agg_tids) cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; if (remove_queue) cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); rcu_read_unlock(); /* Notify FW of queue removal from the STA queues */ status = ADD_STA_SUCCESS; return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - u16 *queueptr, u8 tid) + int sta_id, u16 *queueptr, u8 tid) { int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) { if (mvm->sta_remove_requires_queue_remove) { u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); struct iwl_scd_queue_cfg_cmd remove_cmd = { .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), - .u.remove.queue = cpu_to_le32(queue), + .u.remove.sta_mask = cpu_to_le32(BIT(sta_id)), }; + if (tid == IWL_MAX_TID_COUNT) + tid = IWL_MGMT_TID; + + remove_cmd.u.remove.tid = cpu_to_le32(tid); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(remove_cmd), &remove_cmd); } else { ret = 0; } iwl_trans_txq_free(mvm->trans, queue); *queueptr = IWL_MVM_INVALID_QUEUE; return ret; } if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) return 0; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); cmd.action = mvm->queue_info[queue].tid_bitmap ? 
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) return 0; cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); + spin_lock_bh(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); return ret; } static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return -EINVAL; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) agg_tids |= BIT(tid); } spin_unlock_bh(&mvmsta->lock); return agg_tids; } /* * Remove a queue from a station's resources. * Note that this only marks as free. 
It DOESN'T delete a BA agreement, and * doesn't disable the queue */ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long disable_agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return 0; } mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + spin_lock_bh(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ spin_unlock_bh(&mvmsta->lock); rcu_read_unlock(); /* * The TX path may have been using this TXQ_ID from the tid_data, * so make sure it's no longer running so that we can safely reuse * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE * above, but nothing guarantees we've stopped using them. Thus, * without this, we could get to iwl_mvm_disable_txq() and remove * the queue while still sending frames to it. */ synchronize_net(); return disable_agg_tids; } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; u16 queue_tmp = queue; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; same_sta = sta_id == new_sta_id; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (WARN_ON(!mvmsta)) return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); + ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret); return ret; } /* If TXQ is allocated to another STA, update removal in FW */ if (!same_sta) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); return 0; } static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, unsigned long tfd_queue_mask, u8 ac) { int queue = 0; u8 ac_to_queue[IEEE80211_NUM_ACS]; int i; /* * This protects us against grabbing a queue that's being reconfigured * by the inactivity checker. 
*/ lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); /* See what ACs the existing queues for this STA have */ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { /* Only DATA queues can be shared */ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } /* * The queue to share is chosen only from DATA queues as follows (in * descending priority): * 1. An AC_BE queue * 2. Same AC queue * 3. Highest AC queue that is lower than new AC * 4. Any existing AC (there always is at least 1 DATA queue) */ /* Priority 1: An AC_BE queue */ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BE]; /* Priority 2: Same AC queue */ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[ac]; /* Priority 3a: If new AC is VO and VI exists - use VI */ else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 3b: No BE so only AC less than the new one is BK */ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BK]; /* Priority 4a: No BE nor BK - use VI if exists */ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 4b: No BE, BK nor VI - use VO if exists */ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VO]; /* Make sure queue found (or not) is legal */ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { IWL_ERR(mvm, "No DATA queues available to share\n"); return -ENOSPC; } return queue; } /* Re-configure the SCD for a queue that has already been configured */ static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = frame_limit, .sta_id = sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = fifo, .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), .tid = tid, }; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) return -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret); return ret; } /* * If a given queue has a higher AC than the TID stream that is being compared * to, the queue needs to be redirected to the lower AC. This function does that * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. */ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, int ac, int ssn, unsigned int wdg_timeout, bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. Check if this is needed * here. 
* Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with * value 3 and VO with value 0, so to check if ac X is lower than ac Y * we need to check if the numerical value of X is LARGER than of Y. */ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue); return 0; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ - txq->stopped = true; + set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); ret = -EIO; goto out; } /* Before redirecting the queue we need to de-activate it */ iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret); /* Make sure the SCD wrptr is correctly set before reconfiguring */ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); /* Update the TID "owner" of the queue */ mvm->queue_info[queue].txq_tid = tid; /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* Redirect to lower AC */ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); /* Update AC marking of the queue */ mvm->queue_info[queue].mac80211_ac = ac; /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: /* Continue using the queue */ - txq->stopped = false; + clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); return ret; } static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) { int i; lockdep_assert_held(&mvm->mutex); if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, "max queue %d >= num_of_queues (%d)", maxq, mvm->trans->trans_cfg->base_params->num_of_queues)) maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; return -ENOSPC; } -static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, - u8 sta_id, u8 tid, unsigned int timeout) +static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta) +{ + int max_size = IWL_DEFAULT_QUEUE_SIZE; + unsigned int link_id; + + /* this queue isn't used for traffic (cab_queue) */ + if (!sta) + return IWL_MGMT_QUEUE_SIZE; + + rcu_read_lock(); + + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + struct ieee80211_link_sta *link = + rcu_dereference(sta->link[link_id]); + + if (!link) + continue; + + /* support for 1k ba size */ + if (link->eht_cap.has_eht && + max_size < IWL_DEFAULT_QUEUE_SIZE_EHT) + max_size = IWL_DEFAULT_QUEUE_SIZE_EHT; + + /* support for 256 ba size */ + if (link->he_cap.has_he && + max_size < IWL_DEFAULT_QUEUE_SIZE_HE) + max_size = 
IWL_DEFAULT_QUEUE_SIZE_HE; + } + + rcu_read_unlock(); + return max_size; +} + +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u8 sta_id, u8 tid, unsigned int timeout) { int queue, size; + u32 sta_mask = 0; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } else { - struct ieee80211_sta *sta; - - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* this queue isn't used for traffic (cab_queue) */ - if (IS_ERR_OR_NULL(sta)) { - size = IWL_MGMT_QUEUE_SIZE; - } else if (sta->deflink.he_cap.has_he) { - /* support for 256 ba size */ - size = IWL_DEFAULT_QUEUE_SIZE_HE; - } else { - size = IWL_DEFAULT_QUEUE_SIZE; - } - - rcu_read_unlock(); + size = iwl_mvm_get_queue_size(sta); } /* take the min with bc tbl entries allowed */ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); /* size needs to be power of 2 values for calculating read/write pointers */ size = rounddown_pow_of_two(size); + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; + + for (link_id = 0; + link_id < ARRAY_SIZE(mvmsta->link); + link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link) + continue; + + sta_mask |= BIT(link->sta_id); + } + } else { + sta_mask |= BIT(sta_id); + } + + if (!sta_mask) + return -EINVAL; + do { - queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), + queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, tid, size, timeout); if (queue < 0) IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", - size, sta_id, tid, queue); + "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n", + size, sta_mask, tid, queue); size /= 2; } while (queue < 0 && size >= 16); if (queue < 0) return queue; - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", - queue, sta_id, tid); + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n", + queue, sta_mask, tid); return queue; } static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", - mvmsta->sta_id, tid); - queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); + mvmsta->deflink.sta_id, tid); + queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id, + tid, wdg_timeout); if (queue < 0) return queue; mvmtxq->txq_id = queue; mvm->tvqm_info[queue].txq_tid = tid; - mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; + mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; spin_unlock_bh(&mvmsta->lock); return 0; } static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u8 sta_id, u8 tid) { bool enable_queue = true; /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid); return false; } /* Update mappings and refcounts */ if 
(mvm->queue_info[queue].tid_bitmap) enable_queue = false; mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; mvm->queue_info[queue].txq_tid = tid; } if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = queue; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = cfg->frame_limit, .sta_id = cfg->sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = cfg->fifo, .aggregate = cfg->aggregate, .tid = cfg->tid, }; bool inc_ssn; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Send the enabling command if we need to */ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); if (inc_ssn) le16_add_cpu(&cmd.ssn, 1); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); return inc_ssn; } static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; int tid; unsigned long tid_bitmap; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; tid_bitmap = mvm->queue_info[queue].tid_bitmap; if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) return; /* Find any TID for queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); cmd.tid = tid; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) { IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, ret); return; } mvm->queue_info[queue].txq_tid = tid; IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid); } static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid = -1; unsigned long tid_bitmap; unsigned int wdg_timeout; int ssn; int ret = true; /* queue sharing is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; lockdep_assert_held(&mvm->mutex); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* Find TID for queue, and make sure it is the only one on the queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); if (tid_bitmap != BIT(tid)) { IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap); return; } IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true, iwl_mvm_txq_from_tid(sta, tid)); if (ret) { 
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; } /* If aggs should be turned back on - do it */ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { struct iwl_mvm_add_sta_cmd cmd = {0}; mvmsta->tid_disable_agg &= ~BIT(tid); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (!ret) { IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue); /* Mark queue intenally as aggregating again */ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; } /* * Remove inactive TIDs of a given queue. * If all queue TIDs are inactive - mark the queue as inactive * If only some the queue TIDs are inactive - unmap them from the queue * * Returns %true if all TIDs were removed and the queue could be reused. */ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int queue, unsigned long tid_bitmap, unsigned long *unshare_queues, unsigned long *changetid_queues) { - int tid; + unsigned int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) tid_bitmap &= ~BIT(tid); /* Don't mark as inactive any TID that has an active BA */ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) tid_bitmap &= ~BIT(tid); } /* If all TIDs in the queue are inactive - return it can be reused */ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); return true; } /* * If we are here, this is a shared queue and not all TIDs timed-out. * Remove the ones that did. */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { u16 q_tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); q_tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* * We need to take into account a situation in which a TXQ was * allocated to TID x, and then turned shared by adding TIDs y * and z. If TID x becomes inactive and is removed from the TXQ, * ownership must be given to one of the remaining TIDs. * This is mainly because if TID x continues - a new queue can't * be allocated for it as long as it is an owner of another TXQ. * * Mark this queue in the right bitmap, we'll send the command * to the firmware later. 
*/ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) set_bit(queue, changetid_queues); IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue); } IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* * There may be different TIDs with the same mac queues, so make * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue); set_bit(queue, unshare_queues); } return false; } /* * Check for inactivity - this includes checking if any queue * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. * * Returns the queue number, or -ENOSPC. */ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) { unsigned long now = jiffies; unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return -ENOSPC; rcu_read_lock(); /* we skip the CMD queue below by starting at 1 */ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid; unsigned long inactive_tid_bitmap = 0; unsigned long queue_tid_bitmap; queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; if (!queue_tid_bitmap) continue; /* If TXQ isn't in active use anyway - nothing to do here... */ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) continue; /* Check to see if there are inactive TIDs on this queue */ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) continue; inactive_tid_bitmap |= BIT(tid); } /* If all TIDs are active - finish check on this queue */ if (!inactive_tid_bitmap) continue; /* * If we are here - the queue hadn't been served recently and is * in use */ sta_id = mvm->queue_info[i].ra_sta_id; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * If the STA doesn't exist anymore, it isn't an error. It could * be that it was removed since getting the queues, and in this * case it should've inactivated its queues anyway. 
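A minimal standalone sketch of the TID-bitmap pruning idea used here, with an invented tid_still_busy() stand-in for the TFD/BA-agreement checks:

#include <stdbool.h>
#include <stdio.h>

#define MAX_TID 8	/* smaller than IWL_MAX_TID_COUNT, illustration only */

static bool tid_still_busy(int tid) { return tid == 2; }	/* stand-in */

int main(void)
{
	unsigned int queue_tids = 0x07;		/* TIDs 0, 1, 2 mapped to the queue */
	unsigned int inactive = queue_tids;	/* start with all as candidates */

	for (int tid = 0; tid < MAX_TID; tid++)
		if ((inactive & (1u << tid)) && tid_still_busy(tid))
			inactive &= ~(1u << tid);	/* keep busy TIDs mapped */

	if (inactive == queue_tids)
		printf("queue fully inactive, can be reused\n");
	else
		printf("unmap TIDs 0x%x, queue keeps 0x%x\n",
		       inactive, queue_tids & ~inactive);
	return 0;
}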
*/ if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues, &changetid_queues); if (ret && free_queue < 0) { queue_owner = sta; free_queue = i; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } /* Reconfigure queues requiring reconfiguration */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i); for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); rcu_read_unlock(); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) return ret; } return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; u16 queue_tmp; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; int ssn; unsigned long tfd_queue_mask; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); if (tid == IWL_MAX_TID_COUNT) { - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue); /* If no such queue is found, we'll use a DATA queue instead */ } if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) { queue = mvmsta->reserved_queue; mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); } if (queue < 0) - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try harder - perhaps kill an inactive queue */ - queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id); } /* No free queue - we'll have to share */ if (queue <= 0) { queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); if (queue > 0) { shared_queue = true; mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; } } /* * Mark TXQ as ready, even though it hasn't been fully configured yet, * to make sure no one else takes it. * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free.
*/ if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; /* This shouldn't happen - out of queues */ if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id); return queue; } /* * Actual en/disablement of aggregations is through the ADD_STA HCMD, * but for configuring the SCD to send A-MPDUs we need to mark the queue * as aggregatable. * Mark all DATA queues as allowing to be aggregated at some point */ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, - mvmsta->sta_id, tid); + mvmsta->deflink.sta_id, tid); if (shared_queue) { /* Disable any open aggs on this queue */ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); if (disable_agg_tids) { IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue); iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); } } inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); spin_lock_bh(&mvmsta->lock); /* * This looks racy, but it is not. We have only one packet for * this ra/tid in our Tx path since we stop the Qdisc when we * need to allocate a new TFD queue. */ if (inc_ssn) { mvmsta->tid_data[tid].seq_number += 0x10; ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; } mvmsta->tid_data[tid].txq_id = queue; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; if (mvmsta->reserved_queue == queue) mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); if (!shared_queue) { ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); if (ret) goto out_err; /* If we need to re-enable aggregations... */ if (queue_state == IWL_AGG_ON) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) goto out_err; } } else { /* Redirect queue, if needed */ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false, iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } return 0; out_err: queue_tmp = queue; - iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); + iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid); return ret; } void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; struct ieee80211_txq *txq; u8 tid; mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list); txq = container_of((void *)mvmtxq, struct ieee80211_txq, drv_priv); tid = txq->tid; if (tid == IEEE80211_NUM_TIDS) tid = IWL_MAX_TID_COUNT; /* * We can't really do much here, but if this fails we can't * transmit anyway - so just don't transmit the frame etc. * and let them back up ... we've tried our best to allocate * a queue in the function itself. 
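A minimal standalone sketch of why the sequence number above is advanced by 0x10: in the 802.11 Sequence Control field the fragment number occupies bits 0-3 and the sequence number bits 4-15, so adding one to the SN means adding 0x10 to the raw field. The macros mirror IEEE80211_SCTL_SEQ / IEEE80211_SEQ_TO_SN but are redefined locally:

#include <stdio.h>
#include <stdint.h>

#define SCTL_SEQ	0xFFF0u				/* sequence number mask */
#define SEQ_TO_SN(sc)	(((sc) & SCTL_SEQ) >> 4)

int main(void)
{
	uint16_t seq_ctrl = 0x0123;		/* SN = 0x012, fragment = 3 */
	uint16_t bumped   = seq_ctrl + 0x10;	/* advance the SN by one frame */

	printf("SN %u -> %u\n",
	       (unsigned)SEQ_TO_SN(seq_ctrl), (unsigned)SEQ_TO_SN(bumped));
	return 0;
}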
*/ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { + spin_lock_bh(&mvm->add_stream_lock); list_del_init(&mvmtxq->list); + spin_unlock_bh(&mvm->add_stream_lock); continue; } - list_del_init(&mvmtxq->list); + /* now we're ready, any remaining races/concurrency will be + * handled in iwl_mvm_mac_itxq_xmit() + */ + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + local_bh_disable(); + spin_lock(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + spin_unlock(&mvm->add_stream_lock); + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_iftype vif_type) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; /* run the general cleanup/unsharing of queues */ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try again - this time kick out a queue if needed */ - queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", - queue, mvmsta->sta_id); + queue, mvmsta->deflink.sta_id); return 0; } /* * In DQA mode, after a HW restart the queues should be allocated as before, in * order to avoid race conditions when there are shared queues. This function * does the re-mapping and queue allocation. * * Note that re-enabling aggregations isn't done in this function. 
*/ -static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) +void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .frame_limit = IWL_FRAME_LIMIT, }; /* Make sure reserved queue is still marked as such (if allocated) */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED; for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = tid_to_mac80211_ac[i]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", - mvm_sta->sta_id, i); - txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, + mvm_sta->deflink.sta_id, i); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta, + mvm_sta->deflink.sta_id, i, wdg); /* * on failures, just set it to IWL_MVM_INVALID_QUEUE * to try again later, we have no other good way of * failing here */ if (txq_id < 0) txq_id = IWL_MVM_INVALID_QUEUE; tid_data->txq_id = txq_id; /* * Since we don't set the seq number after reset, and HW * sets it now, FW reset will cause the seq num to start * at 0 again, so driver will need to update it * internally as well, so it keeps in sync with real val */ tid_data->seq_number = 0; } else { u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", - mvm_sta->sta_id, i, txq_id); + mvm_sta->deflink.sta_id, i, + txq_id); iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } } static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, u16 mac_id, u16 color) { struct iwl_mvm_add_sta_cmd cmd; int ret; u32 status = ADD_STA_SUCCESS; lockdep_assert_held(&mvm->mutex); memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && + if (iwl_mvm_has_new_station_api(mvm->fw) && sta->type == IWL_STA_AUX_ACTIVITY) cmd.mac_id_n_color = cpu_to_le32(mac_id); else cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) cmd.station_type = sta->type; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) memcpy(cmd.addr, addr, ETH_ALEN); ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; default: ret = -EIO; IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status); break; } return ret; } -int iwl_mvm_add_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +/* Initialize driver data of a new sta */ +int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, int sta_id, u8 sta_type) { struct 
iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; - int i, ret, sta_id; - bool sta_update = false; - unsigned int sta_flags = 0; + int i, ret = 0; lockdep_assert_held(&mvm->mutex); - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - sta_id = iwl_mvm_find_free_sta_id(mvm, - ieee80211_vif_type_p2p(vif)); - else - sta_id = mvm_sta->sta_id; - - if (sta_id == IWL_MVM_INVALID_STA) - return -ENOSPC; - - spin_lock_init(&mvm_sta->lock); + mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color); + mvm_sta->vif = vif; - /* if this is a HW restart re-alloc existing queues */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - struct iwl_mvm_int_sta tmp_sta = { - .sta_id = sta_id, - .type = mvm_sta->sta_type, - }; + /* for MLD sta_id(s) should be allocated for each link before calling + * this function + */ + if (!mvm->mld_api_is_used) { + if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; - /* - * First add an empty station since allocating - * a queue requires a valid station - */ - ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, - mvmvif->id, mvmvif->color); - if (ret) - goto err; + mvm_sta->deflink.sta_id = sta_id; + rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink); - iwl_mvm_realloc_queues_after_restart(mvm, sta); - sta_update = true; - sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; - goto update_fw; + if (!mvm->trans->trans_cfg->gen2) + mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + else + mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; } - mvm_sta->sta_id = sta_id; - mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color); - mvm_sta->vif = vif; - if (!mvm->trans->trans_cfg->gen2) - mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - else - mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; - mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; - mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK; + mvm_sta->sta_type = sta_type; - /* HW restart, don't assume the memory has been zeroed */ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ - mvm_sta->tfd_queue_msk = 0; - /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { - u16 seq = mvm_sta->tid_data[i].seq_number; - memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); - mvm_sta->tid_data[i].seq_number = seq; - /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; INIT_LIST_HEAD(&mvmtxq->list); atomic_set(&mvmtxq->tx_request, 0); } - mvm_sta->agg_tids = 0; - - if (iwl_mvm_has_new_rx_api(mvm) && - !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (iwl_mvm_has_new_rx_api(mvm)) { int q; dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; /* * Initialize all the last_seq values to 0xffff which can never * compare equal to the frame's seq_ctrl in the check in * iwl_mvm_is_dup() since the lower 4 bits are the fragment * number and fragmented packets don't reach that function. 
* * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. */ for (q = 0; q < mvm->trans->num_rx_queues; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mvm_sta->dup_data = dup_data; } if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) - goto err; + return ret; } /* * if rs is registered with mac80211, then "add station" will be handled * via the corresponding ops, otherwise need to notify rate scaling here */ if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); else - spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); + spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); + return 0; +} + +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret, sta_id; + bool sta_update = false; + unsigned int sta_flags = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + sta_id = iwl_mvm_find_free_sta_id(mvm, + ieee80211_vif_type_p2p(vif)); + else + sta_id = mvm_sta->deflink.sta_id; + + if (sta_id == IWL_MVM_INVALID_STA) + return -ENOSPC; + + spin_lock_init(&mvm_sta->lock); + + /* if this is a HW restart re-alloc existing queues */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_int_sta tmp_sta = { + .sta_id = sta_id, + .type = mvm_sta->sta_type, + }; + + /* First add an empty station since allocating + * a queue requires a valid station + */ + ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, + mvmvif->id, mvmvif->color); + if (ret) + goto err; + + iwl_mvm_realloc_queues_after_restart(mvm, sta); + sta_update = true; + sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; + goto update_fw; + } + + ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id, + sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK); + if (ret) + goto err; + update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); - mvmvif->ap_sta_id = sta_id; + WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA); + mvmvif->deflink.ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA); } } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); return 0; err: return ret; } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain) { struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.station_flags = drain ? 
cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", - mvmsta->sta_id); + mvmsta->deflink.sta_id); break; default: ret = -EIO; #if defined(__linux__) IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", - mvmsta->sta_id); + mvmsta->deflink.sta_id); #elif defined(__FreeBSD__) IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n", - mvmsta->sta_id, status); + mvmsta->deflink.sta_id, status); #endif break; } return ret; } /* * Remove a station from the FW table. Before sending the command to remove * the station validate that the station is indeed known to the driver (sanity * only). */ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { .sta_id = sta_id, }; int ret; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* Note: internal stations are marked as error values */ if (!sta) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd); if (ret) { IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); return ret; } return 0; } static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); + iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id, + &mvm_sta->tid_data[i].txq_id, i); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); + spin_lock_bh(&mvm->add_stream_lock); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + spin_unlock_bh(&mvm->add_stream_lock); } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; spin_unlock_bh(&mvm_sta->lock); if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) return ret; } return 0; } -int iwl_mvm_rm_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +/* Execute the common part for both MLD and non-MLD modes. 
+ * Returns if we're done with removing the station, either + * with error or success + */ +bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, int *ret) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_sta->link_id]; struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - u8 sta_id = mvm_sta->sta_id; - int ret; + struct iwl_mvm_link_sta *mvm_link_sta; + u8 sta_id; lockdep_assert_held(&mvm->mutex); -#if defined(__linux__) - if (iwl_mvm_has_new_rx_api(mvm)) - kfree(mvm_sta->dup_data); -#elif defined(__FreeBSD__) - if (iwl_mvm_has_new_rx_api(mvm)) { - kfree(mvm_sta->dup_data); - mvm_sta->dup_data = NULL; - } -#endif - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); - if (ret) - return ret; - if (iwl_mvm_has_new_tx_api(mvm)) { - ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); - } else { - u32 q_mask = mvm_sta->tfd_queue_msk; - - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - q_mask); - } - if (ret) - return ret; - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - - iwl_mvm_disable_sta_queues(mvm, vif, sta); + mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + sta_id = mvm_link_sta->sta_id; /* If there is a TXQ still marked as reserved - free it */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { u8 reserved_txq = mvm_sta->reserved_queue; enum iwl_mvm_queue_status *status; /* * If no traffic has gone through the reserved TXQ - it * is still marked as IWL_MVM_QUEUE_RESERVED, and * should be manually marked as free again */ status = &mvm->queue_info[reserved_txq].status; if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) - return -EINVAL; + sta_id, reserved_txq, *status)) { + *ret = -EINVAL; + return true; + } *status = IWL_MVM_QUEUE_FREE; } - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) { + if (vif->type == NL80211_IFTYPE_STATION) { /* if associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + if (vif->cfg.assoc) + return true; + + /* first remove remaining keys */ + iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0); /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_link->ap_sta_id = IWL_MVM_INVALID_STA; } /* * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. */ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } - /* - * Make sure that the tx response code sees the station as -EBUSY and - * calls the drain worker. 
- */ - spin_lock_bh(&mvm_sta->lock); - spin_unlock_bh(&mvm_sta->lock); + return false; +} + +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret; - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; + + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); + if (ret) + return ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + } else { + u32 q_mask = mvm_sta->tfd_queue_msk; + + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + q_mask); + } + if (ret) + return ret; + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + + iwl_mvm_disable_sta_queues(mvm, vif, sta); + + if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret)) + return ret; + + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL); return ret; } int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { int ret = iwl_mvm_rm_sta_common(mvm, sta_id); lockdep_assert_held(&mvm->mutex); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); return ret; } int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, - enum iwl_sta_type type) + u8 type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } sta->tfd_queue_msk = qmask; sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); sta->sta_id = IWL_MVM_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); - return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, + return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); } static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, int maccolor, u8 *addr, struct iwl_mvm_int_sta *sta, u16 *queue, int fifo) { int ret; /* Map queue to fifo - needs to happen before adding station */ if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_disable_txq(mvm, NULL, queue, + 
iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue, IWL_MAX_TID_COUNT); return ret; } /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { int txq; txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); if (txq < 0) { iwl_mvm_rm_sta_common(mvm, sta->sta_id); return txq; } *queue = txq; } return 0; } int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) { int ret; + u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 : + BIT(mvm->aux_queue); lockdep_assert_held(&mvm->mutex); /* Allocate aux station and assign to it the aux queue */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask, NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY); if (ret) return ret; /* * In CDB NICs we need to specify which lmac to use for aux activity * using the mac_id argument place to send lmac_id to the function */ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, &mvm->aux_sta, &mvm->aux_queue, IWL_MVM_TX_FIFO_MCAST); if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, NULL, &mvm->snif_sta, &mvm->snif_queue, IWL_MVM_TX_FIFO_BE); } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id, + &mvm->snif_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id, + &mvm->aux_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. 
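A minimal standalone sketch of the placeholder trick used when allocating internal stations above (a reserved fw_id_to_mac_id slot holds ERR_PTR(-EINVAL) so iteration does not stop there, yet nothing mistakes it for a real station); ERR_PTR/IS_ERR are re-created in miniature and the table is invented:

#include <stdio.h>
#include <stdint.h>

#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-4095)

struct sta { const char *name; };

int main(void)
{
	struct sta real = { "client" };
	void *fw_id_to_sta[4] = { &real, NULL, NULL, NULL };

	/* Reserve slot 2 for an internal station that has no data yet. */
	fw_id_to_sta[2] = ERR_PTR(-22 /* -EINVAL */);

	for (int i = 0; i < 4; i++) {
		if (!fw_id_to_sta[i])
			printf("slot %d: free\n", i);
		else if (IS_ERR(fw_id_to_sta[i]))
			printf("slot %d: reserved (internal)\n", i);
		else
			printf("slot %d: %s\n", i,
			       ((struct sta *)fw_id_to_sta[i])->name);
	}
	return 0;
}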
*/ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; int queue; int ret; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, - .sta_id = mvmvif->bcast_sta.sta_id, + .sta_id = mvmvif->deflink.bcast_sta.sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; lockdep_assert_held(&mvm->mutex); if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { queue = mvm->probe_queue; } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { queue = mvm->p2p_dev_queue; } else { WARN(1, "Missing required TXQ for adding bcast STA\n"); return -EINVAL; } bsta->tfd_queue_msk |= BIT(queue); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color); if (ret) return ret; /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { - queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, + queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); if (queue < 0) { iwl_mvm_rm_sta_common(mvm, bsta->sta_id); return queue; } if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) + vif->type == NL80211_IFTYPE_ADHOC) { + /* for queue management */ mvm->probe_queue = queue; - else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + /* for use in TX */ + mvmvif->deflink.mgmt_queue = queue; + } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_dev_queue = queue; + } + } else if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + /* set it for use in TX */ + mvmvif->deflink.mgmt_queue = mvm->probe_queue; } return 0; } -static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true); switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: queueptr = &mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: queueptr = &mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", vif->type); return; } queue = *queueptr; - iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id, + queueptr, IWL_MAX_TID_COUNT); + + if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) + mvmvif->deflink.mgmt_queue = mvm->probe_queue; + if (iwl_mvm_has_new_tx_api(mvm)) return; - WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); - mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); + WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue))); + mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue); } /* Send 
the FW a request to remove the station from it's internal data * structures, but DO NOT remove the entry from the local data structures. */ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_free_bcast_sta_queues(mvm, vif); - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta); } /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. */ int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_dealloc_bcast_sta(mvm, vif); return ret; } /* * Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. * * @mvm: the mvm component * @vif: the interface to which the multicast station is added */ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; + struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { .fifo = vif->type == NL80211_IFTYPE_AP ? IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, .sta_id = msta->sta_id, .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; /* * In IBSS, ieee80211_check_queues() sets the cab_queue to be * invalid, so make sure we use the queue we want. * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. 
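 * (For reference: IWL_MVM_DQA_GCAST_QUEUE is the statically allocated
 * GO/AP GCAST/BCAST queue - TXQ #3 in the DQA layout documented in
 * sta.h - so IBSS multicast traffic ends up on that fixed queue.)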
*/ if (vif->type == NL80211_IFTYPE_ADHOC) - mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { - iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, + &cfg, timeout); - msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); + msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); if (ret) goto err; /* * Enable cab queue after the ADD_STA command is sent. * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG * command with unknown station id, and for FW that doesn't support * station API since the cab queue is not included in the * tfd_queue_mask. */ if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, - 0, - timeout); + int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id, + 0, timeout); if (queue < 0) { ret = queue; goto err; } - mvmvif->cab_queue = queue; + mvmvif->deflink.cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, + &cfg, timeout); return 0; err: iwl_mvm_dealloc_int_sta(mvm, msta); return ret; } static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; int ret, size; u32 status; /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); /* * The fields assigned here are in the same location at the start * of the command, so we can do this union trick. */ u.cmd.common.key_flags = key_flags; u.cmd.common.key_offset = keyconf->hw_key_idx; u.cmd.common.sta_id = sta_id; size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); break; } return ret; } /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. 
*/ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true); - iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id, + &mvmvif->deflink.cab_queue, 0); - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { struct iwl_mvm_delba_data notif = { .baid = baid, }; iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, ¬if, sizeof(notif)); }; static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data) { int i; iwl_mvm_sync_rxq_del_ba(mvm, data->baid); for (i = 0; i < mvm->trans->num_rx_queues; i++) { int j; struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; spin_lock_bh(&reorder_buf->lock); if (likely(!reorder_buf->num_stored)) { spin_unlock_bh(&reorder_buf->lock); continue; } /* * This shouldn't happen in regular DELBA since the internal * delBA notification should trigger a release of all frames in * the reorder buffer. */ WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_purge(&entries[j].e.frames); /* * Prevent timer re-arm. This prevents a very far fetched case * where we timed out on the notification. There may be prior * RX frames pending in the RX queue before the notification * that might get processed between now and the actual deletion * and we would re-arm the timer although we are deleting the * reorder buffer. 
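 * (Illustration: a frame for this BAID that was already sitting in an RX
 * queue before the delBA sync can still be processed between now and the
 * actual deletion; setting 'removed' under the lock below lets that late
 * processing see the buffer is being torn down instead of re-arming the
 * timer we are about to delete.)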
*/ reorder_buf->removed = true; spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); } } static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, u16 ssn, u16 buf_size) { int i; for (i = 0; i < mvm->trans->num_rx_queues; i++) { struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; int j; reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; /* rx reorder timer */ timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&entries[j].e.frames); } } static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, + struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .add_modify = STA_MODE_MODIFY, }; u32 status; int ret; if (start) { cmd.add_immediate_ba_tid = tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); cmd.rx_ba_window = cpu_to_le16(buf_size); cmd.modify_mask = STA_MODIFY_ADD_BA_TID; } else { cmd.remove_immediate_ba_tid = tid; cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && !(status & IWL_ADD_STA_BAID_VALID_MASK))) return -EINVAL; return u32_get_bits(status, IWL_ADD_STA_BAID_MASK); case ADD_STA_IMMEDIATE_BA_FAILURE: IWL_WARN(mvm, "RX BA Session refused by fw\n"); return -ENOSPC; default: IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); return -EIO; } } static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, + struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { struct iwl_rx_baid_cfg_cmd cmd = { .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); if (start) { - cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.alloc.sta_id_mask = + cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); cmd.alloc.tid = tid; cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { - cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.remove.sta_id_mask = + cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); cmd.remove.tid = cpu_to_le32(tid); } ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), &cmd, &baid); if (ret) return ret; if (!start) { /* ignore firmware baid on remove */ baid = 0; } IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? 
"start" : "stopp"); if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) return -EINVAL; return baid; } -static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, +static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) - return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, + return iwl_mvm_fw_baid_op_cmd(mvm, sta, start, tid, ssn, buf_size, baid); - return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, + return iwl_mvm_fw_baid_op_sta(mvm, sta, start, tid, ssn, buf_size); } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_baid_data *baid_data = NULL; int ret, baid; u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID : IWL_MAX_BAID_OLD; lockdep_assert_held(&mvm->mutex); if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); return -ENOSPC; } if (iwl_mvm_has_new_rx_api(mvm) && start) { - u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ /* * The division below will be OK if either the cache line size * can be divided by the entry size (ALIGN will round up) or if * if the entry size can be divided by the cache line size, in * which case the ALIGN() will do nothing. */ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); #endif /* * Upward align the reorder buffer size to fill an entire cache * line for each queue, to avoid sharing cache lines between * different queues. */ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); /* * Allocate here so if allocation fails we can bail out early * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL); if (!baid_data) return -ENOMEM; /* * This division is why we need the above BUILD_BUG_ON(), * if that doesn't hold then this will not be right. 
*/ baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]); } if (iwl_mvm_has_new_rx_api(mvm) && !start) { baid = mvm_sta->tid_to_baid[tid]; } else { /* we don't really need it in this case */ baid = -1; } /* Don't send command to remove (start=0) BAID during restart */ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, + baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size, baid); if (baid < 0) { ret = baid; goto out_free; } if (start) { mvm->rx_ba_sessions++; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; baid_data->rcu_ptr = &mvm->baid_map[baid]; timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0); baid_data->mvm = mvm; baid_data->tid = tid; - baid_data->sta_id = mvm_sta->sta_id; + baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's * supposed to happen) and we will free the session data while * RX is being processed in parallel */ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", - mvm_sta->sta_id, tid, baid); + mvm_sta->deflink.sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { baid = mvm_sta->tid_to_baid[tid]; if (mvm->rx_ba_sessions > 0) /* check that restart flow didn't zero the counter */ mvm->rx_ba_sessions--; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) return -EINVAL; baid_data = rcu_access_pointer(mvm->baid_map[baid]); if (WARN_ON(!baid_data)) return -EINVAL; /* synchronize all rx queues so we can safely delete */ iwl_mvm_free_reorder(mvm, baid_data); - del_timer_sync(&baid_data->session_timer); + timer_shutdown_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); /* * After we've deleted it, do another queue sync * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently * running it won't find a new session in the old * BAID. It can find the NULL pointer for the BAID, * but we must not have it find a different session. 
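 * (Illustration: without this second sync, the BAID just freed could be
 * handed out again to a new BA session while a stale NSSN sync for the
 * old session is still in flight on some RX queue, and that stale
 * notification would then act on the new session's reorder buffer.)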
*/ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); } return 0; out_free: kfree(baid_data); return ret; } int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); if (start) { mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ mvm_sta->tid_disable_agg |= BIT(tid); } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); - cmd.sta_id = mvm_sta->sta_id; + cmd.sta_id = mvm_sta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.modify_mask = STA_MODIFY_QUEUES; cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: ret = -EIO; IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); break; } return ret; } const u8 tid_to_mac80211_ac[] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ }; static const u8 tid_to_ucode_ac[] = { AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO, }; int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && mvmsta->tid_data[tid].state != IWL_AGG_OFF) { IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", mvmsta->tid_data[tid].state); return -ENXIO; } lockdep_assert_held(&mvm->mutex); if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) { u8 ac = tid_to_mac80211_ac[tid]; ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); if (ret) return ret; } spin_lock_bh(&mvmsta->lock); /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { - ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } txq_id = ret; /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { ret = -ENXIO; IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1); goto out; } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto out; } IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id); tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; *ssn = tid_data->ssn; IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->ssn, + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->ssn, tid_data->next_reclaimed); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { tid_data->state = IWL_AGG_STARTING; ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; } else { tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; } out: spin_unlock_bh(&mvmsta->lock); return ret; } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .tid = tid, .frame_limit = buf_size, .aggregate = true, }; /* * When FW supports TLC_OFFLOAD, it also implements Tx aggregation * manager, so this function should never be called in this case. */ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); spin_lock_bh(&mvmsta->lock); ssn = tid_data->ssn; queue = tid_data->txq_id; tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); if (iwl_mvm_has_new_tx_api(mvm)) { /* * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() * would have failed, so if we are here there is no need to * allocate a queue. * However, if aggregation size is different than the default * size, the scheduler should be reconfigured. * We cannot do this with the new TX API, so return unsupported * for now, until it will be offloaded to firmware.. * Note that if SCD default value changes - this condition * should be updated as well. 
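 * For example: with the new TX API, a peer negotiating a 32-frame
 * reorder window is refused with -ENOTSUPP below, since the queue keeps
 * the default IWL_FRAME_LIMIT window (64 frames) and cannot currently
 * be reconfigured to a smaller one.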
*/ if (buf_size < IWL_FRAME_LIMIT) return -ENOTSUPP; ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; goto out; } cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; queue_status = mvm->queue_info[queue].status; /* Maybe there is no need to even alloc a queue... */ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) alloc_queue = false; /* * Only reconfig the SCD for the queue if the window size has * changed from current (become smaller) */ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { /* * If reconfiguring an existing queue, it first must be * drained */ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); return ret; } ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, - mvmsta->sta_id, tid, + mvmsta->deflink.sta_id, tid, buf_size, ssn); if (ret) { IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue); return ret; } } if (alloc_queue) iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ if (queue_status != IWL_MVM_QUEUE_SHARED) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; } /* No need to mark as reserved */ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, * our ucode doesn't allow for that and has a global limit * for each station. Therefore, use the minimum of all the * aggregation sessions and our default value. */ - mvmsta->max_agg_bufsize = - min(mvmsta->max_agg_bufsize, buf_size); - mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize, + buf_size); + mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit = + mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct iwl_mvm_tid_data *tid_data) { u16 txq_id = tid_data->txq_id; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return; /* * The TXQ is marked as reserved only if no traffic came through yet * This means no traffic has been sent on this TID (agg'd or not), so * we no longer have use for the queue. Since it hasn't even been * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. */ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; int err; /* * If mac80211 is cleaning its state, then say that we finished since * our state has been cleared anyway. 
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->state); mvmsta->agg_tids &= ~BIT(tid); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: /* * The agg session has been stopped before it was set up. This * can happen when the AddBA timer times out for example. */ /* No barriers since we are under mutex */ lockdep_assert_held(&mvm->mutex); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); tid_data->state = IWL_AGG_OFF; err = 0; break; default: IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n", - mvmsta->sta_id, tid, tid_data->state); + mvmsta->deflink.sta_id, tid, tid_data->state); IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id); err = -EINVAL; } spin_unlock_bh(&mvmsta->lock); return err; } int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; enum iwl_mvm_agg_state old_state; /* * First set the agg state to OFF to avoid calling * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. */ spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_has_new_tx_api(mvm)) { - if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, + if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id, BIT(tid))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_txq_empty(mvm->trans, txq_id); } else { if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); } iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); } return 0; } static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); /* Pick the unused key offset with the highest 'deleted' * counter. Every time a key is deleted, all the counters * are incremented and the one that was just deleted is * reset to zero. Thus, the highest counter is the one * that was deleted longest ago. Pick that one. 
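 * For example (hypothetical values): if offsets 1 and 4 are both unused
 * with fw_key_deleted[1] == 5 and fw_key_deleted[4] == 2, offset 1 is
 * returned, since five keys have been deleted since it was last freed.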
*/ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (test_bit(i, mvm->fw_key_table)) continue; if (mvm->fw_key_deleted[i] > max) { max = mvm->fw_key_deleted[i]; max_offs = i; } } if (max_offs < 0) return STA_KEY_IDX_INVALID; return max_offs; } static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return iwl_mvm_sta_from_mac80211(sta); /* * The device expects GTKs for station interfaces to be * installed as GTKs for the AP station. If we have no * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { - u8 sta_id = mvmvif->ap_sta_id; + mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then * be the AP ID, and no station was passed by mac80211. */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } return NULL; } static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) { int i; for (i = len - 1; i >= 0; i--) { if (pn1[i] > pn2[i]) return 1; if (pn1[i] < pn2[i]) return -1; } return 0; } static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; u64 pn = 0; int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); if (api_ver >= 2) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); memcpy((void *)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); pn = atomic64_read(&key->tx_pn); } else { u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; for (i = 0; i < 5; i++) u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); } memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); fallthrough; case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); fallthrough; case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); if (mfp) key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) i = 0; else i = -1; for (; i < IEEE80211_NUM_TIDS; i++) { struct ieee80211_key_seq seq = {}; u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; int rx_pn_len = 8; /* there's a hole at 2/3 in FW format depending on version */ int hole = api_ver >= 3 ? 
0 : 2; ieee80211_get_key_rx_seq(key, i, &seq); if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { rx_pn[0] = seq.tkip.iv16; rx_pn[1] = seq.tkip.iv16 >> 8; rx_pn[2 + hole] = seq.tkip.iv32; rx_pn[3 + hole] = seq.tkip.iv32 >> 8; rx_pn[4 + hole] = seq.tkip.iv32 >> 16; rx_pn[5 + hole] = seq.tkip.iv32 >> 24; } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { rx_pn = seq.hw.seq; rx_pn_len = seq.hw.seq_len; } else { rx_pn[0] = seq.ccmp.pn[0]; rx_pn[1] = seq.ccmp.pn[1]; rx_pn[2 + hole] = seq.ccmp.pn[2]; rx_pn[3 + hole] = seq.ccmp.pn[3]; rx_pn[4 + hole] = seq.ccmp.pn[4]; rx_pn[5 + hole] = seq.ccmp.pn[5]; } if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, rx_pn_len) > 0) memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, rx_pn_len); } if (api_ver >= 2) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { size = sizeof(u.cmd_v1); } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd); else ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); break; } return ret; } static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf, u8 sta_id, bool remove_key) { struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; /* verify the key details match the required command's expectations */ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || (keyconf->keyidx != 4 && keyconf->keyidx != 5 && keyconf->keyidx != 6 && keyconf->keyidx != 7) || (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) return -EINVAL; if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) return -EINVAL; igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { /* This is a valid situation for IGTK */ if (sta_id == IWL_MVM_INVALID_STA) return 0; igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; const u8 *pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); break; default: return -EINVAL; } memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | ((u64) pn[4] << 8) | ((u64) pn[3] << 16) | ((u64) pn[2] << 24) | ((u64) pn[1] << 32) | ((u64) pn[0] << 40)); } IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n", remove_key ? "removing" : "installing", keyconf->keyidx >= 6 ? 
"B" : "", keyconf->keyidx, igtk_cmd.sta_id); if (!iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { .ctrl_flags = igtk_cmd.ctrl_flags, .key_id = igtk_cmd.key_id, .sta_id = igtk_cmd.sta_id, .receive_seq_cnt = igtk_cmd.receive_seq_cnt }; memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk)); return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1); } return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd); } static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { - u8 sta_id = mvmvif->ap_sta_id; + mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return NULL; + return sta->addr; } return NULL; } static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset, bool mcast) { const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = mvmvif->deflink.mcast_sta.sta_id; } else { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; } if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + if (!addr) { + IWL_ERR(mvm, "Failed to find mac address\n"); + return -EINVAL; + } + /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset, mfp); } return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp); } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); if (vif->type != NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { /* Get the station id from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (!mvm_sta) { IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; /* * It is possible that the 'sta' parameter is NULL, and thus * there is a need to retrieve the sta from the local station * table. 
*/ if (!sta) { sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } } if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; } else { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = mvmvif->deflink.mcast_sta.sta_id; } if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); goto end; } /* If the key_offset is not pre-assigned, we need to find a * new offset to use. In normal cases, the offset is not * pre-assigned, but during HW_RESTART we want to reuse the * same indices, so we pass them when this function is called. * * In D3 entry, we need to hardcoded the indices (because the * firmware hardcodes the PTK offset to 0). In this case, we * need to make sure we don't overwrite the hw_key_idx in the * keyconf structure, because otherwise we cannot configure * the original ones back when resuming. */ if (key_offset == STA_KEY_IDX_INVALID) { key_offset = iwl_mvm_set_fw_key_idx(mvm); if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; keyconf->hw_key_idx = key_offset; } ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) goto end; /* * For WEP, the same key is used for multicast and unicast. Upload it * again, using the same key offset, and now pointing the other one * to the same key slot (offset). * If this fails, remove the original as well. */ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) && sta) { ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast); if (ret) { __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); goto end; } } __set_bit(key_offset, mvm->fw_key_table); end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta ? 
sta->addr : zero_addr, ret); return ret; } int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (mvm_sta) - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) - sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; + sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx); return -ENOENT; } /* track which key was deleted last */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (mvm->fw_key_deleted[i] < U8_MAX) mvm->fw_key_deleted[i]++; } mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; if (sta && !mvm_sta) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; } ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; /* delete WEP key twice to get rid of (now useless) offset */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); return ret; } void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); bool mfp = sta ? 
sta->mfp : false; rcu_read_lock(); mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (WARN_ON_ONCE(!mvm_sta)) goto unlock; - iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, + iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, mfp); unlock: rcu_read_unlock(); } void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, .sleep_tx_count = cpu_to_le16(cnt), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int tid, ret; unsigned long _tids = tids; /* convert TIDs to ACs - we don't support TSPEC so that's OK * Note that this field is reserved and unused by firmware not * supporting GO uAPSD, so it's safe to always do this. */ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); /* If we're releasing frames from aggregation or dqa queues then check * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { struct iwl_mvm_tid_data *tid_data; u16 n_queued; tid_data = &mvmsta->tid_data[tid]; n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { more_data = true; remaining = 0; break; } remaining -= n_queued; } sleep_tx_count = cnt - remaining; if (reason == IEEE80211_FRAME_RELEASE_UAPSD) mvmsta->sleep_tx_count = sleep_tx_count; spin_unlock_bh(&mvmsta->lock); cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); if (WARN_ON(cnt - remaining == 0)) { ieee80211_sta_eosp(sta); return; } } /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ iwl_trans_block_txq_ptrs(mvm->trans, true); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; u32 sta_id = 
le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, bool disable) + struct iwl_mvm_sta *mvmsta, + bool disable) { struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; + if (mvm->mld_api_is_used) { + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable); + return; + } + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + if (mvm->mld_api_is_used) { + iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable); + return; + } + spin_lock_bh(&mvm_sta->lock); if (mvm_sta->disable_tx == disable) { spin_unlock_bh(&mvm_sta->lock); return; } mvm_sta->disable_tx = disable; /* * If sta PS state is handled by mac80211, tell it to start/stop * queuing tx for this station. */ if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS)) ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); spin_unlock_bh(&mvm_sta->lock); } static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_int_sta *sta, bool disable) { u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = sta->sta_id, .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; int i; + if (mvm->mld_api_is_used) { + iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable); + return; + } + rcu_read_lock(); /* Block/unblock all the stations of the given mvmvif */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); if (IS_ERR_OR_NULL(sta)) continue; mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) continue; iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } rcu_read_unlock(); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return; /* Need to block/unblock also multicast station */ - if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, - &mvmvif->mcast_sta, disable); + &mvmvif->deflink.mcast_sta, + disable); /* * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ - if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, - &mvmvif->bcast_sta, disable); + &mvmvif->deflink.bcast_sta, + disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta; rcu_read_lock(); - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id); if (mvmsta) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); rcu_read_unlock(); } u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) { u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. */ if (mvm->trans->trans_cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } #if defined(__linux__) int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len) { int ret; u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_key_conf *keyconf; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + bool mld = iwl_mvm_has_mld_api(mvm->fw); + u32 type = mld ? 
STATION_TYPE_PEER : IWL_STA_LINK; ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, - NL80211_IFTYPE_UNSPECIFIED, - IWL_STA_LINK); + NL80211_IFTYPE_UNSPECIFIED, type); if (ret) return ret; - ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, - addr, sta, &queue, - IWL_MVM_TX_FIFO_BE); + if (mld) + ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr, + mvmvif->deflink.fw_link_id, + &queue, + IWL_MAX_TID_COUNT, + &wdg_timeout); + else + ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, + mvmvif->color, addr, sta, + &queue, + IWL_MVM_TX_FIFO_BE); if (ret) goto out; keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); if (!keyconf) { ret = -ENOBUFS; goto out; } keyconf->cipher = cipher; memcpy(keyconf->key, key, key_len); keyconf->keylen = key_len; + keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE; + + if (mld) { + /* The MFP flag is set according to the station mfp field. Since + * we don't have a station, set it manually. + */ + u32 key_flags = + iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) | + IWL_SEC_KEY_FLAG_MFP; + u32 sta_mask = BIT(sta->sta_id); + + ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf); + } else { + ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, + 0, NULL, 0, 0, true); + } - ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, - 0, NULL, 0, 0, true); kfree(keyconf); return 0; out: iwl_mvm_dealloc_int_sta(mvm, sta); return ret; } #endif void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 mac_id) + u32 id) { struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = { - .mac_id = cpu_to_le32(mac_id), + .id = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD), CMD_ASYNC, sizeof(cancel_channel_switch_cmd), &cancel_channel_switch_cmd); if (ret) IWL_ERR(mvm, "Failed to cancel the channel switch\n"); } diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.h b/sys/contrib/dev/iwlwifi/mvm/sta.h index d72f52230f22..637f42fcb753 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sta.h +++ b/sys/contrib/dev/iwlwifi/mvm/sta.h @@ -1,556 +1,670 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #ifndef __sta_h__ #define __sta_h__ #include #include #include #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ #include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */ #include "rs.h" struct iwl_mvm; struct iwl_mvm_vif; /** * DOC: DQA - Dynamic Queue Allocation -introduction * * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi * driver to allow dynamic allocation of queues on-demand, rather than allocate * them statically ahead of time. Ideally, we would like to allocate one queue * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2 * even if it also needs to send traffic to a sleeping STA1, without being * blocked by the sleeping station. 
* * Although the queues in DQA mode are dynamically allocated, there are still * some queues that are statically allocated: * TXQ #0 - command queue * TXQ #1 - aux frames * TXQ #2 - P2P device frames * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames * TXQ #4 - BSS DATA frames queue * TXQ #5-8 - Non-QoS and MGMT frames queue pool * TXQ #9 - P2P GO/SoftAP probe responses * TXQ #10-31 - DATA frames queue pool * The queues are dynamically taken from either the MGMT frames queue pool or * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every * queue. * * When a frame for a previously unseen RA/TID comes in, it needs to be deferred * until a queue is allocated for it, and only then can be TXed. Therefore, it * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames. * * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT * queues in the pool. If there is no longer a free MGMT queue to allocate, a * queue will be allocated from the DATA pool instead. Since QoS NDPs can create * a problem for aggregations, they too will use a MGMT queue. * * When adding a STA, a DATA queue is reserved for it so that it can TX from * it. If no such free queue exists for reserving, the STA addition will fail. * * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a * new RA/TID comes in for an existing STA, one of the STA's queues will become * shared and will serve more than the single TID (but always for the same RA!). * * When a RA/TID needs to become aggregated, no new queue is required to be * allocated, only mark the queue as aggregated via the ADD_STA command. Note, * however, that a shared queue cannot be aggregated, and only after the other * TIDs become inactive and are removed - only then can the queue be * reconfigured and become aggregated. * * When removing a station, its queues are returned to the pool for reuse. Here * we also need to make sure that we are synced with the worker thread that TXes * the deferred frames so we don't get into a situation where the queues are * removed and then the worker puts deferred frames onto the released queues or * tries to allocate new queues for a STA we don't need anymore. */ /** * DOC: station table - introduction * * The station table is a list of data structure that reprensent the stations. * In STA/P2P client mode, the driver will hold one station for the AP/ GO. * In GO/AP mode, the driver will have as many stations as associated clients. * All these stations are reflected in the fw's station table. The driver * keeps the fw's station table up to date with the ADD_STA command. Stations * can be removed by the REMOVE_STA command. * * All the data related to a station is held in the structure %iwl_mvm_sta * which is embed in the mac80211's %ieee80211_sta (in the drv_priv) area. * This data includes the index of the station in the fw, per tid information * (sequence numbers, Block-ack state machine, etc...). The stations are * created and deleted by the %sta_state callback from %ieee80211_ops. * * The driver holds a map: %fw_id_to_mac_id that allows to fetch a * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw * station index. That way, the driver is able to get the tid related data in * O(1) in time sensitive paths (Tx / Tx response / BA notification). These * paths are triggered by the fw, and the driver needs to get a pointer to the * %ieee80211 structure. 
This map helps to get that pointer quickly. */ /** * DOC: station table - locking * * As stated before, the station is created / deleted by mac80211's %sta_state * callback from %ieee80211_ops which can sleep. The next paragraph explains * the locking of a single stations, the next ones relates to the station * table. * * The station holds the sequence number per tid. So this data needs to be * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack * information (the state machine / and the logic that checks if the queues * were drained), so it also needs to be accessible from the Tx response flow. * In short, the station needs to be access from sleepable context as well as * from tasklets, so the station itself needs a spinlock. * * The writers of %fw_id_to_mac_id map are serialized by the global mutex of * the mvm op_mode. This is possible since %sta_state can sleep. * The pointers in this map are RCU protected, hence we won't replace the * station while we have Tx / Tx response / BA notification running. * * If a station is deleted while it still has packets in its A-MPDU queues, * then the reclaim flow will notice that there is no station in the map for * sta_id and it will dump the responses. */ /** * DOC: station table - internal stations * * The FW needs a few internal stations that are not reflected in * mac80211, such as broadcast station in AP / GO mode, or AUX sta for * scanning and P2P device (during the GO negotiation). * For these kind of stations we have %iwl_mvm_int_sta struct which holds the * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. * Usually the data for these stations is static, so no locking is required, * and no TID data as this is also not needed. * One thing to note, is that these stations have an ID in the fw, but not * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of * pointers from this mapping need to check that the value is not error * or NULL. * * Currently there is only one auxiliary station for scanning, initialized * on init. */ /** * DOC: station table - AP Station in STA mode * * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: * %ap_sta_id. To get the point to the corresponding %ieee80211_sta, * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove * the AP station from the fw before setting the MAC context as unassociated. * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is * removed by mac80211, but the station won't be removed in the fw until the * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. */ /** * DOC: station table - Drain vs. Flush * * Flush means that all the frames in the SCD queue are dumped regardless the * station to which they were sent. We do that when we disassociate and before * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. * This is useful when a client (if we are IBSS / GO or AP) disassociates. */ /** * DOC: station table - fw restart * * When the fw asserts, or we have any other issue that requires to reset the * driver, we require mac80211 to reconfigure the driver. Since the private * data of the stations is embed in mac80211's %ieee80211_sta, that data will * not be zeroed and needs to be reinitialized manually. 
* %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us * that we must not allocate a new sta_id but reuse the previous one. This * means that the stations being re-added after the reset will have the same * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id * map, since the stations aren't in the fw any more. Internal stations that * are not added by mac80211 will be re-added in the init flow that is called * after the restart: mac80211 call's %iwl_mvm_mac_start which calls to * %iwl_mvm_up. */ /** * DOC: AP mode - PS * * When a station is asleep, the fw will set it as "asleep". All frames on * shared queues (i.e. non-aggregation queues) to that station will be dropped * by the fw (%TX_STATUS_FAIL_DEST_PS failure code). * * AMPDUs are in a separate queue that is stopped by the fw. We just need to * let mac80211 know when there are frames in these queues so that it can * properly handle trigger frames. * * When a trigger frame is received, mac80211 tells the driver to send frames * from the AMPDU queues or sends frames to non-aggregation queues itself, * depending on which ACs are delivery-enabled and what TID has frames to * transmit. Note that mac80211 has all the knowledge since all the non-agg * frames are buffered / filtered, and the driver tells mac80211 about agg * frames). The driver needs to tell the fw to let frames out even if the * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count. * * When we receive a frame from that station with PM bit unset, the driver * needs to let the fw know that this station isn't asleep any more. This is * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the * station's wakeup. * * For a GO, the Service Period might be cut short due to an absence period * of the GO. In this (and all other cases) the firmware notifies us with the * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we * already sent to the device will be rejected again. * * See also "AP support for powersaving clients" in mac80211.h. */ /** * enum iwl_mvm_agg_state * * The state machine of the BA agreement establishment / tear down. * These states relate to a specific RA / TID. * * @IWL_AGG_OFF: aggregation is not used * @IWL_AGG_QUEUED: aggregation start work has been queued * @IWL_AGG_STARTING: aggregation are starting (between start and oper) * @IWL_AGG_ON: aggregation session is up * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the * HW queue to be empty from packets for this RA /TID. * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the * HW queue to be empty from packets for this RA /TID. */ enum iwl_mvm_agg_state { IWL_AGG_OFF = 0, IWL_AGG_QUEUED, IWL_AGG_STARTING, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, IWL_EMPTYING_HW_QUEUE_DELBA, }; /** * struct iwl_mvm_tid_data - holds the states for each RA / TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @lq_color: the color of the LQ command as it appears in tx response. * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. 
* @txq_id: Tx queue used by the BA session / DQA * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. * @tx_time: medium time consumed by this A-MPDU * @tpt_meas_start: time of the throughput measurements start, is reset every HZ * @tx_count_last: number of frames transmitted during the last second * @tx_count: counts the number of frames transmitted since the last reset of * tpt_meas_start */ struct iwl_mvm_tid_data { u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ u32 rate_n_flags; u8 lq_color; bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; u16 tx_time; unsigned long tpt_meas_start; u32 tx_count_last; u32 tx_count; }; struct iwl_mvm_key_pn { struct rcu_head rcu_head; struct { u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN]; } ____cacheline_aligned_in_smp q[]; }; /** * enum iwl_mvm_rxq_notif_type - Internal message identifier * * @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, IWL_MVM_RXQ_NSSN_SYNC, }; /** * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent * in &iwl_rxq_sync_cmd. Should be DWORD aligned. * FW is agnostic to the payload, so there are no endianity requirements. * * @type: value from &iwl_mvm_rxq_notif_type * @sync: ctrl path is waiting for all notifications to be received * @cookie: internal cookie to identify old notifications * @data: payload */ struct iwl_mvm_internal_rxq_notif { u16 type; u16 sync; u32 cookie; u8 data[]; } __packed; struct iwl_mvm_delba_data { u32 baid; } __packed; struct iwl_mvm_nssn_sync_data { u32 baid; u32 nssn; } __packed; /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection * @last_sub_frame: last subframe packet */ struct iwl_mvm_rxq_dup_data { __le16 last_seq[IWL_MAX_TID_COUNT + 1]; u8 last_sub_frame[IWL_MAX_TID_COUNT + 1]; } ____cacheline_aligned_in_smp; +/** + * struct iwl_mvm_link_sta - link specific parameters of a station + * @rcu_head: used for freeing the data + * @sta_id: the index of the station in the fw + * @lq_sta: holds rate scaling data, either for the case when RS is done in + * the driver - %rs_drv or in the FW - %rs_fw. + * @orig_amsdu_len: used to save the original amsdu_len when it is changed via + * debugfs. If it's set to 0, it means that it is it's not set via + * debugfs. + * @avg_energy: energy as reported by FW statistics notification + */ +struct iwl_mvm_link_sta { + struct rcu_head rcu_head; + u32 sta_id; + union { + struct iwl_lq_sta_rs_fw rs_fw; + struct iwl_lq_sta rs_drv; + } lq_sta; + + u16 orig_amsdu_len; + + u8 avg_energy; +}; + /** * struct iwl_mvm_sta - representation of a station in the driver - * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. 
- * @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type + * @authorized: indicates station is authorized * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. * @tid_to_baid: a simple map of TID to baid - * @lq_sta: holds rate scaling data, either for the case when RS is done in - * the driver - %rs_drv or in the FW - %rs_fw. + * @vif: a vif pointer * @reserved_queue: the queue reserved for this STA for DQA purposes * Every STA has is given one reserved queue to allow it to operate. If no * such queue can be guaranteed, the STA addition will fail. * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. * In case TLC offload is not active it is either 0xFFFF or 0. * @max_amsdu_len: max AMSDU length - * @orig_amsdu_len: used to save the original amsdu_len when it is changed via - * debugfs. If it's set to 0, it means that it is it's not set via - * debugfs. * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) + * @sleeping: sta sleep transitions in power management * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. This is useful in case the queue * gets empty before all the frames were sent, which can happen when * we are sending frames from an AMPDU queue and there was a hole in * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). * @pairwise_cipher: used to feed iwlmei upon authorization + * @deflink: the default link station, for non-MLO STA, all link specific data + * is accessed via deflink (or link[0]). For MLO, it will hold data of the + * first added link STA. + * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO, + * link[0] points to deflink and link[link_id] is allocated when new link + * sta is added. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that * space. 
* */ struct iwl_mvm_sta { - u32 sta_id; u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; - u16 max_agg_bufsize; - enum iwl_sta_type sta_type; + u8 sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; + bool authorized; spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; u8 tid_to_baid[IWL_MAX_TID_COUNT]; - union { - struct iwl_lq_sta_rs_fw rs_fw; - struct iwl_lq_sta rs_drv; - } lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; bool disable_tx; u16 amsdu_enabled; u16 max_amsdu_len; - u16 orig_amsdu_len; bool sleeping; u8 agg_tids; u8 sleep_tx_count; - u8 avg_energy; u8 tx_ant; u32 pairwise_cipher; + + struct iwl_mvm_link_sta deflink; + struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); static inline struct iwl_mvm_sta * iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) { return (void *)sta->drv_priv; } /** * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or * broadcast) * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @type: station type * @tfd_queue_msk: the tfd queues used by the station */ struct iwl_mvm_int_sta { u32 sta_id; - enum iwl_sta_type type; + u8 type; u32 tfd_queue_msk; }; /** * Send the STA info to the FW. * * @mvm: the iwl_mvm* to use * @sta: the STA * @update: this is true if the FW is being updated about a STA it already knows * about. Otherwise (if this is a new STA), this should be false. * @flags: if update==true, this marks what is being changed via ORs of values * from enum iwl_sta_modify_flag. Otherwise, this is ignored. 
*/ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags); +int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype); +int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, int sta_id, u8 sta_type); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } +void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta); +bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, int *ret); int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id); int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset); int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf); void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id); int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, - enum iwl_sta_type type); + u8 type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 
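The DOC comments above describe fw_id_to_mac_id as the O(1) path from a firmware station index back to the driver station. A minimal sketch of that lookup, mirroring the pattern tdls.c uses below while holding the mvm mutex (illustrative only; the example_ name is not part of the patch):

static struct iwl_mvm_sta *
example_sta_from_fw_id(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;

	/* writers of the map are serialized by mvm->mutex; readers in the
	 * Tx/Rx paths use plain RCU instead
	 */
	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(sta))	/* internal STAs are ERR_PTR(-EINVAL) */
		return NULL;

	return iwl_mvm_sta_from_mac80211(sta);
}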
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue); int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain); void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable); void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable); void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); + void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); #if defined(__linux__) int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len); #endif void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 mac_id); + u32 id); +/* Queues */ +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u8 sta_id, u8 tid, unsigned int timeout); + +/* Sta state */ +/** + * struct iwl_mvm_sta_state_ops - callbacks for the sta_state() ops + * + * Since the only difference between both MLD and + * non-MLD versions of sta_state() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_mac_sta_state_common(). + * + * @add_sta: pointer to the function that adds a new sta + * @update_sta: pointer to the function that updates a sta + * @rm_sta: pointer to the functions that removes a sta + * @mac_ctxt_changed: pointer to the function that handles a change in mac ctxt + */ +struct iwl_mvm_sta_state_ops { + int (*add_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*update_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*rm_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*mac_ctxt_changed)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off); +}; + +int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state, + const struct iwl_mvm_sta_state_ops *callbacks); + +/* New MLD STA related APIs */ +/* STA */ +int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id); +int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); 
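To show how the iwl_mvm_sta_state_ops indirection introduced above is meant to be used, here is a hedged sketch of the two callback tables the MLD and non-MLD sta_state() implementations would pass to iwl_mvm_mac_sta_state_common(). The real tables live outside this header (in the mac80211 ops files) and may differ; the example_ names are hypothetical, and .mac_ctxt_changed is left out because the corresponding helpers are not shown in this diff.

static const struct iwl_mvm_sta_state_ops example_non_mld_callbacks = {
	.add_sta	= iwl_mvm_add_sta,
	.update_sta	= iwl_mvm_update_sta,
	.rm_sta		= iwl_mvm_rm_sta,
	/* .mac_ctxt_changed omitted: helper not shown in this diff */
};

static const struct iwl_mvm_sta_state_ops example_mld_callbacks = {
	.add_sta	= iwl_mvm_mld_add_sta,
	.update_sta	= iwl_mvm_mld_update_sta,
	.rm_sta		= iwl_mvm_mld_rm_sta,
	/* .mac_ctxt_changed omitted: helper not shown in this diff */
};

/* Each sta_state() flavour would then simply call:
 *	iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state,
 *				     &example_mld_callbacks);
 * with its own table, keeping the state-machine logic in one place.
 */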
+int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); +int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links); +u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int filter_link_id); +int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id, + u16 *queue, u8 tid, + unsigned int *_wdg_timeout); + +/* Queues */ +void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable); +void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + bool disable); +void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable); #endif /* __sta_h__ */ diff --git a/sys/contrib/dev/iwlwifi/mvm/tdls.c b/sys/contrib/dev/iwlwifi/mvm/tdls.c index 9cf587db3d48..d9cb1c9f3299 100644 --- a/sys/contrib/dev/iwlwifi/mvm/tdls.c +++ b/sys/contrib/dev/iwlwifi/mvm/tdls.c @@ -1,680 +1,680 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2020, 2022 Intel Corporation */ #if defined(__FreeBSD__) #include #endif #include #include "mvm.h" #include "time-event.h" #include "iwl-io.h" #include "iwl-prph.h" #define TU_TO_US(x) (x * 1024) #define TU_TO_MS(x) (TU_TO_US(x) / 1000) void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); ieee80211_tdls_oper_request(mvmsta->vif, sta->addr, NL80211_TDLS_TEARDOWN, WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED, GFP_KERNEL); } } int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; int count = 0; int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta || IS_ERR(sta) || !sta->tdls) continue; if (vif) { mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; } count++; } return count; } static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_rx_packet *pkt; struct iwl_tdls_config_res *resp; struct iwl_tdls_config_cmd tdls_cfg_cmd = {}; struct iwl_host_cmd cmd = { .id = TDLS_CONFIG_CMD, .flags = CMD_WANT_SKB, .data = { &tdls_cfg_cmd, }, .len = { sizeof(struct iwl_tdls_config_cmd), }, }; struct ieee80211_sta *sta; int ret, i, cnt; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); tdls_cfg_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID; tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */ /* for now the Tx cmd is empty and unused */ /* populate TDLS peer data */ cnt = 0; for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 
lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta) || !sta->tdls) continue; tdls_cfg_cmd.sta_info[cnt].sta_id = i; tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid = IWL_MVM_TDLS_FW_TID; tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0); tdls_cfg_cmd.sta_info[cnt].is_initiator = cpu_to_le32(sta->tdls_initiator ? 1 : 0); cnt++; } tdls_cfg_cmd.tdls_peer_count = cnt; IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt); ret = iwl_mvm_send_cmd(mvm, &cmd); if (WARN_ON_ONCE(ret)) return; pkt = cmd.resp_pkt; WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); /* we don't really care about the response at this point */ iwl_free_resp(&cmd); } void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added) { int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif); /* when the first peer joins, send a power update first */ if (tdls_sta_cnt == 1 && sta_added) iwl_mvm_power_update_mac(mvm); /* Configure the FW with TDLS peer info only if TDLS channel switch * capability is set. * TDLS config data is used currently only in TDLS channel switch code. * Supposed to serve also TDLS buffer station which is not implemneted * yet in FW*/ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) iwl_mvm_tdls_config(mvm, vif); /* when the last peer leaves, send a power update last */ if (tdls_sta_cnt == 0 && !sta_added) iwl_mvm_power_update_mac(mvm); } void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; /* Protect the session to hear the TDLS setup response on the channel */ mutex_lock(&mvm->mutex); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, duration, duration, true); else iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); mutex_unlock(&mvm->mutex); } static const char * iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state) { switch (state) { case IWL_MVM_TDLS_SW_IDLE: return "IDLE"; case IWL_MVM_TDLS_SW_REQ_SENT: return "REQ SENT"; case IWL_MVM_TDLS_SW_RESP_RCVD: return "RESP RECEIVED"; case IWL_MVM_TDLS_SW_REQ_RCVD: return "REQ RECEIVED"; case IWL_MVM_TDLS_SW_ACTIVE: return "ACTIVE"; } return NULL; } static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, enum iwl_mvm_tdls_cs_state state) { if (mvm->tdls_cs.state == state) return; IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n", iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state), iwl_mvm_tdls_cs_state_str(state)); mvm->tdls_cs.state = state; /* we only send requests to our switching peer - update sent time */ if (state == IWL_MVM_TDLS_SW_REQ_SENT) mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm); if (state == IWL_MVM_TDLS_SW_IDLE) mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; struct ieee80211_sta *sta; unsigned int delay; struct iwl_mvm_sta *mvmsta; struct ieee80211_vif *vif; u32 sta_id = le32_to_cpu(notif->sta_id); lockdep_assert_held(&mvm->mutex); /* can fail sometimes */ if (!le32_to_cpu(notif->status)) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); return; } if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations)) return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 
lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; /* * Update state and possibly switch again after this is over (DTIM). * Also convert TU to msec. */ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); } static int iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, enum iwl_tdls_channel_switch_type type, const u8 *peer, bool peer_initiator, u32 timestamp) { bool same_peer = false; int ret = 0; /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); if (!IS_ERR_OR_NULL(sta)) same_peer = ether_addr_equal(peer, sta->addr); } switch (mvm->tdls_cs.state) { case IWL_MVM_TDLS_SW_IDLE: /* * might be spurious packet from the peer after the switch is * already done */ if (type == TDLS_MOVE_CH) ret = -EINVAL; break; case IWL_MVM_TDLS_SW_REQ_SENT: /* only allow requests from the same peer */ if (!same_peer) ret = -EBUSY; else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH && !peer_initiator) /* * We received a ch-switch request while an outgoing * one is pending. Allow it if the peer is the link * initiator. */ ret = -EBUSY; else if (type == TDLS_SEND_CHAN_SW_REQ) /* wait for idle before sending another request */ ret = -EBUSY; else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp) /* we got a stale response - ignore it */ ret = -EINVAL; break; case IWL_MVM_TDLS_SW_RESP_RCVD: /* * we are waiting for the FW to give an "active" notification, * so ignore requests in the meantime */ ret = -EBUSY; break; case IWL_MVM_TDLS_SW_REQ_RCVD: /* as above, allow the link initiator to proceed */ if (type == TDLS_SEND_CHAN_SW_REQ) { if (!same_peer) ret = -EBUSY; else if (peer_initiator) /* they are the initiator */ ret = -EBUSY; } else if (type == TDLS_MOVE_CH) { ret = -EINVAL; } break; case IWL_MVM_TDLS_SW_ACTIVE: /* * the only valid request when active is a request to return * to the base channel by the current off-channel peer */ if (type != TDLS_MOVE_CH || !same_peer) ret = -EBUSY; break; } if (ret) IWL_DEBUG_TDLS(mvm, "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n", type, mvm->tdls_cs.state, peer, same_peer, peer_initiator); return ret; } static int iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_tdls_channel_switch_type type, const u8 *peer, bool peer_initiator, u8 oper_class, struct cfg80211_chan_def *chandef, u32 timestamp, u16 switch_time, u16 switch_timeout, struct sk_buff *skb, u32 ch_sw_tm_ie) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr; struct iwl_tdls_channel_switch_cmd cmd = {0}; struct iwl_tdls_channel_switch_cmd_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci); u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator, timestamp); if (ret) return ret; if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) { ret = -EINVAL; goto out; } cmd.switch_type = type; tail->timing.frame_timestamp = cpu_to_le32(timestamp); 
tail->timing.switch_time = cpu_to_le32(switch_time); tail->timing.switch_timeout = cpu_to_le32(switch_timeout); rcu_read_lock(); sta = ieee80211_find_sta(vif, peer); if (!sta) { rcu_read_unlock(); ret = -ENOENT; goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); - cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); + cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id); if (!chandef) { if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.peer.chandef.chan) { /* actually moving to the channel */ chandef = &mvm->tdls_cs.peer.chandef; } else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE && type == TDLS_MOVE_CH) { /* we need to return to base channel */ struct ieee80211_chanctx_conf *chanctx = - rcu_dereference(vif->chanctx_conf); + rcu_dereference(vif->bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx)) { rcu_read_unlock(); goto out; } chandef = &chanctx->def; } } if (chandef) iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef); /* keep quota calculation simple for now - 50% of DTIM for TDLS */ tail->timing.max_offchan_duration = cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2); /* Switch time is the first element in the switch-timing IE. */ tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); hdr = (void *)skb->data; if (info->control.hw_key) { if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { rcu_read_unlock(); ret = -EINVAL; goto out; } iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd); } iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info, - mvmsta->sta_id); + mvmsta->deflink.sta_id); iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); memcpy(tail->frame.data, skb->data, skb->len); ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret); goto out; } /* channel switch has started, update state */ if (type != TDLS_MOVE_CH) { - mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; + mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id; iwl_mvm_tdls_update_cs_state(mvm, type == TDLS_SEND_CHAN_SW_REQ ? 
IWL_MVM_TDLS_SW_REQ_SENT : IWL_MVM_TDLS_SW_REQ_RCVD); } else { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD); } out: /* channel switch failed - we are idle */ if (ret) iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); return ret; } void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) { struct iwl_mvm *mvm; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct ieee80211_vif *vif; unsigned int delay; int ret; mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); mutex_lock(&mvm->mutex); /* called after an active channel switch has finished or timed-out */ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) goto out; sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr, mvm->tdls_cs.peer.initiator, mvm->tdls_cs.peer.op_class, &mvm->tdls_cs.peer.chandef, 0, 0, 0, mvm->tdls_cs.peer.skb, mvm->tdls_cs.peer.ch_sw_tm_ie); if (ret) IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret); /* retry after a DTIM if we failed sending now */ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); out: mutex_unlock(&mvm->mutex); } int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta; unsigned int delay; int ret; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr, chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr); ret = -EBUSY; goto out; } ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, TDLS_SEND_CHAN_SW_REQ, sta->addr, sta->tdls_initiator, oper_class, chandef, 0, 0, 0, tmpl_skb, ch_sw_tm_ie); if (ret) goto out; /* * Mark the peer as "in tdls switch" for this vif. We only allow a * single such peer per vif. */ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); if (!mvm->tdls_cs.peer.skb) { ret = -ENOMEM; goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; + mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id; mvm->tdls_cs.peer.chandef = *chandef; mvm->tdls_cs.peer.initiator = sta->tdls_initiator; mvm->tdls_cs.peer.op_class = oper_class; mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie; /* * Wait for 2 DTIM periods before attempting the next switch. The next * switch will be made sooner if the current one completes before that. 
*/ delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); out: mutex_unlock(&mvm->mutex); return ret; } void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *cur_sta; bool wait_for_phy = false; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } cur_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* make sure it's the same peer */ if (cur_sta != sta) goto out; /* * If we're currently in a switch because of the now canceled peer, * wait a DTIM here to make sure the phy is back on the base channel. * We can't otherwise force it. */ if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id && mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; out: mutex_unlock(&mvm->mutex); /* make sure the phy is on the base channel */ if (wait_for_phy) #if defined(__linux__) msleep(TU_TO_MS(vif->bss_conf.dtim_period * #elif defined(__FreeBSD__) linux_msleep(TU_TO_MS(vif->bss_conf.dtim_period * #endif vif->bss_conf.beacon_int)); /* flush the channel switch state */ flush_delayed_work(&mvm->tdls_cs.dwork); IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr); } void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); enum iwl_tdls_channel_switch_type type; unsigned int delay; const char *action_str = params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? "REQ" : "RESP"; mutex_lock(&mvm->mutex); IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", action_str, params->sta->addr, params->status); /* * we got a non-zero status from a peer we were switching to - move to * the idle state and retry again later */ if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ cur_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); if (cur_sta == params->sta) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); goto retry; } } type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ? 
TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH; iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr, params->sta->tdls_initiator, 0, params->chandef, params->timestamp, params->switch_time, params->switch_timeout, params->tmpl_skb, params->ch_sw_tm_ie); retry: /* register a timeout in case we don't succeed in switching */ delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int * 1024 / 1000; mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); mutex_unlock(&mvm->mutex); } diff --git a/sys/contrib/dev/iwlwifi/mvm/time-event.c b/sys/contrib/dev/iwlwifi/mvm/time-event.c index 6edf2b79db43..5f0e7144a951 100644 --- a/sys/contrib/dev/iwlwifi/mvm/time-event.c +++ b/sys/contrib/dev/iwlwifi/mvm/time-event.c @@ -1,1229 +1,1236 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ #include #include #include "fw/notif-wait.h" #include "iwl-trans.h" #include "fw-api.h" #include "time-event.h" #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" /* * For the high priority TE use a time event type that has similar priority to * the FW's action scan priority. */ #define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data) { lockdep_assert_held(&mvm->time_event_lock); if (!te_data || !te_data->vif) return; list_del(&te_data->list); /* * the list is only used for AUX ROC events so make sure it is always * initialized */ INIT_LIST_HEAD(&te_data->list); te_data->running = false; te_data->uid = 0; te_data->id = TE_MAX; te_data->vif = NULL; } void iwl_mvm_roc_done_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); synchronize_net(); /* * Flush the offchannel queue -- this is called when the time * event finishes or is canceled, so that frames queued for it * won't get stuck on the queue and be transmitted in the next * time event. */ mutex_lock(&mvm->mutex); if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { struct iwl_mvm_vif *mvmvif; /* * NB: access to this pointer would be racy, but the flush bit * can only be set when we had a P2P-Device VIF, and we have a * flush of this work in iwl_mvm_prepare_mac_removal() so it's * not really racy. */ if (!WARN_ON(!mvm->p2p_device_vif)) { mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); - iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, + true); } } /* * Clear the ROC_AUX_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). 
*/ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + if (mvm->mld_api_is_used) { + iwl_mvm_mld_rm_aux_sta(mvm); + goto out_unlock; + } + /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end * of use */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) + if (iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); } +out_unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) { /* * Of course, our status bit is just as racy as mac80211, so in * addition, fire off the work struct which will drop all frames * from the hardware queues that made it through the race. First * it will of course synchronize the TX path to make sure that * any *new* TX will be rejected. */ schedule_work(&mvm->roc_done_wk); } static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) { struct ieee80211_vif *csa_vif; rcu_read_lock(); csa_vif = rcu_dereference(mvm->csa_vif); - if (!csa_vif || !csa_vif->csa_active) + if (!csa_vif || !csa_vif->bss_conf.csa_active) goto out_unlock; IWL_DEBUG_TE(mvm, "CSA NOA started\n"); /* * CSA NoA is started but we still have beacons to * transmit on the current channel. * So we just do nothing here and the switch * will be performed on the last TBTT. */ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { IWL_WARN(mvm, "CSA NOA started too early\n"); goto out_unlock; } ieee80211_csa_finish(csa_vif); rcu_read_unlock(); RCU_INIT_POINTER(mvm->csa_vif, NULL); return; out_unlock: rcu_read_unlock(); } static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION) return false; - if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && + if (!mvmvif->csa_bcn_pending && vif->cfg.assoc && vif->bss_conf.dtim_period) return false; if (errmsg) IWL_ERR(mvm, "%s\n", errmsg); if (mvmvif->csa_bcn_pending) { struct iwl_mvm_sta *mvmsta; rcu_read_lock(); - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, + mvmvif->deflink.ap_sta_id); if (!WARN_ON(!mvmsta)) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); rcu_read_unlock(); } - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { /* * When not associated, this will be called from * iwl_mvm_event_mlme_callback_ini() */ iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); } iwl_mvm_connection_loss(mvm, vif, errmsg); return true; } static void iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_notif *notif) { struct ieee80211_vif *vif = te_data->vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (!notif->status) IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); switch (te_data->vif->type) { case NL80211_IFTYPE_AP: if (!notif->status) mvmvif->csa_failed = true; iwl_mvm_csa_noa_start(mvm); break; case NL80211_IFTYPE_STATION: if (!notif->status) { iwl_mvm_connection_loss(mvm, vif, "CSA TE failed to start"); break; } iwl_mvm_csa_client_absent(mvm, te_data->vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(te_data->vif, true); break; default: /* should never happen */ WARN_ON_ONCE(1); break; } /* we don't need it anymore */ iwl_mvm_te_clear_data(mvm, te_data); } static void iwl_mvm_te_check_trigger(struct 
iwl_mvm *mvm, struct iwl_time_event_notif *notif, struct iwl_mvm_time_event_data *te_data) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_time_event *te_trig; int i; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(te_data->vif), FW_DBG_TRIGGER_TIME_EVENT); if (!trig) return; te_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); u32 trig_action_bitmap = le32_to_cpu(te_trig->time_events[i].action_bitmap); u32 trig_status_bitmap = le32_to_cpu(te_trig->time_events[i].status_bitmap); if (trig_te_id != te_data->id || !(trig_action_bitmap & le32_to_cpu(notif->action)) || !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Time event %d Action 0x%x received status: %d", te_data->id, le32_to_cpu(notif->action), le32_to_cpu(notif->status)); break; } } static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm) { /* * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the * roc_done_wk is already scheduled or running, so don't schedule it * again to avoid a race where the roc_done_wk clears this bit after * it is set here, affecting the next run of the roc_done_wk. */ if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) iwl_mvm_roc_finished(mvm); } /* * Handles a FW notification for an event that is known to the driver. * * @mvm: the mvm component * @te_data: the time event data * @notif: the notification data corresponding the time event data. */ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_notif *notif) { lockdep_assert_held(&mvm->time_event_lock); IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action)); iwl_mvm_te_check_trigger(mvm, notif, te_data); /* * The FW sends the start/end time event notifications even for events * that it fails to schedule. This is indicated in the status field of * the notification. This happens in cases that the scheduler cannot * find a schedule that can handle the event (for example requesting a * P2P Device discoveribility, while there are other higher priority * events in the system). */ if (!le32_to_cpu(notif->status)) { const char *msg; if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) msg = "Time Event start notification failure"; else msg = "Time Event end notification failure"; IWL_DEBUG_TE(mvm, "%s\n", msg); if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) { iwl_mvm_te_clear_data(mvm, te_data); return; } } if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { IWL_DEBUG_TE(mvm, "TE ended - current time %lu, estimated end %lu\n", jiffies, te_data->end_jiffies); switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_p2p_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: /* * If we are switching channel, don't disconnect * if the time event is already done. Beacons can * be delayed a bit after the switch. */ if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { IWL_DEBUG_TE(mvm, "No beacon heard and the CS time event is over, don't disconnect\n"); break; } /* * By now, we should have finished association * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, - !te_data->vif->bss_conf.assoc ? + !te_data->vif->cfg.assoc ? "Not associated and the time event is over already..." 
: "No beacon heard and the time event is over already..."); break; default: break; } iwl_mvm_te_clear_data(mvm, te_data); } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { te_data->running = true; te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); } } else { IWL_WARN(mvm, "Got TE with unknown action\n"); } } /* * Handle A Aux ROC time event */ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, struct iwl_time_event_notif *notif) { - struct iwl_mvm_time_event_data *te_data, *tmp; - bool aux_roc_te = false; + struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data; - list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { + list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { if (le32_to_cpu(notif->unique_id) == te_data->uid) { - aux_roc_te = true; + aux_roc_te = te_data; break; } } if (!aux_roc_te) /* Not a Aux ROC time event */ return -EINVAL; iwl_mvm_te_check_trigger(mvm, notif, te_data); IWL_DEBUG_TE(mvm, "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action), le32_to_cpu(notif->status)); if (!le32_to_cpu(notif->status) || le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { /* End TE, notify mac80211 */ ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_roc_finished(mvm); /* flush aux queue */ list_del(&te_data->list); /* remove from list */ te_data->running = false; te_data->vif = NULL; te_data->uid = 0; te_data->id = TE_MAX; } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } else { IWL_DEBUG_TE(mvm, "ERROR: Unknown Aux ROC Time Event (action = %d)\n", le32_to_cpu(notif->action)); return -EINVAL; } return 0; } /* * The Rx handler for time event notifications */ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_time_event_notif *notif = (void *)pkt->data; struct iwl_mvm_time_event_data *te_data, *tmp; IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", le32_to_cpu(notif->unique_id), le32_to_cpu(notif->action)); spin_lock_bh(&mvm->time_event_lock); /* This time event is triggered for Aux ROC request */ if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) goto unlock; list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { if (le32_to_cpu(notif->unique_id) == te_data->uid) iwl_mvm_te_handle_notif(mvm, te_data, notif); } unlock: spin_unlock_bh(&mvm->time_event_lock); } static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_time_event_data *te_data = data; struct iwl_time_event_notif *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); return true; } resp = (void *)pkt->data; /* te_data->uid is already set in the TIME_EVENT_CMD response */ if (le32_to_cpu(resp->unique_id) != te_data->uid) return false; 
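	/* UID matches the time event we added: this start/end notification is
	 * ours, so log it and return true to complete the notification wait.
	 */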
IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", te_data->uid); if (!resp->status) IWL_ERR(mvm, "TIME_EVENT_NOTIFICATION received but not executed\n"); return true; } static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_time_event_data *te_data = data; struct iwl_time_event_resp *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); return true; } resp = (void *)pkt->data; /* we should never get a response to another TIME_EVENT_CMD here */ if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) return false; te_data->uid = le32_to_cpu(resp->unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); return true; } static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_cmd *te_cmd) { static const u16 time_event_response[] = { TIME_EVENT_CMD }; struct iwl_notification_wait wait_time_event; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", le32_to_cpu(te_cmd->duration)); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id != TE_MAX)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = le32_to_cpu(te_cmd->duration); te_data->id = le32_to_cpu(te_cmd->id); list_add_tail(&te_data->list, &mvm->time_event_list); spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. 
*/ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_time_event_response, te_data); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(*te_cmd), te_cmd); if (ret) { IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(ret); if (ret) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return ret; } void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, u32 max_delay, bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; struct iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); return; } if (te_data->running) { IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", te_data->uid, jiffies_to_msecs(te_data->end_jiffies - jiffies)); /* * we don't have enough time * cancel the current TE and issue a new one * Of course it would be better to remove the old one only * when the new one is added, but we don't care if we are off * channel for a bit. All we need to do, is not to return * before we actually begin to be on the channel. */ iwl_mvm_stop_session_protection(mvm, vif); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); time_cmd.apply_time = cpu_to_le32(0); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.max_delay = cpu_to_le32(max_delay); /* TODO: why do we need to interval = bi if it is not periodic? 
*/ time_cmd.interval = cpu_to_le32(1); time_cmd.duration = cpu_to_le32(duration); time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | TE_V2_START_IMMEDIATELY); if (!wait_for_notif) { iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); return; } /* * Create notification_wait for the TIME_EVENT_NOTIFICATION to use * right after we send the time event */ iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, te_notif_response, ARRAY_SIZE(te_notif_response), iwl_mvm_te_notif, te_data); /* If TE was sent OK - wait for the notification that started */ if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { IWL_ERR(mvm, "Failed to add TE to protect session\n"); iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, TU_TO_JIFFIES(max_delay))) { IWL_ERR(mvm, "Failed to protect session until TE\n"); } } static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, u32 id) { struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .conf_id = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); } static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, u32 *uid) { u32 id; struct iwl_mvm_vif *mvmvif; enum nl80211_iftype iftype; if (!te_data->vif) return false; mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); iftype = te_data->vif->type; /* * It is possible that by the time we got to this point the time * event was already removed. */ spin_lock_bh(&mvm->time_event_lock); /* Save time event uid before clearing its data */ *uid = te_data->uid; id = te_data->id; /* * The clear_data function handles time events that were already removed */ iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); /* When session protection is used, the te_data->id field * is reused to save session protection's configuration. * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set * to HOT_SPOT_CMD. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && id != HOT_SPOT_CMD) { if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, mvmvif, id); if (iftype == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_p2p_roc_finished(mvm); } } return false; } else { /* It is possible that by the time we try to remove it, the * time event has already ended and removed. In such a case * there is no need to send a removal command. */ if (id == TE_MAX) { IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); return false; } } return true; } /* * Explicit request to remove a aux roc time event. The removal of a time * event needs to be synchronized with the flow of a time event's end * notification, which also removes the time event from the op mode * data structures. 
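__iwl_mvm_remove_time_event() above follows a common pattern: copy the fields you still need (uid, id) while holding the spinlock, clear the shared state, and only then do the slow work outside the lock. A standalone sketch of that pattern with a pthread mutex and invented names; the printf stands in for sending the removal command:

#include <pthread.h>
#include <stdio.h>

struct event {
	pthread_mutex_t lock;
	unsigned int uid;
	int id;			/* -1 means "already removed" */
};

static void remove_event(struct event *ev)
{
	unsigned int uid;
	int id;

	pthread_mutex_lock(&ev->lock);
	uid = ev->uid;		/* snapshot while protected */
	id = ev->id;
	ev->id = -1;		/* clear shared state */
	pthread_mutex_unlock(&ev->lock);

	if (id == -1) {
		printf("event already removed, nothing to send\n");
		return;
	}
	/* slow work happens outside the lock */
	printf("sending removal for uid 0x%x\n", uid);
}

int main(void)
{
	static struct event ev = { PTHREAD_MUTEX_INITIALIZER, 0xabcd, 7 };

	remove_event(&ev);
	remove_event(&ev);	/* second call is a no-op */
	return 0;
}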
*/ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data) { struct iwl_hs20_roc_req aux_cmd = {}; u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm); u32 uid; int ret; if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) return; aux_cmd.event_unique_id = cpu_to_le32(uid); aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); aux_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id)); ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_cmd); if (WARN_ON(ret)) return; } /* * Explicit request to remove a time event. The removal of a time event needs to * be synchronized with the flow of a time event's end notification, which also * removes the time event from the op mode data structures. */ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_time_event_data *te_data) { struct iwl_time_event_cmd time_cmd = {}; u32 uid; int ret; if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) return; /* When we remove a TE, the UID is to be set in the id field */ time_cmd.id = cpu_to_le32(uid); time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, sizeof(time_cmd), &time_cmd); if (ret) IWL_ERR(mvm, "Couldn't remove the time event\n"); } void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; u32 id; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); id = te_data->id; spin_unlock_bh(&mvm->time_event_lock); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { if (id != SESSION_PROTECT_CONF_ASSOC) { IWL_DEBUG_TE(mvm, "don't remove session protection id=%u\n", id); return; } } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) { IWL_DEBUG_TE(mvm, "don't remove TE with id=%u (not session protection)\n", id); return; } iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data; struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id), true); if (!vif) goto out_unlock; mvmvif = iwl_mvm_vif_from_mac80211(vif); /* The vif is not a P2P_DEVICE, maintain its time_event_data */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; if (!le32_to_cpu(notif->status)) { iwl_mvm_te_check_disconnect(mvm, vif, "Session protection failure"); spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } if (le32_to_cpu(notif->start)) { spin_lock_bh(&mvm->time_event_lock); te_data->running = le32_to_cpu(notif->start); te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); spin_unlock_bh(&mvm->time_event_lock); } else { /* * By now, we should have finished association * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, vif, - !vif->bss_conf.assoc ? + !vif->cfg.assoc ? 
"Not associated and the session protection is over already..." : "No beacon heard and the session protection is over already..."); spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } goto out_unlock; } if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_p2p_roc_finished(mvm); } else if (le32_to_cpu(notif->start)) { if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) goto out_unlock; set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } out_unlock: rcu_read_unlock(); } static int iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration, enum ieee80211_roc_type type) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; lockdep_assert_held(&mvm->mutex); /* The time_event_data.id field is reused to save session * protection's configuration. */ switch (type) { case IEEE80211_ROC_TYPE_NORMAL: mvmvif->time_event_data.id = SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV; break; case IEEE80211_ROC_TYPE_MGMT_TX: mvmvif->time_event_data.id = SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION; break; default: WARN_ONCE(1, "Got an invalid ROC type\n"); return -EINVAL; } cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); } int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int duration, enum ieee80211_roc_type type) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running) { IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); return -EBUSY; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) return iwl_mvm_start_p2p_roc_session_protection(mvm, vif, duration, type); time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); switch (type) { case IEEE80211_ROC_TYPE_NORMAL: time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL); break; case IEEE80211_ROC_TYPE_MGMT_TX: time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); break; default: WARN_ONCE(1, "Got an invalid ROC type\n"); return -EINVAL; } time_cmd.apply_time = cpu_to_le32(0); time_cmd.interval = cpu_to_le32(1); /* * The P2P Device TEs can have lower priority than other events * that are being scheduled by the driver/fw, and thus it might not be * scheduled. To improve the chances of it being scheduled, allow them * to be fragmented, and in addition allow them to be delayed. 
*/ time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); time_cmd.repeat = 1; time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_NOTIF_HOST_EVENT_END | TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm) { struct iwl_mvm_time_event_data *te_data; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); /* * Iterate over the list of time events and find the time event that is * associated with a P2P_DEVICE interface. * This assumes that a P2P_DEVICE interface can have only a single time * event at any given time and this time event coresponds to a ROC * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) goto out; } /* There can only be at most one AUX ROC time event, we just use the * list to simplify/unify code. Remove it if it exists. */ te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, struct iwl_mvm_time_event_data, list); out: spin_unlock_bh(&mvm->time_event_lock); return te_data; } void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) { struct iwl_mvm_time_event_data *te_data; u32 uid; te_data = iwl_mvm_get_roc_te(mvm); if (te_data) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif; struct iwl_mvm_time_event_data *te_data; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_cancel_session_protection(mvm, mvmvif, mvmvif->time_event_data.id); iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, &mvmvif->hs_time_event_data); iwl_mvm_roc_finished(mvm); } return; } te_data = iwl_mvm_get_roc_te(mvm); if (!te_data) { IWL_WARN(mvm, "No remain on channel event\n"); return; } mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { iwl_mvm_remove_time_event(mvm, mvmvif, te_data); iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); iwl_mvm_roc_finished(mvm); } } void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; u32 id; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); id = te_data->id; spin_unlock_bh(&mvm->time_event_lock); if (id != TE_CHANNEL_SWITCH_PERIOD) return; iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 apply_time) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); if (te_data->running) { u32 id; spin_lock_bh(&mvm->time_event_lock); id = te_data->id; spin_unlock_bh(&mvm->time_event_lock); if (id == TE_CHANNEL_SWITCH_PERIOD) { IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); return -EBUSY; } /* * Remove the session protection time event to allow the * channel switch. If we got here, we just heard a beacon so * the session protection is not needed anymore anyway. 
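The ROC duration arithmetic earlier in this hunk (max_frags, max_delay, duration) is done in 802.11 time units. Assuming the usual definition of 1 TU = 1024 microseconds, a worked standalone version of that conversion looks like this; the MSEC_TO_TU name is borrowed for illustration and reimplemented here, and the real code additionally clamps max_frags with min():

#include <stdio.h>

/* 1 TU = 1024 microseconds, so ms -> TU is roughly ms * 1000 / 1024 */
#define MSEC_TO_TU(ms)	((ms) * 1000 / 1024)

int main(void)
{
	unsigned int duration_ms = 500;		/* example ROC duration */
	unsigned int duration_tu = MSEC_TO_TU(duration_ms);
	unsigned int max_frags   = duration_tu / 50;
	unsigned int max_delay   = MSEC_TO_TU(duration_ms / 2);

	printf("%u ms = %u TU, max_frags = %u, max_delay = %u TU\n",
	       duration_ms, duration_tu, max_frags, max_delay);
	return 0;
}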
*/ iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); time_cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); time_cmd.apply_time = cpu_to_le32(apply_time); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.duration = cpu_to_le32(duration); time_cmd.repeat = 1; time_cmd.interval = cpu_to_le32(1); time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | TE_V2_ABSENCE); if (!apply_time) time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); } static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_session_prot_notif *resp; int resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF || pkt->hdr.group_id != MAC_CONF_GROUP)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n"); return true; } resp = (void *)pkt->data; if (!resp->status) IWL_ERR(mvm, "TIME_EVENT_NOTIFICATION received but not executed\n"); return true; } void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) }; struct iwl_notification_wait wait_notif; struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); if (te_data->running && time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", jiffies_to_msecs(te_data->end_jiffies - jiffies)); spin_unlock_bh(&mvm->time_event_lock); return; } iwl_mvm_te_clear_data(mvm, te_data); /* * The time_event_data.id field is reused to save session * protection's configuration. 
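Both iwl_mvm_protect_session() and iwl_mvm_schedule_session_protection() above bail out early when the running event already covers min_duration, using a time_after()-style comparison against end_jiffies. That idiom stays correct across counter wraparound because it compares via a signed subtraction; a standalone sketch with 32-bit ticks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if tick a is after tick b, even across counter wraparound */
static bool tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t now  = 0xfffffff0u;	/* counter about to wrap */
	uint32_t end  = now + 0x40;	/* deadline lands past the wrap */
	uint32_t need = now + 0x20;	/* earliest acceptable end */

	printf("enough time left: %s\n", tick_after(end, need) ? "yes" : "no");
	return 0;
}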
*/ te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); te_data->vif = vif; spin_unlock_bh(&mvm->time_event_lock); IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", le32_to_cpu(cmd.duration_tu)); if (!wait_for_notif) { if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { IWL_ERR(mvm, "Couldn't send the SESSION_PROTECTION_CMD\n"); spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return; } iwl_init_notification_wait(&mvm->notif_wait, &wait_notif, notif, ARRAY_SIZE(notif), iwl_mvm_session_prot_notif, NULL); if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { IWL_ERR(mvm, "Couldn't send the SESSION_PROTECTION_CMD\n"); iwl_remove_notification(&mvm->notif_wait, &wait_notif); } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, TU_TO_JIFFIES(100))) { IWL_ERR(mvm, "Failed to protect session until session protection\n"); } } diff --git a/sys/contrib/dev/iwlwifi/mvm/time-sync.c b/sys/contrib/dev/iwlwifi/mvm/time-sync.c new file mode 100644 index 000000000000..edae3e24192b --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/time-sync.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 Intel Corporation + */ + +#include "mvm.h" +#include "time-sync.h" +#include + +void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data) +{ + skb_queue_head_init(&data->frame_list); +} + +static bool iwl_mvm_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u8 skb_dialog_token; + + if (ieee80211_is_timing_measurement(skb)) + skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token; + else + skb_dialog_token = mgmt->u.action.u.ftm.dialog_token; + + if ((ether_addr_equal(mgmt->sa, addr) || + ether_addr_equal(mgmt->da, addr)) && + skb_dialog_token == dialog_token) + return true; + + return false; +} + +static struct sk_buff *iwl_mvm_time_sync_find_skb(struct iwl_mvm *mvm, u8 *addr, + u8 dialog_token) +{ + struct sk_buff *skb; + + /* The queue is expected to have only one SKB. If there are other SKBs + * in the queue, they did not get a time sync notification and are + * probably obsolete by now, so drop them. 
+ */ + while ((skb = skb_dequeue(&mvm->time_sync.frame_list))) { + if (iwl_mvm_is_skb_match(skb, addr, dialog_token)) + break; + + kfree_skb(skb); + skb = NULL; + } + + return skb; +} + +static u64 iwl_mvm_get_64_bit(__le32 high, __le32 low) +{ + return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low); +} + +void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_msmt_notify *notif = (void *)pkt->data; + struct ieee80211_rx_status *rx_status; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns; + struct sk_buff *skb = + iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + u64 adj_time; + + if (!skb) { + IWL_DEBUG_INFO(mvm, "Time sync event but no pending skb\n"); + return; + } + + ts_10ns = iwl_mvm_get_64_bit(notif->t2_hi, notif->t2_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + shwt = skb_hwtstamps(skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mvm_get_64_bit(notif->t3_hi, notif->t3_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + rx_status = IEEE80211_SKB_RXCB(skb); + rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time); + + IWL_DEBUG_INFO(mvm, + "Time sync: RX event - report frame t2=%llu t3=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(rx_status->ack_tx_hwtstamp)); + ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); +} + +void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data; + struct ieee80211_tx_status status = {}; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns, adj_time; + + status.skb = + iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + + if (!status.skb) { + IWL_DEBUG_INFO(mvm, "Time sync confirm but no pending skb\n"); + return; + } + + ts_10ns = iwl_mvm_get_64_bit(notif->t1_hi, notif->t1_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + shwt = skb_hwtstamps(status.skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mvm_get_64_bit(notif->t4_hi, notif->t4_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + status.info = IEEE80211_SKB_CB(status.skb); + status.ack_hwtstamp = ktime_set(0, adj_time); + + IWL_DEBUG_INFO(mvm, + "Time sync: TX event - report frame t1=%llu t4=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(status.ack_hwtstamp)); + ieee80211_tx_status_ext(mvm->hw, &status); +} + +int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, u32 protocols) +{ + struct iwl_time_sync_cfg_cmd cmd = {}; + int err; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM)) + return -EINVAL; + + /* The fw only supports one peer. We do allow reconfiguration of the + * same peer for cases of fw reset etc. 
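The measurement handlers above rebuild each timestamp from two 32-bit halves and then scale it from the firmware's 10 ns granularity to nanoseconds before storing it as a ktime. The same arithmetic, stripped of the driver types and with example input values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t make64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t t2_hi = 0x00000001, t2_lo = 0x80000000;	/* example halves */
	uint64_t ts_10ns = make64(t2_hi, t2_lo);
	uint64_t ts_ns = ts_10ns * 10;	/* firmware counts in 10 ns units */

	printf("t2 = %" PRIu64 " (10 ns units) = %" PRIu64 " ns\n",
	       ts_10ns, ts_ns);
	return 0;
}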
+ */ + if (mvm->time_sync.active && + !ether_addr_equal(addr, mvm->time_sync.peer_addr)) { + IWL_DEBUG_INFO(mvm, "Time sync: reject config for peer: %pM\n", + addr); + return -ENOBUFS; + } + + if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM | + IWL_TIME_SYNC_PROTOCOL_FTM)) + return -EINVAL; + + cmd.protocols = cpu_to_le32(protocols); + + ether_addr_copy(cmd.peer_addr, addr); + + err = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + 0, sizeof(cmd), &cmd); + if (err) { + IWL_ERR(mvm, "Failed to send time sync cfg cmd: %d\n", err); + } else { + mvm->time_sync.active = protocols != 0; + ether_addr_copy(mvm->time_sync.peer_addr, addr); + IWL_DEBUG_INFO(mvm, "Time sync: set peer addr=%pM\n", addr); + } + + if (!mvm->time_sync.active) + skb_queue_purge(&mvm->time_sync.frame_list); + + return err; +} diff --git a/sys/contrib/dev/iwlwifi/mvm/time-sync.h b/sys/contrib/dev/iwlwifi/mvm/time-sync.h new file mode 100644 index 000000000000..2cfd0fb5e781 --- /dev/null +++ b/sys/contrib/dev/iwlwifi/mvm/time-sync.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2022 Intel Corporation + */ +#ifndef __TIME_SYNC_H__ +#define __TIME_SYNC_H__ + +#include "mvm.h" +#include + +void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data); +void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, + u32 protocols); + +static inline +bool iwl_mvm_time_sync_frame(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *addr) +{ + if (ether_addr_equal(mvm->time_sync.peer_addr, addr) && + (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) { + skb_queue_tail(&mvm->time_sync.frame_list, skb); + return true; + } + + return false; +} +#endif diff --git a/sys/contrib/dev/iwlwifi/mvm/tt.c b/sys/contrib/dev/iwlwifi/mvm/tt.c index 3c34b9c69135..41bff5831efc 100644 --- a/sys/contrib/dev/iwlwifi/mvm/tt.c +++ b/sys/contrib/dev/iwlwifi/mvm/tt.c @@ -1,910 +1,863 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2021 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #ifdef CONFIG_THERMAL #include #endif #include "mvm.h" #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; u32 duration = tt->params.ct_kill_duration; if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; IWL_ERR(mvm, "Enter CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, true); if (!iwl_mvm_is_tt_in_fw(mvm)) { tt->throttle = false; tt->dynamic_smps = false; } /* Don't schedule an exit work if we're in test mode, since * the temperature will not change unless we manually set it * again (or disable testing). 
*/ if (!mvm->temperature_test) schedule_delayed_work(&tt->ct_kill_exit, round_jiffies_relative(duration * HZ)); } static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) { if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; IWL_ERR(mvm, "Exit CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, false); } static void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) { /* ignore the notification if we are in test mode */ if (mvm->temperature_test) return; if (mvm->temperature == temp) return; mvm->temperature = temp; iwl_mvm_tt_handler(mvm); } static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_dts_measurement_notif_v1 *notif_v1; int len = iwl_rx_packet_payload_len(pkt); int temp; /* we can use notif_v1 only, because v2 only adds an additional * parameter, which is not used in this function. */ if (WARN_ON_ONCE(len < sizeof(*notif_v1))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); return -EINVAL; } notif_v1 = (void *)pkt->data; temp = le32_to_cpu(notif_v1->temp); /* shouldn't be negative, but since it's s32, make sure it isn't */ if (WARN_ON_ONCE(temp < 0)) temp = 0; IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp); return temp; } static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); int *temp = data; int ret; ret = iwl_mvm_temp_notif_parse(mvm, pkt); if (ret < 0) return true; *temp = ret; return true; } void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_dts_measurement_notif_v2 *notif_v2; int len = iwl_rx_packet_payload_len(pkt); int temp; u32 ths_crossed; /* the notification is handled synchronously in ctkill, so skip here */ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; temp = iwl_mvm_temp_notif_parse(mvm, pkt); if (!iwl_mvm_is_tt_in_fw(mvm)) { if (temp >= 0) iwl_mvm_tt_temp_changed(mvm, temp); return; } if (WARN_ON_ONCE(len < sizeof(*notif_v2))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); return; } notif_v2 = (void *)pkt->data; ths_crossed = le32_to_cpu(notif_v2->threshold_idx); /* 0xFF in ths_crossed means the notification is not related * to a trip, so we can ignore it here. */ if (ths_crossed == 0xFF) return; IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n", temp, ths_crossed); #ifdef CONFIG_THERMAL if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) return; if (mvm->tz_device.tzone) { struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; thermal_zone_device_update(tz_dev->tzone, THERMAL_TRIP_VIOLATED); } #endif /* CONFIG_THERMAL */ } void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct ct_kill_notif *notif; notif = (struct ct_kill_notif *)pkt->data; IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n", notif->temperature); if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP, CT_KILL_NOTIFICATION, 0) > 1) IWL_DEBUG_TEMP(mvm, "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n", notif->dts, notif->scheme); iwl_mvm_enter_ctkill(mvm); } /* * send the DTS_MEASUREMENT_TRIGGER command with or without waiting for a * response. 
If we get a response then the measurement is stored in 'temp' */ static int iwl_mvm_send_temp_cmd(struct iwl_mvm *mvm, bool response, s32 *temp) { struct iwl_host_cmd cmd = {}; struct iwl_dts_measurement_cmd dts_cmd = { .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), }; struct iwl_ext_dts_measurement_cmd ext_cmd = { .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE), }; struct iwl_dts_measurement_resp *resp; void *cmd_ptr; int ret; u32 cmd_flags = 0; u16 len; /* Check which command format is used (regular/extended) */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) { len = sizeof(ext_cmd); cmd_ptr = &ext_cmd; } else { len = sizeof(dts_cmd); cmd_ptr = &dts_cmd; } /* The command version where we get a response is zero length */ if (response) { cmd_flags = CMD_WANT_SKB; len = 0; } cmd.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE); cmd.len[0] = len; cmd.flags = cmd_flags; cmd.data[0] = cmd_ptr; IWL_DEBUG_TEMP(mvm, "Sending temperature measurement command - %s response\n", response ? "with" : "without"); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send the temperature measurement command (err=%d)\n", ret); return ret; } if (response) { resp = (void *)cmd.resp_pkt->data; *temp = le32_to_cpu(resp->temp); IWL_DEBUG_TEMP(mvm, "Got temperature measurement response: temp=%d\n", *temp); iwl_free_resp(&cmd); } return ret; } int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) { struct iwl_notification_wait wait_temp_notif; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; int ret; u8 cmd_ver; /* * If command version is 1 we send the command and immediately get * a response. For older versions we send the command and wait for a * notification (no command TLV for previous versions). 
*/ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) return iwl_mvm_send_temp_cmd(mvm, true, temp); lockdep_assert_held(&mvm->mutex); iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), iwl_mvm_temp_notif_wait, temp); ret = iwl_mvm_send_temp_cmd(mvm, false, temp); if (ret) { iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif); return ret; } ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); if (ret) IWL_WARN(mvm, "Getting the temperature timed out\n"); return ret; } static void check_exit_ctkill(struct work_struct *work) { struct iwl_mvm_tt_mgmt *tt; struct iwl_mvm *mvm; u32 duration; s32 temp; int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); if (iwl_mvm_is_tt_in_fw(mvm)) { iwl_mvm_exit_ctkill(mvm); return; } duration = tt->params.ct_kill_duration; flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); if (__iwl_mvm_mac_start(mvm)) goto reschedule; ret = iwl_mvm_get_temp(mvm, &temp); __iwl_mvm_mac_stop(mvm); if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); if (temp <= tt->params.ct_kill_exit) { mutex_unlock(&mvm->mutex); iwl_mvm_exit_ctkill(mvm); return; } reschedule: mutex_unlock(&mvm->mutex); schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, round_jiffies(duration * HZ)); } static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = _data; enum ieee80211_smps_mode smps_mode; lockdep_assert_held(&mvm->mutex); if (mvm->thermal_throttle.dynamic_smps) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_AUTOMATIC; if (vif->type != NL80211_IFTYPE_STATION) return; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode, 0); } static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) { struct iwl_mvm_sta *mvmsta; int i, err; for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i); if (!mvmsta) continue; if (enable == mvmsta->tt_tx_protection) continue; err = iwl_mvm_tx_protection(mvm, mvmsta, enable); if (err) { IWL_ERR(mvm, "Failed to %s Tx protection\n", enable ? "enable" : "disable"); } else { IWL_DEBUG_TEMP(mvm, "%s Tx protection\n", enable ? 
"Enable" : "Disable"); mvmsta->tt_tx_protection = enable; } } } void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff) { struct iwl_host_cmd cmd = { .id = REPLY_THERMAL_MNG_BACKOFF, .len = { sizeof(u32), }, .data = { &backoff, }, }; backoff = max(backoff, mvm->thermal_throttle.min_backoff); if (iwl_mvm_send_cmd(mvm, &cmd) == 0) { IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n", backoff); mvm->thermal_throttle.tx_backoff = backoff; } else { IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n"); } } void iwl_mvm_tt_handler(struct iwl_mvm *mvm) { struct iwl_tt_params *params = &mvm->thermal_throttle.params; struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; s32 temperature = mvm->temperature; bool throttle_enable = false; int i; u32 tx_backoff; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature); if (params->support_ct_kill && temperature >= params->ct_kill_entry) { iwl_mvm_enter_ctkill(mvm); return; } if (params->support_ct_kill && temperature <= params->ct_kill_exit) { iwl_mvm_exit_ctkill(mvm); return; } if (params->support_dynamic_smps) { if (!tt->dynamic_smps && temperature >= params->dynamic_smps_entry) { IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n"); tt->dynamic_smps = true; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tt_smps_iterator, mvm); throttle_enable = true; } else if (tt->dynamic_smps && temperature <= params->dynamic_smps_exit) { IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n"); tt->dynamic_smps = false; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tt_smps_iterator, mvm); } } if (params->support_tx_protection) { if (temperature >= params->tx_protection_entry) { iwl_mvm_tt_tx_protection(mvm, true); throttle_enable = true; } else if (temperature <= params->tx_protection_exit) { iwl_mvm_tt_tx_protection(mvm, false); } } if (params->support_tx_backoff) { tx_backoff = tt->min_backoff; for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) { if (temperature < params->tx_backoff[i].temperature) break; tx_backoff = max(tt->min_backoff, params->tx_backoff[i].backoff); } if (tx_backoff != tt->min_backoff) throttle_enable = true; if (tt->tx_backoff != tx_backoff) iwl_mvm_tt_tx_backoff(mvm, tx_backoff); } if (!tt->throttle && throttle_enable) { IWL_WARN(mvm, "Due to high temperature thermal throttling initiated\n"); tt->throttle = true; } else if (tt->throttle && !tt->dynamic_smps && tt->tx_backoff == tt->min_backoff && temperature <= params->tx_protection_exit) { IWL_WARN(mvm, "Temperature is back to normal thermal throttling stopped\n"); tt->throttle = false; } } static const struct iwl_tt_params iwl_mvm_default_tt_params = { .ct_kill_entry = 118, .ct_kill_exit = 96, .ct_kill_duration = 5, .dynamic_smps_entry = 114, .dynamic_smps_exit = 110, .tx_protection_entry = 114, .tx_protection_exit = 108, .tx_backoff = { {.temperature = 112, .backoff = 200}, {.temperature = 113, .backoff = 600}, {.temperature = 114, .backoff = 1200}, {.temperature = 115, .backoff = 2000}, {.temperature = 116, .backoff = 4000}, {.temperature = 117, .backoff = 10000}, }, .support_ct_kill = true, .support_dynamic_smps = true, .support_tx_protection = true, .support_tx_backoff = true, }; /* budget in mWatt */ static const u32 iwl_mvm_cdev_budgets[] = { 2400, /* cooling state 0 */ 2000, /* cooling state 1 */ 1800, /* cooling state 2 */ 1600, /* cooling state 3 */ 1400, /* cooling state 4 */ 1200, /* cooling state 5 */ 1000, /* cooling state 6 */ 900, /* cooling state 7 */ 800, /* cooling state 8 */ 700, /* cooling state 9 
*/ 650, /* cooling state 10 */ 600, /* cooling state 11 */ 550, /* cooling state 12 */ 500, /* cooling state 13 */ 450, /* cooling state 14 */ 400, /* cooling state 15 */ 350, /* cooling state 16 */ 300, /* cooling state 17 */ 250, /* cooling state 18 */ 200, /* cooling state 19 */ 150, /* cooling state 20 */ }; int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) { struct iwl_mvm_ctdp_cmd cmd = { .operation = cpu_to_le32(op), .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), .window_size = 0, }; int ret; u32 status; lockdep_assert_held(&mvm->mutex); status = 0; ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), sizeof(cmd), &cmd, &status); if (ret) { IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); return ret; } switch (op) { case CTDP_CMD_OPERATION_START: #ifdef CONFIG_THERMAL mvm->cooling_dev.cur_state = state; #endif /* CONFIG_THERMAL */ break; case CTDP_CMD_OPERATION_REPORT: IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status); /* when the function is called with CTDP_CMD_OPERATION_REPORT * option the function should return the average budget value * that is received from the FW. * The budget can't be less or equal to 0, so it's possible * to distinguish between error values and budgets. */ return status; case CTDP_CMD_OPERATION_STOP: IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n"); break; } return 0; } #ifdef CONFIG_THERMAL static int compare_temps(const void *a, const void *b) { return ((s16)le16_to_cpu(*(__le16 *)a) - (s16)le16_to_cpu(*(__le16 *)b)); } #endif int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) { struct temp_report_ths_cmd cmd = {0}; int ret; #ifdef CONFIG_THERMAL int i, j, idx = 0; lockdep_assert_held(&mvm->mutex); if (!mvm->tz_device.tzone) goto send; /* The driver holds array of temperature trips that are unsorted * and uncompressed, the FW should get it compressed and sorted */ - /* compress temp_trips to cmd array, remove uninitialized values*/ + /* compress trips to cmd array, remove uninitialized values*/ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { - if (mvm->tz_device.temp_trips[i] != S16_MIN) { + if (mvm->tz_device.trips[i].temperature != INT_MIN) { cmd.thresholds[idx++] = - cpu_to_le16(mvm->tz_device.temp_trips[i]); + cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000)); } } cmd.num_temps = cpu_to_le32(idx); if (!idx) goto send; /*sort cmd array*/ sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL); /* we should save the indexes of trips because we sort * and compress the orginal array */ for (i = 0; i < idx; i++) { for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { - if (le16_to_cpu(cmd.thresholds[i]) == - mvm->tz_device.temp_trips[j]) + if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) == + mvm->tz_device.trips[j].temperature) mvm->tz_device.fw_trips_index[i] = j; } } send: #endif ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, TEMP_REPORTING_THRESHOLDS_CMD), 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n", ret); return ret; } #ifdef CONFIG_THERMAL static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, int *temperature) { - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm *mvm = thermal_zone_device_priv(device); int ret; int temp; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -ENODATA; goto out; } ret = iwl_mvm_get_temp(mvm, &temp); if (ret) goto out; *temperature = temp * 1000; out: mutex_unlock(&mvm->mutex); return ret; } 
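iwl_mvm_send_temp_report_ths_cmd() above compresses the configured trip points, sorts them for the firmware, and then records which original trip slot each sorted threshold came from. The same compress/sort/map-back sequence in standalone form, using qsort() instead of the kernel's sort() and millidegree inputs as in the new thermal-trip API; array sizes and values are examples only:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TRIPS 8

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	/* configured trips in millidegrees; INT_MIN means "unused" */
	int trips[MAX_TRIPS] = { 110000, INT_MIN, 95000, INT_MIN, 120000,
				 INT_MIN, INT_MIN, INT_MIN };
	int thresholds[MAX_TRIPS];
	int fw_index[MAX_TRIPS];
	int i, j, n = 0;

	for (i = 0; i < MAX_TRIPS; i++)		/* compress */
		if (trips[i] != INT_MIN)
			thresholds[n++] = trips[i] / 1000;	/* whole degrees */

	qsort(thresholds, n, sizeof(thresholds[0]), cmp_int);	/* sort */

	for (i = 0; i < n; i++)			/* map back to trip slots */
		for (j = 0; j < MAX_TRIPS; j++)
			if (thresholds[i] * 1000 == trips[j])
				fw_index[i] = j;

	for (i = 0; i < n; i++)
		printf("slot %d: %d C (trip %d)\n", i, thresholds[i], fw_index[i]);
	return 0;
}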
-static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device, - int trip, int *temp) -{ - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; - - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) - return -EINVAL; - - *temp = mvm->tz_device.temp_trips[trip] * 1000; - - return 0; -} - -static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device, - int trip, enum thermal_trip_type *type) -{ - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) - return -EINVAL; - - *type = THERMAL_TRIP_PASSIVE; - - return 0; -} - static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, int trip, int temp) { - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm *mvm = thermal_zone_device_priv(device); struct iwl_mvm_thermal_device *tzone; - int i, ret; - s16 temperature; + int ret; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) { - ret = -EINVAL; - goto out; - } - if ((temp / 1000) > S16_MAX) { ret = -EINVAL; goto out; } - temperature = (s16)(temp / 1000); tzone = &mvm->tz_device; - if (!tzone) { ret = -EIO; goto out; } - /* no updates*/ - if (tzone->temp_trips[trip] == temperature) { - ret = 0; - goto out; - } - - /* already existing temperature */ - for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { - if (tzone->temp_trips[i] == temperature) { - ret = -EINVAL; - goto out; - } - } - - tzone->temp_trips[trip] = temperature; - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); out: mutex_unlock(&mvm->mutex); return ret; } static struct thermal_zone_device_ops tzone_ops = { .get_temp = iwl_mvm_tzone_get_temp, - .get_trip_temp = iwl_mvm_tzone_get_trip_temp, - .get_trip_type = iwl_mvm_tzone_get_trip_type, .set_trip_temp = iwl_mvm_tzone_set_trip_temp, }; /* make all trips writable */ #define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1) static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) { int i, ret; char name[16]; static atomic_t counter = ATOMIC_INIT(0); if (!iwl_mvm_is_tt_in_fw(mvm)) { mvm->tz_device.tzone = NULL; return; } BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); - mvm->tz_device.tzone = thermal_zone_device_register(name, + mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name, + mvm->tz_device.trips, IWL_MAX_DTS_TRIPS, IWL_WRITABLE_TRIPS_MSK, mvm, &tzone_ops, NULL, 0, 0); if (IS_ERR(mvm->tz_device.tzone)) { IWL_DEBUG_TEMP(mvm, "Failed to register to thermal zone (err = %ld)\n", PTR_ERR(mvm->tz_device.tzone)); mvm->tz_device.tzone = NULL; return; } ret = thermal_zone_device_enable(mvm->tz_device.tzone); if (ret) { IWL_DEBUG_TEMP(mvm, "Failed to enable thermal zone\n"); thermal_zone_device_unregister(mvm->tz_device.tzone); return; } /* 0 is a valid temperature, * so initialize the array with S16_MIN which invalid temperature */ - for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) - mvm->tz_device.temp_trips[i] = S16_MIN; + for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) { + mvm->tz_device.trips[i].temperature = INT_MIN; + mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE; + } } static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1; return 0; } static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); *state = mvm->cooling_dev.cur_state; return 0; } static int 
iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, unsigned long new_state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); int ret; mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto unlock; } if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { ret = -EINVAL; goto unlock; } ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, new_state); unlock: mutex_unlock(&mvm->mutex); return ret; } static const struct thermal_cooling_device_ops tcooling_ops = { .get_max_state = iwl_mvm_tcool_get_max_state, .get_cur_state = iwl_mvm_tcool_get_cur_state, .set_cur_state = iwl_mvm_tcool_set_cur_state, }; static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) { char name[] = "iwlwifi"; if (!iwl_mvm_is_ctdp_supported(mvm)) return; BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); mvm->cooling_dev.cdev = thermal_cooling_device_register(name, mvm, &tcooling_ops); if (IS_ERR(mvm->cooling_dev.cdev)) { IWL_DEBUG_TEMP(mvm, "Failed to register to cooling device (err = %ld)\n", PTR_ERR(mvm->cooling_dev.cdev)); mvm->cooling_dev.cdev = NULL; return; } } static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) { if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone) return; IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); if (mvm->tz_device.tzone) { thermal_zone_device_unregister(mvm->tz_device.tzone); mvm->tz_device.tzone = NULL; } } static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) { if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev) return; IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); if (mvm->cooling_dev.cdev) { thermal_cooling_device_unregister(mvm->cooling_dev.cdev); mvm->cooling_dev.cdev = NULL; } } #endif /* CONFIG_THERMAL */ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n"); if (mvm->cfg->thermal_params) tt->params = *mvm->cfg->thermal_params; else tt->params = iwl_mvm_default_tt_params; tt->throttle = false; tt->dynamic_smps = false; tt->min_backoff = min_backoff; INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); #ifdef CONFIG_THERMAL iwl_mvm_cooling_device_register(mvm); iwl_mvm_thermal_zone_register(mvm); #endif mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE; } void iwl_mvm_thermal_exit(struct iwl_mvm *mvm) { if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE)) return; cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); #ifdef CONFIG_THERMAL iwl_mvm_cooling_device_unregister(mvm); iwl_mvm_thermal_zone_unregister(mvm); #endif mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE; } diff --git a/sys/contrib/dev/iwlwifi/mvm/tx.c b/sys/contrib/dev/iwlwifi/mvm/tx.c index ac96cff455b1..2407d0ee832e 100644 --- a/sys/contrib/dev/iwlwifi/mvm/tx.c +++ b/sys/contrib/dev/iwlwifi/mvm/tx.c @@ -1,2230 +1,2318 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include +#include #include #include #include "iwl-trans.h" #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" +#include "time-sync.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 
*addr, u16 tid, u16 ssn) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR sent to %pM, tid %d, ssn %d", addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_tx_info *info, bool amsdu) +static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, + bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; + u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) - u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; /* Do not compute checksum if already computed */ if (skb->ip_summed != CHECKSUM_PARTIAL) goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); goto out; } if (skb->protocol == htons(ETH_P_IP)) { protocol = ip_hdr(skb)->protocol; } else { #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_network_header(skb); unsigned int off = sizeof(*ipv6h); protocol = ipv6h->nexthdr; while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { struct ipv6_opt_hdr *hp; /* only supported extension headers */ if (protocol != NEXTHDR_ROUTING && protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); protocol = hp->nexthdr; off += ipv6_optlen(hp); } /* if we get here - protocol now should be TCP/UDP */ #endif } if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); goto out; } /* enable L4 csum */ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); /* * Set offset to IP header (snap). * We don't support tunneling so no need to take care of inner header. * Size is in words. */ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ if (skb->protocol == htons(ETH_P_IP) && amsdu) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } /* reset UDP/TCP header csum */ if (protocol == IPPROTO_TCP) tcp_hdr(skb)->check = 0; else udp_hdr(skb)->check = 0; +out: +#endif /* * mac header len should include IV, size is in words unless * the IV is added by the firmware like in WEP. * In new Tx API, the IV is always added by the firmware. 
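iwl_mvm_tx_csum() above packs several independent facts into one offload_assist word for the hardware: L3/L4 checksum enables, the IP header offset, and (just below) the MAC header length in 16-bit words plus the A-MSDU and padding flags. A standalone sketch of that style of bitfield packing; the shift and bit positions here are invented for illustration and are not the real TX_CMD_OFFLD_* values:

#include <stdint.h>
#include <stdio.h>

/* invented layout, for illustration only (not the real TX_CMD_OFFLD_*) */
#define OFFLD_L4_EN	(1u << 0)
#define OFFLD_L3_EN	(1u << 1)
#define OFFLD_AMSDU	(1u << 2)
#define OFFLD_PAD	(1u << 3)
#define OFFLD_MH_SHIFT	8	/* MAC header length, in 16-bit words */

int main(void)
{
	unsigned int hdrlen = 26;	/* QoS data header, bytes */
	int amsdu = 0;
	uint32_t offload = 0;

	offload |= OFFLD_L4_EN;				/* ask HW for L4 csum  */
	offload |= (hdrlen / 2) << OFFLD_MH_SHIFT;	/* header len in words */
	if (amsdu)
		offload |= OFFLD_AMSDU;
	else if (hdrlen % 4)
		offload |= OFFLD_PAD;	/* transport inserts padding later */

	printf("offload_assist = 0x%08x\n", offload);
	return 0;
}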
*/ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) mh_len += info->control.hw_key->iv_len; mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; -out: -#endif if (amsdu) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); else if (ieee80211_hdrlen(hdr->frame_control) % 4) /* padding is inserted later in transport */ offload_assist |= BIT(TX_CMD_OFFLD_PAD); return offload_assist; } -u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; - unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); - unsigned int csum_start = skb_checksum_start_offset(skb); - - offload_assist |= u32_encode_bits(hdrlen / 2, - IWL_TX_CMD_OFFLD_BZ_MH_LEN); - if (amsdu) - offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; - else if (hdrlen % 4) - /* padding is inserted later in transport */ - offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return offload_assist; - - offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | - IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; - - /* - * mac80211 will always calculate checksum in software for - * non-fast-xmit, and so we can only do offloaded checksum - * for fast-xmit frames. In this case, we always have the - * RFC 1042 header present. skb_checksum_start_offset() - * returns the offset from the beginning, but the hardware - * needs it from after the header & SNAP header. - */ - csum_start -= hdrlen + 8; - - offload_assist |= u32_encode_bits(csum_start, - IWL_TX_CMD_OFFLD_BZ_START_OFFS); - offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, - IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); - - return offload_assist; -} - -static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_tx_info *info, - bool amsdu) -{ - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) - return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); - return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); -} - /* * Sets most of the Tx cmd's fields */ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; bool amsdu = false; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || (ieee80211_is_probe_resp(fc) && !is_multicast_ether_addr(hdr->addr1))) tx_flags |= TX_CMD_FLG_ACK; else tx_flags &= ~TX_CMD_FLG_ACK; if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, ssn); } else { if (ieee80211_is_data(fc)) tx_cmd->tid_tspec = IWL_TID_NON_QOS; else tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; if 
(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) tx_flags |= TX_CMD_FLG_SEQ_CTL; else tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; else ac = tid_to_mac80211_ac[0]; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); else if (ieee80211_is_action(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && !is_multicast_ether_addr(hdr->addr1)) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; tx_cmd->offload_assist = - cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); + cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu)); } static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; if (sta && ieee80211_is_data(fc)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; } return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } +static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + u32 result; + + /* + * we only care about legacy/HT/VHT so far, so we can + * build in v1 and use iwl_new_rate_from_v1() + */ + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + u8 nss = ieee80211_rate_get_vht_nss(rate); + + result = RATE_MCS_VHT_MSK_V1; + result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK); + result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + result |= RATE_MCS_SGI_MSK_V1; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); + else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + result = RATE_MCS_HT_MSK_V1; + result |= u32_encode_bits(rate->idx, + RATE_HT_MCS_RATE_CODE_MSK_V1 | + RATE_HT_MCS_NSS_MSK_V1); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + result |= RATE_MCS_SGI_MSK_V1; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + if (info->flags & IEEE80211_TX_CTL_LDPC) + result 
|= RATE_MCS_LDPC_MSK_V1; + if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) + result |= RATE_MCS_STBC_MSK; + } else { + return 0; + } + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + return iwl_new_rate_from_v1(result); + return result; +} + static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { int rate_idx = -1; u8 rate_plcp; u32 rate_flags = 0; bool is_cck; - /* info->control is only relevant for non HW rate control */ - if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { + u32 result = iwl_mvm_get_inject_tx_rate(mvm, info); + + if (result) + return result; + rate_idx = info->control.rates[0].idx; + } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + /* info->control is only relevant for non HW rate control */ + /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, le16_to_cpu(fc), sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; + + /* For non 2 GHZ band, remap mac80211 rate indices into driver + * indices. + */ + if (info->band != NL80211_BAND_2GHZ || + (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); } /* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* - * For non 2 GHZ band, remap mac80211 rate - * indices into driver indices - */ - if (info->band != NL80211_BAND_2GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, + info, + info->control.vif); /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); /* Set CCK or OFDM flag */ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { if (!is_cck) rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; else rate_flags |= RATE_MCS_CCK_MSK; } else if (is_cck) { rate_flags |= RATE_MCS_CCK_MSK_V1; } return (u32)rate_plcp | rate_flags; } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } /* * Sets the fields in the Tx cmd that are rate related */ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) { tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; tx_cmd->rts_retry_limit = min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); } else if (ieee80211_is_back_req(fc)) { tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; } else { tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; } /* * for data packets, rate info 
comes from the table inside the fw. This * table is controlled by LINK_QUALITY commands */ - if (ieee80211_is_data(fc) && sta) { + if (likely(ieee80211_is_data(fc) && sta && + !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } } else if (ieee80211_is_back_req(fc)) { tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } /* Set the rate in the TX cmd */ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, u8 *crypto_hdr) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u64 pn; pn = atomic64_inc_return(&keyconf->tx_pn); crypto_hdr[0] = pn; crypto_hdr[2] = 0; crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); crypto_hdr[1] = pn >> 8; crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; } /* * Sets the fields in the Tx cmd that are crypto related */ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb_frag, int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; pn = atomic64_inc_return(&keyconf->tx_pn); ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; fallthrough; case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & TX_CMD_SEC_WEP_KEY_IDX_MSK); memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: type = TX_CMD_SEC_GCMP; fallthrough; case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having an old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; default: tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; } } /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); if (unlikely(!dev_cmd)) return NULL; dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? 
iwl_mvm_sta_from_mac80211(sta) : NULL; bool amsdu = false; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* * For data packets rate info comes from the fw. Only * set rate/antenna during connection establishment or in case * no station is given. */ if (!sta || !ieee80211_is_data(hdr->frame_control) || mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, hdr->frame_control); } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, info, amsdu); cmd->offload_assist = cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; - u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, - info, - amsdu); + u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, + info, amsdu); cmd->offload_assist = cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } goto out; } tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); out: return dev_cmd; } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, + struct iwl_mvm_vif_link_info *link, struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr) + struct sk_buff *skb) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); + struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: /* * Non-bufferable frames use the broadcast station, thus they * use the probe queue. * Also take care of the case where we send a deauth to a * station that we don't have, or similarly an association * response (with non-success status) for a station we can't * accept. * Also, disassociate frames might happen, particular with * reason 7 ("Class 3 frame received from nonassociated STA"). 
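 * In short, for the AP/ADHOC case handled below: such non-bufferable (or
 * deauth/disassoc) management frames end up on the per-link management
 * queue, and multicast frames without the order bit on the per-link CAB
 * queue.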
*/ if (ieee80211_is_mgmt(fc) && - (!ieee80211_is_bufferable_mmpdu(fc) || + (!ieee80211_is_bufferable_mmpdu(skb) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) - return mvm->probe_queue; + return link->mgmt_queue; if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && is_multicast_ether_addr(hdr->addr1)) - return mvmvif->cab_queue; + return link->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); - return mvm->probe_queue; + return link->mgmt_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; } } static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; struct iwl_probe_resp_data *resp_data; const u8 *ie; u8 *pos; u8 match[] = { (WLAN_OUI_WFA >> 16) & 0xff, (WLAN_OUI_WFA >> 8) & 0xff, WLAN_OUI_WFA & 0xff, WLAN_OUI_TYPE_WFA_P2P, }; rcu_read_lock(); - resp_data = rcu_dereference(mvmvif->probe_resp_data); + resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data); if (!resp_data) goto out; if (!resp_data->notif.noa_active) goto out; ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, mgmt->u.probe_resp.variable, skb->len - base_len, match, 4, 2); if (!ie) { IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); goto out; } if (skb_tailroom(skb) < resp_data->noa_len) { if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { IWL_ERR(mvm, "Failed to reallocate probe resp\n"); goto out; } } pos = skb_put(skb, resp_data->noa_len); *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* Set length of IE body (not including ID and length itself) */ *pos++ = resp_data->noa_len - 2; *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; *pos++ = WLAN_OUI_WFA & 0xff; *pos++ = WLAN_OUI_TYPE_WFA_P2P; memcpy(pos, &resp_data->notif.noa_attr, resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); out: rcu_read_unlock(); } int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) return -1; if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { + u32 link_id = u32_get_bits(info.control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + struct iwl_mvm_vif_link_info *link; + + if (link_id == IEEE80211_LINK_UNSPECIFIED) { + if (info.control.vif->active_links) + link_id = ffs(info.control.vif->active_links) - 1; + else + link_id = 0; + } + + link = mvmvif->link[link_id]; + if (WARN_ON(!link)) + return -1; + if 
(!ieee80211_is_data(hdr->frame_control)) - sta_id = mvmvif->bcast_sta.sta_id; + sta_id = link->bcast_sta.sta_id; else - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = link->mcast_sta.sta_id; - queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); + queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info, + skb); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & * STATION. * P2P uses the offchannel queue. * STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue. */ sta_id = mvm->aux_sta.sta_id; queue = mvm->aux_queue; } } if (queue < 0) { IWL_ERR(mvm, "No queue was found. Dropping TX\n"); return -1; } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } return 0; } unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; u8 ac = tid_to_mac80211_ac[tid]; + enum nl80211_band band; unsigned int txf; - int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); + unsigned int val; + int lmac; /* For HE redirect to trigger based fifos */ if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); /* * Don't send an AMSDU that will be longer than the TXF. * Add a security margin of 256 for the TX command + headers. * We also want to have the start of the next packet inside the * fifo to be able to send bursts. 
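 * For example (illustrative numbers only): with a station max_amsdu_len of
 * 11454 and an 8 KiB TX FIFO for this AC, the clamp below yields
 * min(11454, 8192 - 256) = 7936 bytes.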
*/ - return min_t(unsigned int, mvmsta->max_amsdu_len, + val = mvmsta->max_amsdu_len; + + if (hweight16(sta->valid_links) <= 1) { + if (sta->valid_links) { + struct ieee80211_bss_conf *link_conf; + unsigned int link = ffs(sta->valid_links) - 1; + + rcu_read_lock(); + link_conf = rcu_dereference(mvmsta->vif->link_conf[link]); + if (WARN_ON(!link_conf)) + band = NL80211_BAND_2GHZ; + else + band = link_conf->chandef.chan->band; + rcu_read_unlock(); + } else { + band = mvmsta->vif->bss_conf.chandef.chan->band; + } + + lmac = iwl_mvm_get_lmac_id(mvm, band); + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) { + /* for real MLO restrict to both LMACs if they exist */ + lmac = IWL_LMAC_5G_INDEX; + val = min_t(unsigned int, val, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); + lmac = IWL_LMAC_24G_INDEX; + } else { + lmac = IWL_LMAC_24G_INDEX; + } + + return min_t(unsigned int, val, mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); } #ifdef CONFIG_INET static int iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, netdev_features_t netdev_flags, struct sk_buff_head *mpdus_skb) { struct sk_buff *tmp, *next; struct ieee80211_hdr *hdr = (void *)skb->data; char cb[sizeof(skb->cb)]; u16 i = 0; unsigned int tcp_payload_len; unsigned int mss = skb_shinfo(skb)->gso_size; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); bool qos = ieee80211_is_data_qos(hdr->frame_control); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); next = skb_gso_segment(skb, netdev_flags); skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) consume_skb(skb); skb_list_walk_safe(next, tmp, next) { memcpy(tmp->cb, cb, sizeof(tmp->cb)); /* * Compute the length of all the data added for the A-MSDU. * This will be used to compute the length to write in the TX * command. We have: SNAP + IP + TCP for n -1 subframes and * ETH header for n subframes. */ tcp_payload_len = skb_tail_pointer(tmp) - skb_transport_header(tmp) - tcp_hdrlen(tmp) + tmp->data_len; if (ipv4) ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { skb_shinfo(tmp)->gso_size = mss; skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; } else { if (qos) { u8 *qc; if (ipv4) ip_send_check(ip_hdr(tmp)); qc = ieee80211_get_qos_ctl((void *)tmp->data); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } skb_shinfo(tmp)->gso_size = 0; } skb_mark_not_on_list(tmp); __skb_queue_tail(mpdus_skb, tmp); i++; } return 0; } static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; u16 snap_ip_tcp, pad; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || !mvmsta->amsdu_enabled) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. * ask stack to segment and checkum the generated MPDUs for us. 
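 * (Illustrative note: any extension header makes nexthdr in the fixed IPv6
 * header something other than IPPROTO_TCP, which is what the check below
 * relies on; dropping NETIF_F_CSUM_MASK from the features passed to
 * skb_gso_segment() is what asks the stack to checksum the resulting MPDUs
 * in software.)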
*/ if (skb->protocol == htons(ETH_P_IPV6) && ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { netdev_flags &= ~NETIF_F_CSUM_MASK; return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during an BA session. */ if ((info->flags & IEEE80211_TX_CTL_AMPDU && !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Take the min of ieee80211 station and mvm station */ max_amsdu_len = - min_t(unsigned int, sta->max_amsdu_len, + min_t(unsigned int, sta->cur->max_amsdu_len, iwl_mvm_max_amsdu_size(mvm, sta, tid)); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not * supported. This is a spec requirement (IEEE 802.11-2015 * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !sta->deflink.vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; pad = (4 - subf_len) & 0x3; /* * If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad. */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); if (sta->max_amsdu_subframes && num_subframes > sta->max_amsdu_subframes) num_subframes = sta->max_amsdu_subframes; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; /* * Make sure we have enough TBs for the A-MSDU: * 2 for each subframe * 1 more for each fragment * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > mvm->trans->max_skb_frags) num_subframes = 1; if (num_subframes > 1) *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { __skb_queue_tail(mpdus_skb, skb); return 0; } /* * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU. 
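 * For example (illustrative numbers only): with mss = 1460 and a plain
 * IPv4/TCP header pair, snap_ip_tcp = 8 + 20 + 20 = 48, so
 * subf_len = 14 + 48 + 1460 = 1522 and pad = 2; with max_amsdu_len = 7936
 * this gives num_subframes = (7936 + 2) / (1522 + 2) = 5, and gso_size is
 * temporarily raised to 5 * 1460 = 7300 so that each segment carries one
 * A-MSDU worth of payload.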
*/ return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { /* Impossible to get TSO with CONFIG_INET */ WARN_ON(1); return -1; } #endif /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; unsigned long now = jiffies; int tid; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) return true; } return false; } static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return; mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata->tx.airtime += airtime; } static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return -EINVAL; mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; return 0; } /* * Sets the fields in the Tx cmd that are crypto related. * * This function must be called with BHs disabled. */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u16 txq_id; bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, - sta, mvmsta->sta_id); + sta, mvmsta->deflink.sta_id); if (!dev_cmd) goto drop; /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses * we'll notify mac80211 when getting frame status */ info->flags &= ~IEEE80211_TX_STATUS_EOSP; spin_lock(&mvmsta->lock); /* nullfunc frames should go to the MGMT queue regardless of QOS, - * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default - * assignment of MGMT TID + * the conditions of !ieee80211_is_qos_nullfunc(fc) and + * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) goto drop_unlock_sta; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON, "Invalid internal 
agg state %d for TID %d", mvmsta->tid_data[tid].state, tid)) goto drop_unlock_sta; seq_number = mvmsta->tid_data[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; if (!iwl_mvm_has_new_tx_api(mvm)) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } - } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { + } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) && + !ieee80211_is_nullfunc(fc)) { tid = IWL_TID_NON_QOS; } txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { #if defined(__FreeBSD__) - IWL_DEBUG_TX(mvm, "fc %#06x sta_id %u tid %u txq_id %u mvm %p " - "skb %p { len %u } info %p sta %p\n", fc, mvmsta->sta_id, + IWL_DEBUG_TX(mvm, "fc %#06x tid %u txq_id %u mvm %p " + "skb %p { len %u } info %p sta %p\n", fc, tid, txq_id, mvm, skb, skb->len, info, sta); #endif iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; /* * If we have timed-out TIDs - schedule the worker that will * reconfig the queues and update them * * Note that the no lock is taken here in order to not serialize * the TX flow. This isn't dangerous because scheduling * mvm->add_stream_wk can't ruin the state, and if we DON'T * schedule it due to some race condition then next TX we get * here we will. */ if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED && iwl_mvm_txq_should_update(mvm, txq_id))) schedule_work(&mvm->add_stream_wk); } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", - mvmsta->sta_id, tid, txq_id, + mvmsta->deflink.sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); + /* + * The IV is introduced by the HW for new tx api, and it is not present + * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the + * IV for those devices. + */ if (ieee80211_is_data(fc)) iwl_mvm_mei_tx_copy_to_csme(mvm, skb, - info->control.hw_key ? + info->control.hw_key && + !iwl_mvm_has_new_tx_api(mvm) ? info->control.hw_key->iv_len : 0); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 
0 : tid)) goto drop; return 0; drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: - IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); + IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id, + tid); return -1; } int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; + struct sk_buff *orig_skb = skb; if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); __skb_queue_head_init(&mpdus_skbs); ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; - if (WARN_ON(skb_queue_empty(&mpdus_skbs))) - return ret; + WARN_ON(skb_queue_empty(&mpdus_skbs)); while (!skb_queue_empty(&mpdus_skbs)) { skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { + /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); - return ret; + /* skb here is not necessarily same as skb that entered this method, + * so free it explicitly. + */ + if (skb == orig_skb) + ieee80211_free_txskb(mvm->hw, skb); + else + kfree_skb(skb); + /* there was error, but we consumed skb one way or another, so return 0 */ + return 0; } } return 0; } static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; struct ieee80211_vif *vif = mvmsta->vif; u16 normalized_ssn; lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell * mac80211 so it knows we no longer have frames buffered for * the station on this TID (for the TIM bitmap calculation.) */ ieee80211_sta_set_buffered(sta, tid, false); } /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
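 * For example (illustrative values): ssn = 0x112 and next_reclaimed = 0x12
 * only compare equal once the ssn is masked down to 8 bits
 * (0x112 & 0xff = 0x12).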
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) return; switch (tid_data->state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue addBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IWL_EMPTYING_HW_QUEUE_DELBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; default: break; } } #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status) { #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: return "SUCCESS"; TX_STATUS_POSTPONE(DELAY); TX_STATUS_POSTPONE(FEW_BYTES); TX_STATUS_POSTPONE(BT_PRIO); TX_STATUS_POSTPONE(QUIET_PERIOD); TX_STATUS_POSTPONE(CALC_TTAK); TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); TX_STATUS_FAIL(SHORT_LIMIT); TX_STATUS_FAIL(LONG_LIMIT); TX_STATUS_FAIL(UNDERRUN); TX_STATUS_FAIL(DRAIN_FLOW); TX_STATUS_FAIL(RFKILL_FLUSH); TX_STATUS_FAIL(LIFE_EXPIRE); TX_STATUS_FAIL(DEST_PS); TX_STATUS_FAIL(HOST_ABORTED); TX_STATUS_FAIL(BT_RETRY); TX_STATUS_FAIL(STA_INVALID); TX_STATUS_FAIL(FRAG_DROPPED); TX_STATUS_FAIL(TID_DISABLE); TX_STATUS_FAIL(FIFO_FLUSHED); TX_STATUS_FAIL(SMALL_CF_POLL); TX_STATUS_FAIL(FW_DROP); TX_STATUS_FAIL(STA_COLOR_MISMATCH); } return "UNKNOWN"; #undef TX_STATUS_FAIL #undef TX_STATUS_POSTPONE } #endif /* CONFIG_IWLWIFI_DEBUG */ static int iwl_mvm_get_hwrate_chan_width(u32 chan_width) { switch (chan_width) { case RATE_MCS_CHAN_WIDTH_20: return 0; case RATE_MCS_CHAN_WIDTH_40: return IEEE80211_TX_RC_40_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_80: return IEEE80211_TX_RC_80_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_160: return IEEE80211_TX_RC_160_MHZ_WIDTH; default: return 0; } } void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; u32 rate = format == RATE_MCS_HT_MSK ? 
RATE_HT_MCS_INDEX(rate_n_flags) : rate_n_flags & RATE_MCS_CODE_MSK; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK); if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (format == RATE_MCS_HT_MSK) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate; } else if (format == RATE_MCS_VHT_MSK) { ieee80211_rate_set_vht(r, rate, - ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1); + FIELD_GET(RATE_MCS_NSS_MSK, + rate_n_flags) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else if (format == RATE_MCS_HE_MSK) { /* mac80211 cannot do this without ieee80211_tx_status_ext() * but it only matters for radiotap */ r->idx = 0; } else { r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, band); } } void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) r->flags |= IEEE80211_TX_RC_GREEN_FIELD; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1); if (rate_n_flags & RATE_MCS_SGI_MSK_V1) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1); + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else { r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, band); } } /* * translate ucode response to mac80211 tx status control values */ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, u32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, TX_CMD, 0) <= 6) rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, u32 status, __le16 frame_control) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) { enum iwl_fw_ini_time_point tp = IWL_FW_INI_TIME_POINT_TX_FAILED; if (ieee80211_is_action(frame_control)) tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED; iwl_dbg_tlv_time_point(&mvm->fwrt, tp, NULL); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_TX_STATUS); if (!trig) return; status_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) break; if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx status %d was received", status & TX_STATUS_MSK); break; } } /* * iwl_mvm_get_scd_ssn - returns the SSN of the SCD * @tx_resp: the Tx response from the fw (agg or non-agg) * * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since * it can't know that everything will go well until the end of the AMPDU, it * can't know in advance the number of MPDUs that will be sent in the current * batch. This is why it writes the agg Tx response while it fetches the MPDUs. * Hence, it can't know in advance what the SSN of the SCD will be at the end * of the batch. 
This is why the SSN of the SCD is written at the end of the * whole struct at a variable offset. This function knows how to cope with the * variable offset and returns the SSN of the SCD. */ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count) & 0xfff; } static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); /* struct iwl_mvm_tx_resp_v3 is almost the same */ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct sk_buff_head skbs; u8 skb_freed = 0; u8 lq_color; u16 next_reclaimed, seq_ctl; bool is_ndp = false; __skb_queue_head_init(&skbs); if (iwl_mvm_has_new_tx_api(mvm)) txq_id = le16_to_cpu(tx_resp->tx_queue); seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_FIFO_FLUSHED: case TX_STATUS_FAIL_DRAIN_FLOW: flushed = true; break; case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status */ IWL_ERR_LIMIT(mvm, "FW reported TX filtered, status=0x%x, FC=0x%x\n", status, le16_to_cpu(hdr->frame_control)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: break; } if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && ieee80211_is_mgmt(hdr->frame_control)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); /* * If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged * before * */ if (skb_freed > 1) info->flags |= IEEE80211_TX_STAT_ACK; iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(mvm->fw, le32_to_cpu(tx_resp->initial_rate), info); /* Don't assign the converted initial_rate, because driver * TLC uses this and doesn't support the new FW rate */ info->status.status_driver_data[1] = (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ if (ieee80211_is_back_req(hdr->frame_control)) seq_ctl = 0; else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); if (unlikely(!seq_ctl)) { /* * If it is an NDP, we can't update next_reclaim since * its sequence control is 0. 
Note that for that same * reason, NDPs are never sent to A-MPDU'able queues * so that we can never have more than one freed frame * for a single Tx resonse (see WARN_ON below). */ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) is_ndp = true; } /* * TODO: this is not accurate if we are freeing more than one * packet. */ info->status.tx_time = le16_to_cpu(tx_resp->wireless_media_time); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); info->status.status_driver_data[0] = RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); - ieee80211_tx_status(mvm->hw, skb); + if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) + ieee80211_tx_status(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use * the ssn since: ssn = wifi seq_num % 256. * The seq_ctl is the sequence control of the packet to which * this Tx response relates. But if there is a hole in the * bitmap of the BA we received, this Tx response may allow to * reclaim the hole and all the subsequent packets that were * already acked. In that case, seq_ctl != ssn, and the next * packet to be reclaimed will be ssn and not seq_ctl. In that * case, several packets will be reclaimed even if * frame_count = 1. * * The ssn is the index (% 256) of the latest packet that has * treated (acked / dropped) + 1. */ next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", txq_id, iwl_mvm_get_tx_fail_reason(status), status); IWL_DEBUG_TX_REPLY(mvm, "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", le32_to_cpu(tx_resp->initial_rate), tx_resp->failure_frame, SEQ_TO_INDEX(sequence), ssn, next_reclaimed, seq_ctl); rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * sta can't be NULL otherwise it'd mean that the sta has been freed in * the firmware while we still have packets for it in the Tx queues. */ if (WARN_ON_ONCE(!sta)) goto out; if (!IS_ERR(sta)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); if (sta->wme && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); } else { IWL_DEBUG_TX_REPLY(mvm, "NDP - don't update next_reclaimed\n"); } iwl_mvm_check_ratid_empty(mvm, sta, tid); if (mvmsta->sleep_tx_count) { mvmsta->sleep_tx_count--; if (mvmsta->sleep_tx_count && !iwl_mvm_tid_queued(mvm, tid_data)) { /* * The number of frames in the queue * dropped to 0 even if we sent less * frames than we thought we had on the * Tx queue. * This means we had holes in the BA * window that we just filled, ask * mac80211 to send EOSP since the * firmware won't know how to do that. * Send NDP and the firmware will send * EOSP notification that will trigger * a call to ieee80211_sta_eosp(). 
*/ send_eosp_ndp = true; } } spin_unlock_bh(&mvmsta->lock); if (send_eosp_ndp) { iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, IEEE80211_FRAME_RELEASE_UAPSD, 1, tid, false, false); mvmsta->sleep_tx_count = 0; ieee80211_send_eosp_nullfunc(sta, tid); } } if (mvmsta->next_status_eosp) { mvmsta->next_status_eosp = false; ieee80211_sta_eosp(sta); } } out: rcu_read_unlock(); } #ifdef CONFIG_IWLWIFI_DEBUG #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x static const char *iwl_get_agg_tx_status(u16 status) { switch (status & AGG_TX_STATE_STATUS_MSK) { AGG_TX_STATE_(TRANSMITTED); AGG_TX_STATE_(UNDERRUN); AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); AGG_TX_STATE_(TEST_BAD_CRC32); AGG_TX_STATE_(RESPONSE); AGG_TX_STATE_(DUMP_TX); AGG_TX_STATE_(DELAY_TX); } return "UNKNOWN"; } static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; bool tirgger_timepoint = false; for (i = 0; i < tx_resp->frame_count; i++) { u16 fstatus = le16_to_cpu(frame_status[i].status); /* In case one frame wasn't transmitted trigger time point */ tirgger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) != AGG_TX_STATE_TRANSMITTED); IWL_DEBUG_TX_REPLY(mvm, "status %s (0x%04x), try-count (%d) seq (0x%x)\n", iwl_get_agg_tx_status(fstatus), fstatus & AGG_TX_STATE_STATUS_MSK, (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS, le16_to_cpu(frame_status[i].sequence)); } if (tirgger_timepoint) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_TX_FAILED, NULL); } #else static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) {} #endif /* CONFIG_IWLWIFI_DEBUG */ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); struct ieee80211_sta *sta; if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(!sta || !sta->wme)) { + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { rcu_read_unlock(); return; } if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); } void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); } static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int txq, int index, struct ieee80211_tx_info *tx_info, u32 rate, bool is_flush) { struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data = NULL; 
struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta = NULL; struct sk_buff *skb; int freed; if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || tid > IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(!sta)) { rcu_read_unlock(); return; } __skb_queue_head_init(&reclaimed_skbs); /* * Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* Packet was transmitted successfully, failures come as single * frames because before failing a frame the firmware transmits * it without aggregation at least once. */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; } /* * It's possible to get a BA response after invalidating the rcu (rcu is * invalidated in order to prevent new Tx from being sent, but there may * be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. */ if (IS_ERR(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id != txq) { IWL_ERR(mvm, "invalid reclaim request: Q %d, tid %d\n", tid_data->txq_id, tid); rcu_read_unlock(); return; } spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); freed = 0; /* pack lq color from tid_data along the reduced txp */ tx_info->status.status_driver_data[0] = RS_DRV_DATA_PACK(tid_data->lq_color, tx_info->status.status_driver_data[0]); tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (!is_flush) { if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); } /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ if (freed == 1) { info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &tx_info->status, sizeof(tx_info->status)); iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); } } spin_unlock_bh(&mvmsta->lock); /* We got a BA notif with 0 acked or scd_ssn didn't progress which is * possible (i.e. first MPDU in the aggregation wasn't acked) * Still it's important to update RS about sent vs. acked. */ - if (!is_flush && skb_queue_empty(&reclaimed_skbs)) { + if (!is_flush && skb_queue_empty(&reclaimed_skbs) && + !iwl_mvm_has_tlc_offload(mvm)) { struct ieee80211_chanctx_conf *chanctx_conf = NULL; + /* no TLC offload, so non-MLD mode */ if (mvmsta->vif) chanctx_conf = - rcu_dereference(mvmsta->vif->chanctx_conf); + rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) goto out; tx_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); - if (!iwl_mvm_has_tlc_offload(mvm)) { - IWL_DEBUG_TX_REPLY(mvm, - "No reclaim. Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); - } + IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); + iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); } out: rcu_read_unlock(); while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } } void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); int sta_id, tid, txq, index; struct ieee80211_tx_info ba_info = {}; struct iwl_mvm_ba_notif *ba_notif; struct iwl_mvm_tid_data *tid_data; struct iwl_mvm_sta *mvmsta; ba_info.flags = IEEE80211_TX_STAT_AMPDU; if (iwl_mvm_has_new_tx_api(mvm)) { struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data; u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); u16 tfd_cnt; int i; - if (unlikely(sizeof(*ba_res) > pkt_len)) + if (IWL_FW_CHECK(mvm, sizeof(*ba_res) > pkt_len, + "short BA notification (%d)\n", pkt_len)) return; sta_id = ba_res->sta_id; ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); ba_info.status.tx_time = (u16)le32_to_cpu(ba_res->wireless_time); ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); - if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len) + if (!tfd_cnt) + return; + + if (IWL_FW_CHECK(mvm, + struct_size(ba_res, tfd, tfd_cnt) > pkt_len, + "short BA notification (tfds:%d, size:%d)\n", + tfd_cnt, pkt_len)) return; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); /* * It's possible to get a BA response after invalidating the rcu * (rcu is invalidated in order to prevent new Tx from being * sent, but there may be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. 
*/ /* Free per TID */ for (i = 0; i < tfd_cnt; i++) { struct iwl_mvm_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; tid = ba_tfd->tid; if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; if (mvmsta) mvmsta->tid_data[i].lq_color = lq_color; iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate), false); } if (mvmsta) iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); rcu_read_unlock(); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed), le16_to_cpu(ba_res->done)); return; } ba_notif = (void *)pkt->data; sta_id = ba_notif->sta_id; tid = ba_notif->tid; /* "flow" corresponds to Tx queue */ txq = le16_to_cpu(ba_notif->scd_flow); /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ index = le16_to_cpu(ba_notif->scd_ssn); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); - if (WARN_ON_ONCE(!mvmsta)) { + if (IWL_FW_CHECK(mvm, !mvmsta, + "invalid STA ID %d in BA notif\n", + sta_id)) { rcu_read_unlock(); return; } tid_data = &mvmsta->tid_data[tid]; ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; ba_info.status.ampdu_len = ba_notif->txed; ba_info.status.tx_time = tid_data->tx_time; ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_notif->reduced_txp; rcu_read_unlock(); iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, tid_data->rate_n_flags, false); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); IWL_DEBUG_TX_REPLY(mvm, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), le64_to_cpu(ba_notif->bitmap), txq, index, ba_notif->txed, ba_notif->txed_2_done); IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", ba_notif->reduced_txp); } /* * Note that there are transports that buffer frames before they reach * the firmware. This means that after flush_tx_path is called, the * queue might not be empty. 
The race-free way to handle this is to: * 1) set the station as draining * 2) flush the Tx path * 3) wait for the transport queues to be empty */ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk) { int ret; struct iwl_tx_path_flush_cmd_v1 flush_cmd = { .queues_ctl = cpu_to_le32(tfd_msk), .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0, sizeof(flush_cmd), &flush_cmd); if (ret) IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids) { int ret; struct iwl_tx_path_flush_cmd_rsp *rsp; struct iwl_tx_path_flush_cmd flush_cmd = { .sta_id = cpu_to_le32(sta_id), .tid_mask = cpu_to_le16(tids), }; struct iwl_host_cmd cmd = { .id = TXPATH_FLUSH, .len = { sizeof(flush_cmd), }, .data = { &flush_cmd, }, }; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) cmd.flags |= CMD_WANT_SKB; IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n", sta_id, tids); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } if (cmd.flags & CMD_WANT_SKB) { int i; int num_flushed_queues; if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) { ret = -EIO; goto free_rsp; } rsp = (void *)cmd.resp_pkt->data; if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id, "sta_id %d != rsp_sta_id %d", sta_id, le16_to_cpu(rsp->sta_id))) { ret = -EIO; goto free_rsp; } num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP, "num_flushed_queues %d", num_flushed_queues)) { ret = -EIO; goto free_rsp; } for (i = 0; i < num_flushed_queues; i++) { struct ieee80211_tx_info tx_info = {}; struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; int tid = le16_to_cpu(queue_info->tid); int read_before = le16_to_cpu(queue_info->read_before_flush); int read_after = le16_to_cpu(queue_info->read_after_flush); int queue_num = le16_to_cpu(queue_info->queue_num); if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; IWL_DEBUG_TX_QUEUES(mvm, "tid %d queue_id %d read-before %d read-after %d\n", tid, queue_num, read_before, read_after); iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after, &tx_info, 0, true); } free_rsp: iwl_free_resp(&cmd); } return ret; } int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) { - struct iwl_mvm_int_sta *int_sta = sta; - struct iwl_mvm_sta *mvm_sta = sta; + u32 sta_id, tfd_queue_msk; - BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != - offsetof(struct iwl_mvm_sta, sta_id)); + if (internal) { + struct iwl_mvm_int_sta *int_sta = sta; - if (iwl_mvm_has_new_tx_api(mvm)) - return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff); + sta_id = int_sta->sta_id; + tfd_queue_msk = int_sta->tfd_queue_msk; + } else { + struct iwl_mvm_sta *mvm_sta = sta; - if (internal) - return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk); + sta_id = mvm_sta->deflink.sta_id; + tfd_queue_msk = mvm_sta->tfd_queue_msk; + } + + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff); - return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk); + return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk); } diff --git a/sys/contrib/dev/iwlwifi/mvm/utils.c b/sys/contrib/dev/iwlwifi/mvm/utils.c index 5370cec8140a..34e554acd72b 100644 --- a/sys/contrib/dev/iwlwifi/mvm/utils.c +++ b/sys/contrib/dev/iwlwifi/mvm/utils.c @@ -1,1142 
+1,1207 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #if defined(__FreeBSD__) #include #endif #include #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" #include "mvm.h" #include "fw/api/rs.h" #include "fw/img.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless * CMD_WANT_SKB is set in cmd->flags. */ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) { int ret; #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Synchronous commands from this op-mode must hold * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ if (!(cmd->flags & CMD_ASYNC)) lockdep_assert_held(&mvm->mutex); ret = iwl_trans_send_cmd(mvm->trans, cmd); /* * If the caller wants the SKB, then don't hide any problems, the * caller might access the response buffer which will be NULL if * the command failed. */ if (cmd->flags & CMD_WANT_SKB) return ret; /* * Silently ignore failures if RFKILL is asserted or * we are in the suspend/resume process */ if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN) return 0; return ret; } int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, .flags = flags, }; return iwl_mvm_send_cmd(mvm, &cmd); } /* * We assume that the caller set the status to the success value */ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status) { struct iwl_rx_packet *pkt; struct iwl_cmd_response *resp; int ret, resp_len; lockdep_assert_held(&mvm->mutex); #if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP) if (WARN_ON(mvm->d3_test_active)) return -EIO; #endif /* * Only synchronous commands can wait for status, * we use WANT_SKB so the caller can't. */ if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB), "cmd flags %x", cmd->flags)) return -EINVAL; cmd->flags |= CMD_WANT_SKB; ret = iwl_trans_send_cmd(mvm->trans, cmd); if (ret == -ERFKILL) { /* * The command failed because of RFKILL, don't update * the status, leave it as success and return 0. */ return 0; } else if (ret) { return ret; } pkt = cmd->resp_pkt; resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { ret = -EIO; goto out_free_resp; } resp = (void *)pkt->data; *status = le32_to_cpu(resp->status); out_free_resp: iwl_free_resp(cmd); return ret; } /* * We assume that the caller set the status to the success value */ int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status) { struct iwl_host_cmd cmd = { .id = id, .len = { len, }, .data = { data, }, }; return iwl_mvm_send_cmd_status(mvm, &cmd, status); } int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; bool is_LB = band == NL80211_BAND_2GHZ; if (format == RATE_MCS_LEGACY_OFDM_MSK) return is_LB ? rate + IWL_FIRST_OFDM_RATE : rate; /* CCK is not allowed in HB */ return is_LB ?
rate : -1; } int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band) { int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1; int idx; int band_offset = 0; /* Legacy rate format, search for match in table */ if (band != NL80211_BAND_2GHZ) band_offset = IWL_FIRST_OFDM_RATE; for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) if (iwl_fw_rate_idx_to_plcp(idx) == rate) return idx - band_offset; return -1; } u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx) { if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8) /* In the new rate format, legacy rates are indexed: * 0 - 3 for CCK and 0 - 7 for OFDM. */ return (rate_idx >= IWL_FIRST_OFDM_RATE ? rate_idx - IWL_FIRST_OFDM_RATE : rate_idx); return iwl_fw_rate_idx_to_plcp(rate_idx); } u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac) { static const u8 mac80211_ac_to_ucode_ac[] = { AC_VO, AC_VI, AC_BE, AC_BK }; return mac80211_ac_to_ucode_ac[ac]; } void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n", le32_to_cpu(err_resp->error_type), err_resp->cmd_id); IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service)); IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", le64_to_cpu(err_resp->timestamp)); } /* * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h. * The parameter should also be a combination of ANT_[ABC]. */ u8 first_antenna(u8 mask) { BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */ if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */ return BIT(0); return BIT(ffs(mask) - 1); } #define MAX_ANT_NUM 2 /* * Toggles between TX antennas to send the probe request on. * Receives the bitmask of valid TX antennas and the *index* used * for the last TX, and returns the next valid *index* to use. * In order to set it in the tx_cmd, must do BIT(idx). */ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) { u8 ind = last_idx; int i; for (i = 0; i < MAX_ANT_NUM; i++) { ind = (ind + 1) % MAX_ANT_NUM; if (valid & BIT(ind)) return ind; } WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); return last_idx; } /** * iwl_mvm_send_lq_cmd() - Send link quality command * @mvm: Driver data. * @lq: Link quality command to send. * * The link quality command is sent as the last step of station creation. * This is the special case in which init is set and we call a callback in * this case to clear the state indicating that station creation is in * progress. */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) { struct iwl_host_cmd cmd = { .id = LQ_CMD, .len = { sizeof(struct iwl_lq_cmd), }, .flags = CMD_ASYNC, .data = { lq, }, }; if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA || iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; return iwl_mvm_send_cmd(mvm, &cmd); } /** * iwl_mvm_update_smps - Get a request to change the SMPS mode * @mvm: Driver data. * @vif: Pointer to the ieee80211_vif structure * @req_type: The part of the driver that calls for a change. * @smps_request: The request to change the SMPS mode. + * @link_id: for MLO link_id, otherwise 0 (deflink) * * Get a request to change the SMPS mode, * and change it according to all other requests in the driver.
*/ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request) + enum ieee80211_smps_mode smps_request, + unsigned int link_id) { struct iwl_mvm_vif *mvmvif; enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; int i; lockdep_assert_held(&mvm->mutex); /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */ if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return; if (vif->type != NL80211_IFTYPE_STATION) return; mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmvif->smps_requests[req_type] = smps_request; + + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return; + + mvmvif->link[link_id]->smps_requests[req_type] = smps_request; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { + if (mvmvif->link[link_id]->smps_requests[i] == + IEEE80211_SMPS_STATIC) { smps_mode = IEEE80211_SMPS_STATIC; break; } - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + if (mvmvif->link[link_id]->smps_requests[i] == + IEEE80211_SMPS_DYNAMIC) smps_mode = IEEE80211_SMPS_DYNAMIC; } - ieee80211_request_smps(vif, 0, smps_mode); + /* SMPS is disabled in eSR */ + if (mvmvif->esr_active) + smps_mode = IEEE80211_SMPS_OFF; + + ieee80211_request_smps(vif, link_id, smps_mode); +} + +void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request) +{ + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) + iwl_mvm_update_smps(mvm, vif, req_type, smps_request, + link_id); + rcu_read_unlock(); } static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION); return true; } int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { .flags = clear ? 
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, }; struct iwl_host_cmd cmd = { .id = STATISTICS_CMD, .len[0] = sizeof(scmd), .data[0] = &scmd, }; int ret; /* From version 15 - STATISTICS_NOTIFICATION, the reply for * STATISTICS_CMD is empty, and the response is with * STATISTICS_NOTIFICATION notification */ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0) < 15) { cmd.flags = CMD_WANT_SKB; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) return ret; iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); iwl_free_resp(&cmd); } else { struct iwl_notification_wait stats_wait; static const u16 stats_complete[] = { STATISTICS_NOTIFICATION, }; iwl_init_notification_wait(&mvm->notif_wait, &stats_wait, stats_complete, ARRAY_SIZE(stats_complete), iwl_wait_stats_complete, NULL); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { iwl_remove_notification(&mvm->notif_wait, &stats_wait); return ret; } /* 200ms should be enough for FW to collect data from all * LMACs and send STATISTICS_NOTIFICATION to host */ ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5); if (ret) return ret; } if (clear) iwl_mvm_accu_radio_stats(mvm); return 0; } void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm) { mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time; mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time; mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf; mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan; } struct iwl_mvm_diversity_iter_data { struct iwl_mvm_phy_ctxt *ctxt; bool result; }; static void iwl_mvm_diversity_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_diversity_iter_data *data = _data; - int i; + int i, link_id; - if (mvmvif->phy_ctxt != data->ctxt) - return; + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || - mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) { - data->result = false; - break; + if (link_info->phy_ctxt != data->ctxt) + continue; + + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC || + link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) { + data->result = false; + break; + } } } } bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) { struct iwl_mvm_diversity_iter_data data = { .ctxt = ctxt, .result = true, }; lockdep_assert_held(&mvm->mutex); if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) return false; if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1) return false; if (mvm->cfg->rx_with_siso_diversity) return false; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_diversity_iter, &data); return data.result; } void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id) { struct iwl_mac_low_latency_cmd cmd = { .mac_id = cpu_to_le32(mac_id) }; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) return; if (low_latency) { /* currently we don't care about the direction */ cmd.low_latency_rx = 1; cmd.low_latency_tx = 1; } if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "Failed to send low latency command\n"); } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum 
iwl_mvm_low_latency_cause cause) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; bool prev; lockdep_assert_held(&mvm->mutex); prev = iwl_mvm_vif_low_latency(mvmvif); iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause); low_latency = iwl_mvm_vif_low_latency(mvmvif); if (low_latency == prev) return 0; iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id); res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; iwl_mvm_bt_coex_vif_change(mvm); return iwl_mvm_power_update_mac(mvm); } struct iwl_mvm_low_latency_iter { bool result; bool result_per_band[NUM_NL80211_BANDS]; }; static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_low_latency_iter *result = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); enum nl80211_band band; if (iwl_mvm_vif_low_latency(mvmvif)) { result->result = true; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; - band = mvmvif->phy_ctxt->channel->band; + band = mvmvif->deflink.phy_ctxt->channel->band; result->result_per_band[band] = true; } } bool iwl_mvm_low_latency(struct iwl_mvm *mvm) { struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ll_iter, &data); return data.result; } bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band) { struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ll_iter, &data); return data.result_per_band[band]; } struct iwl_bss_iter_data { struct ieee80211_vif *vif; bool error; }; static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_bss_iter_data *data = _data; if (vif->type != NL80211_IFTYPE_STATION || vif->p2p) return; if (data->vif) { data->error = true; return; } data->vif = vif; } struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) { struct iwl_bss_iter_data bss_iter_data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bss_iface_iterator, &bss_iter_data); if (bss_iter_data.error) { IWL_ERR(mvm, "More than one managed interface active!\n"); return ERR_PTR(-EINVAL); } return bss_iter_data.vif; } struct iwl_bss_find_iter_data { struct ieee80211_vif *vif; u32 macid; }; static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_bss_find_iter_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (mvmvif->id == data->macid) data->vif = vif; } struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid) { struct iwl_bss_find_iter_data data = { .macid = macid, }; lockdep_assert_held(&mvm->mutex); ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bss_find_iface_iterator, &data); return data.vif; } struct iwl_sta_iter_data { bool assoc; }; static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_sta_iter_data *data = _data; if (vif->type != NL80211_IFTYPE_STATION) return; - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) data->assoc = true; } bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm) { struct iwl_sta_iter_data data = { .assoc = false, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_sta_iface_iterator, &data); return data.assoc; } unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool 
cmd_q) { struct iwl_fw_dbg_trigger_tlv *trigger; struct iwl_fw_dbg_trigger_txq_timer *txq_timer; unsigned int default_timeout = cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->trans->trans_cfg->base_params->wd_timeout; if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) { /* * We can't know when the station is asleep or awake, so we * must disable the queue hang detection. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) && vif && vif->type == NL80211_IFTYPE_AP) return IWL_WATCHDOG_DISABLED; return default_timeout; } trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS); txq_timer = (void *)trigger->data; if (tdls) return le32_to_cpu(txq_timer->tdls); if (cmd_q) return le32_to_cpu(txq_timer->command_queue); if (WARN_ON(!vif)) return default_timeout; switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_ADHOC: return le32_to_cpu(txq_timer->ibss); case NL80211_IFTYPE_STATION: return le32_to_cpu(txq_timer->bss); case NL80211_IFTYPE_AP: return le32_to_cpu(txq_timer->softap); case NL80211_IFTYPE_P2P_CLIENT: return le32_to_cpu(txq_timer->p2p_client); case NL80211_IFTYPE_P2P_GO: return le32_to_cpu(txq_timer->p2p_go); case NL80211_IFTYPE_P2P_DEVICE: return le32_to_cpu(txq_timer->p2p_device); case NL80211_IFTYPE_MONITOR: return default_timeout; default: WARN_ON(1); return mvm->trans->trans_cfg->base_params->wd_timeout; } } void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) goto out; trig_mlme = (void *)trig->data; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) goto out; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); out: ieee80211_connection_loss(vif); } void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Frame from %pM timed out, tid %d", sta->addr, tid); } u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed) { if (!elapsed) return 0; return (100 * airtime / elapsed) / USEC_PER_MSEC; } static enum iwl_mvm_traffic_load iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) { u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed); if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH) return IWL_MVM_TRAFFIC_HIGH; if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH) return IWL_MVM_TRAFFIC_MEDIUM; return IWL_MVM_TRAFFIC_LOW; } static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; if (mvmvif->id >= NUM_MAC_INDEX_DRIVER) return; low_latency = mvm->tcm.result.low_latency[mvmvif->id]; if (!mvm->tcm.result.change[mvmvif->id] && prev == low_latency) { iwl_mvm_update_quotas(mvm, false, NULL); return; } if (prev != low_latency) { /* this sends traffic load and updates quota as well */ iwl_mvm_update_low_latency(mvm, vif, low_latency, LOW_LATENCY_TRAFFIC); } else { iwl_mvm_update_quotas(mvm, false, NULL); } } static void 
iwl_mvm_tcm_results(struct iwl_mvm *mvm) { mutex_lock(&mvm->mutex); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tcm_iter, mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); mutex_unlock(&mvm->mutex); } static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) { struct iwl_mvm *mvm; struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, uapsd_nonagg_detected_wk.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); mvm = mvmvif->mvm; if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) return; /* remember that this AP is broken */ memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr, vif->bss_conf.bssid, ETH_ALEN); mvm->uapsd_noagg_bssid_write_idx++; if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN) mvm->uapsd_noagg_bssid_write_idx = 0; iwl_mvm_connection_loss(mvm, vif, "AP isn't using AMPDU with uAPSD enabled"); } static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type != NL80211_IFTYPE_STATION) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; - if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && - !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) + if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd) return; if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) return; mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; IWL_INFO(mvm, "detected AP should do aggregation but isn't, likely due to U-APSD\n"); - schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); + schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, + 15 * HZ); } static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, unsigned int elapsed, int mac) { u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; u64 tpt; unsigned long rate; struct ieee80211_vif *vif; rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || mvm->tcm.data[mac].uapsd_nonagg_detect.detected) return; if (iwl_mvm_has_new_rx_api(mvm)) { tpt = 8 * bytes; /* kbps */ do_div(tpt, elapsed); rate *= 1000; /* kbps */ if (tpt < 22 * rate / 100) return; } else { /* * the rate here is actually the threshold, in 100Kbps units, * so do the needed conversion from bytes to 100Kbps: * 100kb = bits / (100 * 1000), * 100kbps = 100kb / (msecs / 1000) == * (bits / (100 * 1000)) / (msecs / 1000) == * bits / (100 * msecs) */ tpt = (8 * bytes); do_div(tpt, elapsed * 100); if (tpt < rate) return; } rcu_read_lock(); vif = rcu_dereference(mvm->vif_id_to_mac[mac]); if (vif) iwl_mvm_uapsd_agg_disconnect(mvm, vif); rcu_read_unlock(); } static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 *band = _data; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; - band[mvmvif->id] = mvmvif->phy_ctxt->channel->band; + band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band; } static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, unsigned long ts, bool 
handle_uapsd) { unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); unsigned int uapsd_elapsed = jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); u32 total_airtime = 0; u32 band_airtime[NUM_NL80211_BANDS] = {0}; u32 band[NUM_MAC_INDEX_DRIVER] = {0}; int ac, mac, i; bool low_latency = false; enum iwl_mvm_traffic_load load, band_load; bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); if (handle_ll) mvm->tcm.ll_ts = ts; if (handle_uapsd) mvm->tcm.uapsd_nonagg_ts = ts; mvm->tcm.result.elapsed = elapsed; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_tcm_iterator, &band); for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; u32 vo_vi_pkts = 0; u32 airtime = mdata->rx.airtime + mdata->tx.airtime; total_airtime += airtime; band_airtime[band[mac]] += airtime; load = iwl_mvm_tcm_load(mvm, airtime, elapsed); mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; mvm->tcm.result.load[mac] = load; mvm->tcm.result.airtime[mac] = airtime; for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++) vo_vi_pkts += mdata->rx.pkts[ac] + mdata->tx.pkts[ac]; /* enable immediately with enough packets but defer disabling */ if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH) mvm->tcm.result.low_latency[mac] = true; else if (handle_ll) mvm->tcm.result.low_latency[mac] = false; if (handle_ll) { /* clear old data */ memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); } low_latency |= mvm->tcm.result.low_latency[mac]; if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed, mac); /* clear old data */ if (handle_uapsd) mdata->uapsd_nonagg_detect.rx_bytes = 0; memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); } load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); mvm->tcm.result.global_load = load; for (i = 0; i < NUM_NL80211_BANDS; i++) { band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed); mvm->tcm.result.band_load[i] = band_load; } /* * If the current load isn't low we need to force re-evaluation * in the TCM period, so that we can return to low load if there * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get * triggered by traffic). */ if (load != IWL_MVM_TRAFFIC_LOW) return MVM_TCM_PERIOD; /* * If low-latency is active we need to force re-evaluation after * (the longer) MVM_LL_PERIOD, so that we can disable low-latency * when there's no traffic at all. */ if (low_latency) return MVM_LL_PERIOD; /* * Otherwise, we don't need to run the work struct because we're * in the default "idle" state - traffic indication is low (which * also covers the "no traffic" case) and low-latency is disabled * so there's no state that may need to be disabled when there's * no traffic at all. * * Note that this has no impact on the regular scheduling of the * updates triggered by traffic - those happen whenever one of the * two timeouts expire (if there's traffic at all.) 
*/ return 0; } void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) { unsigned long ts = jiffies; bool handle_uapsd = time_after(ts, mvm->tcm.uapsd_nonagg_ts + msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD)); spin_lock(&mvm->tcm.lock); if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { spin_unlock(&mvm->tcm.lock); return; } spin_unlock(&mvm->tcm.lock); if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { mutex_lock(&mvm->mutex); if (iwl_mvm_request_statistics(mvm, true)) handle_uapsd = false; mutex_unlock(&mvm->mutex); } spin_lock(&mvm->tcm.lock); /* re-check if somebody else won the recheck race */ if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { /* calculate statistics */ unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, handle_uapsd); /* the memset needs to be visible before the timestamp */ smp_mb(); mvm->tcm.ts = ts; if (work_delay) schedule_delayed_work(&mvm->tcm.work, work_delay); } spin_unlock(&mvm->tcm.lock); iwl_mvm_tcm_results(mvm); } void iwl_mvm_tcm_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, tcm.work); iwl_mvm_recalc_tcm(mvm); } void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) { spin_lock_bh(&mvm->tcm.lock); mvm->tcm.paused = true; spin_unlock_bh(&mvm->tcm.lock); if (with_cancel) cancel_delayed_work_sync(&mvm->tcm.work); } void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) { int mac; bool low_latency = false; spin_lock_bh(&mvm->tcm.lock); mvm->tcm.ts = jiffies; mvm->tcm.ll_ts = jiffies; for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); if (mvm->tcm.result.low_latency[mac]) low_latency = true; } /* The TCM data needs to be reset before "paused" flag changes */ smp_mb(); mvm->tcm.paused = false; /* * if the current load is not low or low latency is active, force * re-evaluation to cover the case of no traffic. 
*/ if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); else if (low_latency) schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); spin_unlock_bh(&mvm->tcm.lock); } void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk, iwl_mvm_tcm_uapsd_nonagg_detected_wk); } void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); } u32 iwl_mvm_get_systime(struct iwl_mvm *mvm) { u32 reg_addr = DEVICE_SYSTEM_TIME_REG; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000 && mvm->trans->cfg->gp2_reg_addr) reg_addr = mvm->trans->cfg->gp2_reg_addr; return iwl_read_prph(mvm->trans, reg_addr); } void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime) { bool ps_disabled; lockdep_assert_held(&mvm->mutex); /* Disable power save when reading GP2 */ ps_disabled = mvm->ps_disabled; if (!ps_disabled) { mvm->ps_disabled = true; iwl_mvm_power_update_device(mvm); } *gp2 = iwl_mvm_get_systime(mvm); if (clock_type == CLOCK_BOOTTIME && boottime) *boottime = ktime_get_boottime_ns(); else if (clock_type == CLOCK_REALTIME && realtime) *realtime = ktime_get_real(); if (!ps_disabled) { mvm->ps_disabled = ps_disabled; iwl_mvm_power_update_device(mvm); } } + +/* Find if at least two links from different vifs use same channel + * FIXME: consider having a refcount array in struct iwl_mvm_vif for + * used phy_ctxt ids. + */ +bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1, + struct iwl_mvm_vif *vif2) +{ + unsigned int i, j; + + for_each_mvm_vif_valid_link(vif1, i) { + for_each_mvm_vif_valid_link(vif2, j) { + if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt) + return true; + } + } + + return false; +} + +bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif) +{ + unsigned int i; + + /* FIXME: can it fail when phy_ctxt is assigned? 
*/ + for_each_mvm_vif_valid_link(mvmvif, i) { + if (mvmvif->link[i]->phy_ctxt && + mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX) + return true; + } + + return false; +} diff --git a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c index 75fd386b048e..fa4a14546860 100644 --- a/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c +++ b/sys/contrib/dev/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,345 +1,553 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "iwl-prph.h" static void iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans, struct iwl_prph_scratch_hwm_cfg *dbg_cfg, u32 *control_flags) { enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; struct iwl_fw_ini_allocation_tlv *fw_mon_cfg; u32 dbg_flags = 0; if (!iwl_trans_dbg_ini_valid(trans)) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; iwl_pcie_alloc_fw_monitor(trans, 0); if (fw_mon->size) { dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; IWL_DEBUG_FW(trans, "WRT: Applying DRAM buffer destination\n"); dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical); dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size); } goto out; } fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id]; switch (le32_to_cpu(fw_mon_cfg->buf_location)) { case IWL_FW_INI_LOCATION_SRAM_PATH: dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL; IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); break; case IWL_FW_INI_LOCATION_NPK_PATH: dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF; IWL_DEBUG_FW(trans, "WRT: Applying NPK buffer destination\n"); break; case IWL_FW_INI_LOCATION_DRAM_PATH: if (trans->dbg.fw_mon_ini[alloc_id].num_frags) { struct iwl_dram_data *frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical); dbg_cfg->hwm_size = cpu_to_le32(frag->size); dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset); IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (debug_token_config=%u)\n", dbg_cfg->debug_token_config); IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n", alloc_id, trans->dbg.fw_mon_ini[alloc_id].num_frags); } break; default: IWL_ERR(trans, "WRT: Invalid buffer destination\n"); } out: if (dbg_flags) *control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags; } int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; struct iwl_prph_info *prph_info; u32 control_flags = 0; int ret; int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_DEF: return -EINVAL; case IWL_AMSDU_2K: break; case IWL_AMSDU_4K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; break; case IWL_AMSDU_8K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K; break; case IWL_AMSDU_12K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */ control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; break; } /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, 
sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, GFP_KERNEL); if (!prph_scratch) return -ENOMEM; prph_sc_ctrl = &prph_scratch->ctrl_cfg; prph_sc_ctrl->version.version = 0; prph_sc_ctrl->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); control_flags |= IWL_PRPH_SCRATCH_MTR_MODE; control_flags |= IWL_PRPH_MTR_FORMAT_256B & IWL_PRPH_SCRATCH_MTR_FORMAT; if (trans->trans_cfg->imr_enabled) control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN; /* initialize RX default queue */ prph_sc_ctrl->rbd_cfg.free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg, &control_flags); prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + /* initialize the Step equalizer data */ + prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step); + prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step); + /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) goto err_free_prph_scratch; /* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later * * We also use the second half of this page to give the device some * dummy TR/CR tail pointers - which shouldn't be necessary as we don't * use this, but the hardware still reads/writes there and we can't let * it go do that with a NULL pointer. */ BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2); prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE, &trans_pcie->prph_info_dma_addr, GFP_KERNEL); if (!prph_info) { ret = -ENOMEM; goto err_free_prph_scratch; } /* Allocate context info */ ctxt_info_gen3 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_gen3), &trans_pcie->ctxt_info_dma_addr, GFP_KERNEL); if (!ctxt_info_gen3) { ret = -ENOMEM; goto err_free_prph_info; } ctxt_info_gen3->prph_info_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr); ctxt_info_gen3->prph_scratch_base_addr = cpu_to_le64(trans_pcie->prph_scratch_dma_addr); ctxt_info_gen3->prph_scratch_size = cpu_to_le32(sizeof(*prph_scratch)); ctxt_info_gen3->cr_head_idx_arr_base_addr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); ctxt_info_gen3->tr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2); ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); ctxt_info_gen3->mtr_base_addr = cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds)); trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; trans_pcie->prph_info = prph_info; trans_pcie->prph_scratch = prph_scratch; /* Allocate IML */ trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); if (!trans_pcie->iml) { ret = -ENOMEM; goto err_free_ctxt_info; } memcpy(trans_pcie->iml, trans->iml, trans->iml_len); iwl_enable_fw_load_int_ctx_info(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_ADDR, trans_pcie->ctxt_info_dma_addr); iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); return 0; err_free_ctxt_info: dma_free_coherent(trans->dev, 
sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_gen3 = NULL; err_free_prph_info: dma_free_coherent(trans->dev, PAGE_SIZE, prph_info, trans_pcie->prph_info_dma_addr); err_free_prph_scratch: dma_free_coherent(trans->dev, sizeof(*prph_scratch), prph_scratch, trans_pcie->prph_scratch_dma_addr); return ret; } void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->iml) { dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml, trans_pcie->iml_dma_addr); trans_pcie->iml_dma_addr = 0; trans_pcie->iml = NULL; } iwl_pcie_ctxt_info_free_fw_img(trans); if (alive) return; if (!trans_pcie->ctxt_info_gen3) return; /* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */ dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), trans_pcie->ctxt_info_gen3, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info_gen3 = NULL; dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), trans_pcie->prph_scratch, trans_pcie->prph_scratch_dma_addr); trans_pcie->prph_scratch_dma_addr = 0; trans_pcie->prph_scratch = NULL; /* this is needed for the entire lifetime */ dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info, trans_pcie->prph_info_dma_addr); trans_pcie->prph_info_dma_addr = 0; trans_pcie->prph_info = NULL; } -int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len) +static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + struct iwl_dram_data *dram) +{ + u32 len, len0, len1; + + if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) { + IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n", + pnvm_data->n_chunks); + return -EINVAL; + } + + len0 = pnvm_data->chunks[0].len; + len1 = pnvm_data->chunks[1].len; + if (len1 > 0xFFFFFFFF - len0) { + IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n"); + return -EINVAL; + } + len = len0 + len1; + + dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len, + &dram->physical); + if (!dram->block) { + IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n"); + return -ENOMEM; + } + + dram->size = len; + memcpy(dram->block, pnvm_data->chunks[0].data, len0); + memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1); + + return 0; +} + +static int iwl_pcie_load_payloads_segments + (struct iwl_trans *trans, + struct iwl_dram_regions *dram_regions, + const struct iwl_pnvm_image *pnvm_data) +{ + struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0]; + struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; + struct iwl_prph_scrath_mem_desc_addr_array *addresses; + const void *data; + u32 len; + int i; + + /* allocate and init DRAM descriptors array */ + len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array); + desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent + (trans, + len, + &desc_dram->physical); + if (!desc_dram->block) { + IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n"); + return -ENOMEM; + } + desc_dram->size = len; + memset(desc_dram->block, 0, len); + + /* allocate DRAM region for each payload */ + dram_regions->n_regions = 0; + for (i = 0; i < pnvm_data->n_chunks; i++) { + len = pnvm_data->chunks[i].len; + data = pnvm_data->chunks[i].data; + + if (iwl_pcie_ctxt_info_alloc_dma(trans, + data, + len, + cur_payload_dram)) { + 
iwl_trans_pcie_free_pnvm_dram_regions(dram_regions, + trans->dev); + return -ENOMEM; + } + + dram_regions->n_regions++; + cur_payload_dram++; + } + + /* fill desc with the DRAM payloads addresses */ + addresses = desc_dram->block; + for (i = 0; i < pnvm_data->n_chunks; i++) { + addresses->mem_descs[i] = + cpu_to_le64(dram_regions->drams[i].physical); + } + + return 0; + +} + +int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; - int ret; + struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data; + int ret = 0; + + /* only allocate the DRAM if not allocated yet */ + if (trans->pnvm_loaded) + return 0; + + if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) + return -EBUSY; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; - /* only allocate the DRAM if not allocated yet */ - if (!trans->pnvm_loaded) { - if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) - return -EBUSY; - - ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, - &trans_pcie->pnvm_dram); - if (ret < 0) { - IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n", - ret); - return ret; + if (!pnvm_payloads->n_chunks) { + IWL_DEBUG_FW(trans, "no payloads\n"); + return -EINVAL; + } + + /* save payloads in several DRAM sections */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) { + ret = iwl_pcie_load_payloads_segments(trans, + dram_regions, + pnvm_payloads); + if (!ret) + trans->pnvm_loaded = true; + } else { + /* save only in one DRAM section */ + ret = iwl_pcie_load_payloads_continuously + (trans, + pnvm_payloads, + &dram_regions->drams[0]); + if (!ret) { + dram_regions->n_regions = 1; + trans->pnvm_loaded = true; } } + return ret; +} + +static inline size_t +iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions) +{ + size_t total_size = 0; + int i; + + for (i = 0; i < dram_regions->n_regions; i++) + total_size += dram_regions->drams[i].size; + + return total_size; +} + +static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data; + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = - cpu_to_le64(trans_pcie->pnvm_dram.physical); + cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical); prph_sc_ctrl->pnvm_cfg.pnvm_size = - cpu_to_le32(trans_pcie->pnvm_dram.size); + cpu_to_le32(iwl_dram_regions_size(dram_regions)); +} - return 0; +static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = + cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical); + prph_sc_ctrl->pnvm_cfg.pnvm_size = + cpu_to_le32(trans_pcie->pnvm_data.drams[0].size); } -int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len) +void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) 
+ iwl_pcie_set_pnvm_segments(trans); + else + iwl_pcie_set_continuous_pnvm(trans); +} + +int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; - int ret; + struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data; + int ret = 0; + + /* only allocate the DRAM if not allocated yet */ + if (trans->reduce_power_loaded) + return 0; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; - /* only allocate the DRAM if not allocated yet */ - if (!trans->reduce_power_loaded) { - if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) - return -EBUSY; + if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) + return -EBUSY; - ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, - &trans_pcie->reduce_power_dram); - if (ret < 0) { - IWL_DEBUG_FW(trans, - "Failed to allocate reduce power DMA %d.\n", - ret); - return ret; + if (!payloads->n_chunks) { + IWL_DEBUG_FW(trans, "no payloads\n"); + return -EINVAL; + } + + /* save payloads in several DRAM sections */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) { + ret = iwl_pcie_load_payloads_segments(trans, + dram_regions, + payloads); + if (!ret) + trans->reduce_power_loaded = true; + } else { + /* save only in one DRAM section */ + ret = iwl_pcie_load_payloads_continuously + (trans, + payloads, + &dram_regions->drams[0]); + if (!ret) { + dram_regions->n_regions = 1; + trans->reduce_power_loaded = true; } } + return ret; +} + +static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data; + prph_sc_ctrl->reduce_power_cfg.base_addr = - cpu_to_le64(trans_pcie->reduce_power_dram.physical); + cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical); prph_sc_ctrl->reduce_power_cfg.size = - cpu_to_le32(trans_pcie->reduce_power_dram.size); + cpu_to_le32(iwl_dram_regions_size(dram_regions)); +} - return 0; +static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + + prph_sc_ctrl->reduce_power_cfg.base_addr = + cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical); + prph_sc_ctrl->reduce_power_cfg.size = + cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size); +} + +void +iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) + iwl_pcie_set_reduce_power_segments(trans); + else + iwl_pcie_set_continuous_reduce_power(trans); } + diff --git a/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c b/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c index 74ce31fdf45e..5f55efe64bf5 100644 --- a/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c +++ b/sys/contrib/dev/iwlwifi/pcie/ctxt-info.c @@ -1,263 +1,263 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * 
Copyright (C) 2018-2022 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info.h" #include "internal.h" #include "iwl-prph.h" static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, size_t size, dma_addr_t *phys, int depth) { void *result; if (WARN(depth > 2, "failed to allocate DMA memory not crossing 2^32 boundary")) return NULL; result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL); if (!result) return NULL; if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) { void *old = result; dma_addr_t oldphys = *phys; result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, depth + 1); dma_free_coherent(trans->dev, size, old, oldphys); } return result; } -static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, - size_t size, - dma_addr_t *phys) +void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) { return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); } int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, const void *data, u32 len, struct iwl_dram_data *dram) { dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len, &dram->physical); if (!dram->block) return -ENOMEM; dram->size = len; memcpy(dram->block, data, len); return 0; } void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->paging) { WARN_ON(dram->paging_cnt); return; } /* free paging*/ for (i = 0; i < dram->paging_cnt; i++) dma_free_coherent(trans->dev, dram->paging[i].size, dram->paging[i].block, dram->paging[i].physical); kfree(dram->paging); dram->paging_cnt = 0; dram->paging = NULL; } int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram) { struct iwl_self_init_dram *dram = &trans->init_dram; int i, ret, lmac_cnt, umac_cnt, paging_cnt; if (WARN(dram->paging, "paging shouldn't already be initialized (%d pages)\n", dram->paging_cnt)) iwl_pcie_ctxt_info_free_paging(trans); lmac_cnt = iwl_pcie_get_num_sections(fw, 0); /* add 1 due to separator */ umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1); /* add 2 due to separators */ paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2); dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL); if (!dram->fw) return -ENOMEM; dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL); if (!dram->paging) return -ENOMEM; /* initialize lmac sections */ for (i = 0; i < lmac_cnt; i++) { ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data, fw->sec[i].len, &dram->fw[dram->fw_cnt]); if (ret) return ret; ctxt_dram->lmac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical); dram->fw_cnt++; } /* initialize umac sections */ for (i = 0; i < umac_cnt; i++) { /* access FW with +1 to make up for lmac separator */ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[dram->fw_cnt + 1].data, fw->sec[dram->fw_cnt + 1].len, &dram->fw[dram->fw_cnt]); if (ret) return ret; ctxt_dram->umac_img[i] = cpu_to_le64(dram->fw[dram->fw_cnt].physical); dram->fw_cnt++; } /* * Initialize paging. * Paging memory isn't stored in dram->fw as the umac and lmac - it is * stored separately. * This is since the timing of its release is different - * while fw memory can be released on alive, the paging memory can be * freed only when the device goes down. 
* Given that, the logic here in accessing the fw image is a bit * different - fw_cnt isn't changing so loop counter is added to it. */ for (i = 0; i < paging_cnt; i++) { /* access FW with +2 to make up for lmac & umac separators */ int fw_idx = dram->fw_cnt + i + 2; ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data, fw->sec[fw_idx].len, &dram->paging[i]); if (ret) return ret; ctxt_dram->virtual_img[i] = cpu_to_le64(dram->paging[i].physical); dram->paging_cnt++; } return 0; } int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; u32 control_flags = 0, rb_size; dma_addr_t phys; int ret; ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sizeof(*ctxt_info), &phys); if (!ctxt_info) return -ENOMEM; trans_pcie->ctxt_info_dma_addr = phys; ctxt_info->version.version = 0; ctxt_info->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); /* size is in DWs */ ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4); switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_2K: rb_size = IWL_CTXT_INFO_RB_SIZE_2K; break; case IWL_AMSDU_4K: rb_size = IWL_CTXT_INFO_RB_SIZE_4K; break; case IWL_AMSDU_8K: rb_size = IWL_CTXT_INFO_RB_SIZE_8K; break; case IWL_AMSDU_12K: rb_size = IWL_CTXT_INFO_RB_SIZE_16K; break; default: WARN_ON(1); rb_size = IWL_CTXT_INFO_RB_SIZE_4K; } WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12); control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG; control_flags |= u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds), IWL_CTXT_INFO_RB_CB_SIZE); control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE); ctxt_info->control.control_flags = cpu_to_le32(control_flags); /* initialize RX default queue */ rx_cfg = &ctxt_info->rbd_cfg; rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma); rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma); /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info, trans_pcie->ctxt_info_dma_addr); return ret; } trans_pcie->ctxt_info = ctxt_info; iwl_enable_fw_load_int_ctx_info(trans); /* Configure debug, if exists */ if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); /* kick FW self load */ iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr); /* Context info will be released upon alive or failure to get one */ return 0; } void iwl_pcie_ctxt_info_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->ctxt_info) return; dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), trans_pcie->ctxt_info, trans_pcie->ctxt_info_dma_addr); trans_pcie->ctxt_info_dma_addr = 0; trans_pcie->ctxt_info = NULL; iwl_pcie_ctxt_info_free_fw_img(trans); } diff --git a/sys/contrib/dev/iwlwifi/pcie/drv.c b/sys/contrib/dev/iwlwifi/pcie/drv.c index 6b9ce03850ec..3bde223e3f9c 100644 --- a/sys/contrib/dev/iwlwifi/pcie/drv.c +++ b/sys/contrib/dev/iwlwifi/pcie/drv.c @@ -1,1831 +1,1668 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 
2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include "fw/acpi.h" #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-prph.h" #include "internal.h" #define TRANS_CFG_MARKER BIT(0) #define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg), \ struct _struct) extern int _invalid_type; #define _TRANS_CFG_MARKER(cfg) \ (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params), \ TRANS_CFG_MARKER, \ __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type))) #define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg)) #define IWL_PCI_DEVICE(dev, subdev, cfg) \ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ .driver_data = _ASSIGN_CFG(cfg) /* Hardware specific file defines the PCI IDs table for that hardware module */ static const struct pci_device_id iwl_hw_card_ids[] = { #if IS_ENABLED(CONFIG_IWLDVM) {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */ /* 5300 Series WiFi */ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4236, 0x1114, 
iwl5300_agn_cfg)}, /* Half Mini Card */ /* 5350 Series WiFi/WiMax */ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */ /* 5150 Series Wifi/WiMax */ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */ /* 6x00 Series */ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)}, {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)}, {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, /* 6x05 Series */ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)}, {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)}, {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */ /* 6x30 Series */ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)}, {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)}, /* 6x50 WiFi/WiMax Series */ 
{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)}, {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, /* 6150 WiFi/WiMax Series */ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)}, {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)}, {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)}, /* 1000 Series WiFi */ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, /* 100 Series WiFi */ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)}, /* 130 Series WiFi */ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)}, {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, /* 2x00 Series */ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)}, /* 2x30 Series */ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, /* 6x35 Series */ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)}, {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)}, {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)}, /* 105 Series */ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)}, {IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)}, /* 135 Series */ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)}, {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)}, #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) /* 7260 Series */ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 
0x406A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)}, {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)}, /* 3160 Series */ {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)}, 
{IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 3168 Series */ {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x520A, 
iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, 
{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)}, {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)}, {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)}, {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)}, {IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)}, {IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)}, /* Qu devices */ {IWL_PCI_DEVICE(0x02F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, {IWL_PCI_DEVICE(0x06F0, PCI_ANY_ID, iwl_qu_trans_cfg)}, {IWL_PCI_DEVICE(0x34F0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x3DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x4DF0, PCI_ANY_ID, iwl_qu_medium_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)}, - {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)}, /* So devices */ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)}, - {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)}, {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, /* Ma devices */ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, + +/* Sc devices */ + {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} }; MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ - { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ - .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ - .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ - .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } + _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ + { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ + .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ + .mac_step = 
_mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ - _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) + _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) /* 9000 */ IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name), IWL_DEV_INFO(0x2526, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x2526, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x30DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), - IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name), /* AX200 */ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name), IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name), /* Qu with Hr */ IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, 
iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x3DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x3DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x0310, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL), IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL), /* So with HR */ IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL), 
IWL_DEV_INFO(0x2725, 0x2020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0, NULL), IWL_DEV_INFO(0x2725, 0x1673, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675w_name), IWL_DEV_INFO(0x2725, 0x1674, iwlax210_2ax_cfg_ty_gf_a0, iwl_ax210_killer_1675x_name), IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL), IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL), IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL), /* So with JF */ IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), - /* SnJ with HR */ - IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x00B4, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), - IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), - IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - /* SO with GF2 */ IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, 
iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), /* MA with GF2 */ - IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9461_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9461_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9462_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - 
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9462_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, 
IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - 
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), - /* QnJ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), - /* Qu with Hr */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), - -/* QnJ with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - 
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), - -/* SnJ with Jf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9560_name), - -/* SnJ with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_hr_b0, iwl_ax101_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_hr_b0, iwl_ax201_name), + IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - 
IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax231_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_hr_b0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_gf_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - 
iwl_cfg_bz_a0_mr_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_fm_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, - iwl_cfg_gl_a0_fm_a0, iwl_bz_name), - -/* BZ Z step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_z0_gf_a0, iwl_bz_name), + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_bz_name), -/* BNJ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), +/* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_bz_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + 
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), - -/* SoF with JF2 */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), - -/* SoF with JF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* MsP */ /* For now we use the same FW as MR, but this will change in the future. */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_ms_a0, iwl_ax204_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_ms_a0, iwl_ax204_name) + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax204_name), +/* Sc */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_sc_name), #endif /* CONFIG_IWLMVM */ }; /* - * In case that there is no OTP on the NIC, get the rf id and cdb info - * from the prph registers. 
+ * Read rf id and cdb info from prph register and store it */ static int get_crf_id(struct iwl_trans *iwl_trans) { int ret = 0; u32 sd_reg_ver_addr; - u32 cdb = 0; - u32 val; + u32 val = 0; if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; else sd_reg_ver_addr = SD_REG_VER; if (!iwl_trans_grab_nic_access(iwl_trans)) { IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); ret = -EIO; goto out; } /* Enable access to peripheral registers */ val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ - val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + + /* Read cnv info */ + iwl_trans->hw_cnv_id = + iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); /* Read cdb info (also contains the jacket info if needed in the future */ - cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); + iwl_trans->hw_wfpm_id = + iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); + IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n", + iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id, + iwl_trans->hw_wfpm_id); + + iwl_trans_release_nic_access(iwl_trans); + +out: + return ret; +} + +/* + * In case that there is no OTP on the NIC, map the rf id and cdb info + * from the prph registers. + */ +static int map_crf_id(struct iwl_trans *iwl_trans) +{ + int ret = 0; + u32 val = iwl_trans->hw_crf_id; + u32 step_id = REG_CRF_ID_STEP(val); + u32 slave_id = REG_CRF_ID_SLAVE(val); + u32 jacket_id_cnv = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id); + u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id); + u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id); /* Map between crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { case REG_CRF_ID_TYPE_JF_1: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12); break; case REG_CRF_ID_TYPE_JF_2: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); break; - case REG_CRF_ID_TYPE_HR_NONE_CDB: + case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); break; + case REG_CRF_ID_TYPE_HR_NONE_CDB: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); + break; case REG_CRF_ID_TYPE_HR_CDB: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); break; case REG_CRF_ID_TYPE_GF: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12); break; case REG_CRF_ID_TYPE_MR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); break; - case REG_CRF_ID_TYPE_FM: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); - break; + case REG_CRF_ID_TYPE_FM: + case REG_CRF_ID_TYPE_FMI: + case REG_CRF_ID_TYPE_FMR: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); + break; default: ret = -EIO; IWL_ERR(iwl_trans, - "Can find a correct rfid for crf id 0x%x\n", + "Can't find a correct rfid for crf id 0x%x\n", REG_CRF_ID_TYPE(val)); - goto out_release; + goto out; } + /* Set Step-id */ + iwl_trans->hw_rf_id |= (step_id << 8); + /* Set CDB capabilities */ - if (cdb & BIT(4)) { + if (cdb_id_wfpm || slave_id) { iwl_trans->hw_rf_id += BIT(28); IWL_INFO(iwl_trans, "Adding cdb to rf id\n"); } - IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n", - iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val)); + /* Set Jacket capabilities */ + if (jacket_id_wfpm || jacket_id_cnv) { + iwl_trans->hw_rf_id += BIT(29); + IWL_INFO(iwl_trans, "Adding jacket to rf id\n"); + } -out_release: - iwl_trans_release_nic_access(iwl_trans); + 
IWL_INFO(iwl_trans, + "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n", + REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id); + IWL_INFO(iwl_trans, + "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n", + cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id); + IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n", + jacket_id_cnv, iwl_trans->hw_cnv_id); out: return ret; } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 static const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, - u16 mac_type, u8 mac_step, - u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores) + u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; if (!num_devices) return NULL; for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && dev_info->device != device) continue; if (dev_info->subdevice != (u16)IWL_CFG_ANY && dev_info->subdevice != subsystem_device) continue; if (dev_info->mac_type != (u16)IWL_CFG_ANY && dev_info->mac_type != mac_type) continue; if (dev_info->mac_step != (u8)IWL_CFG_ANY && dev_info->mac_step != mac_step) continue; if (dev_info->rf_type != (u16)IWL_CFG_ANY && dev_info->rf_type != rf_type) continue; if (dev_info->cdb != (u8)IWL_CFG_ANY && dev_info->cdb != cdb) continue; if (dev_info->jacket != (u8)IWL_CFG_ANY && dev_info->jacket != jacket) continue; if (dev_info->rf_id != (u8)IWL_CFG_ANY && dev_info->rf_id != rf_id) continue; if (dev_info->no_160 != (u8)IWL_CFG_ANY && dev_info->no_160 != no_160) continue; if (dev_info->cores != (u8)IWL_CFG_ANY && dev_info->cores != cores) continue; + if (dev_info->rf_step != (u8)IWL_CFG_ANY && + dev_info->rf_step != rf_step) + continue; + return dev_info; } return NULL; } static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct iwl_cfg_trans_params *trans; const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; const struct iwl_dev_info *dev_info; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; int ret; const struct iwl_cfg *cfg; trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /* * This is needed for backwards compatibility with the old * tables, so we don't need to change all the config structs * at the same time. The cfg is used to compare with the old * full cfg structs. */ cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER); /* make sure trans is the first element in iwl_cfg */ BUILD_BUG_ON(offsetof(struct iwl_cfg, trans)); iwl_trans = iwl_trans_pcie_alloc(pdev, ent, trans); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); /* * Let's try to grab NIC access early here. Sometimes, NICs may * fail to initialize, and if that happens it's better if we see * issues early on (and can reprobe, per the logic inside), than * first trying to load the firmware etc. and potentially only * detecting any problems when the first interface is brought up. 
*/ ret = iwl_pcie_prepare_card_hw(iwl_trans); if (!ret) { ret = iwl_finish_nic_init(iwl_trans); if (ret) goto out_free_trans; if (iwl_trans_grab_nic_access(iwl_trans)) { /* all good */ iwl_trans_release_nic_access(iwl_trans); } else { ret = -EIO; goto out_free_trans; } } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + get_crf_id(iwl_trans); /* * The RF_ID is set to zero in blank OTP so read version to * extract the RF_ID. * This is relevant only for family 9000 and up. */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) { ret = -EINVAL; goto out_free_trans; } + IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + iwl_trans->hw_rev, iwl_trans->hw_rf_id); + dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), iwl_trans->hw_rev_step, CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), IWL_SUBDEVICE_NO_160(pdev->subsystem_device), - IWL_SUBDEVICE_CORES(pdev->subsystem_device)); - + IWL_SUBDEVICE_CORES(pdev->subsystem_device), + CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; } #if IS_ENABLED(CONFIG_IWLMVM) - - /* - * Workaround for problematic SnJ device: sometimes when - * certain RF modules are connected to SnJ, the device ID - * changes to QnJ's ID. So we are using QnJ's trans_cfg until - * here. But if we detect that the MAC type is actually SnJ, - * we should switch to it here to avoid problems later. - */ - if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ) - iwl_trans->trans_cfg = &iwl_so_trans_cfg; - /* * special-case 7265D, it has the same PCI IDs. * * Note that because we already pass the cfg to the transport above, * all the parameters that the transport uses must, until that is * changed, be identical to the ones in the 7265D configuration. */ if (cfg == &iwl7265_2ac_cfg) cfg_7265d = &iwl7265d_2ac_cfg; else if (cfg == &iwl7265_2n_cfg) cfg_7265d = &iwl7265d_2n_cfg; else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) iwl_trans->cfg = cfg_7265d; /* * This is a hack to switch from Qu B0 to Qu C0. We need to * do this for all cfgs that use Qu B0, except for those using * Jf, which have already been moved to the new table. The * rest must be removed once we convert Qu with Hr as well. */ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) { if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; } /* same thing for QuZ... 
*/ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; } #endif /* * If we didn't set the cfg yet, the PCI ID table entry should have * been a full config - if yes, use it, otherwise fail. */ if (!iwl_trans->cfg) { if (ent->driver_data & TRANS_CFG_MARKER) { pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", pdev->device, pdev->subsystem_device, iwl_trans->hw_rev, iwl_trans->hw_rf_id); ret = -EINVAL; goto out_free_trans; } iwl_trans->cfg = cfg; } /* if we don't have a name yet, copy name from the old cfg */ if (!iwl_trans->name) iwl_trans->name = iwl_trans->cfg->name; if (iwl_trans->trans_cfg->mq_rx_supported) { if (WARN_ON(!iwl_trans->cfg->num_rbds)) { ret = -EINVAL; goto out_free_trans; } trans_pcie->num_rx_bufs = iwl_trans->cfg->num_rbds; } else { trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } + if (!iwl_trans->trans_cfg->integrated) { + u16 link_status; + + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status); + + iwl_trans->pcie_link_speed = + u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS); + } + ret = iwl_trans_init(iwl_trans); if (ret) goto out_free_trans; pci_set_drvdata(pdev, iwl_trans); /* try to get ownership so that we'll know if we don't own it */ iwl_pcie_prepare_card_hw(iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { ret = PTR_ERR(iwl_trans->drv); goto out_free_trans; } /* register transport layer debugfs here */ iwl_trans_pcie_dbgfs_register(iwl_trans); return 0; out_free_trans: iwl_trans_pcie_free(iwl_trans); return ret; } static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); + if (!trans) + return; + iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); } #ifdef CONFIG_PM_SLEEP static int iwl_pci_suspend(struct device *device) { /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ return 0; } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - the NIC may be alive. */ /* * We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); if (!trans->op_mode) return 0; /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return 0; /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill * interrupt while in iwl_pcie_check_hw_rf_kill(). 
*/ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; } static const struct dev_pm_ops iwl_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, iwl_pci_resume) }; #define IWL_PM_OPS (&iwl_dev_pm_ops) #else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, .id_table = iwl_hw_card_ids, .probe = iwl_pci_probe, .remove = iwl_pci_remove, .driver.pm = IWL_PM_OPS, #if defined(__FreeBSD__) /* Allow iwm(4) to attach for conflicting IDs for now. */ .bsd_probe_return = (BUS_PROBE_DEFAULT - 1), #endif }; int __must_check iwl_pci_register_driver(void) { int ret; ret = pci_register_driver(&iwl_pci_driver); if (ret) pr_err("Unable to initialize PCI module\n"); return ret; } void iwl_pci_unregister_driver(void) { pci_unregister_driver(&iwl_pci_driver); } #if defined(__FreeBSD__) static int sysctl_iwlwifi_pci_ids_name(SYSCTL_HANDLER_ARGS) { const struct pci_device_id *id; struct sbuf *sb; int error, i; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sb = sbuf_new_for_sysctl(NULL, NULL, 512, req); if (sb == NULL) return (ENOMEM); id = iwl_hw_card_ids; while (id != NULL && id->vendor != 0) { if ((id->driver_data & TRANS_CFG_MARKER) != 0) { /* Skip and print them below. */ } else if (id->driver_data != 0) { const struct iwl_cfg *cfg; cfg = (void *)(id->driver_data & ~TRANS_CFG_MARKER); sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\n", id->vendor, id->device, id->subvendor, id->subdevice, cfg->name, cfg->fw_name_pre); } else { sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\n", id->vendor, id->device, id->subvendor, id->subdevice, "",""); } id++; } for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; const char *name; if (dev_info->name) name = dev_info->name; else if (dev_info->cfg && dev_info->cfg->name) name = dev_info->cfg->name; else name = ""; sbuf_printf(sb, "%#06x/%#06x/%#06x/%#06x\t%s\t%s\n", PCI_VENDOR_ID_INTEL, dev_info->device, PCI_ANY_ID, dev_info->subdevice, name, dev_info->cfg->fw_name_pre); } error = sbuf_finish(sb); sbuf_delete(sb); return (error); } SYSCTL_PROC(LINUXKPI_PARAM_PARENT, OID_AUTO, LINUXKPI_PARAM_NAME(pci_ids_name), CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0, sysctl_iwlwifi_pci_ids_name, "", "iwlwifi PCI IDs and names"); #endif diff --git a/sys/contrib/dev/iwlwifi/pcie/internal.h b/sys/contrib/dev/iwlwifi/pcie/internal.h index cd2fb7811e4e..83d9507127b6 100644 --- a/sys/contrib/dev/iwlwifi/pcie/internal.h +++ b/sys/contrib/dev/iwlwifi/pcie/internal.h @@ -1,844 +1,851 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2003-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_trans_int_pcie_h__ #define __iwl_trans_int_pcie_h__ #include #include #include #include #include #include #include #include "iwl-fh.h" #include "iwl-csr.h" #include "iwl-trans.h" #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-op-mode.h" #include "iwl-drv.h" #include "queue/tx.h" +#include "iwl-context-info.h" /* * RX related structures and functions */ #define RX_NUM_QUEUES 1 #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 #define RX_PENDING_WATERMARK 16 #define FIRST_RX_QUEUE 512 struct iwl_host_cmd; /*This file includes the declaration that are internal to the * 
trans_pcie layer */ /** * struct iwl_rx_mem_buffer * @page_dma: bus address of rxb page * @page: driver's pointer to the rxb page * @list: list entry for the membuffer * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table * @offset: indicates which offset of the page (in bytes) * this buffer uses (if multiple RBs fit into one page) */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; struct list_head list; u32 offset; u16 vid; bool invalid; }; /** * struct isr_statistics - interrupt statistics * */ struct isr_statistics { u32 hw; u32 sw; u32 err_code; u32 sch; u32 alive; u32 rfkill; u32 ctkill; u32 wakeup; u32 rx; u32 tx; u32 unhandled; }; /** * struct iwl_rx_transfer_desc - transfer descriptor * @addr: ptr to free buffer start address * @rbid: unique tag of the buffer * @reserved: reserved */ struct iwl_rx_transfer_desc { __le16 rbid; __le16 reserved[3]; __le64 addr; } __packed; #define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0) /** * struct iwl_rx_completion_desc - completion descriptor * @reserved1: reserved * @rbid: unique tag of the received buffer * @flags: flags (0: fragmented, all others: reserved) * @reserved2: reserved */ struct iwl_rx_completion_desc { __le32 reserved1; __le16 rbid; u8 flags; u8 reserved2[25]; } __packed; /** * struct iwl_rx_completion_desc_bz - Bz completion descriptor * @rbid: unique tag of the received buffer * @flags: flags (0: fragmented, all others: reserved) * @reserved: reserved */ struct iwl_rx_completion_desc_bz { __le16 rbid; u8 flags; u8 reserved[1]; } __packed; /** * struct iwl_rxq - Rx queue * @id: queue index * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd) * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: * @rx_free: list of RBDs with allocated RB ready for use * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: * @queue: actual rx queue. Not used for multi-rx queue. 
* @next_rb_is_fragment: indicates that the previous RB that we handled set * the fragmented flag, so the next one is still another fragment * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; void *used_bd; dma_addr_t used_bd_dma; u32 read; u32 write; u32 free_count; u32 used_count; u32 write_actual; u32 queue_size; struct list_head rx_free; struct list_head rx_used; bool need_update, next_rb_is_fragment; void *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; struct napi_struct napi; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; /** * struct iwl_rb_allocator - Rx allocator * @req_pending: number of requests the allcator had not processed yet * @req_ready: number of requests honored and ready for claiming * @rbd_allocated: RBDs with pages allocated and ready to be handled to * the queue. This is a list of &struct iwl_rx_mem_buffer * @rbd_empty: RBDs with no page attached for allocator use. This is a list * of &struct iwl_rx_mem_buffer * @lock: protects the rbd_allocated and rbd_empty lists * @alloc_wq: work queue for background calls * @rx_alloc: work struct for background calls */ struct iwl_rb_allocator { atomic_t req_pending; atomic_t req_ready; struct list_head rbd_allocated; struct list_head rbd_empty; spinlock_t lock; struct workqueue_struct *alloc_wq; struct work_struct rx_alloc; }; /** * iwl_get_closed_rb_stts - get closed rb stts from different structs * @rxq - the rxq to get the rb stts from */ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, struct iwl_rxq *rxq) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { __le16 *rb_stts = rxq->rb_stts; return READ_ONCE(*rb_stts); } else { struct iwl_rb_status *rb_stts = rxq->rb_stts; return READ_ONCE(rb_stts->closed_rb_num); } } #ifdef CONFIG_IWLWIFI_DEBUGFS /** * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data * debugfs file * * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed. * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open. * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is * set the file can no longer be used. */ enum iwl_fw_mon_dbgfs_state { IWL_FW_MON_DBGFS_STATE_CLOSED, IWL_FW_MON_DBGFS_STATE_OPEN, IWL_FW_MON_DBGFS_STATE_DISABLED, }; #endif /** * enum iwl_shared_irq_flags - level of sharing for irq * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue. 
*/ enum iwl_shared_irq_flags { IWL_SHARED_IRQ_NON_RX = BIT(0), IWL_SHARED_IRQ_FIRST_RSS = BIT(1), }; /** * enum iwl_image_response_code - image response values * @IWL_IMAGE_RESP_DEF: the default value of the register * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully * @IWL_IMAGE_RESP_FAIL: iml reading failed */ enum iwl_image_response_code { IWL_IMAGE_RESP_DEF = 0, IWL_IMAGE_RESP_SUCCESS = 1, IWL_IMAGE_RESP_FAIL = 2, }; /** * struct cont_rec: continuous recording data structure * @prev_wr_ptr: the last address that was read in monitor_data * debugfs file * @prev_wrap_cnt: the wrap count that was used during the last read in * monitor_data debugfs file * @state: the state of monitor_data debugfs file as described * in &iwl_fw_mon_dbgfs_state enum * @mutex: locked while reading from monitor_data debugfs file */ #ifdef CONFIG_IWLWIFI_DEBUGFS struct cont_rec { u32 prev_wr_ptr; u32 prev_wrap_cnt; u8 state; /* Used to sync monitor_data debugfs file with driver unload flow */ struct mutex mutex; }; #endif enum iwl_pcie_fw_reset_state { FW_RESET_IDLE, FW_RESET_REQUESTED, FW_RESET_OK, FW_RESET_ERROR, }; /** * enum wl_pcie_imr_status - imr dma transfer state * @IMR_D2S_IDLE: default value of the dma transfer * @IMR_D2S_REQUESTED: dma transfer requested * @IMR_D2S_COMPLETED: dma transfer completed * @IMR_D2S_ERROR: dma transfer error */ enum iwl_pcie_imr_status { IMR_D2S_IDLE, IMR_D2S_REQUESTED, IMR_D2S_COMPLETED, IMR_D2S_ERROR, }; /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init * @ctxt_info_gen3: context information for gen3 devices * @prph_info: prph info for self init * @prph_scratch: prph scratch for self init * @ctxt_info_dma_addr: dma addr of context information * @prph_info_dma_addr: dma addr of prph info * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @init_dram: DRAM data of firmware image (including paging). * Context information addresses will be taken from here. * This is driver's local copy for keeping track of size and * count for allocating and freeing the memory. * @iml: image loader image virtual address * @iml_dma_addr: image loader image DMA address * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @kw: keep warm address - * @pnvm_dram: DRAM area that contains the PNVM data + * @pnvm_data: holds info about pnvm payloads allocated in DRAM + * @reduced_tables_data: holds info about power reduced tablse + * payloads allocated in DRAM * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. 
* @ucode_write_waitq: wait queue for uCode load * @cmd_queue - command queue number * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @scd_set_active: should the transport configure the SCD for HCMD queue * @rx_page_order: page order for receive buffer size * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight #ifdef CONFIG_IWLWIFI_DEBUGFS * @fw_mon_data: fw continuous recording data #endif * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles * (see iwl_shared_irq_flags). * @alloc_vecs: the number of interrupt vectors allocated by the OS * @def_irq: default irq for non rx causes * @fh_init_mask: initial unmasked fh causes * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes * @in_rescan: true if we have triggered a device rescan * @base_rb_stts: base virtual address of receive buffer status for all queues * @base_rb_stts_dma: base physical address of receive buffer status * @supported_dma_mask: DMA mask to validate the actual address against, * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device * @alloc_page_lock: spinlock for the page allocator * @alloc_page: allocated page to still use parts of * @alloc_page_used: how much of the allocated page was already used (bytes) * @imr_status: imr dma state machine * @wait_queue_head_t: imr wait queue for dma completion * @rf_name: name/version of the CRF, if any */ struct iwl_trans_pcie { struct iwl_rxq *rxq; struct iwl_rx_mem_buffer *rx_pool; struct iwl_rx_mem_buffer **global_table; struct iwl_rb_allocator rba; union { struct iwl_context_info *ctxt_info; struct iwl_context_info_gen3 *ctxt_info_gen3; }; struct iwl_prph_info *prph_info; struct iwl_prph_scratch *prph_scratch; void *iml; dma_addr_t ctxt_info_dma_addr; dma_addr_t prph_info_dma_addr; dma_addr_t prph_scratch_dma_addr; dma_addr_t iml_dma_addr; struct iwl_trans *trans; struct net_device napi_dev; /* INT ICT Table */ __le32 *ict_tbl; dma_addr_t ict_tbl_dma; int ict_index; bool use_ict; bool is_down, opmode_down; s8 debug_rfkill; struct isr_statistics isr_stats; spinlock_t irq_lock; struct mutex mutex; u32 inta_mask; u32 scd_base_addr; struct iwl_dma_ptr kw; - struct iwl_dram_data pnvm_dram; - struct iwl_dram_data reduce_power_dram; + /* pnvm data */ + struct iwl_dram_regions pnvm_data; + struct iwl_dram_regions reduced_tables_data; struct iwl_txq *txq_memory; /* PCI bus related data */ struct pci_dev *pci_dev; u8 __iomem *hw_base; bool ucode_write_complete; bool sx_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t sx_waitq; u8 def_rx_queue; u8 n_no_reclaim_cmds; u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; u16 num_rx_bufs; enum iwl_amsdu_size rx_buf_size; bool scd_set_active; bool pcie_dbg_dumped_once; u32 rx_page_order; u32 rx_buf_bytes; u32 supported_dma_mask; /* allocator lock for the two values below */ spinlock_t alloc_page_lock; struct page *alloc_page; u32 alloc_page_used; /*protect hw register */ spinlock_t reg_lock; bool cmd_hold_nic_awake; #ifdef CONFIG_IWLWIFI_DEBUGFS struct cont_rec fw_mon_data; #endif struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; bool msix_enabled; u8 shared_vec_mask; u32 alloc_vecs; u32 def_irq; u32 fh_init_mask; u32 hw_init_mask; u32 fh_mask; u32 hw_mask; cpumask_t 
affinity_mask[IWL_MAX_RX_HW_QUEUES]; u16 tx_cmd_queue_size; bool in_rescan; void *base_rb_stts; dma_addr_t base_rb_stts_dma; bool fw_reset_handshake; enum iwl_pcie_fw_reset_state fw_reset_state; wait_queue_head_t fw_reset_waitq; enum iwl_pcie_imr_status imr_status; wait_queue_head_t imr_waitq; char rf_name[32]; }; static inline struct iwl_trans_pcie * IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) { return (void *)trans->trans_specific; } static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue) { /* * Before sending the interrupt the HW disables it to prevent * a nested interrupt. This is done by writing 1 to the corresponding * bit in the mask register. After handling the interrupt, it should be * re-enabled by clearing this bit. This register is defined as * write 1 clear (W1C) register, meaning that it's being clear * by writing 1 to the bit. */ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue)); } static inline struct iwl_trans * iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) { return container_of((void *)trans_pcie, struct iwl_trans, trans_specific); } /* * Convention: trans API functions: iwl_trans_pcie_XXX * Other functions: iwl_pcie_XXX */ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_pcie_free(struct iwl_trans *trans); +void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, + struct device *dev); bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); #define _iwl_trans_pcie_grab_nic_access(trans) \ __cond_lock(nic_access_nobh, \ likely(__iwl_trans_pcie_grab_nic_access(trans))) /***************************************************** * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); int iwl_pcie_gen2_rx_init(struct iwl_trans *trans); irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans); void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq); /***************************************************** * ICT - interrupt handling ******************************************************/ irqreturn_t iwl_pcie_isr(int irq, void *data); int iwl_pcie_alloc_ict(struct iwl_trans *trans); void iwl_pcie_free_ict(struct iwl_trans *trans); void iwl_pcie_reset_ict(struct iwl_trans *trans); void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout); void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode); int 
iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); #if defined(__linux__) int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); #endif void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); /***************************************************** * Error handling ******************************************************/ void iwl_pcie_dump_csr(struct iwl_trans *trans); /***************************************************** * Helpers ******************************************************/ static inline void _iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); clear_bit(STATUS_INT_ENABLED, &trans->status); if (!trans_pcie->msix_enabled) { /* disable interrupts from uCode/NIC to host */ iwl_write32(trans, CSR_INT_MASK, 0x00000000); /* acknowledge/clear/reset any interrupts still pending * from uCode or flow handler (Rx/Tx DMA) */ iwl_write32(trans, CSR_INT, 0xffffffff); iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); } else { /* disable all the interrupt we might use */ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask); } IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } static inline int iwl_pcie_get_num_sections(const struct fw_img *fw, int start) { int i = 0; while (start < fw->num_sec && fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { start++; i++; } return i; } static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->fw) { WARN_ON(dram->fw_cnt); return; } for (i = 0; i < dram->fw_cnt; i++) dma_free_coherent(trans->dev, dram->fw[i].size, dram->fw[i].block, dram->fw[i].physical); kfree(dram->fw); dram->fw_cnt = 0; dram->fw = NULL; } static inline void iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_bh(&trans_pcie->irq_lock); _iwl_disable_interrupts(trans); spin_unlock_bh(&trans_pcie->irq_lock); } static inline void _iwl_enable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); set_bit(STATUS_INT_ENABLED, &trans->status); if (!trans_pcie->msix_enabled) { trans_pcie->inta_mask = CSR_INI_SET_MASK; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { /* * fh/hw_mask keeps all the unmasked causes. * Unlike msi, in msix cause is enabled when it is unset. 
*/ trans_pcie->hw_mask = trans_pcie->hw_init_mask; trans_pcie->fh_mask = trans_pcie->fh_init_mask; iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~trans_pcie->fh_mask); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~trans_pcie->hw_mask); } } static inline void iwl_enable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_bh(&trans_pcie->irq_lock); _iwl_enable_interrupts(trans); spin_unlock_bh(&trans_pcie->irq_lock); } static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); trans_pcie->hw_mask = msk; } static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); trans_pcie->fh_mask = msk; } static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); if (!trans_pcie->msix_enabled) { trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, trans_pcie->hw_init_mask); iwl_enable_fh_int_msk_msix(trans, MSIX_FH_INT_CAUSES_D2S_CH0_NUM); } } static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n"); if (!trans_pcie->msix_enabled) { /* * When we'll receive the ALIVE interrupt, the ISR will call * iwl_enable_fw_load_int_ctx_info again to set the ALIVE * interrupt (which is not really needed anymore) but also the * RX interrupt which will allow us to receive the ALIVE * notification (which is Rx) and continue the flow. */ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { iwl_enable_hw_int_msk_msix(trans, MSIX_HW_INT_CAUSES_REG_ALIVE); /* * Leave all the FH causes enabled to get the ALIVE * notification. */ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask); } } static inline const char *queue_name(struct device *dev, struct iwl_trans_pcie *trans_p, int i) { if (trans_p->shared_vec_mask) { int vec = trans_p->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; if (i == 0) return DRV_NAME ":shared_IRQ"; return devm_kasprintf(dev, GFP_KERNEL, DRV_NAME ":queue_%d", i + vec); } if (i == 0) return DRV_NAME ":default_queue"; if (i == trans_p->alloc_vecs - 1) return DRV_NAME ":exception"; return devm_kasprintf(dev, GFP_KERNEL, DRV_NAME ":queue_%d", i); } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); if (!trans_pcie->msix_enabled) { trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); } else { iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, trans_pcie->fh_init_mask); iwl_enable_hw_int_msk_msix(trans, MSIX_HW_INT_CAUSES_REG_RF_KILL); } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { /* * On 9000-series devices this bit isn't enabled by default, so * when we power down the device we need set the bit to allow it * to wake up the PCI-E bus for RF-kill interrupts. 
*/ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN); } } void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->debug_rfkill == 1) return true; return !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); } static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { u32 v; #ifdef CONFIG_IWLWIFI_DEBUG WARN_ON_ONCE(value & ~mask); #endif v = iwl_read32(trans, reg); v &= ~mask; v |= value; iwl_write32(trans, reg, v); } static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask) { __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0); } static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) { __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); } static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) { return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); #else static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_apply_destination(struct iwl_trans *trans); /* common functions that are used by gen3 transport */ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, bool test, bool reset); int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/sys/contrib/dev/iwlwifi/pcie/rx.c b/sys/contrib/dev/iwlwifi/pcie/rx.c index 480cc9c765d8..7499ba3093af 100644 --- a/sys/contrib/dev/iwlwifi/pcie/rx.c +++ b/sys/contrib/dev/iwlwifi/pcie/rx.c @@ -1,2382 +1,2404 @@ // SPDX-License-Identifier: GPL-2.0 OR 
BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2022 Intel Corporation + * Copyright (C) 2003-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-prph.h" #include "iwl-io.h" #include "internal.h" #include "iwl-op-mode.h" #include "iwl-context-info-gen3.h" /****************************************************************************** * * RX path functions * ******************************************************************************/ /* * Rx theory of operation * * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), * each of which point to Receive Buffers to be filled by the NIC. These get * used not only for Rx frames, but for any command response or notification * from the NIC. The driver and NIC manage the Rx buffers by means * of indexes into the circular buffer. * * Rx Queue Indexes * The host/firmware share two index registers for managing the Rx buffers. * * The READ index maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ index is managed by the firmware once the card is enabled. * * The WRITE index maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. * * During initialization, the host sets up the READ queue position to the first * INDEX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ index * and fire the RX interrupt. The driver can then query the READ index and * process as many packets as possible, moving the WRITE index forward as it * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. * When the interrupt handler is called, the request is processed. * The page is either stolen - transferred to the upper layer * or reused - added immediately to the iwl->rxq->rx_free list. * + When the page is stolen - the driver updates the matching queue's used * count, detaches the RBD and transfers it to the queue used list. * When there are two used RBDs - they are transferred to the allocator empty * list. Work is then scheduled for the allocator to start allocating * eight buffers. * When there are another 6 used RBDs - they are transferred to the allocator * empty list and the driver tries to claim the pre-allocated buffers and * add them to iwl->rxq->rx_free. If it fails - it continues to claim them * until ready. * When there are 8+ buffers in the free list - either from allocation or from * 8 reused unstolen pages - restock is called to update the FW and indexes. * + In order to make sure the allocator always has RBDs to use for allocation * the allocator has initial pool in the size of num_queues*(8-2) - the * maximum missing RBDs per allocation request (request posted with 2 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). * The queues supplies the recycle of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. 
* + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * * * Driver sequence: * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls * iwl_pcie_rxq_restock. * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE index. * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. * * RBD life-cycle: * * Init: * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue * * Regular Receive interrupt: * Page Stolen: * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue * Page not Stolen: * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ /* * iwl_rxq_space - Return number of free slots available in queue. */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { /* Make sure rx queue size is a power of 2 */ WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); /* * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity * between empty and completely full queues. * The following is equivalent to modulo by RX_QUEUE_SIZE and is well * defined for negative dividends. */ return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); } /* * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr */ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) { return cpu_to_le32((u32)(dma_addr >> 8)); } /* * iwl_pcie_rx_stop - stops the Rx DMA */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { /* TODO: remove this once fw does it */ iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); } else if (trans->trans_cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); } else { iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); } } /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *rxq) { u32 reg; lockdep_assert_held(&rxq->lock); /* * explicitly wake up the NIC if: * 1. shadow registers aren't enabled * 2. 
there is a chance that the NIC is asleep */ if (!trans->trans_cfg->base_params->shadow_reg_enable && test_bit(STATUS_TPOWER_PMI, &trans->status)) { reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); rxq->need_update = true; return; } } rxq->write_actual = round_down(rxq->write, 8); if (!trans->trans_cfg->mq_rx_supported) iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | HBUS_TARG_WRPTR_RX_Q(rxq->id)); else iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; if (!rxq->need_update) continue; spin_lock_bh(&rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, rxq); rxq->need_update = false; spin_unlock_bh(&rxq->lock); } } static void iwl_pcie_restock_bd(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_rx_transfer_desc *bd = rxq->bd; BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64)); bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); bd[rxq->write].rbid = cpu_to_le16(rxb->vid); } else { __le64 *bd = rxq->bd; bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); } #if defined(__linux__) IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", #elif defined(__FreeBSD__) IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n", (u32)rxb->vid, rxq->id, rxq->write); #endif } /* * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx */ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; /* * If the device isn't enabled - no need to try to add buffers... * This can happen when we stop the device and still have an interrupt * pending. We stop the APM before we sync the interrupts because we * have to (see comment there). On the other hand, since the APM is * stopped, we cannot access the HW (in particular not prph). * So don't try to restock if the APM has been already stopped. */ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return; spin_lock_bh(&rxq->lock); while (rxq->free_count) { /* Get next free Rx buffer, remove from free list */ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); rxb->invalid = false; /* some low bits are expected to be unset (depending on hw) */ WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask); /* Point to Rx buffer via next RBD in circular buffer */ iwl_pcie_restock_bd(trans, rxq, rxb); rxq->write = (rxq->write + 1) & (rxq->queue_size - 1); rxq->free_count--; } spin_unlock_bh(&rxq->lock); /* * If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. 
*/ if (rxq->write_actual != (rxq->write & ~0x7)) { spin_lock_bh(&rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, rxq); spin_unlock_bh(&rxq->lock); } } /* * iwl_pcie_rxsq_restock - restock implementation for single queue rx */ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_rx_mem_buffer *rxb; /* * If the device isn't enabled - no need to try to add buffers... * This can happen when we stop the device and still have an interrupt * pending. We stop the APM before we sync the interrupts because we * have to (see comment there). On the other hand, since the APM is * stopped, we cannot access the HW (in particular not prph). * So don't try to restock if the APM has been already stopped. */ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return; spin_lock_bh(&rxq->lock); while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { __le32 *bd = (__le32 *)rxq->bd; /* The overwritten rxb must be a used one */ rxb = rxq->queue[rxq->write]; BUG_ON(rxb && rxb->page); /* Get next free Rx buffer, remove from free list */ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); rxb->invalid = false; /* Point to Rx buffer via next RBD in circular buffer */ bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); rxq->queue[rxq->write] = rxb; rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; rxq->free_count--; } spin_unlock_bh(&rxq->lock); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ if (rxq->write_actual != (rxq->write & ~0x7)) { spin_lock_bh(&rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, rxq); spin_unlock_bh(&rxq->lock); } } /* * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool * * If there are slots in the RX queue that need to be restocked, * and we have free pre-allocated buffers, fill the ranks as much * as we can, pulling from rx_free. * * This moves the 'write' index forward to catch up with 'processed', and * also updates the memory address in the firmware to reference the new * target buffer. */ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { if (trans->trans_cfg->mq_rx_supported) iwl_pcie_rxmq_restock(trans, rxq); else iwl_pcie_rxsq_restock(trans, rxq); } /* * iwl_pcie_rx_alloc_page - allocates and returns a page. * */ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, u32 *offset, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order; struct page *page; gfp_t gfp_mask = priority; if (trans_pcie->rx_page_order > 0) gfp_mask |= __GFP_COMP; if (trans_pcie->alloc_page) { spin_lock_bh(&trans_pcie->alloc_page_lock); /* recheck */ if (trans_pcie->alloc_page) { *offset = trans_pcie->alloc_page_used; page = trans_pcie->alloc_page; trans_pcie->alloc_page_used += rbsize; if (trans_pcie->alloc_page_used >= allocsize) trans_pcie->alloc_page = NULL; else get_page(page); spin_unlock_bh(&trans_pcie->alloc_page_lock); return page; } spin_unlock_bh(&trans_pcie->alloc_page_lock); } /* Alloc a new receive buffer */ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); if (!page) { if (net_ratelimit()) IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order); /* * Issue an error if we don't have enough pre-allocated * buffers.
*/ if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) IWL_CRIT(trans, "Failed to alloc_pages\n"); return NULL; } if (2 * rbsize <= allocsize) { spin_lock_bh(&trans_pcie->alloc_page_lock); if (!trans_pcie->alloc_page) { get_page(page); trans_pcie->alloc_page = page; trans_pcie->alloc_page_used = rbsize; } spin_unlock_bh(&trans_pcie->alloc_page_lock); } *offset = 0; return page; } /* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again * a page must be allocated and the RBD must point to the page. This function * doesn't change the HW pointer but handles the list of pages that is used by * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; struct page *page; while (1) { unsigned int offset; spin_lock_bh(&rxq->lock); if (list_empty(&rxq->rx_used)) { spin_unlock_bh(&rxq->lock); return; } spin_unlock_bh(&rxq->lock); page = iwl_pcie_rx_alloc_page(trans, &offset, priority); if (!page) return; spin_lock_bh(&rxq->lock); if (list_empty(&rxq->rx_used)) { spin_unlock_bh(&rxq->lock); __free_pages(page, trans_pcie->rx_page_order); return; } rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); spin_unlock_bh(&rxq->lock); BUG_ON(rxb->page); rxb->page = page; rxb->offset = offset; /* Get physical address of the RB */ rxb->page_dma = dma_map_page(trans->dev, page, rxb->offset, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, rxb->page_dma)) { rxb->page = NULL; spin_lock_bh(&rxq->lock); list_add(&rxb->list, &rxq->rx_used); spin_unlock_bh(&rxq->lock); __free_pages(page, trans_pcie->rx_page_order); return; } spin_lock_bh(&rxq->lock); list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; spin_unlock_bh(&rxq->lock); } } void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; if (!trans_pcie->rx_pool) return; for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) { if (!trans_pcie->rx_pool[i].page) continue; dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); __free_pages(trans_pcie->rx_pool[i].page, trans_pcie->rx_page_order); trans_pcie->rx_pool[i].page = NULL; } } /* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * * Allocates for each received request 8 pages * Called as a scheduled work item. 
*/ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; struct list_head local_empty; int pending = atomic_read(&rba->req_pending); IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); /* If we were scheduled - there is at least one request */ spin_lock_bh(&rba->lock); /* swap out the rba->rbd_empty to a local list */ list_replace_init(&rba->rbd_empty, &local_empty); spin_unlock_bh(&rba->lock); while (pending) { int i; LIST_HEAD(local_allocated); gfp_t gfp_mask = GFP_KERNEL; /* Do not post a warning if there are only a few requests */ if (pending < RX_PENDING_WATERMARK) gfp_mask |= __GFP_NOWARN; for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { struct iwl_rx_mem_buffer *rxb; struct page *page; /* List should never be empty - each reused RBD is * returned to the list, and initial pool covers any * possible gap between the time the page is allocated * to the time the RBD is added. */ BUG_ON(list_empty(&local_empty)); /* Get the first rxb from the rbd list */ rxb = list_first_entry(&local_empty, struct iwl_rx_mem_buffer, list); BUG_ON(rxb->page); /* Alloc a new receive buffer */ page = iwl_pcie_rx_alloc_page(trans, &rxb->offset, gfp_mask); if (!page) continue; rxb->page = page; /* Get physical address of the RB */ rxb->page_dma = dma_map_page(trans->dev, page, rxb->offset, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, rxb->page_dma)) { rxb->page = NULL; __free_pages(page, trans_pcie->rx_page_order); continue; } /* move the allocated entry to the out list */ list_move(&rxb->list, &local_allocated); i++; } atomic_dec(&rba->req_pending); pending--; if (!pending) { pending = atomic_read(&rba->req_pending); if (pending) IWL_DEBUG_TPT(trans, "Got more pending allocation requests = %d\n", pending); } spin_lock_bh(&rba->lock); /* add the allocated rbds to the allocator allocated list */ list_splice_tail(&local_allocated, &rba->rbd_allocated); /* get more empty RBDs for current pending requests */ list_splice_tail_init(&rba->rbd_empty, &local_empty); spin_unlock_bh(&rba->lock); atomic_inc(&rba->req_ready); } spin_lock_bh(&rba->lock); /* return unused rbds to the allocator empty list */ list_splice_tail(&local_empty, &rba->rbd_empty); spin_unlock_bh(&rba->lock); IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__); } /* * iwl_pcie_rx_allocator_get - returns the pre-allocated pages * * Called by queue when the queue posted allocation request and * has freed 8 RBDs in order to restock itself. * This function directly moves the allocated RBs to the queue's ownership * and updates the relevant counters. */ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; lockdep_assert_held(&rxq->lock); /* * atomic_dec_if_positive returns req_ready - 1 for any scenario. * If req_ready is 0 atomic_dec_if_positive will return -1 and this * function will return early, as there are no ready requests. * atomic_dec_if_positive will perform the *actual* decrement only if * req_ready > 0, i.e. - there are ready requests and the function * hands one request to the caller.
*/ if (atomic_dec_if_positive(&rba->req_ready) < 0) return; spin_lock(&rba->lock); for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { /* Get next free Rx buffer, remove it from free list */ struct iwl_rx_mem_buffer *rxb = list_first_entry(&rba->rbd_allocated, struct iwl_rx_mem_buffer, list); list_move(&rxb->list, &rxq->rx_free); } spin_unlock(&rba->lock); rxq->used_count -= RX_CLAIM_REQ_ALLOC; rxq->free_count += RX_CLAIM_REQ_ALLOC; } void iwl_pcie_rx_allocator_work(struct work_struct *data) { struct iwl_rb_allocator *rba_p = container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = container_of(rba_p, struct iwl_trans_pcie, rba); iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_free_bd_size(struct iwl_trans *trans) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return sizeof(struct iwl_rx_transfer_desc); return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) : sizeof(__le32); } static int iwl_pcie_used_bd_size(struct iwl_trans *trans) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) return sizeof(struct iwl_rx_completion_desc_bz); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return sizeof(struct iwl_rx_completion_desc); return sizeof(__le32); } static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { int free_size = iwl_pcie_free_bd_size(trans); if (rxq->bd) dma_free_coherent(trans->dev, free_size * rxq->queue_size, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; rxq->rb_stts_dma = 0; rxq->rb_stts = NULL; if (rxq->used_bd) dma_free_coherent(trans->dev, iwl_pcie_used_bd_size(trans) * rxq->queue_size, rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; rxq->used_bd = NULL; } +static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans) +{ + bool use_rx_td = (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210); + + if (use_rx_td) + return sizeof(__le16); + + return sizeof(struct iwl_rb_status); +} + static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct device *dev = trans->dev; int i; int free_size; - bool use_rx_td = (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210); - size_t rb_stts_size = use_rx_td ? 
sizeof(__le16) : - sizeof(struct iwl_rb_status); spin_lock_init(&rxq->lock); if (trans->trans_cfg->mq_rx_supported) rxq->queue_size = trans->cfg->num_rbds; else rxq->queue_size = RX_QUEUE_SIZE; free_size = iwl_pcie_free_bd_size(trans); /* * Allocate the circular buffer of Read Buffer Descriptors * (RBDs) */ rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err; if (trans->trans_cfg->mq_rx_supported) { rxq->used_bd = dma_alloc_coherent(dev, iwl_pcie_used_bd_size(trans) * rxq->queue_size, &rxq->used_bd_dma, GFP_KERNEL); if (!rxq->used_bd) goto err; } rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; rxq->rb_stts_dma = trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; return 0; err: for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); } return -ENOMEM; } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, ret; - size_t rb_stts_size = trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210 ? - sizeof(__le16) : sizeof(struct iwl_rb_status); if (WARN_ON(trans_pcie->rxq)) return -EINVAL; trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), GFP_KERNEL); trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), sizeof(trans_pcie->rx_pool[0]), GFP_KERNEL); trans_pcie->global_table = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs), sizeof(trans_pcie->global_table[0]), GFP_KERNEL); if (!trans_pcie->rxq || !trans_pcie->rx_pool || !trans_pcie->global_table) { ret = -ENOMEM; goto err; } spin_lock_init(&rba->lock); /* * Allocate the driver's pointer to receive buffer status. * Allocate for all queues continuously (HW requirement). 
*/ trans_pcie->base_rb_stts = dma_alloc_coherent(trans->dev, rb_stts_size * trans->num_rx_queues, &trans_pcie->base_rb_stts_dma, GFP_KERNEL); if (!trans_pcie->base_rb_stts) { ret = -ENOMEM; goto err; } for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; rxq->id = i; ret = iwl_pcie_alloc_rxq_dma(trans, rxq); if (ret) goto err; } return 0; err: if (trans_pcie->base_rb_stts) { dma_free_coherent(trans->dev, rb_stts_size * trans->num_rx_queues, trans_pcie->base_rb_stts, trans_pcie->base_rb_stts_dma); trans_pcie->base_rb_stts = NULL; trans_pcie->base_rb_stts_dma = 0; } kfree(trans_pcie->rx_pool); trans_pcie->rx_pool = NULL; kfree(trans_pcie->global_table); trans_pcie->global_table = NULL; kfree(trans_pcie->rxq); trans_pcie->rxq = NULL; return ret; } static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_4K: rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; break; case IWL_AMSDU_8K: rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; break; case IWL_AMSDU_12K: rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; break; default: WARN_ON(1); rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; } if (!iwl_trans_grab_nic_access(trans)) return; /* Stop Rx DMA */ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); /* reset and flush pointers */ iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); /* Reset driver's Rx queue write index */ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); /* Tell device where to find RBD circular buffer in DRAM */ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (u32)(rxq->bd_dma >> 8)); /* Tell device where in DRAM to update its Rx status */ iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4); /* Enable Rx DMA * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in * the credit mechanism in 5000 HW RX FIFO * Direct rx interrupts to hosts * Rx buffer size 4 or 8k or 12k * RB timeout 0x10 * 256 RBDs */ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | rb_size | (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); iwl_trans_release_nic_access(trans); /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); /* W/A for interrupt coalescing bug in 7260 and 3160 */ if (trans->cfg->host_interrupt_operation_mode) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size, enabled = 0; int i; switch (trans_pcie->rx_buf_size) { case IWL_AMSDU_2K: rb_size = RFH_RXF_DMA_RB_SIZE_2K; break; case IWL_AMSDU_4K: rb_size = RFH_RXF_DMA_RB_SIZE_4K; break; case IWL_AMSDU_8K: rb_size = RFH_RXF_DMA_RB_SIZE_8K; break; case IWL_AMSDU_12K: rb_size = RFH_RXF_DMA_RB_SIZE_12K; break; default: WARN_ON(1); rb_size = RFH_RXF_DMA_RB_SIZE_4K; } if (!iwl_trans_grab_nic_access(trans)) return; /* Stop Rx DMA */ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); /* disable free amd used rx queue operation */ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); for 
(i = 0; i < trans->num_rx_queues; i++) { /* Tell device where to find RBD free table in DRAM */ iwl_write_prph64_no_grab(trans, RFH_Q_FRBDCB_BA_LSB(i), trans_pcie->rxq[i].bd_dma); /* Tell device where to find RBD used table in DRAM */ iwl_write_prph64_no_grab(trans, RFH_Q_URBDCB_BA_LSB(i), trans_pcie->rxq[i].used_bd_dma); /* Tell device where in DRAM to update its Rx status */ iwl_write_prph64_no_grab(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), trans_pcie->rxq[i].rb_stts_dma); /* Reset device indice tables */ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); enabled |= BIT(i) | BIT(i + 16); } /* * Enable Rx DMA * Rx buffer size 4 or 8k or 12k * Min RB size 4 or 8 * Drop frames that exceed RB size * 512 RBDs */ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, RFH_DMA_EN_ENABLE_VAL | rb_size | RFH_RXF_DMA_MIN_RB_4_8 | RFH_RXF_DMA_DROP_TOO_LARGE_MASK | RFH_RXF_DMA_RBDCB_SIZE_512); /* * Activate DMA snooping. * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe * Default queue is 0 */ iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) | RFH_GEN_CFG_SERVICE_DMA_SNOOP | RFH_GEN_CFG_VAL(RB_CHUNK_SIZE, trans->trans_cfg->integrated ? RFH_GEN_CFG_RB_CHUNK_SIZE_64 : RFH_GEN_CFG_RB_CHUNK_SIZE_128)); /* Enable the relevant rx queues */ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); iwl_trans_release_nic_access(trans); /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { lockdep_assert_held(&rxq->lock); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; rxq->used_count = 0; } static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) { struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; int ret; trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, budget); if (ret < budget) { spin_lock(&trans_pcie->irq_lock); if (test_bit(STATUS_INT_ENABLED, &trans->status)) _iwl_enable_interrupts(trans); spin_unlock(&trans_pcie->irq_lock); napi_complete_done(&rxq->napi, ret); } return ret; } static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) { struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi); struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; int ret; trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev); trans = trans_pcie->trans; ret = iwl_pcie_rx_handle(trans, rxq->id, budget); IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret, budget); if (ret < budget) { int irq_line = rxq->id; /* FIRST_RSS is shared with line 0 */ if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS && rxq->id == 1) irq_line = 0; spin_lock(&trans_pcie->irq_lock); iwl_pcie_clear_irq(trans, irq_line); spin_unlock(&trans_pcie->irq_lock); napi_complete_done(&rxq->napi, ret); } return ret; } +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + if (unlikely(!trans_pcie->rxq)) + return; + + for (i = 0; i < trans->num_rx_queues; i++) 
{ + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq && rxq->napi.poll) + napi_synchronize(&rxq->napi); + } +} + static int _iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err, queue_size, allocator_pool_size, num_alloc; if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); if (err) return err; } def_rxq = trans_pcie->rxq; cancel_work_sync(&rba->rx_alloc); spin_lock_bh(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); INIT_LIST_HEAD(&rba->rbd_allocated); INIT_LIST_HEAD(&rba->rbd_empty); spin_unlock_bh(&rba->lock); /* free all first - we overwrite everything here */ iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) def_rxq->queue[i] = NULL; for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; spin_lock_bh(&rxq->lock); /* * Set read write pointer to reflect that we have processed * and used all buffers, but have not restocked the Rx queue * with fresh buffers */ rxq->read = 0; rxq->write = 0; rxq->write_actual = 0; memset(rxq->rb_stts, 0, (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ? sizeof(__le16) : sizeof(struct iwl_rb_status)); iwl_pcie_rx_init_rxb_lists(rxq); spin_unlock_bh(&rxq->lock); if (!rxq->napi.poll) { int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll; if (trans_pcie->msix_enabled) poll = iwl_pcie_napi_poll_msix; - netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, poll); + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + poll); napi_enable(&rxq->napi); } } /* move the pool to the default queue and allocator ownerships */ queue_size = trans->trans_cfg->mq_rx_supported ? trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE; allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); num_alloc = queue_size + allocator_pool_size; for (i = 0; i < num_alloc; i++) { struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; if (i < allocator_pool_size) list_add(&rxb->list, &rba->rbd_empty); else list_add(&rxb->list, &def_rxq->rx_used); trans_pcie->global_table[i] = rxb; rxb->vid = (u16)(i + 1); rxb->invalid = true; } iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); return 0; } int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = _iwl_pcie_rx_init(trans); if (ret) return ret; if (trans->trans_cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); else iwl_pcie_rx_hw_init(trans, trans_pcie->rxq); iwl_pcie_rxq_restock(trans, trans_pcie->rxq); spin_lock_bh(&trans_pcie->rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq); spin_unlock_bh(&trans_pcie->rxq->lock); return 0; } int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) { /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); /* * We don't configure the RFH. * Restock will be done at alive, after firmware configured the RFH. */ return _iwl_pcie_rx_init(trans); } void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; - size_t rb_stts_size = trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210 ? 
- sizeof(__le16) : sizeof(struct iwl_rb_status); /* * if rxq is NULL, it means that nothing has been allocated, * exit now */ if (!trans_pcie->rxq) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } cancel_work_sync(&rba->rx_alloc); iwl_pcie_free_rbs_pool(trans); if (trans_pcie->base_rb_stts) { dma_free_coherent(trans->dev, rb_stts_size * trans->num_rx_queues, trans_pcie->base_rb_stts, trans_pcie->base_rb_stts_dma); trans_pcie->base_rb_stts = NULL; trans_pcie->base_rb_stts_dma = 0; } for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; iwl_pcie_free_rxq_dma(trans, rxq); if (rxq->napi.poll) { napi_disable(&rxq->napi); netif_napi_del(&rxq->napi); } } kfree(trans_pcie->rx_pool); kfree(trans_pcie->global_table); kfree(trans_pcie->rxq); if (trans_pcie->alloc_page) __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order); } static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, struct iwl_rb_allocator *rba) { spin_lock(&rba->lock); list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); spin_unlock(&rba->lock); } /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * * Called when a RBD can be reused. The RBD is transferred to the allocator. * When there are 2 empty RBDs - a request for allocation is posted */ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, struct iwl_rxq *rxq, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; /* Move the RBD to the used list, will be moved to allocator in batches * before claiming or posting a request*/ list_add_tail(&rxb->list, &rxq->rx_used); if (unlikely(emergency)) return; /* Count the allocator owned RBDs */ rxq->used_count++; /* If we have RX_POST_REQ_ALLOC new released rx buffers - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, * after but we still need to post another request. */ if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { /* Move the 2 RBDs to the allocator ownership. 
Allocator has another 6 from pool for the request completion*/ iwl_pcie_rx_move_to_allocator(rxq, rba); atomic_inc(&rba->req_pending); queue_work(rba->alloc_wq, &rba->rx_alloc); } } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; bool page_stolen = false; int max_len = trans_pcie->rx_buf_bytes; u32 offset = 0; if (WARN_ON(!rxb)) return; dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; bool reclaim; int len; struct iwl_rx_cmd_buffer rxcb = { ._offset = rxb->offset + offset, ._rx_page_order = trans_pcie->rx_page_order, ._page = rxb->page, ._page_stolen = false, .truesize = max_len, }; pkt = rxb_addr(&rxcb); if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) { IWL_DEBUG_RX(trans, "Q %d: RB end marker at offset %d\n", rxq->id, offset); break; } WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS != rxq->id, "frame on invalid queue - is on %d and indicates %d\n", rxq->id, (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> FH_RSCSR_RXQ_POS); IWL_DEBUG_RX(trans, "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxq->id, offset, iwl_get_cmd_string(trans, WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), pkt->hdr.group_id, pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); len = iwl_rx_packet_len(pkt); len += sizeof(u32); /* account for status word */ offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); /* check that what the device tells us made sense */ if (len < sizeof(*pkt) || offset > max_len) break; trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); /* Reclaim a command buffer only if this packet is a response * to a (driver-originated) command. * If the packet (e.g. Rx frame) originated from uCode, * there is no command buffer to reclaim. * Ucode should set SEQ_RX_FRAME bit if ucode-originated, * but apparently a few don't get set; catch them here. */ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); if (reclaim && !pkt->hdr.group_id) { int i; for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) { reclaim = false; break; } } } if (rxq->id == trans_pcie->def_rx_queue) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); else iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, &rxcb, rxq->id); /* * After here, we should always check rxcb._page_stolen, * if it is true then one of the handlers took the page. */ if (reclaim) { u16 sequence = le16_to_cpu(pkt->hdr.sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index = iwl_txq_get_cmd_index(txq, index); kfree_sensitive(txq->entries[cmd_index].free_buf); txq->entries[cmd_index].free_buf = NULL; /* Invoke any callbacks, transfer the buffer to caller, * and fire off the (possibly) blocking * iwl_trans_send_cmd() * as we reclaim the driver command queue */ if (!rxcb._page_stolen) iwl_pcie_hcmd_complete(trans, &rxcb); else IWL_WARN(trans, "Claim null rxb?\n"); } page_stolen |= rxcb._page_stolen; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) break; } /* page was stolen from us -- free our reference */ if (page_stolen) { __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; } /* Reuse the page if possible. 
For notification packets and * SKBs that fail to Rx correctly, add them back into the * rx_free list for reuse later. */ if (rxb->page != NULL) { rxb->page_dma = dma_map_page(trans->dev, rxb->page, rxb->offset, trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE); if (dma_mapping_error(trans->dev, rxb->page_dma)) { /* * free the page(s) as well to not break * the invariant that the items on the used * list have no page(s) */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, struct iwl_rxq *rxq, int i, bool *join) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; u16 vid; BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); if (!trans->trans_cfg->mq_rx_supported) { rxb = rxq->queue[i]; rxq->queue[i] = NULL; return rxb; } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; vid = le16_to_cpu(cd[i].rbid); *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_rx_completion_desc *cd = rxq->used_bd; vid = le16_to_cpu(cd[i].rbid); *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; } else { __le32 *cd = rxq->used_bd; vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ } if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) goto out_err; rxb = trans_pcie->global_table[vid - 1]; if (rxb->invalid) goto out_err; IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); rxb->invalid = true; return rxb; out_err: WARN(1, "Invalid rxb from HW %u\n", (u32)vid); iwl_force_nmi(trans); return NULL; } /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq; u32 r, i, count = 0, handled = 0; bool emergency = false; if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd)) return budget; rxq = &trans_pcie->rxq[queue]; restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). */ r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ r &= (rxq->queue_size - 1); /* Rx interrupt, but nothing sent from uCode */ if (i == r) IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r && ++handled < budget) { struct iwl_rb_allocator *rba = &trans_pcie->rba; struct iwl_rx_mem_buffer *rxb; /* number of RBDs still waiting for page allocation */ u32 rb_pending_alloc = atomic_read(&trans_pcie->rba.req_pending) * RX_CLAIM_REQ_ALLOC; bool join = false; if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 && !emergency)) { iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; IWL_DEBUG_TPT(trans, "RX path is in emergency. 
Pending allocations %d\n", rb_pending_alloc); } IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); rxb = iwl_pcie_get_rxb(trans, rxq, i, &join); if (!rxb) goto out; if (unlikely(join || rxq->next_rb_is_fragment)) { rxq->next_rb_is_fragment = join; /* * We can only get a multi-RB in the following cases: * - firmware issue, sending a too big notification * - sniffer mode with a large A-MSDU * - large MTU frames (>2k) * since the multi-RB functionality is limited to newer * hardware that cannot put multiple entries into a * single RB. * * Right now, the higher layers aren't set up to deal * with that, so discard all of these. */ list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } else { iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); } i = (i + 1) & (rxq->queue_size - 1); /* * If we have RX_CLAIM_REQ_ALLOC released rx buffers - * try to claim the pre-allocated buffers from the allocator. * If not ready - will try to reclaim next time. * There is no need to reschedule work - allocator exits only * on success */ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) iwl_pcie_rx_allocator_get(trans, rxq); if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { /* Add the remaining empty RBDs for allocator use */ iwl_pcie_rx_move_to_allocator(rxq, rba); } else if (emergency) { count++; if (count == 8) { count = 0; if (rb_pending_alloc < rxq->queue_size / 3) { IWL_DEBUG_TPT(trans, "RX path exited emergency. Pending allocations %d\n", rb_pending_alloc); emergency = false; } rxq->read = i; spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); iwl_pcie_rxq_restock(trans, rxq); goto restart; } } } out: /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); /* * handle a case where in emergency there are some unallocated RBDs. * those RBDs are in the used list, but are not tracked by the queue's * used_count which counts allocator owned RBDs. * unallocated emergency RBDs must be allocated on exit, otherwise * when called again the function may not be in emergency mode and * they will be handed to the allocator with no tracking in the RBD * allocator counters, which will lead to them never being claimed back * by the queue. * by allocating them here, they are now in the queue free list, and * will be restocked by the next call of iwl_pcie_rxq_restock. */ if (unlikely(emergency && count)) iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); iwl_pcie_rxq_restock(trans, rxq); return handled; } static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) { u8 queue = entry->entry; struct msix_entry *entries = entry - queue; return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); } /* * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw * This interrupt handler should be used with RSS queue only. 
*/ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; - struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry]; + struct iwl_rxq *rxq; trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; - if (!rxq) { + if (!trans_pcie->rxq) { if (net_ratelimit()) IWL_ERR(trans, "[%d] Got MSI-X interrupt before we have Rx queues\n", entry->entry); return IRQ_NONE; } + rxq = &trans_pcie->rxq[entry->entry]; lock_map_acquire(&trans->sync_cmd_lockdep_map); IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); local_bh_disable(); if (napi_schedule_prep(&rxq->napi)) __napi_schedule(&rxq->napi); else iwl_pcie_clear_irq(trans, entry->entry); local_bh_enable(); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } /* * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card */ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && !trans->cfg->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & APMG_PS_CTRL_VAL_RESET_REQ))) { clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); iwl_op_mode_wimax_active(trans->op_mode); wake_up(&trans->wait_command_queue); return; } for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { if (!trans->txqs.txq[i]) continue; del_timer(&trans->txqs.txq[i]->stuck_timer); } /* The STATUS_FW_ERROR bit is set in this function. This must happen * before we wake up the command caller, to ensure a proper cleanup. */ iwl_trans_fw_error(trans, false); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans->wait_command_queue); } static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) { u32 inta; lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); trace_iwlwifi_dev_irq(trans->dev); /* Discover which interrupts are active/pending */ inta = iwl_read32(trans, CSR_INT); /* the thread will service interrupts and re-enable them */ return inta; } /* a device (PCI-E) page is 4096 bytes long */ #define ICT_SHIFT 12 #define ICT_SIZE (1 << ICT_SHIFT) #define ICT_COUNT (ICT_SIZE / sizeof(u32)) /* interrupt handler using ict table, with this interrupt driver will * stop using INTA register to get device's interrupt, reading this register * is expensive, device will write interrupts in ICT dram table, increment * index then will fire interrupt to driver, driver will OR all ICT table * entries from current index up to table entry with 0 value. the result is * the interrupt we need to service, driver will set the entries back to 0 and * set index. */ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 inta; u32 val = 0; u32 read; trace_iwlwifi_dev_irq(trans->dev); /* Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC. */ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); if (!read) return 0; /* * Collect all entries up to the first 0, starting from ict_index; * note we already read at ict_index. 
*/ do { val |= read; IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n", trans_pcie->ict_index, read); trans_pcie->ict_tbl[trans_pcie->ict_index] = 0; trans_pcie->ict_index = ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1)); read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]); trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read); } while (read); /* We should not get this value, just ignore it. */ if (val == 0xffffffff) val = 0; /* * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit * (bit 15 before shifting it to 31) to clear when using interrupt * coalescing. fortunately, bits 18 and 19 stay set when this happens * so we use them to decide on the real state of the Rx bit. * In other words, bit 15 is set if bit 18 or bit 19 are set. */ if (val & 0xC0000) val |= 0x8000; inta = (0xff & val) | ((0xff00 & val) << 16); return inta; } void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; bool hw_rfkill, prev, report; mutex_lock(&trans_pcie->mutex); prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) { set_bit(STATUS_RFKILL_OPMODE, &trans->status); set_bit(STATUS_RFKILL_HW, &trans->status); } if (trans_pcie->opmode_down) report = hw_rfkill; else report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", hw_rfkill ? "disable radio" : "enable radio"); isr_stats->rfkill++; if (prev != report) iwl_trans_pcie_rf_kill(trans, report); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) IWL_DEBUG_RF_KILL(trans, "Rfkill while SYNC HCMD in flight\n"); wake_up(&trans->wait_command_queue); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } } irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) { struct iwl_trans *trans = dev_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta = 0; u32 handled = 0; bool polling = false; lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock_bh(&trans_pcie->irq_lock); /* dram interrupt table not set yet, * use legacy interrupt. */ if (likely(trans_pcie->use_ict)) inta = iwl_pcie_int_cause_ict(trans); else inta = iwl_pcie_int_cause_non_ict(trans); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n", inta, trans_pcie->inta_mask, iwl_read32(trans, CSR_INT_MASK), iwl_read32(trans, CSR_FH_INT_STATUS)); if (inta & (~trans_pcie->inta_mask)) IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n", inta & (~trans_pcie->inta_mask)); } #endif inta &= trans_pcie->inta_mask; /* * Ignore interrupt if there's nothing in NIC to service. * This may be due to IRQ shared with another device, * or due to sporadic interrupts thrown from our NIC.
*/ if (unlikely(!inta)) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); /* * Re-enable interrupts here since we don't * have anything to service */ if (test_bit(STATUS_INT_ENABLED, &trans->status)) _iwl_enable_interrupts(trans); spin_unlock_bh(&trans_pcie->irq_lock); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_NONE; } - if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) { /* * Hardware disappeared. It might have * already raised an interrupt. */ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta); spin_unlock_bh(&trans_pcie->irq_lock); goto out; } /* Ack/clear/reset pending uCode interrupts. * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, */ /* There is a hardware bug in the interrupt mask function that some * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if * they are disabled in the CSR_INT_MASK register. Furthermore the * ICT interrupt handling mechanism has another bug that might cause * these unmasked interrupts fail to be detected. We workaround the * hardware bugs here by ACKing all the possible interrupts so that * interrupt coalescing can still be achieved. */ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", inta, iwl_read32(trans, CSR_INT_MASK)); #endif spin_unlock_bh(&trans_pcie->irq_lock); /* Now service all interrupt bits discovered above. */ if (inta & CSR_INT_BIT_HW_ERR) { IWL_ERR(trans, "Hardware error detected. Restarting.\n"); /* Tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); isr_stats->hw++; iwl_pcie_irq_handle_error(trans); handled |= CSR_INT_BIT_HW_ERR; goto out; } /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { IWL_DEBUG_ISR(trans, "Scheduler finished to transmit the frame/frames.\n"); isr_stats->sch++; } /* Alive notification via Rx interrupt will do the real work */ if (inta & CSR_INT_BIT_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; if (trans->trans_cfg->gen2) { /* * We can restock, since firmware configured * the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } handled |= CSR_INT_BIT_ALIVE; } /* Safely ignore these bits for debug checks below */ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); /* HW RF KILL switch toggled */ if (inta & CSR_INT_BIT_RF_KILL) { iwl_pcie_handle_rfkill_irq(trans); handled |= CSR_INT_BIT_RF_KILL; } /* Chip got too hot and stopped itself */ if (inta & CSR_INT_BIT_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); isr_stats->ctkill++; handled |= CSR_INT_BIT_CT_KILL; } /* Error detected by uCode */ if (inta & CSR_INT_BIT_SW_ERR) { IWL_ERR(trans, "Microcode SW error detected. 
" " Restarting 0x%X.\n", inta); isr_stats->sw++; iwl_pcie_irq_handle_error(trans); handled |= CSR_INT_BIT_SW_ERR; } /* uCode wakes up after power-down sleep */ if (inta & CSR_INT_BIT_WAKEUP) { IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); isr_stats->wakeup++; handled |= CSR_INT_BIT_WAKEUP; } /* All uCode command responses, including Tx command responses, * Rx "responses" (frame-received notification), and other * notifications from uCode come through here*/ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | CSR_INT_BIT_RX_PERIODIC)) { IWL_DEBUG_ISR(trans, "Rx interrupt\n"); if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_RX_MASK); } if (inta & CSR_INT_BIT_RX_PERIODIC) { handled |= CSR_INT_BIT_RX_PERIODIC; iwl_write32(trans, CSR_INT, CSR_INT_BIT_RX_PERIODIC); } /* Sending RX interrupt require many steps to be done in the * device: * 1- write interrupt to current index in ICT table. * 2- dma RX frame. * 3- update RX shared data to indicate last write index. * 4- send interrupt. * This could lead to RX race, driver could receive RX interrupt * but the shared data changes does not reflect this; * periodic interrupt will detect any dangling Rx activity. */ /* Disable periodic interrupt; we use it as just a one-shot. */ iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS); /* * Enable periodic interrupt in 8 msec only if we received * real RX interrupt (instead of just periodic int), to catch * any dangling Rx interrupt. If it was just the periodic * interrupt, there was no dangling Rx activity, and no need * to extend the periodic interrupt; one-shot is enough. */ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_ENA); isr_stats->rx++; local_bh_disable(); if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { polling = true; __napi_schedule(&trans_pcie->rxq[0].napi); } local_bh_enable(); } /* This "Tx" DMA channel is used only for loading uCode */ if (inta & CSR_INT_BIT_FH_TX) { iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); isr_stats->tx++; handled |= CSR_INT_BIT_FH_TX; /* Wake up uCode load routine, now that load is complete */ trans_pcie->ucode_write_complete = true; wake_up(&trans_pcie->ucode_write_waitq); /* Wake up IMR write routine, now that write to SRAM is complete */ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { trans_pcie->imr_status = IMR_D2S_COMPLETED; wake_up(&trans_pcie->ucode_write_waitq); } } if (inta & ~handled) { IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); isr_stats->unhandled++; } if (inta & ~(trans_pcie->inta_mask)) { IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", inta & ~trans_pcie->inta_mask); } if (!polling) { spin_lock_bh(&trans_pcie->irq_lock); /* only Re-enable all interrupt if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &trans->status)) _iwl_enable_interrupts(trans); /* we are loading the firmware, enable FH_TX interrupt only */ else if (handled & CSR_INT_BIT_FH_TX) iwl_enable_fw_load_int(trans); /* Re-enable RF_KILL if it occurred */ else if (handled & CSR_INT_BIT_RF_KILL) iwl_enable_rfkill_int(trans); /* Re-enable the ALIVE / Rx interrupt if it occurred */ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX)) iwl_enable_fw_load_int_ctx_info(trans); spin_unlock_bh(&trans_pcie->irq_lock); } out: 
lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } /****************************************************************************** * * ICT functions * ******************************************************************************/ /* Free dram table */ void iwl_pcie_free_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->ict_tbl) { dma_free_coherent(trans->dev, ICT_SIZE, trans_pcie->ict_tbl, trans_pcie->ict_tbl_dma); trans_pcie->ict_tbl = NULL; trans_pcie->ict_tbl_dma = 0; } } /* * allocate dram shared table, it is an aligned memory * block of ICT_SIZE. * also reset all data related to ICT table interrupt. */ int iwl_pcie_alloc_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->ict_tbl = dma_alloc_coherent(trans->dev, ICT_SIZE, &trans_pcie->ict_tbl_dma, GFP_KERNEL); if (!trans_pcie->ict_tbl) return -ENOMEM; /* just an API sanity check ... it is guaranteed to be aligned */ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { iwl_pcie_free_ict(trans); return -EINVAL; } return 0; } /* Device is going up inform it about using ICT interrupt table, * also we need to tell the driver to start using ICT interrupt. */ void iwl_pcie_reset_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; if (!trans_pcie->ict_tbl) return; spin_lock_bh(&trans_pcie->irq_lock); _iwl_disable_interrupts(trans); memset(trans_pcie->ict_tbl, 0, ICT_SIZE); val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; val |= CSR_DRAM_INT_TBL_ENABLE | CSR_DRAM_INIT_TBL_WRAP_CHECK | CSR_DRAM_INIT_TBL_WRITE_POINTER; IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); trans_pcie->use_ict = true; trans_pcie->ict_index = 0; iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); _iwl_enable_interrupts(trans); spin_unlock_bh(&trans_pcie->irq_lock); } /* Device is going down disable ict interrupt usage */ void iwl_pcie_disable_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_bh(&trans_pcie->irq_lock); trans_pcie->use_ict = false; spin_unlock_bh(&trans_pcie->irq_lock); } irqreturn_t iwl_pcie_isr(int irq, void *data) { struct iwl_trans *trans = data; if (!trans) return IRQ_NONE; /* Disable (but don't clear!) interrupts here to avoid * back-to-back ISRs and sporadic interrupts from our NIC. * If we have something to service, the tasklet will re-enable ints. * If we *don't* have something, we'll re-enable before leaving here. 
*/ iwl_write32(trans, CSR_INT_MASK, 0x00000000); return IRQ_WAKE_THREAD; } irqreturn_t iwl_pcie_msix_isr(int irq, void *data) { return IRQ_WAKE_THREAD; } irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE; u32 inta_fh, inta_hw; bool polling = false; bool sw_err; if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0; if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1; lock_map_acquire(&trans->sync_cmd_lockdep_map); spin_lock_bh(&trans_pcie->irq_lock); inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); /* * Clear causes registers to avoid being handling the same cause. */ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk); iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); spin_unlock_bh(&trans_pcie->irq_lock); trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw); if (unlikely(!(inta_fh | inta_hw))) { IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_NONE; } #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", entry->entry, inta_fh, trans_pcie->fh_mask, iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); if (inta_fh & ~trans_pcie->fh_mask) IWL_DEBUG_ISR(trans, "We got a masked interrupt (0x%08x)\n", inta_fh & ~trans_pcie->fh_mask); } #endif inta_fh &= trans_pcie->fh_mask; if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) && inta_fh & MSIX_FH_INT_CAUSES_Q0) { local_bh_disable(); if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) { polling = true; __napi_schedule(&trans_pcie->rxq[0].napi); } local_bh_enable(); } if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) && inta_fh & MSIX_FH_INT_CAUSES_Q1) { local_bh_disable(); if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) { polling = true; __napi_schedule(&trans_pcie->rxq[1].napi); } local_bh_enable(); } /* This "Tx" DMA channel is used only for loading uCode */ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM && trans_pcie->imr_status == IMR_D2S_REQUESTED) { IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n"); isr_stats->tx++; /* Wake up IMR routine once write to SRAM is complete */ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { trans_pcie->imr_status = IMR_D2S_COMPLETED; wake_up(&trans_pcie->ucode_write_waitq); } } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); isr_stats->tx++; /* * Wake up uCode load routine, * now that load is complete */ trans_pcie->ucode_write_complete = true; wake_up(&trans_pcie->ucode_write_waitq); /* Wake up IMR routine once write to SRAM is complete */ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { trans_pcie->imr_status = IMR_D2S_COMPLETED; wake_up(&trans_pcie->ucode_write_waitq); } } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; else sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR; /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) { IWL_ERR(trans, "Microcode SW error detected. 
Restarting 0x%X.\n", inta_fh); isr_stats->sw++; /* during FW reset flow report errors from there */ if (trans_pcie->imr_status == IMR_D2S_REQUESTED) { trans_pcie->imr_status = IMR_D2S_ERROR; wake_up(&trans_pcie->imr_waitq); } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) { trans_pcie->fw_reset_state = FW_RESET_ERROR; wake_up(&trans_pcie->fw_reset_waitq); } else { iwl_pcie_irq_handle_error(trans); } } /* After checking FH register check HW register */ #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(IWL_DL_ISR)) { IWL_DEBUG_ISR(trans, "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n", entry->entry, inta_hw, trans_pcie->hw_mask, iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); if (inta_hw & ~trans_pcie->hw_mask) IWL_DEBUG_ISR(trans, "We got a masked interrupt 0x%08x\n", inta_hw & ~trans_pcie->hw_mask); } #endif inta_hw &= trans_pcie->hw_mask; /* Alive notification via Rx interrupt will do the real work */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { IWL_DEBUG_ISR(trans, "Alive interrupt\n"); isr_stats->alive++; if (trans->trans_cfg->gen2) { /* We can restock, since firmware configured the RFH */ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq); } } /* * In some rare cases when the HW is in a bad state, we may * get this interrupt too early, when prph_info is still NULL. * So make sure that it's not NULL to prevent crashing. */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { u32 sleep_notif = le32_to_cpu(trans_pcie->prph_info->sleep_notif); if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) { IWL_DEBUG_ISR(trans, "Sx interrupt: sleep notification = 0x%x\n", sleep_notif); trans_pcie->sx_complete = true; wake_up(&trans_pcie->sx_waitq); } else { /* uCode wakes up after power-down sleep */ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); isr_stats->wakeup++; } } /* Chip got too hot and stopped itself */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); isr_stats->ctkill++; } /* HW RF KILL switch toggled */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) iwl_pcie_handle_rfkill_irq(trans); if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { IWL_ERR(trans, "Hardware error detected. Restarting.\n"); isr_stats->hw++; trans->dbg.hw_error = true; iwl_pcie_irq_handle_error(trans); } if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { IWL_DEBUG_ISR(trans, "Reset flow completed\n"); trans_pcie->fw_reset_state = FW_RESET_OK; wake_up(&trans_pcie->fw_reset_waitq); } if (!polling) iwl_pcie_clear_irq(trans, entry->entry); lock_map_release(&trans->sync_cmd_lockdep_map); return IRQ_HANDLED; } diff --git a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c b/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c index be4bc549acea..4446734d00ca 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c +++ b/sys/contrib/dev/iwlwifi/pcie/trans-gen2.c @@ -1,466 +1,537 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #if defined(__FreeBSD__) #include #endif #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" #include "iwl-context-info-gen3.h" #include "internal.h" #include "fw/dbg.h" #define FW_RESET_TIMEOUT (HZ / 5) /* * Start up NIC's basic functionality after it has been reset * (e.g. 
after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) { int ret = 0; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. */ /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); ret = iwl_finish_nic_init(trans); if (ret) return ret; set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_gen2_apm_init(trans); /* inform ME that we are leaving */ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); iwl_trans_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_INIT); else iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; trans_pcie->fw_reset_state = FW_RESET_REQUESTED; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); else iwl_write32(trans, CSR_DOORBELL_VECTOR, UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); /* wait 200ms */ ret = wait_event_timeout(trans_pcie->fw_reset_waitq, trans_pcie->fw_reset_state != FW_RESET_REQUESTED, FW_RESET_TIMEOUT); if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { - IWL_INFO(trans, - "firmware didn't ACK the reset - continue anyway\n"); - iwl_trans_fw_error(trans, true); + u32 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + + IWL_ERR(trans, + "timeout waiting for FW reset ACK (inta_hw=0x%x)\n", + inta_hw); + + if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) + iwl_trans_fw_error(trans, true); } trans_pcie->fw_reset_state = FW_RESET_IDLE; } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; if (trans->state >= IWL_TRANS_FW_STARTED) if (trans_pcie->fw_reset_handshake) iwl_trans_pcie_fw_reset_handshake(trans); trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, Stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. */ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_rx_napi_sync(trans); iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); } iwl_pcie_ctxt_info_free_paging(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_pcie_ctxt_info_gen3_free(trans, false); else iwl_pcie_ctxt_info_free(trans); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); /* re-take ownership to prevent other users from stealing the device */ iwl_trans_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset. */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain versions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here.
*/ iwl_disable_interrupts(trans); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); } void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, NULL); mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_gen2_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock_bh(&trans_pcie->irq_lock); iwl_pcie_gen2_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ if (iwl_pcie_gen2_rx_init(trans)) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); return 0; } static void iwl_pcie_get_rf_name(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char *buf = trans_pcie->rf_name; size_t buflen = sizeof(trans_pcie->rf_name); size_t pos; u32 version; if (buf[0]) return; switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF): pos = scnprintf(buf, buflen, "JF"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF): pos = scnprintf(buf, buflen, "GF"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4): pos = scnprintf(buf, buflen, "GF4"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): pos = scnprintf(buf, buflen, "HR"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): pos = scnprintf(buf, buflen, "HR1"); break; case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): pos = scnprintf(buf, buflen, "HRCDB"); break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS): + pos = scnprintf(buf, buflen, "MS"); + break; default: return; } switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1): case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): version = iwl_read_prph(trans, CNVI_MBOX_C); switch (version) { case 0x20000: pos += scnprintf(buf + pos, buflen - pos, " B3"); break; case 0x120000: pos += scnprintf(buf + pos, buflen - pos, " B5"); break; default: pos += scnprintf(buf + pos, buflen - pos, " (0x%x)", version); break; } break; default: break; } pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x", trans->hw_rf_id); IWL_INFO(trans, "Detected RF %s\n", buf); /* * also add a \n for debugfs - need to do it after printing * since our IWL_INFO machinery wants to see a static \n at * the end of the string */ pos += scnprintf(buf + pos, buflen - pos, "\n"); } void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 
iwl_pcie_reset_ict(trans); /* make sure all queue are not stopped/used */ memset(trans->txqs.queue_stopped, 0, sizeof(trans->txqs.queue_stopped)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* now that we got alive we can free the fw image & the context info. * paging memory cannot be freed included since FW will still use it */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) iwl_pcie_ctxt_info_gen3_free(trans, true); else iwl_pcie_ctxt_info_free(trans); /* * Re-enable all the interrupts, including the RF-Kill one, now that * the firmware is alive. */ iwl_enable_interrupts(trans); mutex_lock(&trans_pcie->mutex); iwl_pcie_check_hw_rf_kill(trans); iwl_pcie_get_rf_name(trans); mutex_unlock(&trans_pcie->mutex); } -static void iwl_pcie_set_ltr(struct iwl_trans *trans) +static bool iwl_pcie_set_ltr(struct iwl_trans *trans) { u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | CSR_LTR_LONG_VAL_AD_SNOOP_REQ | u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); /* * To workaround hardware latency issues during the boot process, * initialize the LTR to ~250 usec (see ltr_val above). * The firmware initializes this again later (to a smaller value). */ if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && !trans->trans_cfg->integrated) { iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); - } else if (trans->trans_cfg->integrated && - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + return true; + } + + if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); + return true; + } + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + /* First clear the interrupt, just in case */ + iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, + MSIX_HW_INT_CAUSES_REG_IML); + /* In this case, unfortunately the same ROM bug exists in the + * device (not setting LTR correctly), but we don't have control + * over the settings from the host due to some hardware security + * features. The only workaround we've been able to come up with + * so far is to try to keep the CPU and device busy by polling + * it and the IML (image loader) completed interrupt. + */ + return false; } + + /* nothing needs to be done on other devices */ + return true; +} + +static void iwl_pcie_spin_for_iml(struct iwl_trans *trans) +{ +/* in practice, this seems to complete in around 20-30ms at most, wait 100 */ +#define IML_WAIT_TIMEOUT (HZ / 10) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long end_time = jiffies + IML_WAIT_TIMEOUT; + u32 value, loops = 0; + bool irq = false; + + if (WARN_ON(!trans_pcie->iml)) + return; + + value = iwl_read32(trans, CSR_LTR_LAST_MSG); + IWL_DEBUG_INFO(trans, "Polling for IML load - CSR_LTR_LAST_MSG=0x%x\n", + value); + + while (time_before(jiffies, end_time)) { + if (iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD) & + MSIX_HW_INT_CAUSES_REG_IML) { + irq = true; + break; + } + /* Keep the CPU and device busy. 
*/ + value = iwl_read32(trans, CSR_LTR_LAST_MSG); + loops++; + } + + IWL_DEBUG_INFO(trans, + "Polled for IML load: irq=%d, loops=%d, CSR_LTR_LAST_MSG=0x%x\n", + irq, loops, value); + + /* We don't fail here even if we timed out - maybe we get lucky and the + * interrupt comes in later (and we get alive from firmware) and then + * we're all happy - but if not we'll fail on alive timeout or get some + * other error out. + */ } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; + bool hw_rfkill, keep_ram_busy; int ret; /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); return -EIO; } iwl_enable_rfkill_int(trans); iwl_write32(trans, CSR_INT, 0xFFFFFFFF); /* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired. */ iwl_disable_interrupts(trans); /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; } /* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) { IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n"); ret = -EIO; goto out; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_gen2_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); goto out; } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); else ret = iwl_pcie_ctxt_info_init(trans, fw); if (ret) goto out; - iwl_pcie_set_ltr(trans); + keep_ram_busy = !iwl_pcie_set_ltr(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_ROM_START); } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); } else { iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); } + if (keep_ram_busy) + iwl_pcie_spin_for_iml(trans); + /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; out: mutex_unlock(&trans_pcie->mutex); return ret; } diff --git a/sys/contrib/dev/iwlwifi/pcie/trans.c b/sys/contrib/dev/iwlwifi/pcie/trans.c index 7fea426e07e9..15d7c37c0482 100644 --- a/sys/contrib/dev/iwlwifi/pcie/trans.c +++ b/sys/contrib/dev/iwlwifi/pcie/trans.c @@ -1,3779 +1,3822 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2007-2015, 2018-2022 Intel Corporation + * Copyright (C) 2007-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-scd.h" 
#include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" #include "fw/api/tx.h" #include "mei/iwl-mei.h" #include "internal.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" /* extended range in FW SRAM */ #define IWL_FW_MEM_EXTENDED_START 0x40000 #define IWL_FW_MEM_EXTENDED_END 0x57FFF void iwl_trans_pcie_dump_regs(struct iwl_trans *trans) { #define PCI_DUMP_SIZE 352 #define PCI_MEM_DUMP_SIZE 64 #define PCI_PARENT_DUMP_SIZE 524 #define PREFIX_LEN 32 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct pci_dev *pdev = trans_pcie->pci_dev; u32 i, pos, alloc_size, *ptr, *buf; char *prefix; if (trans_pcie->pcie_dbg_dumped_once) return; /* Should be a multiple of 4 */ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3); BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3); /* Alloc a max size buffer */ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN; alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN); alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN); buf = kmalloc(alloc_size, GFP_ATOMIC); if (!buf) return; prefix = (char *)buf + alloc_size - PREFIX_LEN; IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n"); /* Print wifi device registers */ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi device config registers:\n"); for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif IWL_ERR(trans, "iwlwifi device memory mapped registers:\n"); for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++) *ptr = iwl_read32(trans, i); #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi device AER capability structure:\n"); for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif } /* Print parent device registers next */ if (!pdev->bus->self) goto out; pdev = pdev->bus->self; sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n", pci_name(pdev)); for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++) if (pci_read_config_dword(pdev, i, ptr)) goto err_read; #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif /* Print root port AER registers */ pos = 0; pdev = pcie_find_root_port(pdev); if (pdev) pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); if (pos) { IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n", pci_name(pdev)); sprintf(prefix, "iwlwifi %s: ", pci_name(pdev)); for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++) if (pci_read_config_dword(pdev, pos + i, ptr)) goto err_read; #if 
defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif } goto out; err_read: #if defined(__linux__) print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0); #elif defined(__FreeBSD__) iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i); #endif IWL_ERR(trans, "Read failed at 0x%X\n", i); out: trans_pcie->pcie_dbg_dumped_once = 1; kfree(buf); } static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_SW_RESET); - else + usleep_range(10000, 20000); + } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - usleep_range(5000, 6000); + usleep_range(5000, 6000); + } if (retake_ownership) return iwl_pcie_prepare_card_hw(trans); return 0; } static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; if (!fw_mon->size) return; dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block, fw_mon->physical); fw_mon->block = NULL; fw_mon->physical = 0; fw_mon->size = 0; } static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, - u8 max_power, u8 min_power) + u8 max_power) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; void *block = NULL; dma_addr_t physical = 0; u32 size = 0; u8 power; - if (fw_mon->size) + if (fw_mon->size) { + memset(fw_mon->block, 0, fw_mon->size); return; + } - for (power = max_power; power >= min_power; power--) { + /* need at least 2 KiB, so stop at 11 */ + for (power = max_power; power >= 11; power--) { size = BIT(power); block = dma_alloc_coherent(trans->dev, size, &physical, GFP_KERNEL | __GFP_NOWARN); if (!block) continue; IWL_INFO(trans, "Allocated 0x%08x bytes for firmware monitor.\n", size); break; } if (WARN_ON_ONCE(!block)) return; if (power != max_power) IWL_ERR(trans, "Sorry - debug buffer is only %luK while you requested %luK\n", (unsigned long)BIT(power - 10), (unsigned long)BIT(max_power - 10)); fw_mon->block = block; fw_mon->physical = physical; fw_mon->size = size; } void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { if (!max_power) { /* default max_power is maximum */ max_power = 26; } else { max_power += 11; } if (WARN(max_power > 26, "External buffer size for monitor is too big %d, check the FW TLV\n", max_power)) return; - if (trans->dbg.fw_mon.size) - return; - - iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); + iwl_pcie_alloc_fw_monitor_block(trans, max_power); } static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (2 << 28))); return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG); } static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) { iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, ((reg & 0x0000ffff) | (3 << 28))); } static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); else iwl_set_bits_mask_prph(trans, 
APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 void iwl_pcie_apm_config(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; u16 cap; /* * L0S states have been found to be unstable with our devices * and in newer hardware they are not officially supported at * all, so we must always set the L0S_DISABLED bit. */ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", trans->ltr_enabled ? "En" : "Dis"); } /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl_pcie_apm_init(struct iwl_trans *trans) { int ret; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. */ /* Disable L0S exit timer (platform NMI Work/Around) */ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_pcie_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ if (trans->trans_cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); ret = iwl_finish_nic_init(trans); if (ret) return ret; if (trans->cfg->host_interrupt_operation_mode) { /* * This is a bit of an abuse - This is needed for 7260 / 3160 * only check host_interrupt_operation_mode even if this is * not related to host_interrupt_operation_mode. * * Enable the oscillator to count wake up time for L1 exit. This * consumes slightly more power (100uA) - but allows to be sure * that we wake up from L1 on time. * * This looks weird: read twice the same register, discard the * value, set a bit, and yet again, read that same register * just to discard the value. But that's the way the hardware * seems to like it. */ iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); iwl_read_prph(trans, OSC_CLK); iwl_read_prph(trans, OSC_CLK); } /* * Enable DMA clock and wait for it to stabilize. * * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. 
*/ if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); /* Disable L1-Active */ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); /* Clear the interrupt in APMG if the NIC is in RFKILL */ iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL); } set_bit(STATUS_DEVICE_ENABLED, &trans->status); return 0; } /* * Enable LP XTAL to avoid HW bug where device may consume much power if * FW is not loaded after device reset. LP XTAL is disabled by default * after device HW reset. Do it only if XTAL is fed by internal source. * Configure device's "persistence" mode to avoid resetting XTAL again when * SHRD_HW_RST occurs in S3. */ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) { int ret; u32 apmg_gp1_reg; u32 apmg_xtal_cfg_reg; u32 dl_cfg_reg; /* Force XTAL ON */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); ret = iwl_trans_pcie_sw_reset(trans, true); if (!ret) ret = iwl_finish_nic_init(trans); if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); return; } /* * Clear "disable persistence" to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_PERSIST_DIS); /* * Force APMG XTAL to be active to prevent its disabling by HW * caused by APMG idle state. */ apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_XTAL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg | SHR_APMG_XTAL_CFG_XTAL_ON_REQ); ret = iwl_trans_pcie_sw_reset(trans, true); if (ret) IWL_ERR(trans, "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n"); /* Enable LP XTAL by indirect access through CSR */ apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | SHR_APMG_GP1_WF_XTAL_LP_EN | SHR_APMG_GP1_CHICKEN_BIT_SELECT); /* Clear delay line clock power up */ dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); /* * Enable persistence mode to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3. */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, CSR_MONITOR_XTAL_RESOURCES); /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); udelay(10); /* Release APMG XTAL */ iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, apmg_xtal_cfg_reg & ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); } void iwl_pcie_apm_stop_master(struct iwl_trans *trans) { int ret; /* stop device's busmaster DMA activity */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 100); - msleep(100); + usleep_range(10000, 20000); } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); ret = iwl_poll_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); } if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_DEBUG_INFO(trans, "stop master\n"); } static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) { IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); if (op_mode_leave) { if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_pcie_apm_init(trans); /* inform ME that we are leaving */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) { iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); mdelay(1); iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); } mdelay(5); } clear_bit(STATUS_DEVICE_ENABLED, &trans->status); /* Stop device's DMA activity */ iwl_pcie_apm_stop_master(trans); if (trans->cfg->lp_xtal_workaround) { iwl_pcie_apm_lp_xtal_enable(trans); return; } iwl_trans_pcie_sw_reset(trans, false); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static int iwl_pcie_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; /* nic_init */ spin_lock_bh(&trans_pcie->irq_lock); ret = iwl_pcie_apm_init(trans); spin_unlock_bh(&trans_pcie->irq_lock); if (ret) return ret; iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ ret = iwl_pcie_rx_init(trans); if (ret) return ret; /* Allocate or reset and init all Tx and Command queues */ if (iwl_pcie_tx_init(trans)) { iwl_pcie_rx_free(trans); return -ENOMEM; } if (trans->trans_cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); } return 0; } #define HW_READY_TIMEOUT (50) /* Note: returns poll_bit return value, which is >= 0 if success */ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) { int ret; iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); if (ret >= 0) iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); return ret; } /* Note: returns standard 0/-ERROR code */ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; - int t = 0; int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); ret = iwl_pcie_set_hw_ready(trans); /* If the card is ready, exit 0 */ if (ret >= 0) { trans->csme_own = false; return 0; } iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, CSR_RESET_LINK_PWR_MGMT_DISABLED); usleep_range(1000, 2000); for (iter = 0; iter < 10; iter++) { + int t = 0; + /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); do { ret = iwl_pcie_set_hw_ready(trans); if (ret >= 0) { trans->csme_own = false; return 0; } if (iwl_mei_is_connected()) { IWL_DEBUG_INFO(trans, "Couldn't prepare the card but SAP is connected\n"); trans->csme_own = true; if (trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000) IWL_ERR(trans, "SAP not supported for this NIC family\n"); return -EBUSY; } usleep_range(200, 1000); t += 200; } while (t < 150000); msleep(25); } IWL_ERR(trans, "Couldn't prepare the card\n"); return ret; } /* * ucode */ static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); } static int iwl_pcie_load_firmware_chunk(struct 
iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; trans_pcie->ucode_write_complete = false; if (!iwl_trans_grab_nic_access(trans)) return -EIO; iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, byte_cnt); iwl_trans_release_nic_access(trans); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); if (!ret) { IWL_ERR(trans, "Failed to load firmware chunk!\n"); iwl_trans_pcie_dump_regs(trans); return -ETIMEDOUT; } return 0; } static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, const struct fw_desc *section) { u8 *v_addr; dma_addr_t p_addr; u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL | __GFP_NOWARN); if (!v_addr) { IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); chunk_sz = PAGE_SIZE; v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, GFP_KERNEL); if (!v_addr) return -ENOMEM; } for (offset = 0; offset < section->len; offset += chunk_sz) { u32 copy_size, dst_addr; bool extended_addr = false; copy_size = min_t(u32, chunk_sz, section->len - offset); dst_addr = section->offset + offset; if (dst_addr >= IWL_FW_MEM_EXTENDED_START && dst_addr <= IWL_FW_MEM_EXTENDED_END) extended_addr = true; if (extended_addr) iwl_set_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); memcpy(v_addr, (const u8 *)section->data + offset, copy_size); ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, copy_size); if (extended_addr) iwl_clear_bits_prph(trans, LMPM_CHICK, LMPM_CHICK_EXTENDED_ADDR_SPACE); if (ret) { IWL_ERR(trans, "Could not load the [%d] uCode section\n", section_num); break; } } dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); return ret; } static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int shift_param; int i, ret = 0, sec_num = 0x1; u32 val, last_read_idx = 0; if (cpu == 1) { shift_param = 0; *first_ucode_section = 0; } else { shift_param = 16; (*first_ucode_section)++; } for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. 
*/ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; /* Notify ucode of loaded section number and status */ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); val = val | (sec_num << shift_param); iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); sec_num = (sec_num << 1) | 0x1; } *first_ucode_section = last_read_idx; iwl_enable_interrupts(trans); - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFFFFFF); } else { if (cpu == 1) iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF); else iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); } return 0; } static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, const struct fw_img *image, int cpu, int *first_ucode_section) { int i, ret = 0; u32 last_read_idx = 0; if (cpu == 1) *first_ucode_section = 0; else (*first_ucode_section)++; for (i = *first_ucode_section; i < image->num_sec; i++) { last_read_idx = i; /* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec. */ if (!image->sec[i].data || image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); break; } ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret; } *first_ucode_section = last_read_idx; return 0; } static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans) { enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id]; struct iwl_dram_data *frag; if (!iwl_trans_dbg_ini_valid(trans)) return; if (le32_to_cpu(fw_mon_cfg->buf_location) == IWL_FW_INI_LOCATION_SRAM_PATH) { IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); /* set sram monitor by enabling bit 7 */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); return; } if (le32_to_cpu(fw_mon_cfg->buf_location) != IWL_FW_INI_LOCATION_DRAM_PATH || !trans->dbg.fw_mon_ini[alloc_id].num_frags) return; frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n", alloc_id); iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, frag->physical >> MON_BUFF_SHIFT_VER2); iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, (frag->physical + frag->size - 256) >> MON_BUFF_SHIFT_VER2); } void iwl_pcie_apply_destination(struct iwl_trans *trans) { const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; int i; if (iwl_trans_dbg_ini_valid(trans)) { iwl_pcie_apply_destination_ini(trans); return; } IWL_INFO(trans, "Applying debug destination %s\n", get_fw_dbg_mode_string(dest->monitor_mode)); if (dest->monitor_mode == EXTERNAL_MODE) iwl_pcie_alloc_fw_monitor(trans, dest->size_power); else IWL_WARN(trans, "PCI should have external buffer debug\n"); for (i = 0; i < trans->dbg.n_dest_reg; i++) { u32 addr = le32_to_cpu(dest->reg_ops[i].addr); u32 val = le32_to_cpu(dest->reg_ops[i].val); switch (dest->reg_ops[i].op) { case 
CSR_ASSIGN: iwl_write32(trans, addr, val); break; case CSR_SETBIT: iwl_set_bit(trans, addr, BIT(val)); break; case CSR_CLEARBIT: iwl_clear_bit(trans, addr, BIT(val)); break; case PRPH_ASSIGN: iwl_write_prph(trans, addr, val); break; case PRPH_SETBIT: iwl_set_bits_prph(trans, addr, BIT(val)); break; case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; case PRPH_BLOCKBIT: if (iwl_read_prph(trans, addr) & BIT(val)) { IWL_ERR(trans, "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", val, addr); goto monitor; } break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); break; } } monitor: if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), fw_mon->physical >> dest->base_shift); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (fw_mon->physical + fw_mon->size - 256) >> dest->end_shift); else iwl_write_prph(trans, le32_to_cpu(dest->end_reg), (fw_mon->physical + fw_mon->size) >> dest->end_shift); } } static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, const struct fw_img *image) { int ret = 0; int first_ucode_section; IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? "Dual" : "Single"); /* load to FW the binary non secured sections of CPU1 */ ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); if (ret) return ret; if (image->is_dual_cpus) { /* set CPU2 header address */ iwl_write_prph(trans, LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, LMPM_SECURE_CPU2_HDR_MEM_SPACE); /* load to FW the binary sections of CPU2 */ ret = iwl_pcie_load_cpu_sections(trans, image, 2, &first_ucode_section); if (ret) return ret; } if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); iwl_enable_interrupts(trans); /* release CPU reset */ iwl_write32(trans, CSR_RESET, 0); return 0; } static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, const struct fw_img *image) { int ret = 0; int first_ucode_section; IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? "Dual" : "Single"); if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", iwl_read_prph(trans, WFPM_GP2)); /* * Set default value. On resume reading the values that were * zeroed can provide debug data on the resume flow. * This is for debugging only and has no functional impact.
*/ iwl_write_prph(trans, WFPM_GP2, 0x01010101); /* configure the ucode to be ready to get the secured image */ /* release CPU reset */ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT); /* load to FW the binary Secured sections of CPU1 */ ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1, &first_ucode_section); if (ret) return ret; /* load to FW the binary sections of CPU2 */ return iwl_pcie_load_cpu_sections_8000(trans, image, 2, &first_ucode_section); } bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status); bool report; if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); if (trans_pcie->opmode_down) clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); if (prev != report) iwl_trans_pcie_rf_kill(trans, report); return hw_rfkill; } struct iwl_causes_list { - u32 cause_num; - u32 mask_reg; + u16 mask_reg; + u8 bit; u8 addr; }; +#define IWL_CAUSE(reg, mask) \ + { \ + .mask_reg = reg, \ + .bit = ilog2(mask), \ + .addr = ilog2(mask) + \ + ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \ + (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 : \ + 0xffff), /* causes overflow warning */ \ + } + static const struct iwl_causes_list causes_list_common[] = { - {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, - {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, - {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, - {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, - {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, - {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, - {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12}, - {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, - {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, - {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, - {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, - {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, - {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, - {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP), }; static const struct iwl_causes_list causes_list_pre_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, 
MSIX_HW_INT_CAUSES_REG_SW_ERR), }; static const struct iwl_causes_list causes_list_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15}, + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ), }; static void iwl_pcie_map_list(struct iwl_trans *trans, const struct iwl_causes_list *causes, int arr_size, int val) { int i; for (i = 0; i < arr_size; i++) { iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); iwl_clear_bit(trans, causes[i].mask_reg, - causes[i].cause_num); + BIT(causes[i].bit)); } } static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; /* * Access all non RX causes and map them to the default irq. * In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. */ iwl_pcie_map_list(trans, causes_list_common, ARRAY_SIZE(causes_list_common), val); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_pcie_map_list(trans, causes_list_bz, ARRAY_SIZE(causes_list_bz), val); else iwl_pcie_map_list(trans, causes_list_pre_bz, ARRAY_SIZE(causes_list_pre_bz), val); } static void iwl_pcie_map_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 offset = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; u32 val, idx; /* * The first RX queue - fallback queue, which is designated for * management frame, command responses etc, is always mapped to the * first interrupt vector. The other RX queues are mapped to * the other (N - 2) interrupt vectors. */ val = BIT(MSIX_FH_INT_CAUSES_Q(0)); for (idx = 1; idx < trans->num_rx_queues; idx++) { iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), MSIX_FH_INT_CAUSES_Q(idx - offset)); val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); } iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); val = MSIX_FH_INT_CAUSES_Q(0); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) val |= MSIX_NON_AUTO_CLEAR_CAUSE; iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); } void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; if (!trans_pcie->msix_enabled) { if (trans->trans_cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSI_ENABLE); return; } /* * The IVAR table needs to be configured again after reset, * but if the device is disabled, we can't write to * prph. */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* * Each cause from the causes list above and the RX causes is * represented as a byte in the IVAR table. The first nibble * represents the bound interrupt vector of the cause, the second * represents no auto clear for this cause. This will be set if its * interrupt vector is bound to serve other causes. 
*/ iwl_pcie_map_rx_causes(trans); iwl_pcie_map_non_rx_causes(trans); } static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) { struct iwl_trans *trans = trans_pcie->trans; iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) return; trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); trans_pcie->fh_mask = trans_pcie->fh_init_mask; trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); trans_pcie->hw_mask = trans_pcie->hw_init_mask; } static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); if (trans_pcie->is_down) return; trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ iwl_disable_interrupts(trans); /* device going down, Stop using ICT table */ iwl_pcie_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. */ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_rx_napi_sync(trans); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); } } /* Make sure (redundant) we've released our request to stay awake */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); /* re-take ownership to prevent other users from stealing the device */ iwl_trans_pcie_sw_reset(trans, true); /* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset. */ iwl_pcie_conf_msix_hw(trans_pcie); /* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain versions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here.
*/ iwl_disable_interrupts(trans); /* clear all status bits */ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); clear_bit(STATUS_INT_ENABLED, &trans->status); clear_bit(STATUS_TPOWER_PMI, &trans->status); /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); } void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->msix_enabled) { int i; for (i = 0; i < trans_pcie->alloc_vecs; i++) synchronize_irq(trans_pcie->msix_entries[i].vector); } else { synchronize_irq(trans_pcie->pci_dev->irq); } } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int ret; /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); return -EIO; } iwl_enable_rfkill_int(trans); iwl_write32(trans, CSR_INT, 0xFFFFFFFF); /* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired. */ iwl_disable_interrupts(trans); /* Make sure it finished running */ iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; } /* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) { IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n"); ret = -EIO; goto out; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); goto out; } /* * Now, we load the firmware and don't want to be interrupted, even * by the RF-Kill interrupt (hence mask all the interrupt besides the * FH_TX interrupt which is needed to load the firmware). If the * RF-Kill switch is toggled, we will find out after having loaded * the firmware and return the proper value to the caller. */ iwl_enable_fw_load_int(trans); /* really make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) ret = iwl_pcie_load_given_ucode_8000(trans, fw); else ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; out: mutex_unlock(&trans_pcie->mutex); return ret; } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) { iwl_pcie_reset_ict(trans); iwl_pcie_tx_start(trans, scd_addr); } void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill) { bool hw_rfkill; /* * Check again since the RF kill state may have changed while * all the interrupts were disabled, in this case we couldn't * receive the RF kill interrupt and update the state in the * op_mode. * Don't call the op_mode if the rkfill state hasn't changed. 
* This allows the op_mode to call stop_device from the rfkill * notification without endless recursion. Under very rare * circumstances, we might have a small recursion if the rfkill * state changed exactly now while we were called from stop_device. * This is very unlikely but can happen and is supported. */ hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) { set_bit(STATUS_RFKILL_HW, &trans->status); set_bit(STATUS_RFKILL_OPMODE, &trans->status); } else { clear_bit(STATUS_RFKILL_HW, &trans->status); clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } if (hw_rfkill != was_in_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, NULL); mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); _iwl_trans_pcie_stop_device(trans); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { struct iwl_trans_pcie __maybe_unused *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->mutex); IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", state ? "disabled" : "enabled"); if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { if (trans->trans_cfg->gen2) _iwl_trans_pcie_gen2_stop_device(trans); else _iwl_trans_pcie_stop_device(trans); } } void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, bool test, bool reset) { iwl_disable_interrupts(trans); /* * in testing mode, the host stays awake and the * hardware won't be reset (not even partially) */ if (test) return; iwl_pcie_disable_ict(trans); iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try * to execute some invalid memory upon resume */ iwl_trans_pcie_tx_reset(trans); } iwl_pcie_set_pwr(trans, true); } static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : UREG_DOORBELL_TO_ISR6_RESUME); - } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : CSR_IPC_SLEEP_CONTROL_RESUME); - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_SLEEP_CTRL); - } else { + else return 0; - } ret = wait_event_timeout(trans_pcie->sx_waitq, trans_pcie->sx_complete, 2 * HZ); /* Invalidate it toward next suspend or resume */ trans_pcie->sx_complete = false; if (!ret) { IWL_ERR(trans, "Timeout %s D3\n", suspend ? 
"entering" : "exiting"); return -ETIMEDOUT; } return 0; } static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); ret = iwl_pcie_d3_handshake(trans, true); if (ret) return ret; iwl_pcie_d3_complete_suspend(trans, test, reset); return 0; } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, bool test, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; int ret; if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; ret = 0; goto out; } iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); ret = iwl_finish_nic_init(trans); if (ret) return ret; /* * Reconfigure IVAR table in case of MSIX or reset ict table in * MSI mode since HW reset erased it. * Also enables interrupts - none will happen as * the device doesn't know we're waking it up, only when * the opmode actually tells it after this call. */ iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled) iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); iwl_pcie_set_pwr(trans, false); if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { iwl_trans_pcie_tx_reset(trans); ret = iwl_pcie_rx_init(trans); if (ret) { IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); return ret; } } IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", iwl_read_umac_prph(trans, WFPM_GP2)); val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) *status = IWL_D3_STATUS_RESET; else *status = IWL_D3_STATUS_ALIVE; out: if (*status == IWL_D3_STATUS_ALIVE) ret = iwl_pcie_d3_handshake(trans, false); return ret; } static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, struct iwl_trans *trans, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_irqs, num_irqs, i, ret; u16 pci_cmd; u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; if (!cfg_trans->mq_rx_supported) goto enable_msi; if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); for (i = 0; i < max_irqs; i++) trans_pcie->msix_entries[i].entry = i; num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, MSIX_MIN_INTERRUPT_VECTORS, max_irqs); if (num_irqs < 0) { IWL_DEBUG_INFO(trans, "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", num_irqs); goto enable_msi; } trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; IWL_DEBUG_INFO(trans, "MSI-X enabled. %d interrupt vectors were allocated\n", num_irqs); /* * In case the OS provides fewer interrupts than requested, different * causes will share the same interrupt vector as follows: * One interrupt less: non rx causes shared with FBQ. * Two interrupts less: non rx causes shared with FBQ and RSS. * More than two interrupts: we will use fewer RSS queues. 
*/ if (num_irqs <= max_irqs - 2) { trans_pcie->trans->num_rx_queues = num_irqs + 1; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | IWL_SHARED_IRQ_FIRST_RSS; } else if (num_irqs == max_irqs - 1) { trans_pcie->trans->num_rx_queues = num_irqs; trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; } else { trans_pcie->trans->num_rx_queues = num_irqs - 1; } IWL_DEBUG_INFO(trans, "MSI-X enabled with rx queues %d, vec mask 0x%x\n", trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask); WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES); trans_pcie->alloc_vecs = num_irqs; trans_pcie->msix_enabled = true; return; enable_msi: ret = pci_enable_msi(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } } } static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) { int iter_rx_q, i, ret, cpu, offset; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1; iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i; offset = 1 + i; for (; i < iter_rx_q ; i++) { /* * Get the cpu prior to the place to search * (i.e. return will be > i - 1). */ cpu = cpumask_next(i - offset, cpu_online_mask); cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]); ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector, &trans_pcie->affinity_mask[i]); if (ret) IWL_ERR(trans_pcie->trans, "Failed to set affinity mask for IRQ %d\n", trans_pcie->msix_entries[i].vector); } } static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { int i; for (i = 0; i < trans_pcie->alloc_vecs; i++) { int ret; struct msix_entry *msix_entry; const char *qname = queue_name(&pdev->dev, trans_pcie, i); if (!qname) return -ENOMEM; msix_entry = &trans_pcie->msix_entries[i]; ret = devm_request_threaded_irq(&pdev->dev, msix_entry->vector, iwl_pcie_msix_isr, (i == trans_pcie->def_irq) ? 
iwl_pcie_irq_msix_handler : iwl_pcie_irq_rx_msix_handler, IRQF_SHARED, qname, msix_entry); if (ret) { IWL_ERR(trans_pcie->trans, "Error allocating IRQ %d\n", i); return ret; } } iwl_pcie_irq_set_affinity(trans_pcie->trans); return 0; } static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) { u32 hpm, wprot; switch (trans->trans_cfg->device_family) { case IWL_DEVICE_FAMILY_9000: wprot = PREG_PRPH_WPROT_9000; break; case IWL_DEVICE_FAMILY_22000: wprot = PREG_PRPH_WPROT_22000; break; default: return 0; } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); - if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { + if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) { u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); if (wprot_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; } iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); } return 0; } static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) { int ret; ret = iwl_finish_nic_init(trans); if (ret < 0) return ret; iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); udelay(20); iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_PG_EN | HPM_HIPM_GEN_CFG_CR_SLP_EN); udelay(20); iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); return iwl_trans_pcie_sw_reset(trans, true); } static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int err; lockdep_assert_held(&trans_pcie->mutex); err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); return err; } err = iwl_trans_pcie_clear_persistence_bit(trans); if (err) return err; err = iwl_trans_pcie_sw_reset(trans, true); if (err) return err; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && trans->trans_cfg->integrated) { err = iwl_pcie_gen2_force_power_gating(trans); if (err) return err; } err = iwl_pcie_apm_init(trans); if (err) return err; iwl_pcie_init_msix(trans_pcie); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); trans_pcie->opmode_down = false; /* Set is_down to false here so that...*/ trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ iwl_pcie_check_hw_rf_kill(trans); return 0; } static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; mutex_lock(&trans_pcie->mutex); ret = _iwl_trans_pcie_start_hw(trans); mutex_unlock(&trans_pcie->mutex); return ret; } static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); mutex_lock(&trans_pcie->mutex); /* disable interrupts - don't enable HW RF kill interrupt */ iwl_disable_interrupts(trans); iwl_pcie_apm_stop(trans, true); iwl_disable_interrupts(trans); iwl_pcie_disable_ict(trans); mutex_unlock(&trans_pcie->mutex); iwl_pcie_synchronize_irqs(trans); } #if defined(__linux__) static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } 
#elif defined(__FreeBSD__) static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { IWL_DEBUG_PCI_RW(trans, "W1 %#010x %#04x\n", ofs, val); bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { IWL_DEBUG_PCI_RW(trans, "W4 %#010x %#010x\n", ofs, val); bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { u32 v; v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs); IWL_DEBUG_PCI_RW(trans, "R4 %#010x %#010x\n", ofs, v); return (v); } #endif static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) { if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) return 0x00FFFFFF; else return 0x000FFFFF; } static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, ((reg & mask) | (3 << 24))); return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { u32 mask = iwl_trans_pcie_prph_msk(trans); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24))); iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* free all first - we might be reconfigured for a different size */ iwl_pcie_free_rbs_pool(trans); trans->txqs.cmd.q_id = trans_cfg->cmd_queue; trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans->txqs.page_offs = trans_cfg->cb_data_offs; trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; if (trans_pcie->n_no_reclaim_cmds) memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; trans_pcie->rx_page_order = iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); trans_pcie->rx_buf_bytes = iwl_trans_get_rb_size(trans_pcie->rx_buf_size); trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. 
*/ if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } +void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, + struct device *dev) +{ + u8 i; + struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; + + /* free DRAM payloads */ + for (i = 0; i < dram_regions->n_regions; i++) { + dma_free_coherent(dev, dram_regions->drams[i].size, + dram_regions->drams[i].block, + dram_regions->drams[i].physical); + } + dram_regions->n_regions = 0; + + /* free DRAM addresses array */ + if (desc_dram->block) { + dma_free_coherent(dev, desc_dram->size, + desc_dram->block, + desc_dram->physical); + } + memset(desc_dram, 0, sizeof(*desc_dram)); +} + void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; iwl_pcie_synchronize_irqs(trans); if (trans->trans_cfg->gen2) iwl_txq_gen2_tx_free(trans); else iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); if (trans_pcie->rba.alloc_wq) { destroy_workqueue(trans_pcie->rba.alloc_wq); trans_pcie->rba.alloc_wq = NULL; } if (trans_pcie->msix_enabled) { for (i = 0; i < trans_pcie->alloc_vecs; i++) { irq_set_affinity_hint( trans_pcie->msix_entries[i].vector, NULL); } trans_pcie->msix_enabled = false; } else { iwl_pcie_free_ict(trans); } iwl_pcie_free_fw_monitor(trans); - if (trans_pcie->pnvm_dram.size) - dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, - trans_pcie->pnvm_dram.block, - trans_pcie->pnvm_dram.physical); - - if (trans_pcie->reduce_power_dram.size) - dma_free_coherent(trans->dev, - trans_pcie->reduce_power_dram.size, - trans_pcie->reduce_power_dram.block, - trans_pcie->reduce_power_dram.physical); + iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, + trans->dev); + iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data, + trans->dev); mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) { if (state) set_bit(STATUS_TPOWER_PMI, &trans->status); else clear_bit(STATUS_TPOWER_PMI, &trans->status); } struct iwl_trans_pcie_removal { struct pci_dev *pdev; struct work_struct work; + bool rescan; }; static void iwl_trans_pcie_removal_wk(struct work_struct *wk) { struct iwl_trans_pcie_removal *removal = container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + struct pci_bus *bus = pdev->bus; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); + if (removal->rescan) +#if defined(__linux__) + pci_rescan_bus(bus->parent); +#elif defined(__FreeBSD__) + pci_rescan_bus(bus); +#endif pci_unlock_rescan_remove(); kfree(removal); module_put(THIS_MODULE); } +void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) +{ + struct iwl_trans_pcie_removal *removal; + + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return; + + IWL_ERR(trans, "Device gone - scheduling removal!\n"); + + /* + * get a module reference to avoid doing this + * while unloading anyway and to avoid + * scheduling a work with code that's being + * removed. 
+ */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, + "Module is being unloaded - abort\n"); + return; + } + + removal = kzalloc(sizeof(*removal), GFP_ATOMIC); + if (!removal) { + module_put(THIS_MODULE); + return; + } + /* + * we don't need to clear this flag, because + * the trans will be freed and reallocated. + */ + set_bit(STATUS_TRANS_DEAD, &trans->status); + + removal->pdev = to_pci_dev(trans->dev); + removal->rescan = rescan; + INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); + pci_dev_get(removal->pdev); + schedule_work(&removal->work); +} +EXPORT_SYMBOL(iwl_trans_pcie_remove); + /* * This version doesn't disable BHs but rather assumes they're * already disabled. */ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; spin_lock(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; } /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); /* * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * HW with volatile SRAM must save/restore contents to/from * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP * check is a good idea before accessing the SRAM of HW with * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl); iwl_trans_pcie_dump_regs(trans); - if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { - struct iwl_trans_pcie_removal *removal; - - if (test_bit(STATUS_TRANS_DEAD, &trans->status)) - goto err; - - IWL_ERR(trans, "Device gone - scheduling removal!\n"); - - /* - * get a module reference to avoid doing this - * while unloading anyway and to avoid - * scheduling a work with code that's being - * removed. - */ - if (!try_module_get(THIS_MODULE)) { - IWL_ERR(trans, - "Module is being unloaded - abort\n"); - goto err; - } - - removal = kzalloc(sizeof(*removal), GFP_ATOMIC); - if (!removal) { - module_put(THIS_MODULE); - goto err; - } - /* - * we don't need to clear this flag, because - * the trans will be freed and reallocated. 
- */ - set_bit(STATUS_TRANS_DEAD, &trans->status); - - removal->pdev = to_pci_dev(trans->dev); - INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); - pci_dev_get(removal->pdev); - schedule_work(&removal->work); - } else { + if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) + iwl_trans_pcie_remove(trans, false); + else iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - } -err: spin_unlock(&trans_pcie->reg_lock); return false; } out: /* * Fool sparse by faking we release the lock - sparse will * track nic_access anyway. */ __release(&trans_pcie->reg_lock); return true; } static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { bool ret; local_bh_disable(); ret = __iwl_trans_pcie_grab_nic_access(trans); if (ret) { /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ return ret; } local_bh_enable(); return false; } static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->reg_lock); /* * Fool sparse by faking we acquiring the lock - sparse will * track nic_access anyway. */ __acquire(&trans_pcie->reg_lock); if (trans_pcie->cmd_hold_nic_awake) goto out; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the * MAC_ACCESS_REQ bit to be performed before any other writes * scheduled on different CPUs (after we drop reg_lock). */ out: spin_unlock_bh(&trans_pcie->reg_lock); } static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, void *buf, int dwords) { int offs = 0; u32 *vals = buf; while (offs < dwords) { /* limit the time we spin here under lock to 1/2s */ unsigned long end = jiffies + HZ / 2; bool resched = false; if (iwl_trans_grab_nic_access(trans)) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr + 4 * offs); while (offs < dwords) { vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); offs++; if (time_after(jiffies, end)) { resched = true; break; } } iwl_trans_release_nic_access(trans); if (resched) cond_resched(); } else { return -EBUSY; } } return 0; } static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, const void *buf, int dwords) { int offs, ret = 0; const u32 *vals = buf; if (iwl_trans_grab_nic_access(trans)) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); for (offs = 0; offs < dwords; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals ? 
vals[offs] : 0); iwl_trans_release_nic_access(trans); } else { ret = -EBUSY; } return ret; } static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, u32 *val) { return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, ofs, val); } static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans->txqs.txq[i]; if (i == trans->txqs.cmd.q_id) continue; spin_lock_bh(&txq->lock); if (!block && !(WARN_ON_ONCE(!txq->block))) { txq->block--; if (!txq->block) { iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (i << 8)); } } else if (block) { txq->block++; } spin_unlock_bh(&txq->lock); } } #define IWL_FLUSH_WAIT_MS 2000 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (queue >= trans->num_rx_queues || !trans_pcie->rxq) return -EINVAL; data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; data->fr_bd_wid = 0; return 0; } static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; u8 wr_ptr; /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!test_bit(txq_idx, trans->txqs.queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); txq = trans->txqs.txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); wr_ptr = READ_ONCE(txq->write_ptr); while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || overflow_tx) && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { u8 write_ptr = READ_ONCE(txq->write_ptr); /* * If write pointer moved during the wait, warn only * if the TX came from op mode. In case TX came from * trans layer (overflow TX) don't warn. 
*/ if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, "WR pointer moved while flushing %d -> %d\n", wr_ptr, write_ptr)) return -ETIMEDOUT; wr_ptr = write_ptr; usleep_range(1000, 2000); spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || !skb_queue_empty(&txq->overflow_q); spin_unlock_bh(&txq->lock); } if (txq->read_ptr != txq->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues Q %d\n", txq_idx); iwl_txq_log_scd_error(trans, txq); return -ETIMEDOUT; } IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); return 0; } static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { int cnt; int ret = 0; /* waiting for all the tx frames complete might take a while */ for (cnt = 0; cnt < trans->trans_cfg->base_params->num_of_queues; cnt++) { if (cnt == trans->txqs.cmd.q_id) continue; if (!test_bit(cnt, trans->txqs.queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); if (ret) break; } return ret; } static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); spin_lock_bh(&trans_pcie->reg_lock); __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); spin_unlock_bh(&trans_pcie->reg_lock); } static const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); IWL_CMD(CSR_INT_COALESCING); IWL_CMD(CSR_INT); IWL_CMD(CSR_INT_MASK); IWL_CMD(CSR_FH_INT_STATUS); IWL_CMD(CSR_GPIO_IN); IWL_CMD(CSR_RESET); IWL_CMD(CSR_GP_CNTRL); IWL_CMD(CSR_HW_REV); IWL_CMD(CSR_EEPROM_REG); IWL_CMD(CSR_EEPROM_GP); IWL_CMD(CSR_OTP_GP_REG); IWL_CMD(CSR_GIO_REG); IWL_CMD(CSR_GP_UCODE_REG); IWL_CMD(CSR_GP_DRIVER_REG); IWL_CMD(CSR_UCODE_DRV_GP1); IWL_CMD(CSR_UCODE_DRV_GP2); IWL_CMD(CSR_LED_REG); IWL_CMD(CSR_DRAM_INT_TBL_REG); IWL_CMD(CSR_GIO_CHICKEN_BITS); IWL_CMD(CSR_ANA_PLL_CFG); IWL_CMD(CSR_HW_REV_WA_REG); IWL_CMD(CSR_MONITOR_STATUS_REG); IWL_CMD(CSR_DBG_HPET_MEM_REG); default: return "UNKNOWN"; } #undef IWL_CMD } void iwl_pcie_dump_csr(struct iwl_trans *trans) { int i; static const u32 csr_tbl[] = { CSR_HW_IF_CONFIG_REG, CSR_INT_COALESCING, CSR_INT, CSR_INT_MASK, CSR_FH_INT_STATUS, CSR_GPIO_IN, CSR_RESET, CSR_GP_CNTRL, CSR_HW_REV, CSR_EEPROM_REG, CSR_EEPROM_GP, CSR_OTP_GP_REG, CSR_GIO_REG, CSR_GP_UCODE_REG, CSR_GP_DRIVER_REG, CSR_UCODE_DRV_GP1, CSR_UCODE_DRV_GP2, CSR_LED_REG, CSR_DRAM_INT_TBL_REG, CSR_GIO_CHICKEN_BITS, CSR_ANA_PLL_CFG, CSR_MONITOR_STATUS_REG, CSR_HW_REV_WA_REG, CSR_DBG_HPET_MEM_REG }; IWL_ERR(trans, "CSR values:\n"); IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " "CSR_INT_PERIODIC_REG)\n"); for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { IWL_ERR(trans, " %25s: 0X%08x\n", get_csr_string(csr_tbl[i]), iwl_read32(trans, csr_tbl[i])); } } #ifdef CONFIG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops); \ } while (0) /* file operation */ #define DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ static const struct file_operations 
iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; struct iwl_dbgfs_tx_queue_priv { struct iwl_trans *trans; }; struct iwl_dbgfs_tx_queue_state { loff_t pos; }; static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state; if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; state->pos = *pos; return state; } static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; *pos = ++state->pos; if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) return NULL; return state; } static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) { kfree(v); } static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) { struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_trans *trans = priv->trans; struct iwl_txq *txq = trans->txqs.txq[state->pos]; seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", (unsigned int)state->pos, !!test_bit(state->pos, trans->txqs.queue_used), !!test_bit(state->pos, trans->txqs.queue_stopped)); if (txq) seq_printf(seq, "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", txq->read_ptr, txq->write_ptr, txq->need_update, txq->frozen, txq->n_window, txq->ampdu); else seq_puts(seq, "(unallocated)"); if (state->pos == trans->txqs.cmd.q_id) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); return 0; } static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { .start = iwl_dbgfs_tx_queue_seq_start, .next = iwl_dbgfs_tx_queue_seq_next, .stop = iwl_dbgfs_tx_queue_seq_stop, .show = iwl_dbgfs_tx_queue_seq_show, }; static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) { struct iwl_dbgfs_tx_queue_priv *priv; priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, sizeof(*priv)); if (!priv) return -ENOMEM; priv->trans = inode->i_private; return 0; } static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char *buf; int pos = 0, i, ret; size_t bufsz; bufsz = sizeof(char) * 121 * trans->num_rx_queues; if (!trans_pcie->rxq) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", i); pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", rxq->read); pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", rxq->write); pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", rxq->write_actual); pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", rxq->need_update); pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)); pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: %u\n", r & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } 
static ssize_t iwl_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; int pos = 0; char *buf; int bufsz = 24 * 64; /* 24 items * 64 char per item */ ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos += scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", isr_stats->hw); pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", isr_stats->sw); if (isr_stats->sw || isr_stats->hw) { pos += scnprintf(buf + pos, bufsz - pos, "\tLast Restarting Code: 0x%X\n", isr_stats->err_code); } #ifdef CONFIG_IWLWIFI_DEBUG pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", isr_stats->sch); pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", isr_stats->alive); #endif pos += scnprintf(buf + pos, bufsz - pos, "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", isr_stats->ctkill); pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", isr_stats->wakeup); pos += scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", isr_stats->rx); pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", isr_stats->tx); pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", isr_stats->unhandled); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; u32 reset_flag; int ret; ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); if (ret) return ret; if (reset_flag == 0) memset(isr_stats, 0, sizeof(*isr_stats)); return count; } static ssize_t iwl_dbgfs_csr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; iwl_pcie_dump_csr(trans); return count; } static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; char *buf = NULL; ssize_t ret; ret = iwl_dump_fh(trans, &buf); if (ret < 0) return ret; if (!buf) return -EINVAL; ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); kfree(buf); return ret; } static ssize_t iwl_dbgfs_rfkill_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); char buf[100]; int pos; pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", trans_pcie->debug_rfkill, !(iwl_read32(trans, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rfkill_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool new_value; int ret; ret = kstrtobool_from_user(user_buf, count, &new_value); if (ret) return ret; if (new_value == trans_pcie->debug_rfkill) return 
count; IWL_WARN(trans, "changing debug rfkill %d->%d\n", trans_pcie->debug_rfkill, new_value); trans_pcie->debug_rfkill = new_value; iwl_pcie_handle_rfkill_irq(trans); return count; } static int iwl_dbgfs_monitor_data_open(struct inode *inode, struct file *file) { struct iwl_trans *trans = inode->i_private; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans->dbg.dest_tlv || trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { IWL_ERR(trans, "Debug destination is not set to DRAM\n"); return -ENOENT; } if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) return -EBUSY; trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; return simple_open(inode, file); } static int iwl_dbgfs_monitor_data_release(struct inode *inode, struct file *file) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(inode->i_private); if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; return 0; } static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, void *buf, ssize_t *size, ssize_t *bytes_copied) { - int buf_size_left = count - *bytes_copied; + ssize_t buf_size_left = count - *bytes_copied; buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); if (*size > buf_size_left) *size = buf_size_left; *size -= copy_to_user(user_buf, buf, *size); *bytes_copied += *size; if (buf_size_left == *size) return true; return false; } static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; struct cont_rec *data = &trans_pcie->fw_mon_data; u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; ssize_t size, bytes_copied = 0; bool b_full; if (trans->dbg.dest_tlv) { write_ptr_addr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); } else { write_ptr_addr = MON_BUFF_WRPTR; wrap_cnt_addr = MON_BUFF_CYCLE_CNT; } if (unlikely(!trans->dbg.rec_on)) return 0; mutex_lock(&data->mutex); if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) { mutex_unlock(&data->mutex); return 0; } /* write_ptr position in bytes rather than DW */ write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); if (data->prev_wrap_cnt == wrap_cnt) { size = write_ptr - data->prev_wr_ptr; curr_buf = cpu_addr + data->prev_wr_ptr; b_full = iwl_write_to_user_buf(user_buf, count, curr_buf, &size, &bytes_copied); data->prev_wr_ptr += size; } else if (data->prev_wrap_cnt == wrap_cnt - 1 && write_ptr < data->prev_wr_ptr) { size = trans->dbg.fw_mon.size - data->prev_wr_ptr; curr_buf = cpu_addr + data->prev_wr_ptr; b_full = iwl_write_to_user_buf(user_buf, count, curr_buf, &size, &bytes_copied); data->prev_wr_ptr += size; if (!b_full) { size = write_ptr; b_full = iwl_write_to_user_buf(user_buf, count, cpu_addr, &size, &bytes_copied); data->prev_wr_ptr = size; data->prev_wrap_cnt++; } } else { if (data->prev_wrap_cnt == wrap_cnt - 1 && write_ptr > data->prev_wr_ptr) IWL_WARN(trans, "write pointer passed previous write pointer, start copying from the beginning\n"); else if (!unlikely(data->prev_wrap_cnt == 0 && data->prev_wr_ptr == 0)) IWL_WARN(trans, "monitor data is out of sync, start copying from the beginning\n"); size = write_ptr; b_full = iwl_write_to_user_buf(user_buf, count,
cpu_addr, &size, &bytes_copied); data->prev_wr_ptr = size; data->prev_wrap_cnt = wrap_cnt; } mutex_unlock(&data->mutex); return bytes_copied; } static ssize_t iwl_dbgfs_rf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans_pcie->rf_name[0]) return -ENODEV; return simple_read_from_buffer(user_buf, count, ppos, trans_pcie->rf_name, strlen(trans_pcie->rf_name)); } DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_WRITE_FILE_OPS(csr); DEBUGFS_READ_WRITE_FILE_OPS(rfkill); DEBUGFS_READ_FILE_OPS(rf); static const struct file_operations iwl_dbgfs_tx_queue_ops = { .owner = THIS_MODULE, .open = iwl_dbgfs_tx_queue_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; static const struct file_operations iwl_dbgfs_monitor_data_ops = { .read = iwl_dbgfs_monitor_data_read, .open = iwl_dbgfs_monitor_data_open, .release = iwl_dbgfs_monitor_data_release, }; /* Create the debugfs files and directories */ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { struct dentry *dir = trans->dbgfs_dir; DEBUGFS_ADD_FILE(rx_queue, dir, 0400); DEBUGFS_ADD_FILE(tx_queue, dir, 0400); DEBUGFS_ADD_FILE(interrupt, dir, 0600); DEBUGFS_ADD_FILE(csr, dir, 0200); DEBUGFS_ADD_FILE(fh_reg, dir, 0400); DEBUGFS_ADD_FILE(rfkill, dir, 0600); DEBUGFS_ADD_FILE(monitor_data, dir, 0400); DEBUGFS_ADD_FILE(rf, dir, 0400); } static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct cont_rec *data = &trans_pcie->fw_mon_data; mutex_lock(&data->mutex); data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; mutex_unlock(&data->mutex); } #endif /*CONFIG_IWLWIFI_DEBUGFS */ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { u32 cmdlen = 0; int i; for (i = 0; i < trans->txqs.tfd.max_tbs; i++) cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); return cmdlen; } static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, int allocated_rb_nums) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_len = trans_pcie->rx_buf_bytes; /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; spin_lock(&rxq->lock); r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; i = (i + 1) & RX_QUEUE_MASK, j++) { struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; struct iwl_fw_error_dump_rb *rb; dma_sync_single_for_cpu(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); rb_len += sizeof(**data) + sizeof(*rb) + max_len; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); rb = (void *)(*data)->data; rb->index = cpu_to_le32(i); memcpy(rb->data, page_address(rxb->page), max_len); *data = iwl_fw_error_next_data(*data); } spin_unlock(&rxq->lock); return rb_len; } #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; __le32 *val; int i; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); val = (void *)(*data)->data; for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 
*data = iwl_fw_error_next_data(*data); return csr_len; } static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data) { u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; __le32 *val; int i; if (!iwl_trans_grab_nic_access(trans)) return 0; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); (*data)->len = cpu_to_le32(fh_regs_len); val = (void *)(*data)->data; if (!trans->trans_cfg->gen2) for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); else for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, i)); iwl_trans_release_nic_access(trans); *data = iwl_fw_error_next_data(*data); return sizeof(**data) + fh_regs_len; } static u32 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data, u32 monitor_len) { u32 buf_size_in_dwords = (monitor_len >> 2); u32 *buffer = (u32 *)fw_mon_data->data; u32 i; if (!iwl_trans_grab_nic_access(trans)) return 0; iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); for (i = 0; i < buf_size_in_dwords; i++) buffer[i] = iwl_read_umac_prph_no_grab(trans, MON_DMARB_RD_DATA_ADDR); iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); iwl_trans_release_nic_access(trans); return monitor_len; } static void iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, struct iwl_fw_error_dump_fw_mon *fw_mon_data) { u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; write_ptr = DBGC_CUR_DBGBUF_STATUS; wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; } else if (trans->dbg.dest_tlv) { write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); } else { base = MON_BUFF_BASE_ADDR; write_ptr = MON_BUFF_WRPTR; wrap_cnt = MON_BUFF_CYCLE_CNT; } write_ptr_val = iwl_read_prph(trans, write_ptr); fw_mon_data->fw_mon_cycle_cnt = cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); fw_mon_data->fw_mon_base_ptr = cpu_to_le32(iwl_read_prph(trans, base)); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; /* convert wrtPtr to DWs, to align with all HWs */ write_ptr_val >>= 2; } fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); } static u32 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, u32 monitor_len) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; u32 len = 0; if (trans->dbg.dest_tlv || (fw_mon->size && (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_mon_data = (void *)(*data)->data; iwl_trans_pcie_dump_pointers(trans, fw_mon_data); len += sizeof(**data) + sizeof(*fw_mon_data); if (fw_mon->size) { memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); monitor_len = fw_mon->size; } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); /* * Update pointers to reflect actual values after * shifting */ if 
(trans->dbg.dest_tlv->version) { base = (iwl_read_prph(trans, base) & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; } else { base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; } iwl_trans_read_mem(trans, base, fw_mon_data->data, monitor_len / sizeof(u32)); } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { monitor_len = iwl_trans_pci_dump_marbh_monitor(trans, fw_mon_data, monitor_len); } else { /* Didn't match anything - output no monitor data */ monitor_len = 0; } len += monitor_len; (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); } return len; } static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) { if (trans->dbg.fw_mon.size) { *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + trans->dbg.fw_mon.size; return trans->dbg.fw_mon.size; } else if (trans->dbg.dest_tlv) { u32 base, end, cfg_reg, monitor_len; if (trans->dbg.dest_tlv->version == 1) { cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); cfg_reg = iwl_read_prph(trans, cfg_reg); base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << trans->dbg.dest_tlv->base_shift; base *= IWL_M2S_UNIT_SIZE; base += trans->cfg->smem_offset; monitor_len = (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> trans->dbg.dest_tlv->end_shift; monitor_len *= IWL_M2S_UNIT_SIZE; } else { base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); base = iwl_read_prph(trans, base) << trans->dbg.dest_tlv->base_shift; end = iwl_read_prph(trans, end) << trans->dbg.dest_tlv->end_shift; /* Make "end" point to the actual end */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 || trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) end += (1 << trans->dbg.dest_tlv->end_shift); monitor_len = end - base; } *len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fw_mon) + monitor_len; return monitor_len; } return 0; } static struct iwl_trans_dump_data * iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->trans_cfg->mq_rx_supported && dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); if (!dump_mask) return NULL; /* transport dump header */ len = sizeof(*dump_data); /* host commands */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) len += sizeof(*data) + cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); /* CSR registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { if (trans->trans_cfg->gen2) len += sizeof(*data) + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); else len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); } if (dump_rbs) { /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ num_rbs = 
le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); } /* Paged memory for gen2 HW */ if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + trans->init_dram.paging[i].size; dump_data = vzalloc(len); if (!dump_data) return NULL; len = 0; data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { u16 tfd_size = trans->txqs.tfd.size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); u8 tfdidx; u32 caplen, cmdlen; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) tfdidx = idx; else tfdidx = ptr; cmdlen = iwl_trans_pcie_get_cmdlen(trans, (u8 *)cmdq->tfds + tfd_size * tfdidx); caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); if (cmdlen) { len += sizeof(*txcmd) + caplen; txcmd->cmdlen = cpu_to_le32(cmdlen); txcmd->caplen = cpu_to_le32(caplen); memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); if (sanitize_ops && sanitize_ops->frob_hcmd) sanitize_ops->frob_hcmd(sanitize_ctx, txcmd->data, caplen); txcmd = (void *)((u8 *)txcmd->data + caplen); } ptr = iwl_txq_dec_wrap(trans, ptr); } spin_unlock_bh(&cmdq->lock); data->len = cpu_to_le32(len); len += sizeof(*data); data = iwl_fw_error_next_data(data); } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += iwl_trans_pcie_dump_csr(trans, &data); if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; u32 page_len = trans->init_dram.paging[i].size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); data->len = cpu_to_le32(sizeof(*paging) + page_len); paging = (void *)data->data; paging->index = cpu_to_le32(i); memcpy(paging->data, trans->init_dram.paging[i].block, page_len); data = iwl_fw_error_next_data(data); len += sizeof(*data) + sizeof(*paging) + page_len; } } if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; return dump_data; } static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) { if (enable) iwl_enable_interrupts(trans); else iwl_disable_interrupts(trans); } static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { u32 inta_addr, sw_err_bit; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (trans_pcie->msix_enabled) { inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; else sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; } else { inta_addr = CSR_INT; sw_err_bit = CSR_INT_BIT_SW_ERR; } iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); } #define IWL_TRANS_COMMON_OPS \ .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ .write8 = iwl_trans_pcie_write8, \ .write32 = iwl_trans_pcie_write32, \ .read32 = iwl_trans_pcie_read32, \ .read_prph = iwl_trans_pcie_read_prph, \ .write_prph = iwl_trans_pcie_write_prph, \ .read_mem 
= iwl_trans_pcie_read_mem, \ .write_mem = iwl_trans_pcie_write_mem, \ .read_config32 = iwl_trans_pcie_read_config32, \ .configure = iwl_trans_pcie_configure, \ .set_pmi = iwl_trans_pcie_set_pmi, \ .sw_reset = iwl_trans_pcie_sw_reset, \ .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ .release_nic_access = iwl_trans_pcie_release_nic_access, \ .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ .dump_data = iwl_trans_pcie_dump_data, \ .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume, \ .interrupts = iwl_trans_pci_interrupts, \ .sync_nmi = iwl_trans_pcie_sync_nmi, \ .imr_dma_data = iwl_trans_pcie_copy_imr \ static const struct iwl_trans_ops trans_ops_pcie = { IWL_TRANS_COMMON_OPS, .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, .send_cmd = iwl_pcie_enqueue_hcmd, .tx = iwl_trans_pcie_tx, .reclaim = iwl_txq_reclaim, .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, .freeze_txq_timer = iwl_trans_txq_freeze_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; static const struct iwl_trans_ops trans_ops_pcie_gen2 = { IWL_TRANS_COMMON_OPS, .start_hw = iwl_trans_pcie_start_hw, .fw_alive = iwl_trans_pcie_gen2_fw_alive, .start_fw = iwl_trans_pcie_gen2_start_fw, .stop_device = iwl_trans_pcie_gen2_stop_device, .send_cmd = iwl_pcie_gen2_enqueue_hcmd, .tx = iwl_txq_gen2_tx, .reclaim = iwl_txq_reclaim, .set_q_ptrs = iwl_txq_set_q_ptrs, .txq_alloc = iwl_txq_dyn_alloc, .txq_free = iwl_txq_dyn_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, + .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm, .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, + .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power, .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, #endif }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; int ret, addr_size; const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; void __iomem * const *table; if (!cfg_trans->gen2) ops = &trans_ops_pcie; ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, cfg_trans); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; trans_pcie->opmode_down = true; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->alloc_page_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); init_waitqueue_head(&trans_pcie->fw_reset_waitq); init_waitqueue_head(&trans_pcie->imr_waitq); trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); + WQ_HIGHPRI | WQ_UNBOUND, 0); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; goto out_free_trans; } INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); trans_pcie->debug_rfkill = -1; if (!cfg_trans->base_params->pcie_l1_allowed) { /* * W/A - seems to solve weird behavior. 
We need to remove this * if we don't want to stay in L1 all the time. This wastes a * lot of power. */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); } trans_pcie->def_rx_queue = 0; pci_set_master(pdev); addr_size = trans->txqs.tfd.addr_size; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_no_pci; } } ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); if (ret) { dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); goto out_no_pci; } #if defined(__FreeBSD__) linuxkpi_pcim_want_to_use_bus_functions(pdev); #endif table = pcim_iomap_table(pdev); if (!table) { dev_err(&pdev->dev, "pcim_iomap_table failed\n"); ret = -ENOMEM; goto out_no_pci; } trans_pcie->hw_base = table[0]; if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); ret = -ENODEV; goto out_no_pci; } /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); trans->hw_rev = iwl_read32(trans, CSR_HW_REV); if (trans->hw_rev == 0xffffffff) { dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); ret = -EIO; goto out_no_pci; } /* * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have * changed, and now the revision step also includes bit 0-1 (no more * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. */ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) trans->hw_rev_step = trans->hw_rev & 0xF; else trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); init_waitqueue_head(&trans_pcie->sx_waitq); if (trans_pcie->msix_enabled) { ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); if (ret) goto out_no_pci; } else { ret = iwl_pcie_alloc_ict(trans); if (ret) goto out_no_pci; ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } } #ifdef CONFIG_IWLWIFI_DEBUGFS trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; mutex_init(&trans_pcie->fw_mon_data.mutex); #endif iwl_dbg_tlv_init(trans); return trans; out_free_ict: iwl_pcie_free_ict(trans); out_no_pci: destroy_workqueue(trans_pcie->rba.alloc_wq); out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); } void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt) { iwl_write_prph(trans, IMR_UREG_CHICK, iwl_read_prph(trans, IMR_UREG_CHICK) | IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, (u32)(src_addr & 0xFFFFFFFF)); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, iwl_get_dma_hi_addr(src_addr)); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | 
IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); } int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = -1; trans_pcie->imr_status = IMR_D2S_REQUESTED; iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); ret = wait_event_timeout(trans_pcie->imr_waitq, trans_pcie->imr_status != IMR_D2S_REQUESTED, 5 * HZ); if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); iwl_trans_pcie_dump_regs(trans); return -ETIMEDOUT; } trans_pcie->imr_status = IMR_D2S_IDLE; return 0; } diff --git a/sys/contrib/dev/iwlwifi/pcie/tx.c b/sys/contrib/dev/iwlwifi/pcie/tx.c index 447faaef1ea0..0962afa4e695 100644 --- a/sys/contrib/dev/iwlwifi/pcie/tx.c +++ b/sys/contrib/dev/iwlwifi/pcie/tx.c @@ -1,1639 +1,1642 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #ifdef CONFIG_INET #include #include #endif #if defined(__FreeBSD__) #include #endif #include "iwl-debug.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-io.h" #include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" #include "fw/api/tx.h" /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services * * Theory of operation * * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer * of buffer descriptors, each of which points to one or more data buffers for * the device to read from or fill. Driver and device exchange status of each * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty * entries in each circular buffer, to protect against confusing empty and full * queue states. * * The device reads or writes the data in the queues via the device's several * DMA/FIFO channels. Each queue is mapped to a single DMA channel. * * For Tx queue, there are low mark and high mark limits. If, after queuing * the packet for Tx, free space become < low mark, Tx queue stopped. When * reclaiming packets (on 'tx done IRQ), if free space become > high mark, * Tx queue resumed. * ***************************************************/ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size) { if (WARN_ON(ptr->addr)) return -EINVAL; ptr->addr = dma_alloc_coherent(trans->dev, size, &ptr->dma, GFP_KERNEL); if (!ptr->addr) return -ENOMEM; ptr->size = size; return 0; } void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) { if (unlikely(!ptr->addr)) return; dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); memset(ptr, 0, sizeof(*ptr)); } /* * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware */ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { u32 reg = 0; int txq_id = txq->id; lockdep_assert_held(&txq->lock); /* * explicitly wake up the NIC if: * 1. shadow registers aren't enabled * 2. NIC is woken up for CMD regardless of shadow outside this function * 3. there is a chance that the NIC is asleep */ if (!trans->trans_cfg->base_params->shadow_reg_enable && txq_id != trans->txqs.cmd.q_id && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... 
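The DMA-queue comment above describes the ring: a circular buffer in host DRAM indexed by a read pointer (device side) and a write pointer (driver side), with empty entries reserved so that a full ring can never look identical to an empty one. A self-contained sketch of that bookkeeping, using a made-up power-of-two ring size and reserving a single slot (the driver keeps at least two free):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256				/* must be a power of two */

struct ring {
	uint32_t read_ptr;			/* next entry the device consumes */
	uint32_t write_ptr;			/* next entry the driver fills */
};

static uint32_t ring_used(const struct ring *r)
{
	return (r->write_ptr - r->read_ptr) & (RING_SIZE - 1);
}

/* keep at least one slot unused so used == 0 always means "empty" */
static uint32_t ring_free(const struct ring *r)
{
	return RING_SIZE - 1 - ring_used(r);
}

int main(void)
{
	struct ring r = { .read_ptr = 10, .write_ptr = 250 };

	printf("used=%u free=%u\n", ring_used(&r), ring_free(&r));
	r.write_ptr = (r.write_ptr + 1) & (RING_SIZE - 1);	/* enqueue one */
	printf("used=%u free=%u\n", ring_used(&r), ring_free(&r));
	return 0;
}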
* uCode will wake up, and interrupt us again, so next * time we'll skip this part. */ reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); txq->need_update = true; return; } } /* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). */ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); if (!txq->block) iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq_id << 8)); } void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { struct iwl_txq *txq = trans->txqs.txq[i]; if (!test_bit(i, trans->txqs.queue_used)) continue; spin_lock_bh(&txq->lock); if (txq->need_update) { iwl_pcie_txq_inc_wr_ptr(trans, txq); txq->need_update = false; } spin_unlock_bh(&txq->lock); } } static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, u8 idx, dma_addr_t addr, u16 len) { struct iwl_tfd *tfd_fh = (void *)tfd; struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx]; u16 hi_n_len = len << 4; put_unaligned_le32(addr, &tb->lo); hi_n_len |= iwl_get_dma_hi_addr(addr); tb->hi_n_len = cpu_to_le16(hi_n_len); tfd_fh->num_tbs = idx + 1; } static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { void *tfd; u32 num_tbs; tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr; if (reset) memset(tfd, 0, trans->txqs.tfd.size); num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ if (num_tbs >= trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", trans->txqs.tfd.max_tbs); return -EINVAL; } if (WARN(addr & ~IWL_TX_DMA_MASK, "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len); return num_tbs; } static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return; spin_lock(&trans_pcie->reg_lock); if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { spin_unlock(&trans_pcie->reg_lock); return; } trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); spin_unlock(&trans_pcie->reg_lock); } /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; if (!txq) { IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); return; } spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_txq_free_tso_page(trans, skb); } iwl_txq_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr && txq_id == trans->txqs.cmd.q_id) iwl_pcie_clear_cmd_in_flight(trans); } while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); iwl_op_mode_free_skb(trans->op_mode, skb); } spin_unlock_bh(&txq->lock); /* just in case - this queue may have been 
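iwl_pcie_tfd_set_tb() above packs each transfer buffer as a 32-bit low address plus a 16-bit word combining the upper address bits with a 12-bit length (len << 4). A hypothetical host-side rendering of that packing, with field and helper names invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* One legacy TB descriptor: the low 32 address bits go in 'lo', the upper
 * address nibble shares a 16-bit word with a 12-bit length (len << 4).
 */
struct tb {
	uint32_t lo;
	uint16_t hi_n_len;
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

int main(void)
{
	struct tb tb;

	tb_set(&tb, 0x3BCDEF012ULL, 128);
	/* hi_n_len >> 4 recovers the length (128); low nibble holds addr[35:32] */
	printf("lo=0x%08x hi_n_len=0x%04x\n", tb.lo, tb.hi_n_len);
	return 0;
}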
stopped */ iwl_wake_queue(trans, txq); } /* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; if (WARN_ON(!txq)) return; iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, trans->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); txq->entries = NULL; del_timer_sync(&txq->stuck_timer); /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int nq = trans->trans_cfg->base_params->num_of_queues; int chan; u32 reg_val; int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ memset(trans->txqs.queue_stopped, 0, sizeof(trans->txqs.queue_stopped)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); WARN_ON(scd_base_addr != 0 && scd_base_addr != trans_pcie->scd_base_addr); /* reset context data, TX status and translation data */ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND, NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans->txqs.scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. 
*/ if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, trans->txqs.cmd.fifo, trans->txqs.cmd.wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); /* Update FH chicken bits */ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* * we should never get here in gen2 trans mode return early to avoid * having invalid accesses */ if (WARN_ON_ONCE(trans->trans_cfg->gen2)) return; for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); else iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr >> 8); iwl_pcie_txq_unmap(trans, txq_id); txq->read_ptr = 0; txq->write_ptr = 0; } /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); /* * Send 0 as the scd_base_addr since the device may have be reset * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will * contain garbage. */ iwl_pcie_tx_start(trans, 0); } static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ch, ret; u32 mask = 0; spin_lock_bh(&trans_pcie->irq_lock); if (!iwl_trans_grab_nic_access(trans)) goto out; /* Stop each Tx DMA channel */ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); } /* Wait for DMA channels to be idle */ ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); if (ret < 0) IWL_ERR(trans, "Failing on timeout while stopping DMA channel %d [0x%08x]\n", ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); iwl_trans_release_nic_access(trans); out: spin_unlock_bh(&trans_pcie->irq_lock); } /* * iwl_pcie_tx_stop - Stop all Tx DMA channels */ int iwl_pcie_tx_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int txq_id; /* Turn off all Tx DMA fifos */ iwl_scd_deactivate_fifos(trans); /* Turn off all Tx DMA channels */ iwl_pcie_tx_stop_fh(trans); /* * This function can be called before the op_mode disabled the * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
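iwl_pcie_tx_stop_fh() above zeroes each channel's config register and then polls FH_TSSR_TX_STATUS_REG until every channel-idle bit in the accumulated mask is set, giving up after a timeout. The driver uses its time-based iwl_poll_bit() helper; the sketch below only shows the loop shape, with a simulated register read standing in for the hardware:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status;			/* stands in for the device register */

static uint32_t read_status(void)
{
	fake_status |= 1u << 16;		/* pretend a channel just went idle */
	return fake_status;
}

/* Poll until (reg & mask) == expected or 'spins' attempts are exhausted. */
static int poll_bits(uint32_t mask, uint32_t expected, int spins)
{
	while (spins--) {
		if ((read_status() & mask) == expected)
			return 0;
	}
	return -1;				/* timed out */
}

int main(void)
{
	uint32_t mask = (1u << 16) | (1u << 17);

	fake_status = 1u << 17;
	printf("poll: %s\n", poll_bits(mask, mask, 1000) ? "timeout" : "idle");
	return 0;
}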
*/ memset(trans->txqs.queue_stopped, 0, sizeof(trans->txqs.queue_stopped)); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) return 0; /* Unmap DMA from host system and free skb's */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) iwl_pcie_txq_unmap(trans, txq_id); return 0; } /* * iwl_trans_tx_free - Free TXQ Context * * Destroy all TX DMA queues and structures */ void iwl_pcie_tx_free(struct iwl_trans *trans) { int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); trans->txqs.txq[txq_id] = NULL; } } kfree(trans_pcie->txq_memory); trans_pcie->txq_memory = NULL; iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); } /* * iwl_pcie_tx_alloc - allocate TX context * Allocate all Tx DMA structures and initialize them */ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) { int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) return -EINVAL; bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ if (WARN_ON(trans_pcie->txq_memory)) { ret = -EINVAL; goto error; } ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); goto error; } /* Alloc keep-warm buffer */ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); if (ret) { IWL_ERR(trans, "Keep Warm allocation failed\n"); goto error; } trans_pcie->txq_memory = kcalloc(trans->trans_cfg->base_params->num_of_queues, sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; goto error; } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } trans->txqs.txq[txq_id]->id = txq_id; } return 0; error: iwl_pcie_tx_free(trans); return ret; } int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; int txq_id, slots_num; bool alloc = false; if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) goto error; alloc = true; } spin_lock_bh(&trans_pcie->irq_lock); /* Turn off all Tx DMA fifos */ iwl_scd_deactivate_fifos(trans); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); spin_unlock_bh(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) 
*/ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, trans->cfg->min_txq_size); else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } /* * Tell nic where to find circular buffer of TFDs for a * given Tx queue, and enable the DMA channel used for that * queue. * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), trans->txqs.txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->trans_cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); return 0; error: /*Upon error, free only if we allocated something */ if (alloc) iwl_pcie_tx_free(trans); return ret; } static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return 0; /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set (see above.) */ if (!_iwl_trans_pcie_grab_nic_access(trans)) return -EIO; /* * In iwl_trans_grab_nic_access(), we've acquired the reg_lock. * There, we also returned immediately if cmd_hold_nic_awake is * already true, so it's OK to unconditionally set it to true. */ trans_pcie->cmd_hold_nic_awake = true; spin_unlock(&trans_pcie->reg_lock); return 0; } /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * * When FW advances 'R' index, all entries between old and new 'R' index * need to be reclaimed. As result, some free space forms. If there is * enough free space (> low mark), wake the stack that feeds us. 
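The init loop above hands each queue's TFD ring base to the hardware as dma_addr >> 8, i.e. the register stores the base in 256-byte units and the ring is assumed to be suitably aligned. A small sketch of that encoding (granularity inferred from the shift above, names invented):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* The register holds the ring base in 256-byte units, so the DMA address
 * must be 256-byte aligned for the encoding to be lossless.
 */
static uint32_t cbbc_encode(uint64_t dma_addr)
{
	assert((dma_addr & 0xFF) == 0);
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t ring_base = 0x12345600;
	uint32_t reg = cbbc_encode(ring_base);

	printf("reg=0x%08x decoded=0x%llx\n", reg,
	       (unsigned long long)reg << 8);
	return 0;
}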
*/ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; int nfreed = 0; u16 r; lockdep_assert_held(&txq->lock); idx = iwl_txq_get_cmd_index(txq, idx); r = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || (!iwl_txq_used(txq, idx))) { WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; r = iwl_txq_inc_wrap(trans, r)) { txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx, txq->write_ptr, r); iwl_force_nmi(trans); } } if (txq->read_ptr == txq->write_ptr) iwl_pcie_clear_cmd_in_flight(trans); iwl_txq_progress(txq); } static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, u16 txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 tbl_dw_addr; u32 tbl_dw; u16 scd_q2ratid; scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; tbl_dw_addr = trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); if (txq_id & 0x1) tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); else tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); return 0; } /* Receiver address (actually, Rx station's index into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; int fifo = -1; bool scd_bug = false; if (test_and_set_bit(txq_id, trans->txqs.queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); if (cfg) { fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); /* Stop this Tx queue before configuring it */ iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ if (txq_id != trans->txqs.cmd.q_id) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); /* Map receiver-address / traffic-ID to this queue */ iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); /* enable aggregations for the queue */ iwl_scd_txq_enable_agg(trans, txq_id); txq->ampdu = true; } else { /* * disable aggregations for the queue, this will also * make the ra_tid mapping configuration irrelevant * since it is now a non-AGG queue. */ iwl_scd_txq_disable_agg(trans, txq_id); ssn = txq->read_ptr; } } else { /* * If we need to move the SCD write pointer by steps of * 0x40, 0x80 or 0xc0, it gets stuck. Avoids this and let * the op_mode know by returning true later. * Do this only in case cfg is NULL since this trick can * be done only if we have DQA enabled which is true for mvm * only. And mvm never sets a cfg pointer. * This is really ugly, but this is the easiest way out for * this sad hardware issue. 
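iwl_pcie_txq_set_ratid_map() above packs BUILD_RAxTID(sta_id, tid) into one half of a 32-bit translation-table word, odd queue ids taking the upper half and even ids the lower half. A standalone sketch of that read-modify-write, with the RA/TID mask simplified away:

#include <stdint.h>
#include <stdio.h>

/* Station index in the upper bits, TID in the low nibble. */
#define BUILD_RA_TID(sta_id, tid)	(((sta_id) << 4) + (tid))

static uint32_t set_ratid(uint32_t tbl_dw, int txq_id, uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
	return ra_tid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t dw = 0;

	dw = set_ratid(dw, 10, BUILD_RA_TID(5, 3));	/* even id: lower half */
	dw = set_ratid(dw, 11, BUILD_RA_TID(7, 0));	/* odd id: upper half */
	printf("tbl_dw=0x%08x\n", dw);			/* 0x00700053 */
	return 0;
}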
* This bug has been fixed on devices 9000 and up. */ scd_bug = !trans->trans_cfg->mq_rx_supported && !((ssn - txq->write_ptr) & 0x3f) && (ssn != txq->write_ptr); if (scd_bug) ssn++; } /* Place first TFD at index corresponding to start sequence number. * Assumes that ssn_idx is valid (!= 0xFFF) */ txq->read_ptr = (ssn & 0xff); txq->write_ptr = (ssn & 0xff); iwl_write_direct32(trans, HBUS_TARG_WRPTR, (ssn & 0xff) | (txq_id << 8)); if (cfg) { u8 frame_limit = cfg->frame_limit; iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); /* Set up Tx window size and frame limit for this queue */ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", txq_id, fifo, ssn & 0xff); } else { IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d WrPtr: %d\n", txq_id, ssn & 0xff); } return scd_bug; } void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; txq->ampdu = !shared_mode; } void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, bool configure_scd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 stts_addr = trans_pcie->scd_base_addr + SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; trans->txqs.txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device. */ if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; } if (configure_scd) { iwl_scd_txq_set_inactive(trans, txq_id); iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val, ARRAY_SIZE(zero_val)); } iwl_pcie_txq_unmap(trans, txq_id); trans->txqs.txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ /* * iwl_pcie_enqueue_hcmd - enqueue a uCode command * @priv: device private data point * @cmd: a pointer to the ucode command structure * * The function returns < 0 values to indicate the operation * failed. On success, it returns the index (>= 0) of command in the * command queue. 
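The scd_bug workaround above detects the case where the scheduler write pointer would move by an exact multiple of 0x40 and nudges ssn by one so the op_mode can compensate. A minimal sketch of just the detection (the mq_rx_supported gate from the original condition is omitted):

#include <stdint.h>
#include <stdio.h>

static int scd_would_stick(uint16_t ssn, uint16_t write_ptr)
{
	uint16_t delta = ssn - write_ptr;

	/* stuck only when the pointer actually moves, by a multiple of 0x40 */
	return delta != 0 && (delta & 0x3f) == 0;
}

int main(void)
{
	printf("%d\n", scd_would_stick(0x80, 0x00));	/* 1: moves by 0x80 */
	printf("%d\n", scd_would_stick(0x81, 0x00));	/* 0: not a multiple */
	printf("%d\n", scd_would_stick(0x40, 0x40));	/* 0: no movement */
	return 0;
}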
*/ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; dma_addr_t phys_addr; int idx; u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; unsigned long flags; if (WARN(!trans->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, "unsupported wide command %#x\n", cmd->id)) return -EINVAL; if (group_id != 0) { copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); } else { copy_size = sizeof(struct iwl_cmd_header); cmd_size = sizeof(struct iwl_cmd_header); } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { cmddata[i] = cmd->data[i]; cmdlen[i] = cmd->len[i]; if (!cmd->len[i]) continue; /* need at least IWL_FIRST_TB_SIZE copied */ if (copy_size < IWL_FIRST_TB_SIZE) { int copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; cmdlen[i] -= copy; cmddata[i] += copy; copy_size += copy; } if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { had_nocopy = true; if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { idx = -EINVAL; goto free_dup_buf; } } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { /* * This is also a chunk that isn't copied * to the static buffer so set had_nocopy. */ had_nocopy = true; /* only allowed once */ if (WARN_ON(dup_buf)) { idx = -EINVAL; goto free_dup_buf; } dup_buf = kmemdup(cmddata[i], cmdlen[i], GFP_ATOMIC); if (!dup_buf) return -ENOMEM; } else { /* NOCOPY must not be followed by normal! */ if (WARN_ON(had_nocopy)) { idx = -EINVAL; goto free_dup_buf; } copy_size += cmdlen[i]; } cmd_size += cmd->len[i]; } /* * If any of the command structures end up being larger than * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically * allocated into separate TFDs, then we will need to * increase the size of the buffers. */ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, "Command %s (%#x) is too large (%d bytes)\n", iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) { idx = -EINVAL; goto free_dup_buf; } spin_lock_irqsave(&txq->lock, flags); if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
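The sizing loop above guarantees that the first IWL_FIRST_TB_SIZE bytes of every command end up in the copied region, even when a fragment is marked NOCOPY/DUP, because TB0 always comes from the bidirectional buffer. A sketch of the prefix split, with an illustrative constant standing in for IWL_FIRST_TB_SIZE:

#include <stdio.h>

#define FIRST_TB_SIZE 20	/* size of the always-copied TB0 region (illustrative) */

/* Given how many bytes are already destined for the copied region, return
 * how much of the next fragment must still be copied before the rest can
 * be mapped directly.
 */
static unsigned int copy_prefix(unsigned int copy_size, unsigned int frag_len)
{
	unsigned int copy = 0;

	if (copy_size < FIRST_TB_SIZE) {
		copy = FIRST_TB_SIZE - copy_size;
		if (copy > frag_len)
			copy = frag_len;
	}
	return copy;
}

int main(void)
{
	unsigned int copy_size = 8;	/* e.g. the command header already copied */
	unsigned int frag_len = 100;
	unsigned int copy = copy_prefix(copy_size, frag_len);

	printf("copy %u bytes, map %u bytes directly\n", copy, frag_len - copy);
	return 0;
}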
2 : 1)) { spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); idx = -ENOSPC; goto free_dup_buf; } idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; /* set up the header */ if (group_id != 0) { out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr_wide.group_id = group_id; out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); out_cmd->hdr_wide.length = cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); copy_size = sizeof(struct iwl_cmd_header_wide); } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; cmd_pos = sizeof(struct iwl_cmd_header); copy_size = sizeof(struct iwl_cmd_header); } /* and copy the data that needs to be copied */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; if (!cmd->len[i]) continue; /* copy everything if not nocopy/dup */ if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) { copy = cmd->len[i]; memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; copy_size += copy; continue; } /* * Otherwise we need at least IWL_FIRST_TB_SIZE copied * in total (for bi-directional DMA), but copy up to what * we can fit into the payload for debug dump purposes. */ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); cmd_pos += copy; /* However, treat copy_size the proper way, we need it below */ if (copy_size < IWL_FIRST_TB_SIZE) { copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; copy_size += copy; } } IWL_DEBUG_HC(trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); iwl_pcie_txq_build_tfd(trans, txq, iwl_txq_get_first_tb_dma(txq, idx), tb0_size, true); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, ((u8 *)&out_cmd->hdr) + tb0_size, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); idx = -ENOMEM; goto out; } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size - tb0_size, false); } /* map the remaining (adjusted) nocopy/dup fragments */ for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { void *data = (void *)(uintptr_t)cmddata[i]; if (!cmdlen[i]) continue; if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | IWL_HCMD_DFL_DUP))) continue; if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) data = dup_buf; phys_addr = dma_map_single(trans->dev, data, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); idx = -ENOMEM; goto out; } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 
false); } BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kfree_sensitive(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); ret = iwl_pcie_set_cmd_in_flight(trans, cmd); if (ret < 0) { idx = ret; goto out; } /* Increment and update queue's write index */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); out: spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: if (idx < 0) kfree(dup_buf); return idx; } /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); u8 group_id; u32 cmd_id; int txq_id = SEQ_TO_QUEUE(sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index; struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ if (WARN(txq_id != trans->txqs.cmd.q_id, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; } spin_lock_bh(&txq->lock); cmd_index = iwl_txq_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; cmd_id = WIDE_ID(group_id, cmd->hdr.cmd); iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. 
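The enqueue path stamps the command header with QUEUE_TO_SEQ() | INDEX_TO_SEQ(), and iwl_pcie_hcmd_complete() above recovers both with SEQ_TO_QUEUE()/SEQ_TO_INDEX(). A sketch of a matching pack/unpack pair; the field widths here are assumptions for illustration, not copied from the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for the sketch: queue id in bits 8..12, ring index in
 * bits 0..7.
 */
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)	((s) & 0xff)

int main(void)
{
	uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(0x2a);

	printf("seq=0x%04x -> queue=%d index=%d\n",
	       seq, SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
	return 0;
}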
*/ if (meta->flags & CMD_WANT_SKB) { struct page *p = rxb_steal_page(rxb); meta->source->resp_pkt = pkt; #if defined(__linux__) meta->source->_rx_page_addr = (unsigned long)page_address(p); #elif defined(__FreeBSD__) meta->source->_page = p; #endif meta->source->_rx_page_order = trans_pcie->rx_page_order; } if (meta->flags & CMD_WANT_ASYNC_CALLBACK) iwl_op_mode_async_cb(trans->op_mode, cmd); iwl_pcie_cmdq_reclaim(trans, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { IWL_WARN(trans, "HCMD_ACTIVE already clear for command %s\n", iwl_get_cmd_string(trans, cmd_id)); } clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd_id)); wake_up(&trans->wait_command_queue); } meta->flags = 0; spin_unlock_bh(&txq->lock); } static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta) { u16 head_tb_len; int i; /* * Set up TFD's third entry to point directly to remainder * of skb's head, if any */ head_tb_len = skb_headlen(skb) - hdr_len; if (head_tb_len > 0) { dma_addr_t tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, head_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, tb_phys, head_tb_len); iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); } /* set up the remaining entries to point to the data */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; int tb_idx; if (!skb_frag_size(frag)) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), tb_phys, skb_frag_size(frag)); tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, skb_frag_size(frag), false); if (tb_idx < 0) return tb_idx; out_meta->tbs |= BIT(tb_idx); } return 0; } #ifdef CONFIG_INET static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_tx_cmd *dev_cmd, u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); iv_len = ieee80211_has_protected(hdr->frame_control) ? 
IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), trans->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; start_hdr = hdr_page->pos; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, * we will restore it for the tx_status flow. */ skb_pull(skb, hdr_len + iv_len); /* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU. */ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); tso_start(skb, &tso); while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; u8 *subf_hdrs_start = hdr_page->pos; total_len -= data_left; memset(hdr_page->pos, 0, amsdu_pad); hdr_page->pos += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); hdr_page->pos += ETH_ALEN; ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); hdr_page->pos += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; *((__be16 *)hdr_page->pos) = cpu_to_be16(length); hdr_page->pos += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. 
*/ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); hdr_page->pos += snap_ip_tcp_hdrlen; hdr_tb_len = hdr_page->pos - start_hdr; hdr_tb_phys = dma_map_single(trans->dev, start_hdr, hdr_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, hdr_tb_phys, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; /* put the payload */ while (data_left) { unsigned int size = min_t(unsigned int, tso.size, data_left); dma_addr_t tb_phys; tb_phys = dma_map_single(trans->dev, tso.data, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, tb_phys, size); data_left -= size; tso_build_data(skb, &tso, size); } } /* re -add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); return 0; } #else /* CONFIG_INET */ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_tx_cmd *dev_cmd, u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); return -1; } #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; dma_addr_t tb0_phys, tb1_phys, scratch_phys; void *tb1_addr; void *tfd; u16 len, tb1_len; bool wait_write_ptr; __le16 fc; u8 hdr_len; u16 wifi_seq; bool amsdu; txq = trans->txqs.txq[txq_id]; if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) return -ENOMEM; /* mac80211 always puts the full header into the SKB's head, * so there's no need to check if it's readable there */ hdr = (struct ieee80211_hdr *)skb->data; fc = hdr->frame_control; hdr_len = ieee80211_hdrlen(fc); spin_lock(&txq->lock); if (iwl_txq_space(trans, txq) < txq->high_mark) { iwl_txq_stop(trans, txq); /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_txq_space(trans, txq) < 3)) { struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); return 0; } } /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirements to help the SCD to parse * the BA. * Check here that the packets are in the right place on the ring. 
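The A-MSDU builder above pads each subframe so the next one starts on a 4-byte boundary, computing the pad from the ethernet-style header, the SNAP/IP/TCP headers and the payload left for the subframe. A standalone rendering of that computation with made-up lengths:

#include <stdio.h>

static unsigned int amsdu_pad(unsigned int eth_hdr_len,
			      unsigned int snap_ip_tcp_len,
			      unsigned int data_left)
{
	/* 0..3 bytes so the subframe total becomes a multiple of 4 */
	return (4 - (eth_hdr_len + snap_ip_tcp_len + data_left)) & 0x3;
}

int main(void)
{
	printf("pad=%u\n", amsdu_pad(14, 48, 1337));	/* 1399 bytes -> pad 1 */
	printf("pad=%u\n", amsdu_pad(14, 48, 1338));	/* 1400 bytes -> pad 0 */
	return 0;
}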
*/ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); WARN_ONCE(txq->ampdu && (wifi_seq & 0xff) != txq->write_ptr, "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, txq->write_ptr); /* Set up driver data for this TFD */ txq->entries[txq->write_ptr].skb = skb; txq->entries[txq->write_ptr].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(txq->write_ptr))); tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[txq->write_ptr].meta; out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ amsdu = ieee80211_is_data_qos(fc) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); if (!amsdu) { tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); } else { tb1_len = len; } /* * The first TB points to bi-directional DMA data, we'll * memcpy the data into it later. */ iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, IWL_FIRST_TB_SIZE, true); /* there must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd, scratch) > + IWL_FIRST_TB_SIZE); /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), trans->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* * If gso_size wasn't set, don't give the frame "amsdu treatment" * (adding subframes, etc.). * This can happen in some testing flows when the amsdu was already * pre-built, and we just need to send the resulting skb. 
*/ if (amsdu && skb_shinfo(skb)->gso_size) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) goto out_err; } else { struct sk_buff *frag; if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, out_meta))) goto out_err; skb_walk_frags(skb, frag) { if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, out_meta))) goto out_err; } } /* building the A-MSDU might have changed this data, so memcpy it now */ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), iwl_txq_gen1_tfd_get_num_tbs(trans, tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { /* * If the TXQ is active, then set the timer, if not, * set the timer in remainder so that the timer will * be armed with the right value when the station will * wake up. */ if (!txq->frozen) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); else txq->frozen_expiry_remainder = txq->wd_timeout; } /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. */ spin_unlock(&txq->lock); return 0; out_err: iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); spin_unlock(&txq->lock); return -1; } diff --git a/sys/contrib/dev/iwlwifi/queue/tx.c b/sys/contrib/dev/iwlwifi/queue/tx.c index eb290a4fd06a..9074c69c9f98 100644 --- a/sys/contrib/dev/iwlwifi/queue/tx.c +++ b/sys/contrib/dev/iwlwifi/queue/tx.c @@ -1,1889 +1,1903 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation */ #ifdef CONFIG_INET #include #endif #include #include "iwl-debug.h" #include "iwl-io.h" #include "fw/api/commands.h" #include "fw/api/tx.h" #include "fw/api/datapath.h" #include "queue/tx.h" #include "iwl-fh.h" #include "iwl-scd.h" #include #if defined(__FreeBSD__) #include #endif /* * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array */ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + num_tbs * sizeof(struct iwl_tfh_tb); /* * filled_tfd_size contains the number of filled bytes in the TFD. * Dividing it by 64 will give the number of chunks to fetch * to SRAM- 0 for one chunk, 1 for 2 and so on. 
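In the non-A-MSDU path a little earlier, tb1_len is the dword-aligned length and TX_CMD_FLG_MH_PAD tells the firmware that padding was inserted after the MAC header. A tiny sketch of that align-and-flag step, with a placeholder flag value:

#include <stdio.h>

#define ALIGN4(x)	(((x) + 3) & ~3u)
#define FLG_MH_PAD	0x1		/* placeholder for the real flag bit */

int main(void)
{
	unsigned int len = 26 + 36;	/* e.g. MAC header + TX command remainder */
	unsigned int tb1_len = ALIGN4(len);
	unsigned int flags = 0;

	if (tb1_len != len)
		flags |= FLG_MH_PAD;	/* padding was added after the header */
	printf("len=%u tb1_len=%u flags=0x%x\n", len, tb1_len, flags);
	return 0;
}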
* If, for example, TFD contains only 3 TBs then 32 bytes * of the TFD are used, and only one chunk of 64 bytes should * be fetched */ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; /* Starting from AX210, the HW expects bytes */ WARN_ON(trans->txqs.bc_table_dword); WARN_ON(len > 0x3FFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; } else { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; /* Before AX210, the HW expects DW */ WARN_ON(!trans->txqs.bc_table_dword); len = DIV_ROUND_UP(len, 4); WARN_ON(len > 0xFFF); bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); scd_bc_tbl->tfd_offset[idx] = bc_ent; } } /* * iwl_txq_inc_wr_ptr - Send new write index to hardware */ void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); /* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx). */ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); } static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd) { return le16_to_cpu(tfd->num_tbs) & 0x1f; } void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd) { int i, num_tbs; /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); if (num_tbs > trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); else dma_unmap_single(trans->dev, le64_to_cpu(tfd->tbs[i].addr), le16_to_cpu(tfd->tbs[i].tb_len), DMA_TO_DEVICE); } tfd->num_tbs = 0; } void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb; lockdep_assert_held(&txq->lock); if (!txq->entries) return; iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_txq_get_tfd(trans, txq, idx)); skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, dma_addr_t addr, u16 len) { int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; /* * Only WARN here so we know about the issue, but we mess up our * unmap path because not every place currently checks for errors * returned from this function - it can only return an error if * there's no more space, and so when we know there is enough we * don't always check ... 
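iwl_pcie_gen2_update_byte_tbl() above encodes each byte-count entry differently per generation: AX210 and later take the length in bytes with the fetch-chunk count in the top two bits, earlier devices take the length in dwords with the chunk count shifted by 12, and the chunk count itself is DIV_ROUND_UP(filled_tfd_size, 64) - 1. A compact sketch of both encodings with example numbers:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint16_t bc_entry(unsigned int len, unsigned int filled_tfd_size,
			 int ax210_or_later)
{
	/* 0 means one 64-byte chunk to fetch, 1 means two, and so on */
	unsigned int chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (ax210_or_later)
		return (uint16_t)(len | (chunks << 14));	/* bytes */
	return (uint16_t)(DIV_ROUND_UP(len, 4) | (chunks << 12));	/* dwords */
}

int main(void)
{
	/* e.g. 1536-byte frame, ~56 bytes of filled TFD -> one chunk */
	printf("gen3: 0x%04x\n", bc_entry(1536, 56, 1));
	printf("agn:  0x%04x\n", bc_entry(1536, 56, 0));
	return 0;
}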
*/ WARN(iwl_txq_crosses_4g_boundary(addr, len), "possible DMA problem with iova:0x%llx, len:%d\n", (unsigned long long)addr, len); if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; /* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", trans->txqs.tfd.max_tbs); return -EINVAL; } put_unaligned_le64(addr, &tb->addr); tb->tb_len = cpu_to_le16(len); tfd->num_tbs = cpu_to_le16(idx + 1); return idx; } static struct page *get_workaround_page(struct iwl_trans *trans, struct sk_buff *skb) { struct page **page_ptr; struct page *ret; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); ret = alloc_page(GFP_ATOMIC); if (!ret) return NULL; /* set the chaining pointer to the previous page if there */ *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; *page_ptr = ret; return ret; } /* * Add a TB and if needed apply the FH HW bug workaround; * meta != NULL indicates that it's a page mapping and we * need to dma_unmap_page() and set the meta->tbs bit in * this case. */ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, dma_addr_t phys, void *virt, u16 len, struct iwl_cmd_meta *meta) { dma_addr_t oldphys = phys; struct page *page; int ret; if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM; if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) goto unmap; if (meta) meta->tbs |= BIT(ret); ret = 0; goto trace; } /* * Work around a hardware bug. If (as expressed in the * condition above) the TB ends on a 32-bit boundary, * then the next TB may be accessed with the wrong * address. * To work around it, copy the data elsewhere and make * a new mapping for it so the device will not fail. */ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { ret = -ENOBUFS; goto unmap; } page = get_workaround_page(trans, skb); if (!page) { ret = -ENOMEM; goto unmap; } memcpy(page_address(page), virt, len); phys = dma_map_single(trans->dev, page_address(page), len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM; ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) { /* unmap the new allocation as single */ oldphys = phys; meta = NULL; goto unmap; } IWL_WARN(trans, "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", len, (unsigned long long)oldphys, (unsigned long long)phys); ret = 0; unmap: if (meta) dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); else dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); trace: trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); return ret; } #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct sk_buff *skb) { struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); struct page **page_ptr; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); if (WARN_ON(*page_ptr)) return NULL; if (!p->page) goto alloc; /* * Check if there's enough room on this page * * Note that we put a page chaining pointer *last* in the * page - we need it somewhere, and if it's there then we * avoid DMA mapping the last bits of the page which may * trigger the 32-bit boundary hardware bug. 
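iwl_txq_gen2_set_tb_with_wa() above copies any buffer that would straddle a 2^32 boundary into a freshly allocated page and maps that copy instead. One plausible form of the boundary test behind iwl_txq_crosses_4g_boundary() (the real helper lives in the driver's queue header; this body is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint64_t addr, uint32_t len)
{
	/* the upper 32 address bits change somewhere inside the buffer */
	return (addr >> 32) != ((addr + len) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g(0x0FFFFFF00ULL, 0x80));	/* 0: stays below 4G */
	printf("%d\n", crosses_4g(0x0FFFFFFF0ULL, 0x80));	/* 1: crosses into bit 32 */
	return 0;
}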
* * (see also get_workaround_page() in tx-gen2.c) */ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) goto out; /* We don't have enough room on this page, get a new one. */ __free_page(p->page); alloc: p->page = alloc_page(GFP_ATOMIC); if (!p->page) return NULL; p->pos = page_address(p->page); /* set the chaining pointer to NULL */ *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; out: *page_ptr = p->page; get_page(p->page); return p; } #endif static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, u8 hdr_len, struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, start_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; amsdu_pad = 0; /* total amount of header we may need for this A-MSDU */ hdr_room = DIV_ROUND_UP(total_len, mss) * (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; start_hdr = hdr_page->pos; /* * Pull the ieee80211 header to be able to use TSO core, * we will restore it for the tx_status flow. */ skb_pull(skb, hdr_len); /* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU. */ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); tso_start(skb, &tso); while (total_len) { /* this is the data left for this subframe */ unsigned int data_left = min_t(unsigned int, mss, total_len); unsigned int tb_len; dma_addr_t tb_phys; u8 *subf_hdrs_start = hdr_page->pos; total_len -= data_left; memset(hdr_page->pos, 0, amsdu_pad); hdr_page->pos += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); hdr_page->pos += ETH_ALEN; ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); hdr_page->pos += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; *((__be16 *)hdr_page->pos) = cpu_to_be16(length); hdr_page->pos += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. */ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); hdr_page->pos += snap_ip_tcp_hdrlen; tb_len = hdr_page->pos - start_hdr; tb_phys = dma_map_single(trans->dev, start_hdr, tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit * the buggy scenario. 
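get_page_hdr() and get_workaround_page() both keep a chaining pointer in the last sizeof(void *) bytes of each page, so every page allocated for an skb can later be freed by walking the chain, and so that tail region is never DMA-mapped. A user-space sketch of the same idea with malloc() standing in for alloc_page():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

/* Allocate a "page", store the previous chain head in its last
 * pointer-sized bytes, and make it the new head.
 */
static void *page_chain_alloc(void **head)
{
	char *page = malloc(PAGE_SZ);

	if (!page)
		return NULL;
	memcpy(page + PAGE_SZ - sizeof(void *), head, sizeof(void *));
	*head = page;
	return page;		/* usable bytes: PAGE_SZ - sizeof(void *) */
}

static void page_chain_free(void *head)
{
	while (head) {
		void *prev;

		memcpy(&prev, (char *)head + PAGE_SZ - sizeof(void *),
		       sizeof(prev));
		free(head);
		head = prev;
	}
}

int main(void)
{
	void *head = NULL;

	page_chain_alloc(&head);
	page_chain_alloc(&head);
	page_chain_free(head);
	puts("chain freed");
	return 0;
}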
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ start_hdr = hdr_page->pos; /* put the payload */ while (data_left) { int ret; tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, tso.data, tb_len, NULL); if (ret) goto out_err; data_left -= tb_len; tso_build_data(skb, &tso, tb_len); } } /* re -add the WiFi header */ skb_push(skb, hdr_len); return 0; out_err: #endif return -EINVAL; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len; void *tb1_addr; tb_phys = iwl_txq_get_first_tb_dma(txq, idx); /* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary). */ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary. 
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd; out_err: iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, struct iwl_cmd_meta *out_meta) { int i; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; unsigned int fragsz = skb_frag_size(frag); int ret; if (!fragsz) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, fragsz, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb_frag_address(frag), fragsz, out_meta); if (ret) return ret; } return 0; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len, bool pad) { int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); dma_addr_t tb_phys; int len, tb1_len, tb2_len; void *tb1_addr; struct sk_buff *frag; tb_phys = iwl_txq_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); /* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary). */ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - IWL_FIRST_TB_SIZE; if (pad) tb1_len = ALIGN(len, 4); else tb1_len = len; /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary. 
*/ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { int ret; tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, skb->data + hdr_len, tb2_len, NULL); if (ret) goto out_err; } if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { int ret; tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, frag->data, skb_headlen(frag), NULL); if (ret) goto out_err; if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } return tfd; out_err: iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } static struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); int len, hdr_len; bool amsdu; /* There must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen2, dram_info) > + IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen3, dram_info) > + IWL_FIRST_TB_SIZE); memset(tfd, 0, sizeof(*tfd)); if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) len = sizeof(struct iwl_tx_cmd_gen2); else len = sizeof(struct iwl_tx_cmd_gen3); amsdu = ieee80211_is_data_qos(hdr->frame_control) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); hdr_len = ieee80211_hdrlen(hdr->frame_control); /* * Only build A-MSDUs here if doing so by GSO, otherwise it may be * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been * built in the higher layers already. */ if (amsdu && skb_shinfo(skb)->gso_size) return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, out_meta, hdr_len, len); return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, hdr_len, len, !amsdu); } int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) { unsigned int max; unsigned int used; /* * To avoid ambiguity between empty and completely full queues, there * should always be less than max_tfd_queue_size elements in the queue. * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) max = q->n_window; else max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; /* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined. 
*/ used = (q->write_ptr - q->read_ptr) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; return max - used; } int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans->txqs.txq[txq_id]; u16 cmd_len; int idx; void *tfd; if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", txq_id)) return -EINVAL; if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) return -ENOMEM; spin_lock(&txq->lock); if (iwl_txq_space(trans, txq) < txq->high_mark) { iwl_txq_stop(trans, txq); /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_txq_space(trans, txq) < 3)) { struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); return 0; } } idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); /* Set up driver data for this TFD */ txq->entries[idx].skb = skb; txq->entries[idx].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[idx].meta; out_meta->flags = 0; tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); if (!tfd) { spin_unlock(&txq->lock); return -1; } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen3->len); } else { struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = (void *)dev_cmd->payload; cmd_len = le16_to_cpu(tx_cmd_gen2->len); } /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, iwl_txq_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. 
*/ spin_unlock(&txq->lock); return 0; } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ /* * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's */ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); if (txq_id != trans->txqs.cmd.q_id) { int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; if (!WARN_ON_ONCE(!skb)) iwl_txq_free_tso_page(trans, skb); } iwl_txq_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); } while (!skb_queue_empty(&txq->overflow_q)) { struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); iwl_op_mode_free_skb(trans->op_mode, skb); } spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ iwl_wake_queue(trans, txq); } static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, struct iwl_txq *txq) { struct device *dev = trans->dev; /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, trans->txqs.tfd.size * txq->n_window, txq->tfds, txq->dma_addr); dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); if (txq->bc_tbl.addr) dma_pool_free(trans->txqs.bc_pool, txq->bc_tbl.addr, txq->bc_tbl.dma); kfree(txq); } /* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) { struct iwl_txq *txq; int i; if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", txq_id)) return; txq = trans->txqs.txq[txq_id]; if (WARN_ON(!txq)) return; iwl_txq_gen2_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); } del_timer_sync(&txq->stuck_timer); iwl_txq_gen2_free_memory(trans, txq); trans->txqs.txq[txq_id] = NULL; clear_bit(txq_id, trans->txqs.queue_used); } /* * iwl_queue_init - Initialize queue's high/low-water and read/write indexes */ static int iwl_queue_init(struct iwl_txq *q, int slots_num) { q->n_window = slots_num; /* slots_num must be power-of-two size, otherwise * iwl_txq_get_cmd_index is broken. */ if (WARN_ON(!is_power_of_2(slots_num))) return -EINVAL; q->low_mark = q->n_window / 4; if (q->low_mark < 4) q->low_mark = 4; q->high_mark = q->n_window / 8; if (q->high_mark < 2) q->high_mark = 2; q->write_ptr = 0; q->read_ptr = 0; return 0; } int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; u32 tfd_queue_max_size = trans->trans_cfg->base_params->max_tfd_queue_size; txq->need_update = false; /* max_tfd_queue_size must be power-of-two size, otherwise * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
*/ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), "Max tfd queue size must be a power of two, but is %d", tfd_queue_max_size)) return -EINVAL; /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(txq, slots_num); if (ret) return ret; spin_lock_init(&txq->lock); #ifdef CONFIG_LOCKDEP if (cmd_queue) { static struct lock_class_key iwl_txq_cmd_queue_lock_class; lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); } #endif __skb_queue_head_init(&txq->overflow_q); return 0; } void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) { struct page **page_ptr; struct page *next; page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); next = *page_ptr; *page_ptr = NULL; while (next) { struct page *tmp = next; next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - sizeof(void *)); __free_page(tmp); } } void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) { u32 txq_id = txq->id; u32 status; bool active; u8 fifo; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); #if defined(__FreeBSD__) /* * Dump some more queue and timer information to rule * out a LinuxKPI issues and gather some extra data. */ IWL_ERR(trans, " need_update %d frozen %d ampdu %d " "now %ju stuck_timer.expires %ju " "frozen_expiry_remainder %ju wd_timeout %ju\n", txq->need_update, txq->frozen, txq->ampdu, (uintmax_t)jiffies, (uintmax_t)txq->stuck_timer.expires, (uintmax_t)txq->frozen_expiry_remainder, (uintmax_t)txq->wd_timeout); #endif /* TODO: access new SCD registers and dump them */ return; } status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); IWL_ERR(trans, "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", txq_id, active ? 
"" : "in", fifo, jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & (trans->trans_cfg->base_params->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } static void iwl_txq_stuck_timer(struct timer_list *t) { struct iwl_txq *txq = from_timer(txq, t, stuck_timer); struct iwl_trans *trans = txq->trans; spin_lock(&txq->lock); /* check if triggered erroneously */ if (txq->read_ptr == txq->write_ptr) { spin_unlock(&txq->lock); return; } spin_unlock(&txq->lock); iwl_txq_log_scd_error(trans, txq); iwl_force_nmi(trans); } int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { size_t tfd_sz = trans->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; + if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) + return -EINVAL; + if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) tfd_sz = trans->txqs.tfd.size * slots_num; timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); txq->trans = trans; txq->n_window = slots_num; txq->entries = kcalloc(slots_num, sizeof(struct iwl_pcie_txq_entry), GFP_KERNEL); if (!txq->entries) goto error; if (cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), GFP_KERNEL); if (!txq->entries[i].cmd) goto error; } /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, &txq->dma_addr, GFP_KERNEL); if (!txq->tfds) goto error; BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, &txq->first_tb_dma, GFP_KERNEL); if (!txq->first_tb_bufs) goto err_free_tfds; return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); txq->tfds = NULL; error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) kfree(txq->entries[i].cmd); kfree(txq->entries); txq->entries = NULL; return -ENOMEM; } static struct iwl_txq * iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) { size_t bc_tbl_size, bc_tbl_entries; struct iwl_txq *txq; int ret; WARN_ON(!trans->txqs.bc_tbl_size); bc_tbl_size = trans->txqs.bc_tbl_size; bc_tbl_entries = bc_tbl_size / sizeof(u16); if (WARN_ON(size > bc_tbl_entries)) return ERR_PTR(-EINVAL); txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) return ERR_PTR(-ENOMEM); txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, &txq->bc_tbl.dma); if (!txq->bc_tbl.addr) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); kfree(txq); return ERR_PTR(-ENOMEM); } ret = iwl_txq_alloc(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue alloc failed\n"); goto error; } ret = iwl_txq_init(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue init failed\n"); goto error; } txq->wd_timeout = msecs_to_jiffies(timeout); return txq; error: iwl_txq_gen2_free_memory(trans, txq); return ERR_PTR(ret); } static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_host_cmd *hcmd) { struct iwl_tx_queue_cfg_rsp *rsp; int ret, qid; u32 wr_ptr; if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != sizeof(*rsp))) { ret = -EINVAL; goto error_free_resp; } rsp = (void 
*)hcmd->resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); wr_ptr = le16_to_cpu(rsp->write_pointer); if (qid >= ARRAY_SIZE(trans->txqs.txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); ret = -EIO; goto error_free_resp; } if (test_and_set_bit(qid, trans->txqs.queue_used)) { WARN_ONCE(1, "queue %d already used", qid); ret = -EIO; goto error_free_resp; } if (WARN_ONCE(trans->txqs.txq[qid], "queue %d already allocated\n", qid)) { ret = -EIO; goto error_free_resp; } txq->id = qid; trans->txqs.txq[qid] = txq; wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; txq->write_ptr = wr_ptr; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); iwl_free_resp(hcmd); return qid; error_free_resp: iwl_free_resp(hcmd); iwl_txq_gen2_free_memory(trans, txq); return ret; } int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, u8 tid, int size, unsigned int timeout) { struct iwl_txq *txq; union { struct iwl_tx_queue_cfg_cmd old; struct iwl_scd_queue_cfg_cmd new; } cmd; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB, }; int ret; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && trans->hw_rev_step == SILICON_A_STEP) size = 4096; txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); if (IS_ERR(txq)) return PTR_ERR(txq); if (trans->txqs.queue_alloc_cmd_ver == 0) { memset(&cmd.old, 0, sizeof(cmd.old)); cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); cmd.old.tid = tid; if (hweight32(sta_mask) != 1) { ret = -EINVAL; goto error; } cmd.old.sta_id = ffs(sta_mask) - 1; hcmd.id = SCD_QUEUE_CFG; hcmd.len[0] = sizeof(cmd.old); hcmd.data[0] = &cmd.old; } else if (trans->txqs.queue_alloc_cmd_ver == 3) { memset(&cmd.new, 0, sizeof(cmd.new)); cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); cmd.new.u.add.flags = cpu_to_le32(flags); cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); cmd.new.u.add.tid = tid; hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); hcmd.len[0] = sizeof(cmd.new); hcmd.data[0] = &cmd.new; } else { ret = -EOPNOTSUPP; goto error; } ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) goto error; return iwl_txq_alloc_response(trans, txq, &hcmd); error: iwl_txq_gen2_free_memory(trans, txq); return ret; } void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) { if (WARN(queue >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", queue)) return; /* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device. 
*/ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", queue); return; } iwl_txq_gen2_free(trans, queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } void iwl_txq_gen2_tx_free(struct iwl_trans *trans) { int i; memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Free all TX queues */ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { if (!trans->txqs.txq[i]) continue; iwl_txq_gen2_free(trans, i); } } int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) { struct iwl_txq *queue; int ret; /* alloc and init the tx queue */ if (!trans->txqs.txq[txq_id]) { queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) { IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM; } trans->txqs.txq[txq_id] = queue; ret = iwl_txq_alloc(trans, queue, queue_size, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } else { queue = trans->txqs.txq[txq_id]; } ret = iwl_txq_init(trans, queue, queue_size, (txq_id == trans->txqs.cmd.q_id)); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } trans->txqs.txq[txq_id]->id = txq_id; set_bit(txq_id, trans->txqs.queue_used); return 0; error: iwl_txq_gen2_tx_free(trans); return ret; } static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, void *_tfd, u8 idx) { struct iwl_tfd *tfd; struct iwl_tfd_tb *tb; dma_addr_t addr; dma_addr_t hi_len; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfh_tfd = _tfd; struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; return (dma_addr_t)(le64_to_cpu(tfh_tb->addr)); } tfd = _tfd; tb = &tfd->tbs[idx]; addr = get_unaligned_le32(&tb->lo); if (sizeof(dma_addr_t) <= sizeof(u32)) return addr; hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; /* * shift by 16 twice to avoid warnings on 32-bit * (where this code never runs anyway due to the * if statement above) */ return addr | ((hi_len << 16) << 16); } void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index) { int i, num_tbs; void *tfd = iwl_txq_get_tfd(trans, txq, index); /* Sanity check on number of chunks */ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); if (num_tbs > trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); /* @todo issue fatal error, it is quite serious situation */ return; } /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i)) dma_unmap_page(trans->dev, iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i), iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); else dma_unmap_single(trans->dev, iwl_txq_gen1_tfd_tb_get_addr(trans, tfd, i), iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i), DMA_TO_DEVICE); } meta->tbs = 0; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; } else { struct iwl_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; } } #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 /* * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array */ void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl; int write_ptr = txq->write_ptr; int txq_id = txq->id; u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd 
= txq->entries[txq->write_ptr].cmd; struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; sec_ctl = tx_cmd->sec_ctl; switch (sec_ctl & TX_CMD_SEC_MSK) { case TX_CMD_SEC_CCM: len += IEEE80211_CCMP_MIC_LEN; break; case TX_CMD_SEC_TKIP: len += IEEE80211_TKIP_ICV_LEN; break; case TX_CMD_SEC_WEP: len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; break; } if (trans->txqs.bc_table_dword) len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) return; bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; } void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; int txq_id = txq->id; int read_ptr = txq->read_ptr; u8 sta_id = 0; __le16 bc_ent; struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); if (txq_id != trans->txqs.cmd.q_id) sta_id = tx_cmd->sta_id; bc_ent = cpu_to_le16(1 | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } /* * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans - transport private data * @txq - tx queue * @dma_dir - the direction of the DMA mapping * * Does NOT advance any TFD circular buffer read/write indexes * Does NOT free the TFD itself (which is within circular buffer) */ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) { /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; int idx = iwl_txq_get_cmd_index(txq, rd_ptr); struct sk_buff *skb; lockdep_assert_held(&txq->lock); if (!txq->entries) return; /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ skb = txq->entries[idx].skb; /* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb */ if (skb) { iwl_op_mode_free_skb(trans->op_mode, skb); txq->entries[idx].skb = NULL; } } void iwl_txq_progress(struct iwl_txq *txq) { lockdep_assert_held(&txq->lock); if (!txq->wd_timeout) return; /* * station is asleep and we send data - that must * be uAPSD or PS-Poll. Don't rearm the timer. 
*/ if (txq->frozen) return; /* * if empty delete timer, otherwise move timer forward * since we're making progress on this queue */ if (txq->read_ptr == txq->write_ptr) del_timer(&txq->stuck_timer); else mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); } /* Frees buffers until index _not_ inclusive */ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num = iwl_txq_get_cmd_index(txq, ssn); - int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); - int last_to_free; + int tfd_num, read_ptr, last_to_free; /* This function is not meant to release cmd queue*/ if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) return; + if (WARN_ON(!txq)) + return; + + tfd_num = iwl_txq_get_cmd_index(txq, ssn); + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); + spin_lock_bh(&txq->lock); if (!test_bit(txq_id, trans->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; } if (read_ptr == tfd_num) goto out; IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", txq_id, txq->read_ptr, tfd_num, ssn); /*Since we free until index _not_ inclusive, the one before index is * the last we will free. This one must be used */ last_to_free = iwl_txq_dec_wrap(trans, tfd_num); if (!iwl_txq_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); iwl_op_mode_time_point(trans->op_mode, IWL_FW_INI_TIME_POINT_FAKE_TX, NULL); goto out; } if (WARN_ON(!skb_queue_empty(skbs))) goto out; for (; read_ptr != tfd_num; txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; iwl_txq_free_tso_page(trans, skb); __skb_queue_tail(skbs, skb); txq->entries[read_ptr].skb = NULL; - if (!trans->trans_cfg->use_tfh) + if (!trans->trans_cfg->gen2) iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); iwl_txq_free_tfd(trans, txq); } iwl_txq_progress(txq); if (iwl_txq_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; __skb_queue_head_init(&overflow_skbs); skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); /* * We are going to transmit from the overflow queue. * Remember this state so that wait_for_txq_empty will know we * are adding more packets to the TFD queue. It cannot rely on * the state of &txq->overflow_q, as we just emptied it, but * haven't TXed the content yet. */ txq->overflow_tx = true; /* * This is tricky: we are in reclaim path which is non * re-entrant, so noone will try to take the access the * txq data from that path. We stopped tx, so we can't * have tx as well. Bottom line, we can unlock and re-lock * later. */ spin_unlock_bh(&txq->lock); while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans->txqs.dev_cmd_offs); /* * Note that we can very well be overflowing again. * In that case, iwl_txq_space will be small again * and we won't wake mac80211's queue. 
*/ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } if (iwl_txq_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); spin_lock_bh(&txq->lock); txq->overflow_tx = false; } out: spin_unlock_bh(&txq->lock); } /* Set wr_ptr of specific device and txq */ void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; spin_lock_bh(&txq->lock); txq->write_ptr = ptr; txq->read_ptr = txq->write_ptr; spin_unlock_bh(&txq->lock); } void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze) { int queue; for_each_set_bit(queue, &txqs, BITS_PER_LONG) { struct iwl_txq *txq = trans->txqs.txq[queue]; unsigned long now; spin_lock_bh(&txq->lock); now = jiffies; if (txq->frozen == freeze) goto next_queue; IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", freeze ? "Freezing" : "Waking", queue); txq->frozen = freeze; if (txq->read_ptr == txq->write_ptr) goto next_queue; if (freeze) { if (unlikely(time_after(now, txq->stuck_timer.expires))) { /* * The timer should have fired, maybe it is * spinning right now on the lock. */ goto next_queue; } /* remember how long until the timer fires */ txq->frozen_expiry_remainder = txq->stuck_timer.expires - now; del_timer(&txq->stuck_timer); goto next_queue; } /* * Wake a non-empty queue -> arm timer with the * remainder before it froze */ mod_timer(&txq->stuck_timer, now + txq->frozen_expiry_remainder); next_queue: spin_unlock_bh(&txq->lock); } } #define HOST_COMPLETE_TIMEOUT (2 * HZ) static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", cmd_str)) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); cmd_idx = trans->ops->send_cmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", cmd_str, ret); return ret; } ret = wait_event_timeout(trans->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { IWL_ERR(trans, "Error sending %s: time out after %dms.\n", cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", txq->read_ptr, txq->write_ptr); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", cmd_str); ret = -ETIMEDOUT; iwl_trans_sync_nmi(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); dump_stack(); } ret = -EIO; goto cancel; } if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); ret = -ERFKILL; goto cancel; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). 
*/ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; } int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { /* Make sure the NIC is still alive in the bus */ if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return -ENODEV; if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", cmd->id); return -ERFKILL; } if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && !(cmd->flags & CMD_SEND_IN_D3))) { IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); return -EHOSTDOWN; } if (cmd->flags & CMD_ASYNC) { int ret; /* An asynchronous command can not expect an SKB to be set. */ if (WARN_ON(cmd->flags & CMD_WANT_SKB)) return -EINVAL; ret = trans->ops->send_cmd(trans, cmd); if (ret < 0) { IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", iwl_get_cmd_string(trans, cmd->id), ret); return ret; } return 0; } return iwl_trans_txq_send_hcmd_sync(trans, cmd); } diff --git a/sys/contrib/dev/iwlwifi/queue/tx.h b/sys/contrib/dev/iwlwifi/queue/tx.h index eca53bfd326d..1e4a24ab9bab 100644 --- a/sys/contrib/dev/iwlwifi/queue/tx.h +++ b/sys/contrib/dev/iwlwifi/queue/tx.h @@ -1,183 +1,183 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation */ #ifndef __iwl_trans_queue_tx_h__ #define __iwl_trans_queue_tx_h__ #include "iwl-fh.h" #include "fw/api/tx.h" struct iwl_tso_hdr_page { struct page *page; u8 *pos; }; static inline dma_addr_t iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) { return txq->first_tb_dma + sizeof(struct iwl_pcie_first_tb_buf) * idx; } static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id); static inline void iwl_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq) { if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); iwl_op_mode_queue_not_full(trans->op_mode, txq->id); } } static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, struct iwl_txq *txq, int idx) { - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) idx = iwl_txq_get_cmd_index(txq, idx); return (u8 *)txq->tfds + trans->txqs.tfd.size * idx; } int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); /* * We need this inline in case dma_addr_t is only 32-bits - since the * hardware is always 64-bit, the issue can still occur in that case, * so use u64 for 'phys' here to force the addition in 64-bit. 
*/ static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) { return upper_32_bits(phys) != upper_32_bits(phys + len); } int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) { if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { iwl_op_mode_queue_full(trans->op_mode, txq->id); IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); } else { IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", txq->id); } } /** * iwl_txq_inc_wrap - increment queue index, wrap back to beginning * @index -- current index */ static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) { return ++index & (trans->trans_cfg->base_params->max_tfd_queue_size - 1); } /** * iwl_txq_dec_wrap - decrement queue index, wrap back to end * @index -- current index */ static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) { return --index & (trans->trans_cfg->base_params->max_tfd_queue_size - 1); } static inline bool iwl_txq_used(const struct iwl_txq *q, int i) { int index = iwl_txq_get_cmd_index(q, i); int r = iwl_txq_get_cmd_index(q, q->read_ptr); int w = iwl_txq_get_cmd_index(q, q->write_ptr); return w >= r ? (index >= r && index < w) : !(index < r && index >= w); } void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb); void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, dma_addr_t addr, u16 len); void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd); int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, u8 tid, int size, unsigned int timeout); int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_txq_gen2_tx_free(struct iwl_trans *trans); int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue); int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size); #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, struct sk_buff *skb); #endif static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) { struct iwl_tfd *tfd; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfh_tfd = _tfd; return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f; } tfd = (struct iwl_tfd *)_tfd; return tfd->num_tbs & 0x1f; } static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) { struct iwl_tfd *tfd; struct iwl_tfd_tb *tb; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfh_tfd = _tfd; struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; return le16_to_cpu(tfh_tb->tb_len); } tfd = (struct iwl_tfd *)_tfd; tb = &tfd->tbs[idx]; return le16_to_cpu(tb->hi_n_len) >> 4; } void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_txq *txq, int index); void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs); void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head 
*skbs);
void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, bool freeze);
void iwl_txq_progress(struct iwl_txq *txq);
void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_queue_tx_h__ */
diff --git a/sys/modules/iwlwifi/Makefile b/sys/modules/iwlwifi/Makefile
index 9eb6f0e7c267..679490fdf774 100644
--- a/sys/modules/iwlwifi/Makefile
+++ b/sys/modules/iwlwifi/Makefile
@@ -1,65 +1,69 @@
DEVIWLWIFIDIR= ${SRCTOP}/sys/contrib/dev/iwlwifi

.PATH: ${DEVIWLWIFIDIR}

WITH_DEBUGFS= 0

KMOD= if_iwlwifi

SRCS= iwl-drv.c
SRCS+= iwl-dbg-tlv.c iwl-debug.c
SRCS+= iwl-eeprom-parse.c iwl-eeprom-read.c
SRCS+= iwl-io.c iwl-nvm-parse.c iwl-phy-db.c iwl-trans.c
SRCS+= cfg/7000.c cfg/8000.c cfg/9000.c cfg/22000.c
+SRCS+= cfg/ax210.c cfg/bz.c cfg/sc.c
SRCS+= fw/dbg.c fw/dump.c fw/img.c fw/notif-wait.c
SRCS+= fw/paging.c fw/pnvm.c fw/rs.c fw/smem.c fw/init.c
#SRCS+= fw/uefi.c
SRCS+= mvm/rs.c mvm/binding.c mvm/coex.c mvm/ftm-initiator.c
SRCS+= mvm/ftm-responder.c mvm/fw.c mvm/mac-ctxt.c
SRCS+= mvm/mac80211.c mvm/nvm.c mvm/offloading.c mvm/ops.c
SRCS+= mvm/phy-ctxt.c mvm/power.c mvm/quota.c mvm/rs-fw.c mvm/rfi.c
SRCS+= mvm/rx.c mvm/rxmq.c mvm/scan.c mvm/sf.c mvm/sta.c mvm/tdls.c
SRCS+= mvm/time-event.c mvm/tt.c mvm/tx.c mvm/utils.c
+SRCS+= mvm/link.c
+SRCS+= mvm/mld-key.c mvm/mld-mac.c mvm/mld-mac80211.c mvm/mld-sta.c
+SRCS+= mvm/ptp.c mvm/time-sync.c
#SRCS+= mvm/led.c
SRCS+= pcie/ctxt-info-gen3.c pcie/ctxt-info.c
SRCS+= pcie/drv.c pcie/rx.c pcie/trans-gen2.c pcie/trans.c
SRCS+= pcie/tx-gen2.c pcie/tx.c
SRCS+= queue/tx.c

.if defined(WITH_DEBUGFS) && ${WITH_DEBUGFS} > 0
SRCS+= fw/debugfs.c mvm/debugfs.c mvm/debugfs-vif.c
CFLAGS+= -DCONFIG_IWLWIFI_DEBUGFS=${WITH_DEBUGFS}
CFLAGS+= -DCONFIG_MAC80211_DEBUGFS=${WITH_DEBUGFS}
.endif

SRCS+= iwl-devtrace.c

# Other
SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h

CFLAGS+= -DKBUILD_MODNAME='"iwlwifi"'
CFLAGS+= -I${DEVIWLWIFIDIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
CFLAGS+= -DCONFIG_IWLDVM=0
CFLAGS+= -DCONFIG_IWLMVM=1
# Helpful after fresh imports.
#CFLAGS+= -ferror-limit=0
#CFLAGS+= -DCONFIG_ACPI=1
#CFLAGS+= -DCONFIG_INET=1	# Need LKPI TSO implementation.
#CFLAGS+= -DCONFIG_IPV6=1
CFLAGS+= -DCONFIG_IWLWIFI_DEBUG=1
#CFLAGS+= -DCONFIG_IWLWIFI_LEDS=1
#CFLAGS+= -DCONFIG_IWLWIFI_OPMODE_MODULAR=1
CFLAGS+= -DCONFIG_IWLWIFI_DEVICE_TRACING=1
#CFLAGS+= -DCONFIG_LOCKDEP=1
#CFLAGS+= -DCONFIG_NL80211_TESTMODE=1
#CFLAGS+= -DCONFIG_PM=1
#CFLAGS+= -DCONFIG_PM_SLEEP=1
#CFLAGS+= -DCONFIG_THERMAL=1
#CFLAGS+= -DCONFIG_EFI=1

.include
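
As the .if block in the Makefile above shows, the debugfs hooks (fw/debugfs.c, mvm/debugfs.c, mvm/debugfs-vif.c) and the CONFIG_IWLWIFI_DEBUGFS / CONFIG_MAC80211_DEBUGFS defines are compiled in only when WITH_DEBUGFS is non-zero. A minimal sketch of turning them on, assuming the stock sys/modules/iwlwifi/Makefile as imported here; the value 1 is only an example, any non-zero value satisfies the .if test:

WITH_DEBUGFS= 1	# non-zero: build the debugfs sources and pass
		# -DCONFIG_IWLWIFI_DEBUGFS / -DCONFIG_MAC80211_DEBUGFS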