diff --git a/sys/contrib/dev/iwlwifi/fw/dbg.c b/sys/contrib/dev/iwlwifi/fw/dbg.c
index c2928d1f2b65..fd3483fe50b2 100644
--- a/sys/contrib/dev/iwlwifi/fw/dbg.c
+++ b/sys/contrib/dev/iwlwifi/fw/dbg.c
@@ -1,3166 +1,3172 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#include
#if defined(__FreeBSD__)
#include
#endif
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "debugfs.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "iwl-fh.h"

/**
 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
 *
 * @fwrt_ptr: pointer to the buffer coming from fwrt
 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
 *	transport's data.
 * @trans_len: length of the valid data in trans_ptr
 * @fwrt_len: length of the valid data in fwrt_ptr
 */
struct iwl_fw_dump_ptrs {
	struct iwl_trans_dump_data *trans_ptr;
	void *fwrt_ptr;
	u32 fwrt_len;
};

#define RADIO_REG_MAX_READ 0x2ad
static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
				struct iwl_fw_error_dump_data **dump_data)
{
	u8 *pos = (void *)(*dump_data)->data;
	int i;

	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");

	if (!iwl_trans_grab_nic_access(fwrt->trans))
		return;

	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);

	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
		u32 rd_cmd = RADIO_RSP_RD_CMD;

		rd_cmd |= i << RADIO_RSP_ADDR_POS;
		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
		pos++;
	}

	*dump_data = iwl_fw_error_next_data(*dump_data);

	iwl_trans_release_nic_access(fwrt->trans);
}

static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	int i;

	fifo_hdr = (void *)(*dump_data)->data;
	fifo_data = (void *)fifo_hdr->data;
	fifo_len = size;

	/* No need to try to read the data if the length is 0 */
	if (fifo_len == 0)
		return;

	/* Add a TLV for the RXF */
	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
	fifo_hdr->available_bytes =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_D_SPACE + offset));
	fifo_hdr->wr_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_WR_PTR + offset));
	fifo_hdr->rd_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_RD_PTR + offset));
	fifo_hdr->fence_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_FENCE_PTR + offset));
	fifo_hdr->fence_mode =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_SET_FENCE_MODE + offset));

	/* Lock fence */
	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
	/* Set fence pointer to the same place like WR pointer */
	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
	/* Set fence offset */
	iwl_trans_write_prph(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);

	/* Read FIFO */
	fifo_len /= sizeof(u32); /* Size in DWORDS */
	for (i = 0; i < fifo_len; i++)
		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
						   RXF_FIFO_RD_FENCE_INC + offset);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}

static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{ struct iwl_fw_error_dump_fifo *fifo_hdr; u32 *fifo_data; u32 fifo_len; int i; fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; fifo_len = size; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) return; /* Add a TLV for the FIFO */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(fifo_num); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FIFO_ITEM_CNT + offset)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_WR_PTR + offset)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_RD_PTR + offset)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_FENCE_PTR + offset)); fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_LOCK_FENCE + offset)); /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, TXF_WR_PTR + offset); /* Dummy-read to advance the read pointer to the head */ iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); /* Read FIFO */ for (i = 0; i < fifo_len / sizeof(u32); i++) fifo_data[i] = iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, fifo_data, fifo_len); *dump_data = iwl_fw_error_next_data(*dump_data); } static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, RXF_DIFF_FROM_PREV + fwrt->trans->trans_cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2); } iwl_trans_release_nic_access(fwrt->trans); } static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; u32 *fifo_data; u32 fifo_len; int i, j; IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans)) return; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], 0, i); } /* Pull TXF data from LMAC2 */ if (fwrt->smem_cfg.num_lmacs > 1) { for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, i); iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[1].txfifo_size[i], LMAC2_PRPH_OFFSET, i + cfg->num_txfifo_entries); } } } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void 
*)fifo_hdr->data; fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) continue; /* Add a TLV for the internal FIFOs */ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + fwrt->smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FIFO_ITEM_CNT)); fifo_hdr->wr_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_WR_PTR)); fifo_hdr->rd_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_RD_PTR)); fifo_hdr->fence_ptr = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_FENCE_PTR)); fifo_hdr->fence_mode = cpu_to_le32(iwl_trans_read_prph(fwrt->trans, TXF_CPU2_LOCK_FENCE)); /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ iwl_trans_write_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_ADDR, TXF_CPU2_WR_PTR); /* Dummy-read to advance the read pointer to head */ iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); /* Read FIFO */ fifo_len /= sizeof(u32); /* Size in DWORDS */ for (j = 0; j < fifo_len; j++) fifo_data[j] = iwl_trans_read_prph(fwrt->trans, TXF_CPU2_READ_MODIFY_DATA); *dump_data = iwl_fw_error_next_data(*dump_data); } } iwl_trans_release_nic_access(fwrt->trans); } struct iwl_prph_range { u32 start, end; }; static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a004c0, .end = 0x00a004cc }, { .start = 0x00a004d8, .end = 0x00a004d8 }, { .start = 0x00a004e0, .end = 0x00a004f0 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01094 }, { .start = 0x00a01c00, .end = 0x00a01c20 }, { .start = 0x00a01c58, .end = 0x00a01c58 }, { .start = 0x00a01c7c, .end = 0x00a01c7c }, { .start = 0x00a01c28, .end = 0x00a01c54 }, { .start = 0x00a01c5c, .end = 0x00a01c5c }, { .start = 0x00a01c60, .end = 0x00a01cdc }, { .start = 0x00a01ce0, .end = 0x00a01d0c }, { .start = 0x00a01d18, .end = 0x00a01d20 }, { .start = 0x00a01d2c, .end = 0x00a01d30 }, { .start = 0x00a01d40, .end = 0x00a01d5c }, { .start = 0x00a01d80, .end = 0x00a01d80 }, { .start = 0x00a01d98, .end = 0x00a01d9c }, { .start = 0x00a01da8, .end = 0x00a01da8 }, { .start = 0x00a01db8, .end = 0x00a01df4 }, { .start = 0x00a01dc0, .end = 0x00a01dfc }, { .start = 0x00a01e00, .end = 0x00a01e2c }, { .start = 0x00a01e40, .end = 0x00a01e60 }, { .start = 0x00a01e68, .end = 0x00a01e6c }, { .start = 0x00a01e74, .end = 0x00a01e74 }, { .start = 0x00a01e84, .end = 0x00a01e90 }, { .start = 0x00a01e9c, .end = 0x00a01ec4 }, { .start = 0x00a01ed0, .end = 0x00a01ee0 }, { .start = 0x00a01f00, .end = 0x00a01f1c }, { .start = 0x00a01f44, .end = 0x00a01ffc }, { .start = 0x00a02000, .end = 0x00a02048 }, { .start = 
0x00a02068, .end = 0x00a020f0 }, { .start = 0x00a02100, .end = 0x00a02118 }, { .start = 0x00a02140, .end = 0x00a0214c }, { .start = 0x00a02168, .end = 0x00a0218c }, { .start = 0x00a021c0, .end = 0x00a021c0 }, { .start = 0x00a02400, .end = 0x00a02410 }, { .start = 0x00a02418, .end = 0x00a02420 }, { .start = 0x00a02428, .end = 0x00a0242c }, { .start = 0x00a02434, .end = 0x00a02434 }, { .start = 0x00a02440, .end = 0x00a02460 }, { .start = 0x00a02468, .end = 0x00a024b0 }, { .start = 0x00a024c8, .end = 0x00a024cc }, { .start = 0x00a02500, .end = 0x00a02504 }, { .start = 0x00a0250c, .end = 0x00a02510 }, { .start = 0x00a02540, .end = 0x00a02554 }, { .start = 0x00a02580, .end = 0x00a025f4 }, { .start = 0x00a02600, .end = 0x00a0260c }, { .start = 0x00a02648, .end = 0x00a02650 }, { .start = 0x00a02680, .end = 0x00a02680 }, { .start = 0x00a026c0, .end = 0x00a026d0 }, { .start = 0x00a02700, .end = 0x00a0270c }, { .start = 0x00a02804, .end = 0x00a02804 }, { .start = 0x00a02818, .end = 0x00a0281c }, { .start = 0x00a02c00, .end = 0x00a02db4 }, { .start = 0x00a02df4, .end = 0x00a02fb0 }, { .start = 0x00a03000, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03048 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03074 }, { .start = 0x00a0307c, .end = 0x00a0307c }, { .start = 0x00a03080, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030bc }, { .start = 0x00a030c0, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a05c00, .end = 0x00a05c18 }, { .start = 0x00a05400, .end = 0x00a056e8 }, { .start = 0x00a08000, .end = 0x00a098bc }, { .start = 0x00a02400, .end = 0x00a02758 }, { .start = 0x00a04764, .end = 0x00a0476c }, { .start = 0x00a04770, .end = 0x00a04774 }, { .start = 0x00a04620, .end = 0x00a04624 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { { .start = 0x00a00000, .end = 0x00a00000 }, { .start = 0x00a0000c, .end = 0x00a00024 }, { .start = 0x00a0002c, .end = 0x00a00034 }, { .start = 0x00a0003c, .end = 0x00a0003c }, { .start = 0x00a00410, .end = 0x00a00418 }, { .start = 0x00a00420, .end = 0x00a00420 }, { .start = 0x00a00428, .end = 0x00a00428 }, { .start = 0x00a00430, .end = 0x00a0043c }, { .start = 0x00a00444, .end = 0x00a00444 }, { .start = 0x00a00840, .end = 0x00a00840 }, { .start = 0x00a00850, .end = 0x00a00858 }, { .start = 0x00a01004, .end = 0x00a01008 }, { .start = 0x00a01010, .end = 0x00a01010 }, { .start = 0x00a01018, .end = 0x00a01018 }, { .start = 0x00a01024, .end = 0x00a01024 }, { .start = 0x00a0102c, .end = 0x00a01034 }, { .start = 0x00a0103c, .end = 0x00a01040 }, { .start = 0x00a01048, .end = 0x00a01050 }, { .start = 0x00a01058, .end = 0x00a01058 }, { .start = 0x00a01060, .end = 0x00a01070 }, { .start = 0x00a0108c, .end = 0x00a0108c }, { .start = 0x00a01c20, .end = 0x00a01c28 
}, { .start = 0x00a01d10, .end = 0x00a01d10 }, { .start = 0x00a01e28, .end = 0x00a01e2c }, { .start = 0x00a01e60, .end = 0x00a01e60 }, { .start = 0x00a01e80, .end = 0x00a01e80 }, { .start = 0x00a01ea0, .end = 0x00a01ea0 }, { .start = 0x00a02000, .end = 0x00a0201c }, { .start = 0x00a02024, .end = 0x00a02024 }, { .start = 0x00a02040, .end = 0x00a02048 }, { .start = 0x00a020c0, .end = 0x00a020e0 }, { .start = 0x00a02400, .end = 0x00a02404 }, { .start = 0x00a0240c, .end = 0x00a02414 }, { .start = 0x00a0241c, .end = 0x00a0243c }, { .start = 0x00a02448, .end = 0x00a024bc }, { .start = 0x00a024c4, .end = 0x00a024cc }, { .start = 0x00a02508, .end = 0x00a02508 }, { .start = 0x00a02510, .end = 0x00a02514 }, { .start = 0x00a0251c, .end = 0x00a0251c }, { .start = 0x00a0252c, .end = 0x00a0255c }, { .start = 0x00a02564, .end = 0x00a025a0 }, { .start = 0x00a025a8, .end = 0x00a025b4 }, { .start = 0x00a025c0, .end = 0x00a025c0 }, { .start = 0x00a025e8, .end = 0x00a025f4 }, { .start = 0x00a02c08, .end = 0x00a02c18 }, { .start = 0x00a02c2c, .end = 0x00a02c38 }, { .start = 0x00a02c68, .end = 0x00a02c78 }, { .start = 0x00a03000, .end = 0x00a03000 }, { .start = 0x00a03010, .end = 0x00a03014 }, { .start = 0x00a0301c, .end = 0x00a0302c }, { .start = 0x00a03034, .end = 0x00a03038 }, { .start = 0x00a03040, .end = 0x00a03044 }, { .start = 0x00a03060, .end = 0x00a03068 }, { .start = 0x00a03070, .end = 0x00a03070 }, { .start = 0x00a0307c, .end = 0x00a03084 }, { .start = 0x00a0308c, .end = 0x00a03090 }, { .start = 0x00a03098, .end = 0x00a03098 }, { .start = 0x00a030a0, .end = 0x00a030a0 }, { .start = 0x00a030a8, .end = 0x00a030b4 }, { .start = 0x00a030bc, .end = 0x00a030c0 }, { .start = 0x00a030c8, .end = 0x00a030f4 }, { .start = 0x00a03100, .end = 0x00a0312c }, { .start = 0x00a03c00, .end = 0x00a03c5c }, { .start = 0x00a04400, .end = 0x00a04454 }, { .start = 0x00a04460, .end = 0x00a04474 }, { .start = 0x00a044c0, .end = 0x00a044ec }, { .start = 0x00a04500, .end = 0x00a04504 }, { .start = 0x00a04510, .end = 0x00a04538 }, { .start = 0x00a04540, .end = 0x00a04548 }, { .start = 0x00a04560, .end = 0x00a04560 }, { .start = 0x00a04570, .end = 0x00a0457c }, { .start = 0x00a04590, .end = 0x00a04590 }, { .start = 0x00a04598, .end = 0x00a04598 }, { .start = 0x00a045c0, .end = 0x00a045f4 }, { .start = 0x00a05c18, .end = 0x00a05c1c }, { .start = 0x00a0c000, .end = 0x00a0c018 }, { .start = 0x00a0c020, .end = 0x00a0c028 }, { .start = 0x00a0c038, .end = 0x00a0c094 }, { .start = 0x00a0c0c0, .end = 0x00a0c104 }, { .start = 0x00a0c10c, .end = 0x00a0c118 }, { .start = 0x00a0c150, .end = 0x00a0c174 }, { .start = 0x00a0c17c, .end = 0x00a0c188 }, { .start = 0x00a0c190, .end = 0x00a0c198 }, { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, }; static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = { { .start = 0x00d03c00, .end = 0x00d03c64 }, { .start = 0x00d05c18, .end = 0x00d05c1c }, { .start = 0x00d0c000, .end = 0x00d0c174 }, }; static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { u32 i; for (i = 0; i < len_bytes; i += 4) *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; struct iwl_trans *trans = fwrt->trans; struct iwl_fw_error_dump_data **data = (struct iwl_fw_error_dump_data **)ptr; u32 i; if (!data) return; IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if 
(!iwl_trans_grab_nic_access(trans)) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + num_bytes_in_chunk); prph = (void *)(*data)->data; prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, /* our range is inclusive, hence + 4 */ iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4, (void *)prph->data); *data = iwl_fw_error_next_data(*data); } iwl_trans_release_nic_access(trans); } /* * alloc_sgtable - allocates scallerlist table in the given size, * fills it with pages and returns it * @size: the size (in bytes) of the table */ static struct scatterlist *alloc_sgtable(int size) { int alloc_size, nents, i; struct page *new_page; struct scatterlist *iter; struct scatterlist *table; nents = DIV_ROUND_UP(size, PAGE_SIZE); table = kcalloc(nents, sizeof(*table), GFP_KERNEL); if (!table) return NULL; sg_init_table(table, nents); iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = alloc_page(GFP_KERNEL); if (!new_page) { /* release all previous allocated pages in the table */ iter = table; for_each_sg(table, iter, sg_nents(table), i) { new_page = sg_page(iter); if (new_page) __free_page(new_page); } kfree(table); return NULL; } alloc_size = min_t(int, size, PAGE_SIZE); size -= PAGE_SIZE; sg_set_page(iter, new_page, alloc_size, 0); } return table; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, u32 range_len, void *ptr) { u32 *prph_len = (u32 *)ptr; int i, num_bytes_in_chunk; if (!prph_len) return; for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ num_bytes_in_chunk = iwl_prph_dump_addr[i].end - iwl_prph_dump_addr[i].start + 4; *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } } static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, void (*handler)(struct iwl_fw_runtime *, const struct iwl_prph_range *, u32, void *)) { u32 range_len; if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210); handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr); } else if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); } else { range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); if (fwrt->trans->trans_cfg->mq_rx_supported) { range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **dump_data, u32 len, u32 ofs, u32 type) { struct iwl_fw_error_dump_mem *dump_mem; if (!len) return; (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)(*dump_data)->data; dump_mem->type = cpu_to_le32(type); dump_mem->offset = cpu_to_le32(ofs); iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); *dump_data = iwl_fw_error_next_data(*dump_data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs, dump_mem->data, len); IWL_DEBUG_INFO(fwrt, 
"WRT memory dump. Type=%u\n", dump_mem->type); } #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) return 0; /* Count RXF2 size */ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); return fifo_len; } static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) goto dump_internal_txf; /* Count TXF sizes */ if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) mem_cfg->num_lmacs = MAX_NUM_LMAC; for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; for (j = 0; j < mem_cfg->num_txfifo_entries; j++) ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j], hdr_len); } dump_internal_txf: if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) goto out; for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++) ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len); out: return fifo_len; } static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, struct iwl_fw_error_dump_data **data) { int i; IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = fwrt->fw_paging_db[i].fw_paging_block; dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); (*data)->len = cpu_to_le32(sizeof(*paging) + PAGING_BLOCK_SIZE); paging = (void *)(*data)->data; paging->index = cpu_to_le32(i); dma_sync_single_for_cpu(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); dma_sync_single_for_device(fwrt->trans->dev, addr, PAGING_BLOCK_SIZE, DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, fwrt->fw_paging_db[i].fw_offs, paging->data, PAGING_BLOCK_SIZE); } } static struct iwl_fw_error_dump_file * iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump, struct iwl_fwrt_dump_data *data) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { sram_ofs = fwrt->trans->cfg->dccm_offset; sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); /* Dump SRAM only if no mem_tlvs */ if (!fwrt->fw->dbg.n_mem_tlv) ADD_LEN(file_len, sram_len, hdr_len); /* Make room for all mem types that exist */ ADD_LEN(file_len, smem_len, hdr_len); ADD_LEN(file_len, sram2_len, hdr_len); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ if (data->monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + data->desc->len; dump_file = vzalloc(file_len); if (!dump_file) return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->name, sizeof(dump_info->dev_human_readable) - 1); #if defined(__linux__) strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); #elif defined(__FreeBSD__) /* XXX TODO */ strncpy(dump_info->bus_human_readable, "", sizeof(dump_info->bus_human_readable) - 1); #endif dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; dump_info->lmac_err_id[0] = 
cpu_to_le32(fwrt->dump.lmac_err_id[0]); if (fwrt->smem_cfg.num_lmacs > 1) dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]); dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); dump_smem_cfg = (void *)dump_data->data; dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries); for (i = 0; i < MAX_NUM_LMAC; i++) { int j; u32 *txf_size = mem_cfg->lmac[i].txfifo_size; for (j = 0; j < TX_FIFO_MAX_NUM; j++) dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]); dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); } dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr); for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } dump_data = iwl_fw_error_next_data(dump_data); } /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); } if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && data->desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + data->desc->len); dump_trig = (void *)dump_data->data; memcpy(dump_trig, &data->desc->trig_desc, sizeof(*dump_trig) + data->desc->len); dump_data = iwl_fw_error_next_data(dump_data); } /* In case we only want monitor dump, skip to dump trasport data */ if (data->monitor_only) goto out; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; if (!fwrt->fw->dbg.n_mem_tlv) iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type)); } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset, IWL_FW_ERROR_DUMP_MEM_SRAM); } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; size_t data_size = fwrt->trans->cfg->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr, dump_data->data + data_size, data_size); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); if (prph_len) iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; } /** 
* struct iwl_dump_ini_region_data - region data * @reg_tlv: region TLV * @dump_data: dump data */ struct iwl_dump_ini_region_data { struct iwl_ucode_tlv *reg_tlv; struct iwl_fwrt_dump_data *dump_data; }; static int iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); if (prph_val == 0x5a5a5a5a) return -EBUSY; *val++ = cpu_to_le32(prph_val); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; u32 indirect_rd_addr = WMAL_MRSPF_1; u32 prph_val; u32 addr = le32_to_cpu(reg->addrs[idx]); u32 dphy_state; u32 dphy_addr; int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) indirect_wr_addr = WMAL_INDRCT_CMD1; indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; dphy_addr = (reg->dev_addr.offset) ? 
WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW; dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { if (dphy_state == HBUS_TIMEOUT || (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != WFPM_PHYRF_STATE_ON) { *val++ = cpu_to_le32(WFPM_DPHY_OFF); continue; } iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr, WMAL_INDRCT_CMD(addr + i)); prph_val = iwl_read_prph_no_grab(fwrt->trans, indirect_rd_addr); *val++ = cpu_to_le32(prph_val); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; __le32 *val = range->data; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); int i; /* we shouldn't get here if the trans doesn't have read_config32 */ if (WARN_ON_ONCE(!trans->ops->read_config32)) return -EOPNOTSUPP; range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { int ret; u32 tmp; ret = trans->ops->read_config32(trans, addr + i, &tmp); if (ret < 0) return ret; *val++ = cpu_to_le32(tmp); } return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->addrs[idx]) + le32_to_cpu(reg->dev_addr.offset); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM && fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, range->data, le32_to_cpu(reg->dev_addr.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, void *range_ptr, u32 range_len, int idx) { struct page *page = fwrt->fw_paging_db[idx].fw_paging_block; struct iwl_fw_ini_error_dump_range *range = range_ptr; dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); memcpy(range->data, page_address(page), page_size); 
dma_sync_single_for_device(fwrt->trans->dev, addr, page_size, DMA_BIDIRECTIONAL); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range; u32 page_size; /* all paged index start from 1 to skip CSS section */ idx++; if (!fwrt->trans->trans_cfg->gen2) return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx); range = range_ptr; page_size = fwrt->trans->init_dram.paging[idx].size; range->page_num = cpu_to_le32(idx); range->range_data_size = cpu_to_le32(page_size); memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, page_size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_dram_data *frag; u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); frag = &fwrt->trans->dbg.fw_mon_ini[alloc_id].frags[idx]; range->dram_base_addr = cpu_to_le64(frag->physical); range->range_data_size = cpu_to_le32(frag->size); memcpy(range->data, frag->block, frag->size); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; u32 addr = le32_to_cpu(reg->internal_buffer.base_addr); range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->internal_buffer.size; iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->internal_buffer.size)); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; int txf_num = cfg->num_txfifo_entries; int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid[0]); if (!idx) { if (le32_to_cpu(reg->fifos.offset) && cfg->num_lmacs == 1) { IWL_ERR(fwrt, "WRT: Invalid lmac offset 0x%x\n", le32_to_cpu(reg->fifos.offset)); return false; } iter->internal_txf = 0; iter->fifo_size = 0; iter->fifo = -1; if (le32_to_cpu(reg->fifos.offset)) iter->lmac = 1; else iter->lmac = 0; } if (!iter->internal_txf) { for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) { iter->fifo_size = cfg->lmac[iter->lmac].txfifo_size[iter->fifo]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } iter->fifo--; } iter->internal_txf = 1; if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) return false; for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) { iter->fifo_size = cfg->internal_txfifo_size[iter->fifo - txf_num]; if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) return true; } return false; } static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void 
*)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data; u32 offs = le32_to_cpu(reg->fifos.offset), addr; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 registers_size = registers_num * sizeof(*reg_dump); __le32 *data; int i; if (!iwl_ini_txf_iter(fwrt, reg_data, idx)) return -EIO; if (!iwl_trans_grab_nic_access(fwrt->trans)) return -EBUSY; range->fifo_hdr.fifo_num = cpu_to_le32(iter->fifo); range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num); range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); /* * read txf registers. for each register, write to the dump the * register address and its value */ for (i = 0; i < registers_num; i++) { addr = le32_to_cpu(reg->addrs[i]) + offs; reg_dump->addr = cpu_to_le32(addr); reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); reg_dump++; } if (reg->fifos.hdr_only) { range->range_data_size = cpu_to_le32(registers_size); goto out; } /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs, TXF_WR_PTR + offs); /* Dummy-read to advance the read pointer to the head */ iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs); /* Read FIFO */ addr = TXF_READ_MODIFY_DATA + offs; data = (void *)reg_dump; for (i = 0; i < iter->fifo_size; i += sizeof(*data)) *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, reg_dump, iter->fifo_size); out: iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } struct iwl_ini_rxf_data { u32 fifo_num; u32 size; u32 offset; }; static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, struct iwl_ini_rxf_data *data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 fid1 = le32_to_cpu(reg->fifos.fid[0]); u32 fid2 = le32_to_cpu(reg->fifos.fid[1]); u8 fifo_idx; if (!data) return; /* make sure only one bit is set in only one fid */ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1, "fid1=%x, fid2=%x\n", fid1, fid2)) return; memset(data, 0, sizeof(*data)); if (fid1) { fifo_idx = ffs(fid1) - 1; if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n", fifo_idx)) return; data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; data->fifo_num = fifo_idx; } else { u8 max_idx; fifo_idx = ffs(fid2) - 1; if (iwl_fw_lookup_notif_ver(fwrt->fw, SYSTEM_GROUP, SHARED_MEM_CFG_CMD, 0) <= 3) max_idx = 0; else max_idx = 1; if (WARN_ONCE(fifo_idx > max_idx, "invalid umac fifo idx %d", fifo_idx)) return; /* use bit 31 to distinguish between umac and lmac rxf while * parsing the dump */ data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; switch (fifo_idx) { case 0: data->size = fwrt->smem_cfg.rxfifo2_size; data->offset = iwl_umac_prph(fwrt->trans, RXF_DIFF_FROM_PREV); break; case 1: data->size = fwrt->smem_cfg.rxfifo2_control_size; data->offset = iwl_umac_prph(fwrt->trans, RXF2C_DIFF_FROM_PREV); break; } } } static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range 
= range_ptr;
	struct iwl_ini_rxf_data rxf_data;
	struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
	u32 offs = le32_to_cpu(reg->fifos.offset), addr;
	u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
	u32 registers_size = registers_num * sizeof(*reg_dump);
	__le32 *data;
	int i;

+#if defined(__FreeBSD__)
+	rxf_data.size = 0;
+#endif
	iwl_ini_get_rxf_data(fwrt, reg_data, &rxf_data);
	if (!rxf_data.size)
		return -EIO;

	if (!iwl_trans_grab_nic_access(fwrt->trans))
		return -EBUSY;

	range->fifo_hdr.fifo_num = cpu_to_le32(rxf_data.fifo_num);
	range->fifo_hdr.num_of_registers = cpu_to_le32(registers_num);
	range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);

	/*
	 * read rxf registers. for each register, write to the dump the
	 * register address and its value
	 */
	for (i = 0; i < registers_num; i++) {
		addr = le32_to_cpu(reg->addrs[i]) + offs;
		reg_dump->addr = cpu_to_le32(addr);
		reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
								   addr));
		reg_dump++;
	}

	if (reg->fifos.hdr_only) {
		range->range_data_size = cpu_to_le32(registers_size);
		goto out;
	}

	offs = rxf_data.offset;

	/* Lock fence */
	iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
	/* Set fence pointer to the same place like WR pointer */
	iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1);
	/* Set fence offset */
	iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs, 0x0);

	/* Read FIFO */
	addr = RXF_FIFO_RD_FENCE_INC + offs;
	data = (void *)reg_dump;
	for (i = 0; i < rxf_data.size; i += sizeof(*data))
		*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));

out:
	iwl_trans_release_nic_access(fwrt->trans);

	return sizeof(*range) + le32_to_cpu(range->range_data_size);
}

static int iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
				       struct iwl_dump_ini_region_data *reg_data,
				       void *range_ptr, u32 range_len, int idx)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
	struct iwl_fw_ini_region_err_table *err_table = &reg->err_table;
	struct iwl_fw_ini_error_dump_range *range = range_ptr;
	u32 addr = le32_to_cpu(err_table->base_addr) +
		   le32_to_cpu(err_table->offset);

	range->internal_base_addr = cpu_to_le32(addr);
	range->range_data_size = err_table->size;
	iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
				 le32_to_cpu(err_table->size));

	return sizeof(*range) + le32_to_cpu(range->range_data_size);
}

static int iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
					 struct iwl_dump_ini_region_data *reg_data,
					 void *range_ptr, u32 range_len, int idx)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
	struct iwl_fw_ini_region_special_device_memory *special_mem =
		&reg->special_mem;
	struct iwl_fw_ini_error_dump_range *range = range_ptr;
	u32 addr = le32_to_cpu(special_mem->base_addr) +
		   le32_to_cpu(special_mem->offset);

	range->internal_base_addr = cpu_to_le32(addr);
	range->range_data_size = special_mem->size;
	iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
				 le32_to_cpu(special_mem->size));

	return sizeof(*range) + le32_to_cpu(range->range_data_size);
}

static int iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
				       struct iwl_dump_ini_region_data *reg_data,
				       void *range_ptr, u32 range_len, int idx)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
	struct iwl_fw_ini_error_dump_range *range = range_ptr;
	__le32 *val = range->data;
	u32 prph_data;
	int i;

	if (!iwl_trans_grab_nic_access(fwrt->trans))
		return -EBUSY;

	range->range_data_size = reg->dev_addr.size;
	for (i = 0; i <
(le32_to_cpu(reg->dev_addr.size) / 4); i++) { prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); if (prph_data == 0x5a5a5a5a) { iwl_trans_release_nic_access(fwrt->trans); return -EBUSY; } *val++ = cpu_to_le32(prph_data); } iwl_trans_release_nic_access(fwrt->trans); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt; u32 pkt_len; if (!pkt) return -EIO; pkt_len = iwl_rx_packet_payload_len(pkt); memcpy(&range->fw_pkt_hdr, &pkt->hdr, sizeof(range->fw_pkt_hdr)); range->range_data_size = cpu_to_le32(pkt_len); memcpy(range->data, pkt->data, pkt_len); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, u32 range_len, int idx) { /* read the IMR memory and DMA it to SRAM */ struct iwl_fw_ini_error_dump_range *range = range_ptr; u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr; u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte; u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes; range->range_data_size = cpu_to_le32(size_to_dump); if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr, imr_curr_addr, size_to_dump)) { IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n"); return -1; } fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump; fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump; iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data, size_to_dump); return sizeof(*range) + le32_to_cpu(range->range_data_size); } static void * iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } /** * mask_apply_and_normalize - applies mask on val and normalize the result * * The normalization is based on the first set bit in the mask * * @val: value * @mask: mask to apply and to normalize with */ static u32 mask_apply_and_normalize(u32 val, u32 mask) { return (val & mask) >> (ffs(mask) - 1); } static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, const struct iwl_fw_mon_reg *reg_info) { u32 val, offs; /* The header addresses of DBGCi is calculate as follows: * DBGC1 address + (0x100 * i) */ offs = (alloc_id - IWL_FW_INI_ALLOCATION_ID_DBGC1) * 0x100; if (!reg_info || !reg_info->addr || !reg_info->mask) return 0; val = iwl_read_prph_no_grab(fwrt->trans, reg_info->addr + offs); return cpu_to_le32(mask_apply_and_normalize(val, reg_info->mask)); } static void * iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; } data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id, &addrs->write_ptr); if 
(fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { u32 wrt_ptr = le32_to_cpu(data->write_ptr); data->write_ptr = cpu_to_le32(wrt_ptr >> 2); } data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cycle_cnt); data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cur_frag); iwl_trans_release_nic_access(fwrt->trans); data->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return data->data; } static void * iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } static void * iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } static void * iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } static void * iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_err_table_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->version = reg->err_table.version; return dump->data; } static void * iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_special_device_memory *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); dump->type = reg->special_mem.type; dump->version = reg->special_mem.version; return dump->data; } static void * iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); return dump->data; } static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); } static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { if (fwrt->trans->trans_cfg->gen2) { if (fwrt->trans->init_dram.paging_cnt) return fwrt->trans->init_dram.paging_cnt - 1; else return 0; } return fwrt->num_of_paging_blk; } static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 ranges = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { if (!fw_mon->frags[i].size) break; ranges++; } return ranges; } static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 num_of_fifos = 0; while (iwl_ini_txf_iter(fwrt, reg_data, num_of_fifos)) num_of_fifos++; 
return num_of_fifos; } static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { return 1; } static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { /* range is total number of pages need to copied from *IMR memory to SRAM and later from SRAM to DRAM */ u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); return 0; } return((imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size)); } static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_error_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { int i; u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); u32 size = sizeof(struct iwl_fw_ini_error_dump); /* start from 1 to skip CSS section */ for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) { size += range_header_len; if (fwrt->trans->trans_cfg->gen2) size += fwrt->trans->init_dram.paging[i].size; else size += fwrt->fw_paging_db[i].fw_paging_size; } return size; } static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_mon *fw_mon; u32 size = 0, alloc_id = le32_to_cpu(reg->dram_alloc_id); int i; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; for (i = 0; i < fw_mon->num_frags; i++) { struct iwl_dram_data *frag = &fw_mon->frags[i]; if (!frag->size) break; size += sizeof(struct iwl_fw_ini_error_dump_range) + frag->size; } if (size) size += sizeof(struct iwl_fw_ini_monitor_dump); return size; } static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size; size = le32_to_cpu(reg->internal_buffer.size); if (!size) return 0; size += sizeof(struct iwl_fw_ini_monitor_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->dev_addr.size); u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); if (!size || !ranges) return 0; return sizeof(struct iwl_fw_ini_monitor_dump) + ranges * (size + sizeof(struct iwl_fw_ini_error_dump_range)); } static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_txf_iter_data *iter = &fwrt->dump.txf_iter_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = 0; u32 fifo_hdr = sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct 
iwl_fw_ini_error_dump_register); while (iwl_ini_txf_iter(fwrt, reg_data, size)) { size += fifo_hdr; if (!reg->fifos.hdr_only) size += iter->fifo_size; } if (!size) return 0; return size + sizeof(struct iwl_fw_ini_error_dump); } static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_ini_rxf_data rx_data; u32 registers_num = iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs); u32 size = sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range) + registers_num * sizeof(struct iwl_fw_ini_error_dump_register); if (reg->fifos.hdr_only) return size; +#if defined(__FreeBSD__) + rx_data.size = 0; +#endif iwl_ini_get_rxf_data(fwrt, reg_data, &rx_data); size += rx_data.size; return size; } static u32 iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->err_table.size); if (size) size += sizeof(struct iwl_fw_ini_err_table_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; u32 size = le32_to_cpu(reg->special_mem.size); if (size) size += sizeof(struct iwl_fw_ini_special_device_memory) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 size = 0; if (!reg_data->dump_data->fw_pkt) return 0; size += iwl_rx_packet_payload_len(reg_data->dump_data->fw_pkt); if (size) size += sizeof(struct iwl_fw_ini_error_dump) + sizeof(struct iwl_fw_ini_error_dump_range); return size; } static u32 iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { u32 size = 0; u32 ranges = 0; u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); return size; } size = imr_size; ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data); if (!size && !ranges) { IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges); return 0; } size += sizeof(struct iwl_fw_ini_error_dump) + ranges * sizeof(struct iwl_fw_ini_error_dump_range); return size; } /** * struct iwl_dump_ini_mem_ops - ini memory dump operations * @get_num_of_ranges: returns the number of memory ranges in the region. * @get_size: returns the total size of the region. * @fill_mem_hdr: fills region type specific headers and returns pointer to * the first range or NULL if failed to fill headers. * @fill_range: copies a given memory range into the dump. * Returns the size of the range or negative error value otherwise. 
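 * Note: as used by iwl_dump_ini_mem() below, the callbacks run in the order
 * get_size(), get_num_of_ranges(), fill_mem_hdr() and then fill_range() once
 * per range; this describes the caller in this file, not a wider contract.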
*/ struct iwl_dump_ini_mem_ops { u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); u32 (*get_size)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data); void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *data, u32 data_len); int (*fill_range)(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range, u32 range_len, int idx); }; /** * iwl_dump_ini_mem * * Creates a dump tlv and copy a memory region into it. * Returns the size of the current dump tlv or 0 if failed * * @fwrt: fw runtime struct * @list: list to add the dump tlv to * @reg_data: memory region * @ops: memory dump operations */ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, struct iwl_dump_ini_region_data *reg_data, const struct iwl_dump_ini_mem_ops *ops) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_error_dump_data *tlv; struct iwl_fw_ini_error_dump_header *header; u32 type = reg->type; u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK); u32 num_of_ranges, i, size; u8 *range; u32 free_size; u64 header_size; u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE; IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n", dump_policy, id, type); if (le32_to_cpu(reg->hdr.version) >= 2) { u32 dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK); if (dump_policy == IWL_FW_INI_DUMP_VERBOSE && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM && !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } else if (dump_policy == IWL_FW_INI_DUMP_BRIEF && !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) { IWL_DEBUG_FW(fwrt, "WRT: no dump - type %d and policy mismatch=%d\n", dump_policy, dp); return 0; } } if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr || !ops->fill_range) { IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n"); return 0; } size = ops->get_size(fwrt, reg_data); if (size < sizeof(*header)) { IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n"); return 0; } entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size); if (!entry) return 0; entry->size = sizeof(*tlv) + size; tlv = (void *)entry->data; tlv->type = reg->type; tlv->sub_type = reg->sub_type; tlv->sub_type_ver = reg->sub_type_ver; tlv->reserved = reg->reserved; tlv->len = cpu_to_le32(size); num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data); header = (void *)tlv->data; header->region_id = cpu_to_le32(id); header->num_of_ranges = cpu_to_le32(num_of_ranges); header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME); memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME); free_size = size; range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size); if (!range) { IWL_ERR(fwrt, "WRT: Failed to fill region header: id=%d, type=%d\n", id, type); goto out_err; } header_size = range - (u8 *)header; if (WARN(header_size > free_size, #if defined(__linux__) "header size %llu > free_size %d", header_size, free_size)) { #elif defined(__FreeBSD__) "header size %ju > free_size %d", (uintmax_t)header_size, free_size)) { #endif IWL_ERR(fwrt, "WRT: fill_mem_hdr used more than given free_size\n"); goto out_err; } free_size -= header_size; for 
(i = 0; i < num_of_ranges; i++) { int range_size = ops->fill_range(fwrt, reg_data, range, free_size, i); if (range_size < 0) { IWL_ERR(fwrt, "WRT: Failed to dump region: id=%d, type=%d\n", id, type); goto out_err; } if (WARN(range_size > free_size, "range_size %d > free_size %d", range_size, free_size)) { IWL_ERR(fwrt, "WRT: fill_raged used more than given free_size\n"); goto out_err; } free_size -= range_size; range = range + range_size; } list_add_tail(&entry->list, list); return entry->size; out_err: vfree(entry); return 0; } static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trigger, struct list_head *list) { struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_error_dump_data *tlv; struct iwl_fw_ini_dump_info *dump; struct iwl_dbg_tlv_node *node; struct iwl_fw_ini_dump_cfg_name *cfg_name; u32 size = sizeof(*tlv) + sizeof(*dump); u32 num_of_cfg_names = 0; u32 hw_type; list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { size += sizeof(*cfg_name); num_of_cfg_names++; } entry = vzalloc(sizeof(*entry) + size); if (!entry) return 0; entry->size = size; tlv = (void *)entry->data; tlv->type = cpu_to_le32(IWL_INI_DUMP_INFO_TYPE); tlv->len = cpu_to_le32(size - sizeof(*tlv)); dump = (void *)tlv->data; dump->version = cpu_to_le32(IWL_INI_DUMP_VER); dump->time_point = trigger->time_point; dump->trigger_reason = trigger->trigger_reason; dump->external_cfg_state = cpu_to_le32(fwrt->trans->dbg.external_ini_cfg); dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type); dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype); dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step); /* * Several HWs all have type == 0x42, so we'll override this value * according to the detected HW */ hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev); if (hw_type == IWL_AX210_HW_TYPE) { u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR); u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT); u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT); u32 masked_bits = is_jacket | (is_cdb << 1); /* * The HW type depends on certain bits in this case, so add * these bits to the HW type. We won't have collisions since we * add these bits after the highest possible bit in the mask. 
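	 * For example (hypothetical readout): is_jacket = 1 and is_cdb = 0 give
	 * masked_bits = 0x1, so the reported type becomes
	 * IWL_AX210_HW_TYPE | (0x1 << IWL_AX210_HW_TYPE_ADDITION_SHIFT).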
*/ hw_type |= masked_bits << IWL_AX210_HW_TYPE_ADDITION_SHIFT; } dump->hw_type = cpu_to_le32(hw_type); dump->rf_id_flavor = cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt->trans->hw_rf_id)); dump->rf_id_dash = cpu_to_le32(CSR_HW_RFID_DASH(fwrt->trans->hw_rf_id)); dump->rf_id_step = cpu_to_le32(CSR_HW_RFID_STEP(fwrt->trans->hw_rf_id)); dump->rf_id_type = cpu_to_le32(CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)); dump->lmac_major = cpu_to_le32(fwrt->dump.fw_ver.lmac_major); dump->lmac_minor = cpu_to_le32(fwrt->dump.fw_ver.lmac_minor); dump->umac_major = cpu_to_le32(fwrt->dump.fw_ver.umac_major); dump->umac_minor = cpu_to_le32(fwrt->dump.fw_ver.umac_minor); dump->fw_mon_mode = cpu_to_le32(fwrt->trans->dbg.ini_dest); dump->regions_mask = trigger->regions_mask & ~cpu_to_le64(fwrt->trans->dbg.unsupported_region_msk); dump->build_tag_len = cpu_to_le32(sizeof(dump->build_tag)); memcpy(dump->build_tag, fwrt->fw->human_readable, sizeof(dump->build_tag)); cfg_name = dump->cfg_names; dump->num_of_cfg_names = cpu_to_le32(num_of_cfg_names); list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) { struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)node->tlv.data; cfg_name->image_type = debug_info->image_type; cfg_name->cfg_name_len = cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME); memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name, sizeof(cfg_name->cfg_name)); cfg_name++; } /* add dump info TLV to the beginning of the list since it needs to be * the first TLV in the dump */ list_add(&entry->list, list); return entry->size; } static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { [IWL_FW_INI_REGION_INVALID] = {}, [IWL_FW_INI_REGION_INTERNAL_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_mon_smem_get_size, .fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header, .fill_range = iwl_dump_ini_mon_smem_iter, }, [IWL_FW_INI_REGION_DRAM_BUFFER] = { .get_num_of_ranges = iwl_dump_ini_mon_dram_ranges, .get_size = iwl_dump_ini_mon_dram_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header, .fill_range = iwl_dump_ini_mon_dram_iter, }, [IWL_FW_INI_REGION_TXF] = { .get_num_of_ranges = iwl_dump_ini_txf_ranges, .get_size = iwl_dump_ini_txf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_txf_iter, }, [IWL_FW_INI_REGION_RXF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_rxf_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_rxf_iter, }, [IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, [IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_err_table_get_size, .fill_mem_hdr = iwl_dump_ini_err_table_fill_header, .fill_range = iwl_dump_ini_err_table_iter, }, [IWL_FW_INI_REGION_RSP_OR_NOTIF] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_fw_pkt_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_fw_pkt_iter, }, [IWL_FW_INI_REGION_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_dev_mem_iter, }, [IWL_FW_INI_REGION_PERIPHERY_MAC] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, 
.fill_range = iwl_dump_ini_prph_mac_iter, }, [IWL_FW_INI_REGION_PERIPHERY_PHY] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_prph_phy_iter, }, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .get_num_of_ranges = iwl_dump_ini_paging_ranges, .get_size = iwl_dump_ini_paging_get_size, .fill_range = iwl_dump_ini_paging_iter, }, [IWL_FW_INI_REGION_CSR] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_csr_iter, }, [IWL_FW_INI_REGION_DRAM_IMR] = { .get_num_of_ranges = iwl_dump_ini_imr_ranges, .get_size = iwl_dump_ini_imr_get_size, .fill_mem_hdr = iwl_dump_ini_imr_fill_header, .fill_range = iwl_dump_ini_imr_iter, }, [IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, .fill_range = iwl_dump_ini_config_iter, }, [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = { .get_num_of_ranges = iwl_dump_ini_single_range, .get_size = iwl_dump_ini_special_mem_get_size, .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header, .fill_range = iwl_dump_ini_special_mem_iter, }, [IWL_FW_INI_REGION_DBGI_SRAM] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mon_dbgi_get_size, .fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header, .fill_range = iwl_dump_ini_dbgi_sram_iter, }, }; static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point); struct iwl_dump_ini_region_data reg_data = { .dump_data = dump_data, }; struct iwl_dump_ini_region_data imr_reg_data = { .dump_data = dump_data, }; int i; u32 size = 0; u64 regions_mask = le64_to_cpu(trigger->regions_mask) & ~(fwrt->trans->dbg.unsupported_region_msk); BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < ARRAY_SIZE(fwrt->trans->dbg.active_regions)); for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { u32 reg_type; struct iwl_fw_ini_region_tlv *reg; if (!(BIT_ULL(i) & regions_mask)) continue; reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; if (!reg_data.reg_tlv) { IWL_WARN(fwrt, "WRT: Unassigned region id %d, skipping\n", i); continue; } reg = (void *)reg_data.reg_tlv->data; reg_type = reg->type; if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { IWL_WARN(fwrt, "WRT: trying to collect phy prph at time point: %d, skipping\n", tp_id); continue; } /* * DRAM_IMR can be collected only for FW/HW error timepoint * when fw is not alive. In addition, it must be collected * lastly as it overwrites SRAM that can possibly contain * debug data which also need to be collected. 
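		 * That is why the region is only recorded in imr_reg_data here;
		 * the actual iwl_dump_ini_mem() call for it is made after this
		 * loop has finished.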
*/ if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) { if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT || tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR) imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; else IWL_INFO(fwrt, "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n", tp_id); /* continue to next region */ continue; } size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[reg_type]); } /* collect DRAM_IMR region in the last */ if (imr_reg_data.reg_tlv) size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]); if (size) size += iwl_dump_ini_info(fwrt, trigger, list); return size; } static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *trig) { enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 usec = le32_to_cpu(trig->ignore_consec); if (!iwl_trans_dbg_ini_valid(fwrt->trans) || tp_id == IWL_FW_INI_TIME_POINT_INVALID || tp_id >= IWL_FW_INI_TIME_POINT_NUM || iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec)) return false; return true; } static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; struct iwl_fw_ini_dump_entry *entry; struct iwl_fw_ini_dump_file_hdr *hdr; u32 size; if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) || !le64_to_cpu(trigger->regions_mask)) return 0; entry = vzalloc(sizeof(*entry) + sizeof(*hdr)); if (!entry) return 0; entry->size = sizeof(*hdr); size = iwl_dump_ini_trigger(fwrt, dump_data, list); if (!size) { vfree(entry); return 0; } hdr = (void *)entry->data; hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER); hdr->file_len = cpu_to_le32(size + entry->size); list_add(&entry->list, list); return le32_to_cpu(hdr->file_len); } static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc) { if (desc && desc != &iwl_dump_desc_assert) kfree(desc); fwrt->dump.lmac_err_id[0] = 0; if (fwrt->smem_cfg.num_lmacs > 1) fwrt->dump.lmac_err_id[1] = 0; fwrt->dump.umac_err_id = 0; } static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { struct iwl_fw_dump_ptrs fw_error_dump = {}; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; u32 dump_mask = fwrt->fw->dbg.dump_mask; dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data); if (!dump_file) return; if (dump_data->monitor_only) dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask, fwrt->sanitize_ops, fwrt->sanitize_ctx); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump.fwrt_len = file_len; if (fw_error_dump.trans_ptr) { file_len += fw_error_dump.trans_ptr->len; dump_file->file_len = cpu_to_le32(file_len); } sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.fwrt_ptr, fw_error_dump.fwrt_len, 0); if (fw_error_dump.trans_ptr) sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data), fw_error_dump.trans_ptr->data, fw_error_dump.trans_ptr->len, fw_error_dump.fwrt_len); dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } vfree(fw_error_dump.fwrt_ptr); vfree(fw_error_dump.trans_ptr); } static void iwl_dump_ini_list_free(struct list_head *list) { while (!list_empty(list)) { struct iwl_fw_ini_dump_entry *entry = list_entry(list->next, typeof(*entry), list); 
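		/* unlink the entry before freeing it so the list stays consistent */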
list_del(&entry->list); vfree(entry); } } static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data) { dump_data->trig = NULL; kfree(dump_data->fw_pkt); dump_data->fw_pkt = NULL; } static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data) { #if defined(__linux__) LIST_HEAD(dump_list); #elif defined(__FreeBSD__) LINUX_LIST_HEAD(dump_list); #endif struct scatterlist *sg_dump_data; u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list); if (!file_len) return; sg_dump_data = alloc_sgtable(file_len); if (sg_dump_data) { struct iwl_fw_ini_dump_entry *entry; int sg_entries = sg_nents(sg_dump_data); u32 offs = 0; list_for_each_entry(entry, &dump_list, list) { sg_pcopy_from_buffer(sg_dump_data, sg_entries, entry->data, entry->size, offs); offs += entry->size; } dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, GFP_KERNEL); } iwl_dump_ini_list_free(&dump_list); } const struct iwl_fw_dump_desc iwl_dump_desc_assert = { .trig_desc = { .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), }, }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay) { struct iwl_fwrt_wk_data *wk_data; unsigned long idx; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_free_dump_desc(fwrt, desc); return 0; } /* * Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. */ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; wk_data = &fwrt->dump.wks[idx]; if (WARN_ON(wk_data->dump_data.desc)) iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc); wk_data->dump_data.desc = desc; wk_data->dump_data.monitor_only = monitor_only; IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", le32_to_cpu(desc->trig_desc.type)); schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type) { if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) return -EIO; if (iwl_trans_dbg_ini_valid(fwrt->trans)) { if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT && trig_type != FW_DBG_TRIGGER_DRIVER) return -EIO; iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT, NULL); } else { struct iwl_fw_dump_desc *iwl_dump_error_desc; int ret; iwl_dump_error_desc = kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL); if (!iwl_dump_error_desc) return -ENOMEM; iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type); iwl_dump_error_desc->len = 0; ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); if (ret) { kfree(iwl_dump_error_desc); return ret; } } iwl_trans_sync_nmi(fwrt->trans); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; if (!le16_to_cpu(trigger->occurrences)) return 0; if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); iwl_force_nmi(fwrt->trans); return 0; } trigger->occurrences = 
cpu_to_le16(occurrences); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; /* convert msec to usec */ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC); if (!desc) return -ENOMEM; desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) { int ret, len = 0; char buf[64]; if (iwl_trans_dbg_ini_valid(fwrt->trans)) return 0; if (fmt) { va_list ap; buf[sizeof(buf) - 1] = '\0'; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); /* check for truncation */ if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) buf[sizeof(buf) - 1] = '\0'; len = strlen(buf) + 1; } ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, trigger); if (ret) return ret; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) { u8 *ptr; int ret; int i; if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv), "Invalid configuration %d\n", conf_id)) return -EINVAL; /* EARLY START - firmware's configuration is hard coded */ if ((!fwrt->fw->dbg.conf_tlv[conf_id] || !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) && conf_id == FW_DBG_START_FROM_ALIVE) return 0; if (!fwrt->fw->dbg.conf_tlv[conf_id]) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd; for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) { struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; struct iwl_host_cmd hcmd = { .id = cmd->id, .len = { le16_to_cpu(cmd->len), }, .data = { cmd->data, }, }; ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); if (ret) return ret; ptr += sizeof(*cmd); ptr += le16_to_cpu(cmd->len); } fwrt->dump.conf = conf_id; return 0; } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data) { struct iwl_dbg_dump_complete_cmd hcmd_data; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD), .data[0] = &hcmd_data, .len[0] = sizeof(hcmd_data), }; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) { hcmd_data.tp = cpu_to_le32(timepoint); hcmd_data.tp_data = cpu_to_le32(timepoint_data); iwl_trans_send_cmd(fwrt->trans, &hcmd); } } /* this function assumes dump_start was called beforehand and dump_end will be * called afterwards */ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) { struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; u32 policy; u32 time_point; if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; if (!dump_data->trig) { IWL_ERR(fwrt, "dump trigger data is not set\n"); goto out; } if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n"); goto out; } /* there's no point in fw dump if the bus is dead */ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); goto out; } 
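	/*
	 * Pause debug recording (stop == true) while the dump is collected;
	 * it is restarted further down with the parameters saved in 'params'.
	 */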
iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, true); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); else iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n"); iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); policy = le32_to_cpu(dump_data->trig->apply_policy); time_point = le32_to_cpu(dump_data->trig->time_point); if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) { IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n"); iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0); } if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); out: if (iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_error_dump_data_free(dump_data); } else { iwl_fw_free_dump_desc(fwrt, dump_data->desc); dump_data->desc = NULL; } clear_bit(wk_idx, &fwrt->dump.active_wks); } int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, bool sync) { struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig; enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point); u32 occur, delay; unsigned long idx; if (!iwl_fw_ini_trigger_on(fwrt, trig)) { IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n", tp_id); return -EINVAL; } delay = le32_to_cpu(trig->dump_delay); occur = le32_to_cpu(trig->occurrences); if (!occur) return 0; trig->occurrences = cpu_to_le32(--occur); /* Check there is an available worker. * ffz return value is undefined if no zero exists, * so check against ~0UL first. */ if (fwrt->dump.active_wks == ~0UL) return -EBUSY; idx = ffz(fwrt->dump.active_wks); if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM || test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks)) return -EBUSY; fwrt->dump.wks[idx].dump_data = *dump_data; if (sync) delay = 0; IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired (delay=%dms).\n", tp_id, (u32)(delay / USEC_PER_MSEC)); if (sync) iwl_fw_dbg_collect_sync(fwrt, idx); else schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay)); return 0; } void iwl_fw_error_dump_wk(struct work_struct *work) { struct iwl_fwrt_wk_data *wks = container_of(work, typeof(*wks), wk.work); struct iwl_fw_runtime *fwrt = container_of(wks, typeof(*fwrt), dump.wks[wks->idx]); /* assumes the op mode mutex is locked in dump_start since * iwl_fw_dbg_collect_sync can't run in parallel */ if (fwrt->ops && fwrt->ops->dump_start) fwrt->ops->dump_start(fwrt->ops_ctx); iwl_fw_dbg_collect_sync(fwrt, wks->idx); if (fwrt->ops && fwrt->ops->dump_end) fwrt->ops->dump_end(fwrt->ops_ctx); } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) { const struct iwl_cfg *cfg = fwrt->trans->cfg; if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt)) return; if (!fwrt->dump.d3_debug_data) { fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length, GFP_KERNEL); if (!fwrt->dump.d3_debug_data) { IWL_ERR(fwrt, "failed to allocate memory for D3 debug data\n"); return; } } /* if the buffer holds previous debug data it is overwritten */ iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem) fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, cfg->d3_debug_data_base_addr, fwrt->dump.d3_debug_data, cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime 
*fwrt) { int i; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) iwl_fw_dbg_collect_sync(fwrt, i); iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync); static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend) { struct iwl_dbg_suspend_resume_cmd cmd = { .operation = suspend ? cpu_to_le32(DBGC_SUSPEND_CMD) : cpu_to_le32(DBGC_RESUME_CMD), }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME), .data[0] = &cmd, .len[0] = sizeof(cmd), }; return iwl_trans_send_cmd(trans, &hcmd); } static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); return; } if (params) { params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); } iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); /* wait for the DBGC to finish writing the internal buffer to DRAM to * avoid halting the HW while writing */ usleep_range(700, 1000); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); } static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, struct iwl_fw_dbg_params *params) { if (!params) return -EIO; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) { iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100); iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } return 0; } void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) { int ret __maybe_unused = 0; if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return; if (fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); else if (stop) iwl_fw_dbg_stop_recording(fwrt->trans, params); else ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); #ifdef CONFIG_IWLWIFI_DEBUGFS if (!ret) { if (stop) fwrt->trans->dbg.rec_on = false; else iwl_fw_set_dbg_rec_on(fwrt); } #endif } IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording); diff --git a/sys/contrib/dev/iwlwifi/fw/pnvm.c b/sys/contrib/dev/iwlwifi/fw/pnvm.c index b6d3ac6ed440..1ced9cb2836b 100644 --- a/sys/contrib/dev/iwlwifi/fw/pnvm.c +++ b/sys/contrib/dev/iwlwifi/fw/pnvm.c @@ -1,356 +1,359 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2020-2021 Intel Corporation */ #include "iwl-drv.h" #include "pnvm.h" #include "iwl-prph.h" #include "iwl-io.h" #include "fw/api/commands.h" #include "fw/api/nvm-reg.h" #include "fw/api/alive.h" #include "fw/uefi.h" struct iwl_pnvm_section { __le32 offset; const u8 data[]; } __packed; static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_trans *trans = (struct iwl_trans *)data; struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data; IWL_DEBUG_FW(trans, "PNVM complete notification received with status 0x%0x\n", le32_to_cpu(pnvm_ntf->status)); return true; } static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, size_t len) { const struct iwl_ucode_tlv *tlv; u32 sha1 = 0; u16 mac_type = 0, rf_id = 0; u8 *pnvm_data = NULL, *tmp; bool hw_match = false; u32 size = 0; int ret; 
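	/*
	 * Walk this section's TLVs: record the PNVM version and HW type,
	 * append each IWL_UCODE_TLV_SEC_RT payload to pnvm_data, and stop
	 * when the next IWL_UCODE_TLV_PNVM_SKU TLV starts a new section.
	 */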
IWL_DEBUG_FW(trans, "Handling PNVM section\n"); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); ret = -EINVAL; goto out; } data += sizeof(*tlv); switch (tlv_type) { case IWL_UCODE_TLV_PNVM_VERSION: if (tlv_len < sizeof(__le32)) { IWL_DEBUG_FW(trans, "Invalid size for IWL_UCODE_TLV_PNVM_VERSION (expected %zd, got %d)\n", sizeof(__le32), tlv_len); break; } sha1 = le32_to_cpup((const __le32 *)data); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n", sha1); break; case IWL_UCODE_TLV_HW_TYPE: if (tlv_len < 2 * sizeof(__le16)) { IWL_DEBUG_FW(trans, "Invalid size for IWL_UCODE_TLV_HW_TYPE (expected %zd, got %d)\n", 2 * sizeof(__le16), tlv_len); break; } if (hw_match) break; mac_type = le16_to_cpup((const __le16 *)data); rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16))); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", mac_type, rf_id); if (mac_type == CSR_HW_REV_TYPE(trans->hw_rev) && rf_id == CSR_HW_RFID_TYPE(trans->hw_rf_id)) hw_match = true; break; case IWL_UCODE_TLV_SEC_RT: { const struct iwl_pnvm_section *section = (const void *)data; u32 data_len = tlv_len - sizeof(*section); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_SEC_RT len %d\n", tlv_len); /* TODO: remove, this is a deprecated separator */ if (le32_to_cpup((const __le32 *)data) == 0xddddeeee) { IWL_DEBUG_FW(trans, "Ignoring separator.\n"); break; } IWL_DEBUG_FW(trans, "Adding data (size %d)\n", data_len); tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL); if (!tmp) { IWL_DEBUG_FW(trans, "Couldn't allocate (more) pnvm_data\n"); ret = -ENOMEM; goto out; } pnvm_data = tmp; memcpy(pnvm_data + size, section->data, data_len); size += data_len; break; } case IWL_UCODE_TLV_PNVM_SKU: IWL_DEBUG_FW(trans, "New PNVM section started, stop parsing.\n"); goto done; default: IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n", tlv_type, tlv_len); break; } len -= ALIGN(tlv_len, 4); data += ALIGN(tlv_len, 4); } done: if (!hw_match) { IWL_DEBUG_FW(trans, "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n", CSR_HW_REV_TYPE(trans->hw_rev), CSR_HW_RFID_TYPE(trans->hw_rf_id)); ret = -ENOENT; goto out; } if (!size) { IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); ret = -ENOENT; goto out; } IWL_INFO(trans, "loaded PNVM version %08x\n", sha1); ret = iwl_trans_set_pnvm(trans, pnvm_data, size); out: kfree(pnvm_data); return ret; } static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, size_t len) { const struct iwl_ucode_tlv *tlv; IWL_DEBUG_FW(trans, "Parsing PNVM file\n"); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); return -EINVAL; } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { const struct iwl_sku_id *sku_id = (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", tlv_len); IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n", le32_to_cpu(sku_id->data[0]), le32_to_cpu(sku_id->data[1]), le32_to_cpu(sku_id->data[2])); data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { int 
ret; ret = iwl_pnvm_handle_section(trans, data, len); if (!ret) return 0; } else { IWL_DEBUG_FW(trans, "SKU ID didn't match!\n"); } } else { data += sizeof(*tlv) + ALIGN(tlv_len, 4); len -= ALIGN(tlv_len, 4); } } return -ENOENT; } static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) { const struct firmware *pnvm; char pnvm_name[MAX_PNVM_NAME]; size_t new_len; int ret; iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name)); ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); if (ret) { IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n", pnvm_name, ret); return ret; } new_len = pnvm->size; *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL); release_firmware(pnvm); if (!*data) return -ENOMEM; *len = new_len; return 0; } int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait) { u8 *data; size_t len; struct pnvm_sku_package *package; struct iwl_notification_wait pnvm_wait; static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, PNVM_INIT_COMPLETE_NTFY) }; int ret; /* if the SKU_ID is empty, there's nothing to do */ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) return 0; /* * If we already loaded (or tried to load) it before, we just * need to set it again. */ if (trans->pnvm_loaded) { ret = iwl_trans_set_pnvm(trans, NULL, 0); if (ret) return ret; goto skip_parse; } /* First attempt to get the PNVM from BIOS */ package = iwl_uefi_get_pnvm(trans, &len); if (!IS_ERR_OR_NULL(package)) { if (len >= sizeof(*package)) { /* we need only the data */ len -= sizeof(*package); data = kmemdup(package->data, len, GFP_KERNEL); } else { data = NULL; } /* free package regardless of whether kmemdup succeeded */ kfree(package); if (data) goto parse; } /* If it's not available, try from the filesystem */ ret = iwl_pnvm_get_from_fs(trans, &data, &len); if (ret) { /* * Pretend we've loaded it - at least we've tried and * couldn't load it at all, so there's no point in * trying again over and over. */ trans->pnvm_loaded = true; goto skip_parse; } parse: iwl_pnvm_parse(trans, data, len); kfree(data); skip_parse: data = NULL; +#if defined(__FreeBSD__) + len = 0; +#endif /* now try to get the reduce power table, if not loaded yet */ if (!trans->reduce_power_loaded) { data = iwl_uefi_get_reduced_power(trans, &len); if (IS_ERR_OR_NULL(data)) { /* * Pretend we've loaded it - at least we've tried and * couldn't load it at all, so there's no point in * trying again over and over. 
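			 * (This mirrors the handling above when the PNVM file
			 * itself cannot be read from the filesystem.)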
*/ trans->reduce_power_loaded = true; goto skip_reduce_power; } } ret = iwl_trans_set_reduce_power(trans, data, len); if (ret) IWL_DEBUG_FW(trans, "Failed to set reduce power table %d\n", ret); kfree(data); skip_reduce_power: iwl_init_notification_wait(notif_wait, &pnvm_wait, ntf_cmds, ARRAY_SIZE(ntf_cmds), iwl_pnvm_complete_fn, trans); /* kick the doorbell */ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_PNVM); return iwl_wait_notification(notif_wait, &pnvm_wait, MVM_UCODE_PNVM_TIMEOUT); } IWL_EXPORT_SYMBOL(iwl_pnvm_load); diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c index 430044bc4755..66140af891d3 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c +++ b/sys/contrib/dev/iwlwifi/mvm/ftm-initiator.c @@ -1,1367 +1,1371 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2021 Intel Corporation */ #include #include #include #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" #include "constants.h" struct iwl_mvm_loc_entry { struct list_head list; u8 addr[ETH_ALEN]; u8 lci_len, civic_len; u8 buf[]; }; struct iwl_mvm_smooth_entry { struct list_head list; u8 addr[ETH_ALEN]; s64 rtt_avg; u64 host_time; }; struct iwl_mvm_ftm_pasn_entry { struct list_head list; u8 addr[ETH_ALEN]; u8 hltk[HLTK_11AZ_LEN]; u8 tk[TK_11AZ_LEN]; u8 cipher; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; }; int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len) { struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn), GFP_KERNEL); u32 expected_tk_len; lockdep_assert_held(&mvm->mutex); if (!pasn) return -ENOBUFS; pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher); switch (pasn->cipher) { case IWL_LOCATION_CIPHER_CCMP_128: case IWL_LOCATION_CIPHER_GCMP_128: expected_tk_len = WLAN_KEY_LEN_CCMP; break; case IWL_LOCATION_CIPHER_GCMP_256: expected_tk_len = WLAN_KEY_LEN_GCMP_256; break; default: goto out; } /* * If associated to this AP and already have security context, * the TK is already configured for this station, so it * shouldn't be set again here. 
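	 * In that case expected_tk_len is set to 0 below and the caller is
	 * expected to pass tk_len == 0 for such a station.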
*/ if (vif->bss_conf.assoc && !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); if (!IS_ERR_OR_NULL(sta) && sta->mfp) expected_tk_len = 0; rcu_read_unlock(); } if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) { IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", tk_len, hltk_len); goto out; } memcpy(pasn->addr, addr, sizeof(pasn->addr)); memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); if (tk && tk_len) memcpy(pasn->tk, tk, sizeof(pasn->tk)); list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list); return 0; out: kfree(pasn); return -EINVAL; } void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr) { struct iwl_mvm_ftm_pasn_entry *entry, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(entry->addr, addr, sizeof(entry->addr))) continue; list_del(&entry->list); kfree(entry); return; } } static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) { struct iwl_mvm_loc_entry *e, *t; mvm->ftm_initiator.req = NULL; mvm->ftm_initiator.req_wdev = NULL; memset(mvm->ftm_initiator.responses, 0, sizeof(mvm->ftm_initiator.responses)); list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { list_del(&e->list); kfree(e); } } void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) { struct cfg80211_pmsr_result result = { .status = NL80211_PMSR_STATUS_FAILURE, .final = 1, .host_time = ktime_get_boottime_ns(), .type = NL80211_PMSR_TYPE_FTM, }; int i; lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) return; for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, ETH_ALEN); result.ftm.burst_index = mvm->ftm_initiator.responses[i]; cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); } cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm) { INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp); IWL_DEBUG_INFO(mvm, "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n", IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH, IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA, IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ, IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT, IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT); } void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm) { struct iwl_mvm_smooth_entry *se, *st; list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp, list) { list_del(&se->list); kfree(se); } } static int iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) { switch (s) { case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS: return 0; case IWL_TOF_RANGE_REQUEST_STATUS_BUSY: return -EBUSY; default: WARN_ON_ONCE(1); return -EIO; } } static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v5 *cmd, struct cfg80211_pmsr_request *req) { int i; cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; /* use maximum for "no timeout" or bigger than what we can do */ if (!req->timeout || req->timeout > 255 * 100) cmd->req_timeout = 255; else cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100); /* * We treat it always as random, since if not we'll * have filled our local address there instead. 
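	 * Note that the mask handed to the firmware below is the bitwise
	 * inverse of the cfg80211 mac_addr_mask supplied in the request.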
*/ cmd->macaddr_random = 1; memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; if (vif->bss_conf.assoc) memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); else eth_broadcast_addr(cmd->range_req_bssid); } static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, +#if defined(__linux__) struct iwl_tof_range_req_cmd_v9 *cmd, +#elif defined(__FreeBSD__) + struct iwl_tof_range_req_cmd_v9 *cmd, /* XXX-BZ Probably better solved by a common struct in fw for top parts of the struct. */ +#endif struct cfg80211_pmsr_request *req) { int i; cmd->initiator_flags = cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); cmd->request_id = req->cookie; cmd->num_of_ap = req->n_peers; /* * Use a large value for "no timeout". Don't use the maximum value * because of fw limitations. */ if (req->timeout) cmd->req_timeout_ms = cpu_to_le32(req->timeout); else cmd->req_timeout_ms = cpu_to_le32(0xfffff); memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; if (vif->bss_conf.assoc) { memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); /* AP's TSF is only relevant if associated */ for (i = 0; i < req->n_peers; i++) { if (req->peers[i].report_ap_tsf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); cmd->tsf_mac_id = cpu_to_le32(mvmvif->id); return; } } } else { eth_broadcast_addr(cmd->range_req_bssid); } /* Don't report AP's TSF */ cmd->tsf_mac_id = cpu_to_le32(0xff); } static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_cmd_v8 *cmd, struct cfg80211_pmsr_request *req) { iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req); } static int iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, u8 *channel, u8 *bandwidth, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; *channel = ieee80211_frequency_to_channel(freq); switch (peer->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: *bandwidth = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: *bandwidth = IWL_TOF_BW_20_HT; break; case NL80211_CHAN_WIDTH_40: *bandwidth = IWL_TOF_BW_40; break; case NL80211_CHAN_WIDTH_80: *bandwidth = IWL_TOF_BW_80; break; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); return -EINVAL; } *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 
iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; return 0; } static int iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, u8 *channel, u8 *format_bw, u8 *ctrl_ch_position) { u32 freq = peer->chandef.chan->center_freq; u8 cmd_ver; *channel = ieee80211_frequency_to_channel(freq); switch (peer->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_20: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_40: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_80: *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_160: cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver >= 13) { *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; break; } fallthrough; default: IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", peer->chandef.width); return -EINVAL; } /* non EDCA based measurement must use HE preamble */ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE; *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; return 0; } static int iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v2 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->measure_type = 0; /* regular two-sided FTM */ target->retries_per_sample = peer->ftm.ftmr_retries; target->asap_mode = peer->ftm.asap; target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK; if (peer->ftm.request_lci) target->location_req |= IWL_TOF_LOC_LCI; if (peer->ftm.request_civicloc) target->location_req |= IWL_TOF_LOC_CIVIC; target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO; return 0; } #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) static void iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v6 *target) { memcpy(target->bssid, peer->addr, ETH_ALEN); target->burst_period = cpu_to_le16(peer->ftm.burst_period); target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->ftmr_max_retries = peer->ftm.ftmr_retries; target->initiator_ap_flags = cpu_to_le32(0); if (peer->ftm.asap) FTM_PUT_FLAG(ASAP); if (peer->ftm.request_lci) FTM_PUT_FLAG(LCI_REQUEST); if (peer->ftm.request_civicloc) FTM_PUT_FLAG(CIVIC_REQUEST); if (IWL_MVM_FTM_INITIATOR_DYNACK) FTM_PUT_FLAG(DYN_ACK); if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) FTM_PUT_FLAG(ALGO_LR); else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) FTM_PUT_FLAG(ALGO_FFT); if (peer->ftm.trigger_based) FTM_PUT_FLAG(TB); else if (peer->ftm.non_trigger_based) FTM_PUT_FLAG(NON_TB); if ((peer->ftm.trigger_based || 
peer->ftm.non_trigger_based) && peer->ftm.lmr_feedback) FTM_PUT_FLAG(LMR_FEEDBACK); } static int iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v3 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num, &target->bandwidth, &target->ctrl_ch_position); if (ret) return ret; /* * Versions 3 and 4 has some common fields, so * iwl_mvm_ftm_put_target_common() can be used for version 7 too. */ iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); return 0; } static int iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v4 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, &target->format_bw, &target->ctrl_ch_position); if (ret) return ret; iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target); return 0; } static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v6 *target) { int ret; ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, &target->format_bw, &target->ctrl_ch_position); if (ret) return ret; iwl_mvm_ftm_put_target_common(mvm, peer, target); if (vif->bss_conf.assoc && !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) FTM_PUT_FLAG(PMF); rcu_read_unlock(); target->sta_id = mvmvif->ap_sta_id; } else { target->sta_id = IWL_MVM_INVALID_STA; } /* * TODO: Beacon interval is currently unknown, so use the common value * of 100 TUs. */ target->beacon_interval = cpu_to_le16(100); return 0; } static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd) { u32 status; int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status); if (!err && status) { IWL_ERR(mvm, "FTM range request command failure, status: %u\n", status); err = iwl_ftm_range_request_status_to_err(status); } return err; } static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v5 cmd_v5; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v5, .len[0] = sizeof(cmd_v5), }; u8 i; int err; iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); for (i = 0; i < cmd_v5.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v7 cmd_v7; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v7, .len[0] = sizeof(cmd_v7), }; u8 i; int err; /* * Versions 7 and 8 has the same structure except from the responders * list, so iwl_mvm_ftm_cmd() can be used for version 7 too. 
*/ iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req); for (i = 0; i < cmd_v7.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v8 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v9 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static void iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct iwl_tof_range_req_ap_entry_v6 *target = data; if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) return; WARN_ON(!sta->mfp); if (WARN_ON(key->keylen > sizeof(target->tk))) return; memcpy(target->tk, key->key, key->keylen); target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); } static void iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_tof_range_req_ap_entry_v7 *target) { struct iwl_mvm_ftm_pasn_entry *entry; u32 flags = le32_to_cpu(target->initiator_ap_flags); if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | IWL_INITIATOR_AP_FLAGS_TB))) return; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) continue; target->cipher = entry->cipher; memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); if (vif->bss_conf.assoc && !memcmp(vif->bss_conf.bssid, target->bssid, sizeof(target->bssid))) ieee80211_iter_keys(mvm->hw, vif, iter, target); else memcpy(target->tk, entry->tk, sizeof(target->tk)); memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); target->initiator_ap_flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); return; } } static int iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v7 *target) { int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target); if (err) return err; iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); return err; } static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v11 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, 
TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static void iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm, struct iwl_tof_range_req_ap_entry_v8 *target) { /* Only 2 STS are supported on Tx */ u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : IWL_MVM_FTM_I2R_MAX_STS; target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS); target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | (i2r_max_sts << IWL_LOCATION_MAX_STS_POS); target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF; target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF; } static int iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, struct iwl_tof_range_req_ap_entry_v8 *target) { u32 flags; int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target); if (ret) return ret; iwl_mvm_ftm_set_ndp_params(mvm, target); /* * If secure LTF is turned off, replace the flag with PMF only */ flags = le32_to_cpu(target->initiator_ap_flags); if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) && !IWL_MVM_FTM_INITIATOR_SECURE_LTF) { flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; flags |= IWL_INITIATOR_AP_FLAGS_PMF; target->initiator_ap_flags = cpu_to_le32(flags); } return 0; } static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v12 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target); if (err) return err; } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_req_cmd_v13 cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), }; u8 i; int err; iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target); if (err) return err; if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) target->bss_color = peer->ftm.bss_color; if (peer->ftm.non_trigger_based) { target->min_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); target->burst_period = cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); } else { target->min_time_between_msr = cpu_to_le16(0); } target->band = iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); } return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { bool new_api = fw_has_api(&mvm->fw->ucode_capa, 
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); int err; lockdep_assert_held(&mvm->mutex); if (mvm->ftm_initiator.req) return -EBUSY; if (new_api) { u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { case 13: err = iwl_mvm_ftm_start_v13(mvm, vif, req); break; case 12: err = iwl_mvm_ftm_start_v12(mvm, vif, req); break; case 11: err = iwl_mvm_ftm_start_v11(mvm, vif, req); break; case 9: case 10: err = iwl_mvm_ftm_start_v9(mvm, vif, req); break; case 8: err = iwl_mvm_ftm_start_v8(mvm, vif, req); break; default: err = iwl_mvm_ftm_start_v7(mvm, vif, req); break; } } else { err = iwl_mvm_ftm_start_v5(mvm, vif, req); } if (!err) { mvm->ftm_initiator.req = req; mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); } return err; } void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) { struct iwl_tof_range_abort_cmd cmd = { .request_id = req->cookie, }; lockdep_assert_held(&mvm->mutex); if (req != mvm->ftm_initiator.req) return; iwl_mvm_ftm_reset(mvm); if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "failed to abort FTM process\n"); } static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, const u8 *addr) { int i; for (i = 0; i < req->n_peers; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; if (ether_addr_equal_unaligned(peer->addr, addr)) return i; } return -ENOENT; } static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts) { u32 gp2_ts = le32_to_cpu(fw_gp2_ts); u32 curr_gp2, diff; u64 now_from_boot_ns; iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &now_from_boot_ns, NULL); if (curr_gp2 >= gp2_ts) diff = curr_gp2 - gp2_ts; else diff = curr_gp2 + (U32_MAX - gp2_ts + 1); return now_from_boot_ns - (u64)diff * 1000; } static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { struct iwl_mvm_loc_entry *entry; list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) { if (!ether_addr_equal_unaligned(res->addr, entry->addr)) continue; if (entry->lci_len) { res->ftm.lci_len = entry->lci_len; res->ftm.lci = entry->buf; } if (entry->civic_len) { res->ftm.civicloc_len = entry->civic_len; res->ftm.civicloc = entry->buf + entry->lci_len; } /* we found the entry we needed */ break; } } static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, u8 num_of_aps) { lockdep_assert_held(&mvm->mutex); if (request_id != (u8)mvm->ftm_initiator.req->cookie) { IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", request_id, (u8)mvm->ftm_initiator.req->cookie); return -EINVAL; } if (num_of_aps > mvm->ftm_initiator.req->n_peers) { IWL_ERR(mvm, "FTM range response invalid\n"); return -EINVAL; } return 0; } static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { struct iwl_mvm_smooth_entry *resp; s64 rtt_avg, rtt = res->ftm.rtt_avg; u32 undershoot, overshoot; u8 alpha; bool found; if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) return; WARN_ON(rtt < 0); if (res->status != NL80211_PMSR_STATUS_SUCCESS) { IWL_DEBUG_INFO(mvm, ": %pM: ignore failed measurement. 
Status=%u\n", res->addr, res->status); return; } found = false; list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) { if (!memcmp(res->addr, resp->addr, ETH_ALEN)) { found = true; break; } } if (!found) { resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return; memcpy(resp->addr, res->addr, ETH_ALEN); list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp); resp->rtt_avg = rtt; IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n", resp->addr, resp->rtt_avg); goto update_time; } if (res->host_time - resp->host_time > IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) { resp->rtt_avg = rtt; IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n", resp->addr, resp->rtt_avg); goto update_time; } /* Smooth the results based on the tracked RTT average */ undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT; overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", resp->addr, resp->rtt_avg, rtt_avg, rtt); /* * update the responder's average RTT results regardless of * the under/over shoot logic below */ resp->rtt_avg = rtt_avg; /* smooth the results */ if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) { res->ftm.rtt_avg = rtt_avg; IWL_DEBUG_INFO(mvm, "undershoot: val=%lld\n", (rtt_avg - rtt)); } else if (rtt_avg < rtt && (rtt - rtt_avg) > overshoot) { res->ftm.rtt_avg = rtt_avg; IWL_DEBUG_INFO(mvm, "overshoot: val=%lld\n", (rtt - rtt_avg)); } update_time: resp->host_time = res->host_time; } static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, struct cfg80211_pmsr_result *res) { s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); IWL_DEBUG_INFO(mvm, "entry %d\n", index); IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index); IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread); IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); } static void iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) { struct iwl_mvm_ftm_pasn_entry *entry; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr))) continue; memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn)); memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn)); return; } } static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) { if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ)) return 5; /* Starting from version 8, the FW advertises the version */ if (mvm->cmd_ver.range_resp >= 8) return mvm->cmd_ver.range_resp; else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) return 7; /* The first version of the new range request API */ return 6; } static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) { switch (ver) { case 9: case 8: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); case 7: return pkt_len == 
sizeof(struct iwl_tof_range_rsp_ntfy_v7); case 6: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6); case 5: return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5); default: WARN_ONCE(1, "FTM: unsupported range response version %u", ver); return false; } } void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data; int i; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); u8 num_of_aps, last_in_batch; u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm); lockdep_assert_held(&mvm->mutex); if (!mvm->ftm_initiator.req) { return; } if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len))) return; if (new_api) { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id, fw_resp_v8->num_of_aps)) return; num_of_aps = fw_resp_v8->num_of_aps; last_in_batch = fw_resp_v8->last_report; } else { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, fw_resp_v5->num_of_aps)) return; num_of_aps = fw_resp_v5->num_of_aps; last_in_batch = fw_resp_v5->last_in_batch; } IWL_DEBUG_INFO(mvm, "Range response received\n"); IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n", mvm->ftm_initiator.req->cookie, num_of_aps); for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { struct cfg80211_pmsr_result result = {}; struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap; int peer_idx; if (new_api) { if (notif_ver >= 8) { fw_ap = &fw_resp_v8->ap[i]; iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); } else if (notif_ver == 7) { fw_ap = (void *)&fw_resp_v7->ap[i]; } else { fw_ap = (void *)&fw_resp_v6->ap[i]; } result.final = fw_ap->last_burst; result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); result.ap_tsf_valid = 1; } else { /* the first part is the same for old and new APIs */ fw_ap = (void *)&fw_resp_v5->ap[i]; /* * FIXME: the firmware needs to report this, we don't * even know the number of bursts the responder picked * (if we asked it to) */ result.final = 0; } peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req, fw_ap->bssid); if (peer_idx < 0) { IWL_WARN(mvm, "Unknown address (%pM, target #%d) in FTM response\n", fw_ap->bssid, i); continue; } switch (fw_ap->measure_status) { case IWL_TOF_ENTRY_SUCCESS: result.status = NL80211_PMSR_STATUS_SUCCESS; break; case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: result.status = NL80211_PMSR_STATUS_TIMEOUT; break; case IWL_TOF_ENTRY_NO_RESPONSE: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; break; case IWL_TOF_ENTRY_REQUEST_REJECTED: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_PEER_BUSY; result.ftm.busy_retry_time = fw_ap->refusal_period; break; default: result.status = NL80211_PMSR_STATUS_FAILURE; result.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; break; } memcpy(result.addr, fw_ap->bssid, ETH_ALEN); result.host_time = iwl_mvm_ftm_get_host_time(mvm, fw_ap->timestamp); result.type = NL80211_PMSR_TYPE_FTM; result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx]; mvm->ftm_initiator.responses[peer_idx]++; result.ftm.rssi_avg = fw_ap->rssi; result.ftm.rssi_avg_valid = 1; result.ftm.rssi_spread = 
fw_ap->rssi_spread; result.ftm.rssi_spread_valid = 1; result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); result.ftm.rtt_avg_valid = 1; result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); result.ftm.rtt_variance_valid = 1; result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); result.ftm.rtt_spread_valid = 1; iwl_mvm_ftm_get_lci_civic(mvm, &result); iwl_mvm_ftm_rtt_smoothing(mvm, &result); cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, &result, GFP_KERNEL); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n", fw_ap->rttConfidence); iwl_mvm_debug_range_resp(mvm, i, &result); } if (last_in_batch) { cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, mvm->ftm_initiator.req, GFP_KERNEL); iwl_mvm_ftm_reset(mvm); } } void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); const struct ieee80211_mgmt *mgmt = (void *)pkt->data; size_t len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_loc_entry *entry; const u8 *ies, *lci, *civic, *msr_ie; size_t ies_len, lci_len = 0, civic_len = 0; size_t baselen = IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.ftm); static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI; static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC; if (len <= baselen) return; lockdep_assert_held(&mvm->mutex); ies = mgmt->u.action.u.ftm.variable; ies_len = len - baselen; msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_lci, 1, 4); if (msr_ie) { lci = msr_ie + 2; lci_len = msr_ie[1]; } msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, &rprt_type_civic, 1, 4); if (msr_ie) { civic = msr_ie + 2; civic_len = msr_ie[1]; } entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL); if (!entry) return; memcpy(entry->addr, mgmt->bssid, ETH_ALEN); entry->lci_len = lci_len; if (lci_len) memcpy(entry->buf, lci, lci_len); entry->civic_len = civic_len; if (civic_len) memcpy(entry->buf + lci_len, civic, civic_len); list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list); } diff --git a/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c index 9729680476fd..4cb15046ec5c 100644 --- a/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c +++ b/sys/contrib/dev/iwlwifi/mvm/ftm-responder.c @@ -1,479 +1,481 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2021 Intel Corporation */ #include #include #include "mvm.h" #include "constants.h" struct iwl_mvm_pasn_sta { struct list_head list; struct iwl_mvm_int_sta int_sta; u8 addr[ETH_ALEN]; }; struct iwl_mvm_pasn_hltk_data { u8 *addr; u8 cipher; u8 *hltk; }; static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef, u8 *bw, u8 *ctrl_ch_position) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: *bw = IWL_TOF_BW_20_LEGACY; break; case NL80211_CHAN_WIDTH_20: *bw = IWL_TOF_BW_20_HT; break; case NL80211_CHAN_WIDTH_40: *bw = IWL_TOF_BW_40; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_80: *bw = IWL_TOF_BW_80; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; default: return -ENOTSUPP; } return 0; } static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, u8 *format_bw, u8 *ctrl_ch_position, u8 cmd_ver) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; 
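/* frame format goes in the low bits; bandwidth is OR'd in above it via LOCATION_BW_POS */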
*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_20: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_40: *format_bw = IWL_LOCATION_FRAME_FORMAT_HT; *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_80: *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; case NL80211_CHAN_WIDTH_160: if (cmd_ver >= 9) { *format_bw = IWL_LOCATION_FRAME_FORMAT_HE; *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); break; } fallthrough; default: return -ENOTSUPP; } return 0; } static void iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, struct iwl_tof_responder_config_cmd_v9 *cmd) { /* Up to 2 R2I STS are allowed on the responder */ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? IWL_MVM_FTM_R2I_MAX_STS : 1; cmd->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | (r2i_max_sts << IWL_RESPONDER_STS_POS) | (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); cmd->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | (IWL_MVM_FTM_I2R_MAX_STS << IWL_RESPONDER_STS_POS) | (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_RESPONDER_TOTAL_LTF_POS); cmd->cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS); } static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef) { u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* * The command structure is the same for versions 6, 7 and 8 (only the * field interpretation is different), so the same struct can be use * for all cases. 
*/ struct iwl_tof_responder_config_cmd_v9 cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | IWL_TOF_RESPONDER_CMD_VALID_BSSID | IWL_TOF_RESPONDER_CMD_VALID_STA_ID), .sta_id = mvmvif->bcast_sta.sta_id, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6); int err; int cmd_size; lockdep_assert_held(&mvm->mutex); /* Use a default of bss_color=1 for now */ if (cmd_ver == 9) { cmd.cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); cmd.bss_color = 1; cmd.min_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); cmd.max_time_between_msr = cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9); } else { /* All versions up to version 8 have the same size */ cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8); } if (cmd_ver >= 8) iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, &cmd.ctrl_ch_position, cmd_ver); else err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw, &cmd.ctrl_ch_position); if (err) { IWL_ERR(mvm, "Failed to set responder bandwidth\n"); return err; } memcpy(cmd.bssid, vif->addr, ETH_ALEN); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); } static int iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params) { struct iwl_tof_responder_dyn_config_cmd_v2 cmd = { .lci_len = cpu_to_le32(params->lci_len + 2), .civic_len = cpu_to_le32(params->civicloc_len + 2), }; u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0}; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = &data, /* .len[1] set later */ /* may not be able to DMA from stack */ .dataflags[1] = IWL_HCMD_DFL_DUP, }; u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4); u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4); u8 *pos = data; lockdep_assert_held(&mvm->mutex); if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) { IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n", params->lci_len, params->civicloc_len); return -ENOBUFS; } pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->lci_len; memcpy(pos + 2, params->lci, params->lci_len); pos += aligned_lci_len; pos[0] = WLAN_EID_MEASURE_REPORT; pos[1] = params->civicloc_len; memcpy(pos + 2, params->civicloc, params->civicloc_len); hcmd.len[1] = aligned_lci_len + aligned_civicloc_len; return iwl_mvm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params, struct iwl_mvm_pasn_hltk_data *hltk_data) { struct iwl_tof_responder_dyn_config_cmd cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), /* may not be able to DMA from stack */ .dataflags[0] = IWL_HCMD_DFL_DUP, }; lockdep_assert_held(&mvm->mutex); cmd.valid_flags = 0; if (params) { if (params->lci_len + 2 > sizeof(cmd.lci_buf) || params->civicloc_len + 2 > sizeof(cmd.civic_buf)) { IWL_ERR(mvm, "LCI/civic data too big (lci=%zd, civic=%zd)\n", params->lci_len, params->civicloc_len); return -ENOBUFS; } cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT; cmd.lci_buf[1] = params->lci_len; memcpy(cmd.lci_buf + 2, params->lci, params->lci_len); cmd.lci_len = params->lci_len + 2; 
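/* civic location is packed the same way: element ID, length, then the raw body */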
cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT; cmd.civic_buf[1] = params->civicloc_len; memcpy(cmd.civic_buf + 2, params->civicloc, params->civicloc_len); cmd.civic_len = params->civicloc_len + 2; cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI | IWL_RESPONDER_DYN_CFG_VALID_CIVIC; } if (hltk_data) { if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) { IWL_ERR(mvm, "invalid cipher: %u\n", hltk_data->cipher); return -EINVAL; } cmd.cipher = hltk_data->cipher; memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr)); memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf)); cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA; } return iwl_mvm_send_cmd(mvm, &hcmd); } static int iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_ftm_responder_params *params) { int ret; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), 2); switch (cmd_ver) { case 2: ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif, params); break; case 3: ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, params, NULL); break; default: IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n", cmd_ver); ret = -ENOTSUPP; } return ret; } static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_pasn_sta *sta) { list_del(&sta->list); iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id); iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta); kfree(sta); } +#if defined(__linux__) int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len) { int ret; struct iwl_mvm_pasn_sta *sta = NULL; struct iwl_mvm_pasn_hltk_data hltk_data = { .addr = addr, .hltk = hltk, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), 2); lockdep_assert_held(&mvm->mutex); if (cmd_ver < 3) { IWL_ERR(mvm, "Adding PASN station not supported by FW\n"); return -ENOTSUPP; } hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { IWL_ERR(mvm, "invalid cipher: %u\n", cipher); return -EINVAL; } if (tk && tk_len) { sta = kzalloc(sizeof(*sta), GFP_KERNEL); if (!sta) return -ENOBUFS; ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr, cipher, tk, tk_len); if (ret) { kfree(sta); return ret; } memcpy(sta->addr, addr, ETH_ALEN); list_add_tail(&sta->list, &mvm->resp_pasn_list); } ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data); if (ret && sta) iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); return ret; } int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr) { struct iwl_mvm_pasn_sta *sta, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) { if (!memcmp(sta->addr, addr, ETH_ALEN)) { iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); return 0; } } IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr); return -EINVAL; } +#endif int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_ftm_responder_params *params; struct ieee80211_chanctx_conf ctx, *pctx; u16 *phy_ctxt_id; struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; params = vif->bss_conf.ftmr_params; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder)) return -EINVAL; if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active) { IWL_ERR(mvm, "Cannot start 
responder, not in AP mode\n"); return -EIO; } rcu_read_lock(); pctx = rcu_dereference(vif->chanctx_conf); /* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care * about changes in the ctx after releasing the lock because the driver * is still protected by the mutex. */ ctx = *pctx; phy_ctxt_id = (u16 *)pctx->drv_priv; rcu_read_unlock(); phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, ctx.rx_chains_static, ctx.rx_chains_dynamic); if (ret) return ret; ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def); if (ret) return ret; if (params) ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params); return ret; } void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_pasn_sta *sta, *prev; lockdep_assert_held(&mvm->mutex); list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); } void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (!vif->bss_conf.ftm_responder) return; iwl_mvm_ftm_responder_clear(mvm, vif); iwl_mvm_ftm_start_responder(mvm, vif); } void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_ftm_responder_stats *resp = (void *)pkt->data; struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats; u32 flags = le32_to_cpu(resp->flags); if (resp->success_ftm == resp->ftm_per_burst) stats->success_num++; else if (resp->success_ftm >= 2) stats->partial_num++; else stats->failed_num++; if ((flags & FTM_RESP_STAT_ASAP_REQ) && (flags & FTM_RESP_STAT_ASAP_RESP)) stats->asap_num++; if (flags & FTM_RESP_STAT_NON_ASAP_RESP) stats->non_asap_num++; stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC; if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN) stats->unknown_triggers_num++; if (flags & FTM_RESP_STAT_DUP) stats->reschedule_requests_num++; if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN) stats->out_of_window_triggers_num++; } diff --git a/sys/contrib/dev/iwlwifi/mvm/mvm.h b/sys/contrib/dev/iwlwifi/mvm/mvm.h index 73fd5dca45c4..7e905deab8ab 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mvm.h +++ b/sys/contrib/dev/iwlwifi/mvm/mvm.h @@ -1,2233 +1,2235 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __IWL_MVM_H__ #define __IWL_MVM_H__ #include #include #ifdef CONFIG_IWLWIFI_LEDS #include #endif #include #ifdef CONFIG_THERMAL #include #endif #include #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" #include "iwl-eeprom-parse.h" #include "fw/file.h" #include "iwl-config.h" #include "sta.h" #include "fw-api.h" #include "constants.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" #include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include #if defined(__FreeBSD__) #include #endif #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 #define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) /* For GO, this value represents the number of TUs before CSA "beacon * 0" TBTT when the CSA time-event needs to be scheduled to start. It * must be big enough to ensure that we switch in time. 
*/ #define IWL_MVM_CHANNEL_SWITCH_TIME_GO 40 /* For client, this value represents the number of TUs before CSA * "beacon 1" TBTT, instead. This is because we don't know when the * GO/AP will be in the new channel, so we switch early enough. */ #define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT 10 /* * This value (in TUs) is used to fine tune the CSA NoA end time which should * be just before "beacon 0" TBTT. */ #define IWL_MVM_CHANNEL_SWITCH_MARGIN 4 /* * Number of beacons to transmit on a new channel until we unblock tx to * the stations, even if we didn't identify them on a new channel */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 /* offchannel queue towards mac80211 */ #define IWL_MVM_OFFCHANNEL_QUEUE 0 extern const struct ieee80211_ops iwl_mvm_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. * We will register to mac80211 to have testmode working. The NIC must not * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. * @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; struct iwl_mvm_phy_ctxt { u16 id; u16 color; u32 ref; enum nl80211_chan_width width; struct ieee80211_channel *channel; /* track for RLC config command */ u32 center_freq1; }; struct iwl_mvm_time_event_data { struct ieee80211_vif *vif; struct list_head list; unsigned long end_jiffies; u32 duration; bool running; u32 uid; /* * The access to the 'id' field must be done when the * mvm->time_event_lock is held, as it value is used to indicate * if the te is in the time event list or not (when id == TE_MAX) */ u32 id; }; /* Power management */ /** * enum iwl_power_scheme * @IWL_POWER_LEVEL_CAM - Continuously Active Mode * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default) * @IWL_POWER_LEVEL_LP - Low Power */ enum iwl_power_scheme { IWL_POWER_SCHEME_CAM = 1, IWL_POWER_SCHEME_BPS, IWL_POWER_SCHEME_LP }; #define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL #ifdef CONFIG_IWLWIFI_DEBUGFS enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0), MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1), MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { u16 keep_alive_seconds; u32 rx_data_timeout; u32 tx_data_timeout; bool skip_over_dtim; u8 skip_dtim_periods; bool lprx_ena; u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; bool use_ps_poll; int mask; }; /* beacon filtering */ enum iwl_dbgfs_bf_mask { MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0), MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1), MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2), MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3), MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4), MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5), MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6), MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7), MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8), MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9), MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10), }; struct iwl_dbgfs_bf { u32 bf_energy_delta; u32 bf_roaming_energy_delta; u32 bf_roaming_state; u32 bf_temp_threshold; u32 bf_temp_fast_filter; u32 bf_temp_slow_filter; u32 
bf_enable_beacon_filter; u32 bf_debug_flag; u32 bf_escape_timer; u32 ba_escape_timer; u32 ba_enable_beacon_abort; int mask; }; #endif enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, IWL_MVM_SMPS_REQ_PROT, IWL_MVM_SMPS_REQ_FW, NUM_IWL_MVM_SMPS_REQ, }; enum iwl_bt_force_ant_mode { BT_FORCE_ANT_DIS = 0, BT_FORCE_ANT_AUTO, BT_FORCE_ANT_BT, BT_FORCE_ANT_WIFI, BT_FORCE_ANT_MAX, }; /** * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs * @LOW_LATENCY_FORCE_UNSET: unset force mode * @LOW_LATENCY_FORCE_ON: for low latency on * @LOW_LATENCY_FORCE_OFF: for low latency off * @NUM_LOW_LATENCY_FORCE: max num of modes */ enum iwl_mvm_low_latency_force { LOW_LATENCY_FORCE_UNSET, LOW_LATENCY_FORCE_ON, LOW_LATENCY_FORCE_OFF, NUM_LOW_LATENCY_FORCE }; /** * struct iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap) * @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicate that force mode is enabled * the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE * @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs * set this with LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag * in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** * struct iwl_mvm_vif_bf_data - beacon filtering related data * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicated if beacon abort is enabled * @ave_beacon_signal: average beacon signal * @last_cqm_event: rssi of the last cqm event * @bt_coex_min_thold: minimum threshold for BT coex * @bt_coex_max_thold: maximum threshold for BT coex * @last_bt_coex_event: rssi of the last BT coex event */ struct iwl_mvm_vif_bf_data { bool bf_enabled; bool ba_enabled; int ave_beacon_signal; int last_cqm_event; int bt_coex_min_thold; int bt_coex_max_thold; int last_bt_coex_event; }; /** * struct iwl_probe_resp_data - data for NoA/CSA updates * @rcu_head: used for freeing the data on update * @notif: notification data * @noa_len: length of NoA attribute, calculated from the notification */ struct iwl_probe_resp_data { struct rcu_head rcu_head; struct iwl_probe_resp_data_notif notif; int noa_len; }; /** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA * @bssid: BSSID for this (client) interface * @associated: indicates that we're currently associated, used only for * managing the firmware state in iwl_mvm_bss_info_changed_station() * @ap_assoc_sta_count: count of stations associated to us - valid only * if VIF type is AP * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: bit flags for low latency * see enum &iwl_mvm_low_latency_cause for causes. 
* @low_latency_actual: boolean, indicates low latency is set, * as a result from low_latency bit flags and takes force into account. * @authorized: indicates the AP station was set to authorized * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. * @beacon_skb: the skb used to hold the AP/GO beacon template * @smps_requests: the SMPS requests of different parts of the driver, * combined on update to yield the overall request to mac80211. * @beacon_stats: beacon statistics, containing the # of received beacons, * # of received beacons accumulated over FW restart, and the current * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later * @features: hw features active for this vif * @probe_resp_data: data from FW notification to store NOA and CSA related * data to be inserted into probe response. */ struct iwl_mvm_vif { struct iwl_mvm *mvm; u16 id; u16 color; u8 ap_sta_id; u8 bssid[ETH_ALEN]; bool associated; u8 ap_assoc_sta_count; u16 cab_queue; bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; u8 low_latency: 6; u8 low_latency_actual: 1; u8 authorized:1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; struct { u32 num_beacons, accu_num_beacons; u8 avg_signal; } beacon_stats; u32 ap_beacon_time; enum iwl_tsf_id tsf_id; /* * QoS data from mac80211, need to store this here * as mac80211 has a separate callback but we need * to have the data for the MAC context */ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; struct iwl_mvm_time_event_data time_event_data; struct iwl_mvm_time_event_data hs_time_event_data; struct iwl_mvm_int_sta bcast_sta; struct iwl_mvm_int_sta mcast_sta; /* * Assigned while mac80211 has the interface in a channel context, * or, for P2P Device, while it exists. 
*/ struct iwl_mvm_phy_ctxt *phy_ctxt; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ struct { u8 kck[NL80211_KCK_EXT_LEN]; u8 kek[NL80211_KEK_EXT_LEN]; size_t kek_len; size_t kck_len; u32 akm; __le64 replay_ctr; bool valid; } rekey_data; int tx_key_idx; bool seqno_valid; u16 seqno; #endif #if IS_ENABLED(CONFIG_IPV6) /* IPv6 addresses for WoWLAN */ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; int num_target_ipv6_addrs; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS struct dentry *dbgfs_dir; struct dentry *dbgfs_slink; struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; struct delayed_work uapsd_nonagg_detected_wk; /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; struct delayed_work csa_work; /* Indicates that we are waiting for a beacon on a new channel */ bool csa_bcn_pending; /* TCP Checksum Offload */ netdev_features_t features; struct iwl_probe_resp_data __rcu *probe_resp_data; /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; /* 26-tone RU OFDMA transmissions should be blocked */ bool he_ru_2mhz_block; struct { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; }; static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { return (void *)vif->drv_priv; } extern const u8 tid_to_mac80211_ac[]; #define IWL_MVM_SCAN_STOPPING_SHIFT 8 enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), IWL_MVM_SCAN_NETDETECT = BIT(2), IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8), IWL_MVM_SCAN_STOPPING_SCHED = BIT(9), IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10), IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_STOPPING_REGULAR, IWL_MVM_SCAN_SCHED_MASK = IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_STOPPING_SCHED, IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, IWL_MVM_SCAN_MASK = 0xff, }; enum iwl_mvm_scan_type { IWL_SCAN_TYPE_NOT_SET, IWL_SCAN_TYPE_UNASSOC, IWL_SCAN_TYPE_WILD, IWL_SCAN_TYPE_MILD, IWL_SCAN_TYPE_FRAGMENTED, IWL_SCAN_TYPE_FAST_BALANCE, }; enum iwl_mvm_sched_scan_pass_all_states { SCHED_SCAN_PASS_ALL_DISABLED, SCHED_SCAN_PASS_ALL_ENABLED, SCHED_SCAN_PASS_ALL_FOUND, }; /** * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? * @tx_backoff: The current thremal throttling tx backoff in uSec. * @min_backoff: The minimal tx backoff due to power restrictions * @params: Parameters to configure the thermal throttling algorithm. * @throttle: Is thermal throttling is active? 
*/ struct iwl_mvm_tt_mgmt { struct delayed_work ct_kill_exit; bool dynamic_smps; u32 tx_backoff; u32 min_backoff; struct iwl_tt_params params; bool throttle; }; #ifdef CONFIG_THERMAL /** *struct iwl_mvm_thermal_device - thermal zone related data * @temp_trips: temperature thresholds for report * @fw_trips_index: keep indexes to original array - temp_trips * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { s16 temp_trips[IWL_MAX_DTS_TRIPS]; u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; /* * struct iwl_mvm_cooling_device * @cur_state: current state * @cdev: struct thermal cooling device */ struct iwl_mvm_cooling_device { u32 cur_state; struct thermal_cooling_device *cdev; }; #endif #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { u32 legacy_frames; u32 ht_frames; u32 vht_frames; u32 bw_20_frames; u32 bw_40_frames; u32 bw_80_frames; u32 bw_160_frames; u32 sgi_frames; u32 ngi_frames; u32 siso_frames; u32 mimo2_frames; u32 agg_frames; u32 ampdu_count; u32 success_frames; u32 fail_frames; u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES]; int last_frame_idx; }; #define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff #define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 #define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_IDLE = 0, IWL_MVM_TDLS_SW_REQ_SENT, IWL_MVM_TDLS_SW_RESP_RCVD, IWL_MVM_TDLS_SW_REQ_RCVD, IWL_MVM_TDLS_SW_ACTIVE, }; enum iwl_mvm_traffic_load { IWL_MVM_TRAFFIC_LOW, IWL_MVM_TRAFFIC_MEDIUM, IWL_MVM_TRAFFIC_HIGH, }; DECLARE_EWMA(rate, 16, 16) struct iwl_mvm_tcm_mac { struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; } tx; struct { u32 pkts[IEEE80211_NUM_ACS]; u32 airtime; u32 last_ampdu_ref; } rx; struct { /* track AP's transfer in client mode */ u64 rx_bytes; struct ewma_rate rate; bool detected; } uapsd_nonagg_detect; bool opened_rx_ba_sessions; }; struct iwl_mvm_tcm { struct delayed_work work; spinlock_t lock; /* used when time elapsed */ unsigned long ts; /* timestamp when period ends */ unsigned long ll_ts; unsigned long uapsd_nonagg_ts; bool paused; struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; struct { u32 elapsed; /* milliseconds for this TCM period */ u32 airtime[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; enum iwl_mvm_traffic_load global_load; bool low_latency[NUM_MAC_INDEX_DRIVER]; bool change[NUM_MAC_INDEX_DRIVER]; } result; }; /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @reorder_timer: timer for frames are in the reorder buffer. 
For AMSDU * it is the time of last received sub-frame * @removed: prevent timer re-arming * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state * @mvm: mvm pointer, needed for frame timer context * @consec_oldsn_drops: consecutive drops due to old SN * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track * when to apply old SN consecutive drop workaround * @consec_oldsn_prev_drop: track whether or not an MPDU * that was single/part of the previous A-MPDU was * dropped due to old SN */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; struct timer_list reorder_timer; bool removed; bool valid; spinlock_t lock; struct iwl_mvm *mvm; unsigned int consec_oldsn_drops; u32 consec_oldsn_ampdu_gp2; unsigned int consec_oldsn_prev_drop:1; } ____cacheline_aligned_in_smp; /** * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno * @frames: list of skbs stored * @reorder_time: time the packet was stored in the reorder buffer */ struct _iwl_mvm_reorder_buf_entry { struct sk_buff_head frames; unsigned long reorder_time; }; /* make this indirection to get the aligned thing */ struct iwl_mvm_reorder_buf_entry { struct _iwl_mvm_reorder_buf_entry e; } #if defined(__FreeBSD__) __aligned(roundup2(sizeof(struct _iwl_mvm_reorder_buf_entry), 32)) #elif !defined(__CHECKER__) /* sparse doesn't like this construct: "bad integer constant expression" */ /* clang on FreeBSD: error: 'aligned' attribute requires integer constant */ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) #endif ; /** * struct iwl_mvm_baid_data - BA session data * @sta_id: station id * @tid: tid of the session * @baid baid of the session * @timeout: the timeout set in the addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update * @session_timer: timer to check if BA session expired, runs at 2 * timeout * @mvm: mvm pointer, needed for timer context * @reorder_buf: reorder buffer, allocated per queue * @reorder_buf_data: data */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; u8 sta_id; u8 tid; u8 baid; u16 timeout; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; struct iwl_mvm_reorder_buf_entry entries[]; }; static inline struct iwl_mvm_baid_data * iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf) { return (void *)((u8 *)buf - offsetof(struct iwl_mvm_baid_data, reorder_buf) - sizeof(*buf) * buf->queue); } /* * enum iwl_mvm_queue_status - queue status * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved * Basically, this means that this queue can be used for any purpose * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use * This is the state of a queue that has been dedicated for some RATID * (agg'd or not), but that hasn't yet gone through the actual enablement * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet. * Note that in this state there is no requirement to already know what TID * should be used with this queue, it is just marked as a queue that will * be used, and shouldn't be allocated to anyone else. 
* @IWL_MVM_QUEUE_READY: queue is ready to be used * This is the state of a queue that has been fully configured (including * SCD pointers, etc), has a specific RA/TID assigned to it, and can be * used to send traffic. * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared * This is a state in which a single queue serves more than one TID, all of * which are not aggregated. Note that the queue is only associated to one * RA. */ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, IWL_MVM_QUEUE_SHARED, }; #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) #define IWL_MVM_INVALID_QUEUE 0xFFFF #define IWL_MVM_NUM_CIPHERS 10 struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; bool stopped; }; static inline struct iwl_mvm_txq * iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) { return (void *)txq->drv_priv; } static inline struct iwl_mvm_txq * iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) { if (tid == IWL_MAX_TID_COUNT) tid = IEEE80211_NUM_TIDS; return (void *)sta->txq[tid]->drv_priv; } /** * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid * * @sta_id: sta id * @txq_tid: txq tid */ struct iwl_mvm_tvqm_txq_info { u8 sta_id; u8 txq_tid; }; struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u8 txq_tid; /* The TID "owner" of this queue*/ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ /* Timestamp for inactivation per TID of this queue */ unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; enum iwl_mvm_queue_status status; }; struct iwl_mvm { /* for logger access */ struct device *dev; struct iwl_trans *trans; const struct iwl_fw *fw; const struct iwl_cfg *cfg; struct iwl_phy_db *phy_db; struct ieee80211_hw *hw; /* for protecting access to iwl_mvm */ struct mutex mutex; struct list_head async_handlers_list; spinlock_t async_handlers_lock; struct work_struct async_handlers_wk; struct work_struct roc_done_wk; unsigned long init_status; unsigned long status; u32 queue_sync_cookie; unsigned long queue_sync_state; /* * for beacon filtering - * currently only one interface can be supported */ struct iwl_mvm_vif *bf_allowed_vif; bool hw_registered; bool rfkill_safe_init_done; u8 cca_40mhz_workaround; u32 ampdu_ref; bool ampdu_toggle; struct iwl_notif_wait_data notif_wait; union { struct mvm_statistics_rx_v3 rx_stats_v3; struct mvm_statistics_rx rx_stats; }; struct { u64 rx_time; u64 tx_time; u64 on_time_rf; u64 on_time_scan; } radio_stats, accu_radio_stats; struct list_head add_stream_txqs; union { struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ const char *nvm_file_name; struct iwl_nvm_data *nvm_data; struct iwl_mei_nvm *mei_nvm_data; struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; bool mei_rfkill_blocked; bool mei_registered; struct work_struct sap_connected_wk; /* * NVM built based on the SAP data but that we can't free even after * we get ownership because it contains the cfg80211's channel. 
*/ struct iwl_nvm_data *temp_nvm_data; /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; u8 rx_ba_sessions; /* configured by mac80211 */ u32 rts_threshold; /* Scan status, cmd (pre-allocated) and auxiliary station */ unsigned int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type scan_type; enum iwl_mvm_scan_type hb_scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; /* the vif that requested the current scan */ struct iwl_mvm_vif *scan_vif; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; /* Internal station */ struct iwl_mvm_int_sta aux_sta; struct iwl_mvm_int_sta snif_sta; bool last_ebs_successful; u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx; /* last smart fifo state that was successfully sent to firmware */ enum iwl_sf_state sf_state; /* * Leave this pointer outside the ifdef below so that it can be * assigned without ifdef in the source code. */ struct dentry *debugfs_dir; #ifdef CONFIG_IWLWIFI_DEBUGFS u32 dbgfs_sram_offset, dbgfs_sram_len; u32 dbgfs_prph_reg_addr; bool disable_power_off; bool disable_power_off_d3; bool beacon_inject_active; bool scan_iter_notif_enabled; struct debugfs_blob_wrapper nvm_hw_blob; struct debugfs_blob_wrapper nvm_sw_blob; struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; u16 dbgfs_rx_phyinfo; #endif struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX]; struct list_head time_event_list; spinlock_t time_event_lock; /* * A bitmap indicating the index of the key in use. The firmware * can hold 16 keys at most. Reflect this fact. */ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; u8 *error_recovery_buf; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; #endif struct ieee80211_vif *p2p_device_vif; #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen; /* sched scan settings for net detect */ struct ieee80211_scan_ies nd_ies; struct cfg80211_match_set *nd_match_sets; int n_nd_match_sets; struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; bool d3_test_active; u32 d3_test_pme_ptr; struct ieee80211_vif *keep_vif; u32 last_netdetect_scans; /* no. 
of scans in the last net-detect wake */ #endif #endif wait_queue_head_t rx_sync_waitq; /* BT-Coex */ struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; /* Aux ROC */ struct list_head aux_roc_te_list; /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; #ifdef CONFIG_THERMAL struct iwl_mvm_thermal_device tz_device; struct iwl_mvm_cooling_device cooling_dev; #endif s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. This option makes the * driver think this is the actual NIC temperature, and ignore the * real temperature that is received from the fw */ bool temperature_test; /* Debug test temperature is enabled */ bool fw_static_smps_request; unsigned long bt_coex_last_tcm_ts; struct iwl_mvm_tcm tcm; u8 uapsd_noagg_bssid_write_idx; struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] __aligned(2); struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE u32 noa_duration; struct ieee80211_vif *noa_vif; #endif /* Tx queues */ u16 aux_queue; u16 snif_queue; u16 probe_queue; u16 p2p_dev_queue; /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; /* This vif used by CSME to send / receive traffic */ struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; /* indicates that we transmitted the last beacon */ bool ibss_manager; bool lar_regdom_set; enum iwl_mcc_source mcc_src; /* TDLS channel switch data */ struct { struct delayed_work dwork; enum iwl_mvm_tdls_cs_state state; /* * Current cs sta - might be different from periodic cs peer * station. Value is meaningless when the cs-state is idle. */ u8 cur_sta_id; /* TDLS periodic channel-switch peer */ struct { u8 sta_id; u8 op_class; bool initiator; /* are we the link initiator */ struct cfg80211_chan_def chandef; struct sk_buff *skb; /* ch sw template */ u32 ch_sw_tm_ie; /* timestamp of last ch-sw request sent (GP2 time) */ u32 sent_timestamp; } peer; } tdls_cs; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct cfg80211_ftm_responder_stats ftm_resp_stats; struct { struct cfg80211_pmsr_request *req; struct wireless_dev *req_wdev; struct list_head loc_list; int responses[IWL_MVM_TOF_MAX_APS]; struct { struct list_head resp; } smooth; struct list_head pasn_list; } ftm_initiator; struct list_head resp_pasn_list; struct { u8 d0i3_resp; u8 range_resp; } cmd_ver; struct ieee80211_vif *nan_vif; struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; /* * Drop beacons from other APs in AP mode when there are no connected * clients. 
*/ bool drop_bcn_ap_mode; struct delayed_work cs_tx_unblock_dwork; /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; }; /* Extract MVM priv from op_mode and _hw */ #define IWL_OP_MODE_GET_MVM(_iwl_op_mode) \ ((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific) #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log * if this is set, when intentionally triggered * @IWL_MVM_STATUS_STARTING: starting mac, * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, IWL_MVM_STATUS_ROC_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, IWL_MVM_STATUS_STARTING, }; struct iwl_mvm_csme_conn_info { struct rcu_head rcu_head; struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) || test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); } static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm) { return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); } /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. 
*/ static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct iwl_mvm_sta * iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; if (sta_id >= mvm->fw->ucode_capa.num_stations) return NULL; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* This can happen if the station has been removed right now */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } static inline struct ieee80211_vif * iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) { if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) return NULL; if (rcu) return rcu_dereference(mvm->vif_id_to_mac[vif_id]); return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], lockdep_is_held(&mvm->mutex)); } static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL); } static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2); } static inline bool iwl_mvm_is_adwell_hb_ap_num_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP); } static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm) { /* OCE should never be enabled for LMAC scan FWs */ return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE); } static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); } static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); } static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) && (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE); } static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue) { return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) && (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM */ if (mvm->cfg->nvm_type == IWL_NVM_EXT) return nvm_lar && tlv_lar; else return tlv_lar; } static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) && IWL_MVM_BT_COEX_MPLUT; } static 
inline bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT); } static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ return mvm->trans->trans_cfg->use_tfh; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) { /* TODO - better define this */ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* * TODO: * The issue of how to determine CDB APIs and usage is still not fully * defined. * There is a compilation for CDB and non-CDB FW, but there may * be also runtime check. * For now there is a TLV for checking compilation mode, but a * runtime check will also have to be here - once defined. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) { /* * TODO: should this be the same as iwl_mvm_is_cdb_supported()? * but then there's a little bit of code in scan that won't make * any sense... */ return mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000; } static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER); } static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG); } static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BAND_IN_RX_DATA); } static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_RX_STATS); } static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY); } static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD); } static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { if (iwl_mvm_has_new_tx_api(mvm)) return &((struct iwl_mvm_tx_resp *)tx_resp)->status; else return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status; } static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. */ return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); } static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac) { return iwl_mvm_has_new_tx_api(mvm) ? iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; } struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. 
*/ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */ u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ }; void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** * MVM Methods ******************/ /* uCode */ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm); /* Utils */ int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band); void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { iwl_fwrt_dump_error_logs(&mvm->fwrt); } u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } #endif int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids); void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd) { struct ieee80211_key_conf *keyconf = info->control.hw_key; tx_cmd->sec_ctl = TX_CMD_SEC_CCM; memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); } static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { flush_work(&mvm->async_handlers_wk); } /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); 
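/*
 * Illustrative note (added; not part of the upstream source, and the example
 * values are made up): the antenna helpers declared below intersect the
 * firmware's valid antenna bitmap with the one reported by the NVM, falling
 * back to the firmware mask when the NVM does not provide one. For instance,
 * with mvm->fw->valid_tx_ant == 0x3 (ANT_A | ANT_B) and an NVM that only
 * reports 0x1 (ANT_A), iwl_mvm_get_valid_tx_ant() returns 0x3 & 0x1 == 0x1,
 * and iwl_mvm_get_phy_config() then restricts the FW_PHY_CFG_TX_CHAIN /
 * FW_PHY_CFG_RX_CHAIN bits of mvm->fw->phy_config to those antennas:
 *
 *	u8 tx_ant = iwl_mvm_get_valid_tx_ant(mvm);	// 0x1 in this example
 *	u32 phy_cfg = iwl_mvm_get_phy_config(mvm);	// chain bits limited to 0x1
 */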
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ? mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant : mvm->fw->valid_tx_ant; } static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ? mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant : mvm->fw->valid_rx_ant; } static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) { *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant); } static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | FW_PHY_CFG_RX_CHAIN); u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm); u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS | valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS; return mvm->fw->phy_config & phy_config; } int iwl_mvm_up(struct iwl_mvm *mvm); int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm); int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm); /* * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int 
iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon); int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len); u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, struct ieee80211_vif *vif); u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx); void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index, __le32 *tim_size, u8 *beacon, u32 frame_size); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) { return iwl_mvm_has_quota_low_latency(mvm) ? 
sizeof(struct iwl_time_quota_cmd) : sizeof(struct iwl_time_quota_cmd_v1); } static inline struct iwl_time_quota_data *iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm, struct iwl_time_quota_cmd *cmd, int i) { struct iwl_time_quota_data_v1 *quotas; if (iwl_mvm_has_quota_low_latency(mvm)) return &cmd->quotas[i]; quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas; return (struct iwl_time_quota_data *)"as[i]; } int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, struct ieee80211_vif *disabled_vif); /* Scanning */ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); void iwl_mvm_scan_timeout_wk(struct work_struct *work); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { } static inline void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } static inline void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ /* rate scaling */ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate); void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct ieee80211_rx_status *rx_status); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { return 0; } static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) { } #endif /* D3 (WoWLAN, NetDetect) */ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int iwl_mvm_resume(struct ieee80211_hw *hw); void 
iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled); void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data); void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev); void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); extern const struct file_operations iwl_dbgfs_d3_test_ops; #ifdef CONFIG_PM void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, bool offload_ns, u32 cmd_flags); /* BT Coex */ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm); void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant); bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm); bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, enum nl80211_band band); u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd); #else static inline void iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif, struct iwl_beacon_filter_cmd *cmd) {} #endif int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags); /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, enum ieee80211_smps_mode smps_request); bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, u16 mac_id); /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { /* * should this consider associated/active/... state? * * Normally low-latency should only be active on interfaces * that are active, but at least with debugfs it can also be * enabled on interfaces that aren't active. However, when * interface aren't active then they aren't added into the * binding, so this has no real impact. For now, just return * the current desired low-latency state. 
*/ return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { u8 new_state; if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; /* * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are * allowed to actual mode. */ if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) return; if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) /* * We enter force state */ new_state = !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE); else /* * Check if any other one set low latency */ new_state = !!(mvmvif->low_latency & ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | LOW_LATENCY_DEBUGFS_FORCE)); mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed. */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { return ((BIT(mvm->trans->trans_cfg->base_params->num_of_queues) - 1) & ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } void iwl_mvm_stop_device(struct iwl_mvm *mvm); /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); #if IS_ENABLED(CONFIG_IWLMEI) /* vendor commands */ void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); #else static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} #endif /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed); struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed); int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); /* smart fifo */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* FTM responder */ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +#if defined(__linux__) int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr); int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); +#endif void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif); /* FTM initiator */ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void 
iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request); void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm); void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm); int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len); void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr); /* TDLS */ /* * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present. * This TID is marked as used vs the AP and all connected TDLS peers. */ #define IWL_MVM_TDLS_FW_TID 4 int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm); void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool sta_added); void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_tdls_ch_sw_params *params); void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size); void iwl_mvm_reorder_timer_expired(struct timer_list *t); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid); bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); #define MVM_TCM_PERIOD_MSEC 500 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) #define MVM_LL_PERIOD (10 * HZ) void iwl_mvm_tcm_work(struct work_struct *work); void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool tdls, bool cmd_q); void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const char *errmsg); void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); #endif int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd 
*iwl_rfi_get_freq_table(struct iwl_mvm *mvm); void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) { switch (band) { case NL80211_BAND_2GHZ: return PHY_BAND_24; case NL80211_BAND_5GHZ: return PHY_BAND_5; case NL80211_BAND_6GHZ: return PHY_BAND_6; default: WARN_ONCE(1, "Unsupported band (%u)\n", band); return PHY_BAND_5; } } /* Channel info utils */ static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); } static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci) { return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? sizeof(struct iwl_fw_channel_info) : sizeof(struct iwl_fw_channel_info_v1)); } static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) { return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 : sizeof(struct iwl_fw_channel_info) - sizeof(struct iwl_fw_channel_info_v1); } static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, u32 chan, u8 band, u8 width, u8 ctrl_pos) { if (iwl_mvm_has_ultra_hb_channel(mvm)) { ci->channel = cpu_to_le32(chan); ci->band = band; ci->width = width; ci->ctrl_pos = ctrl_pos; } else { struct iwl_fw_channel_info_v1 *ci_v1 = (struct iwl_fw_channel_info_v1 *)ci; ci_v1->channel = chan; ci_v1->band = band; ci_v1->width = width; ci_v1->ctrl_pos = ctrl_pos; } } static inline void iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, struct iwl_fw_channel_info *ci, struct cfg80211_chan_def *chandef) { enum nl80211_band band = chandef->chan->band; iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, iwl_mvm_phy_band_from_nl80211(band), iwl_mvm_get_channel_width(chandef), iwl_mvm_get_ctrl_pos(chandef)); } static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw) { u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD, IWL_FW_CMD_VER_UNKNOWN); return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ? IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2; } static inline enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_CCMP: return IWL_LOCATION_CIPHER_CCMP_128; case WLAN_CIPHER_SUITE_GCMP: return IWL_LOCATION_CIPHER_GCMP_128; case WLAN_CIPHER_SUITE_GCMP_256: return IWL_LOCATION_CIPHER_GCMP_256; default: return IWL_LOCATION_CIPHER_INVALID; } } struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) { if (mvm->mei_registered) return iwl_mei_get_ownership(); return 0; } static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, struct sk_buff *skb, unsigned int ivlen) { if (mvm->mei_registered) iwl_mei_tx_copy_to_csme(skb, ivlen); } static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) { if (mvm->mei_registered) iwl_mei_host_disassociated(); } static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) { if (mvm->mei_registered) iwl_mei_device_down(); } static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) { bool sw_rfkill = mvm->hw_registered ? 
rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false; if (mvm->mei_registered) iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), sw_rfkill); } void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool forbidden); #endif /* __IWL_MVM_H__ */ diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.c b/sys/contrib/dev/iwlwifi/mvm/sta.c index cf3d0922cf83..71d9f2a2988a 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sta.c +++ b/sys/contrib/dev/iwlwifi/mvm/sta.c @@ -1,4175 +1,4177 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #if defined(__FreeBSD__) #include #endif #include "mvm.h" #include "sta.h" #include "rs.h" /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to * support both API versions. */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { if (iwl_mvm_has_new_rx_api(mvm) || fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return sizeof(struct iwl_mvm_add_sta_cmd); else return sizeof(struct iwl_mvm_add_sta_cmd_v7); } static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { int sta_id; u32 reserved_ids = 0; BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ if (iftype != NL80211_IFTYPE_STATION) reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) { if (BIT(sta_id) & reserved_ids) continue; if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex))) return sta_id; } return IWL_MVM_INVALID_STA; } /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { .sta_id = mvm_sta->sta_id, .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .add_modify = update ? 
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT), .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), }; int ret; u32 status; u32 agg_size = 0, mpdu_dens = 0; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) add_sta_cmd.station_type = mvm_sta->sta_type; if (!update || (flags & STA_MODIFY_QUEUES)) { memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); if (!iwl_mvm_has_new_tx_api(mvm)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); if (flags & STA_MODIFY_QUEUES) add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } else { WARN_ON(flags & STA_MODIFY_QUEUES); } } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); fallthrough; case IEEE80211_STA_RX_BW_80: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); fallthrough; case IEEE80211_STA_RX_BW_40: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); fallthrough; case IEEE80211_STA_RX_BW_20: if (sta->deflink.ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } switch (sta->deflink.rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case 2: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); break; case 3 ... 8: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); break; } switch (sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); break; case IEEE80211_SMPS_STATIC: /* override NSS */ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case IEEE80211_SMPS_DYNAMIC: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); break; case IEEE80211_SMPS_OFF: /* nothing */ break; } if (sta->deflink.ht_cap.ht_supported) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); mpdu_dens = sta->deflink.ht_cap.ampdu_density; } if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); } else if (sta->deflink.vht_cap.vht_supported) { agg_size = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; } else if (sta->deflink.ht_cap.ht_supported) { agg_size = sta->deflink.ht_cap.ampdu_factor; } /* D6.0 10.12.2 A-MPDU length limit rules * A STA indicates the maximum length of the A-MPDU preEOF padding * that it can receive in an HE PPDU in the Maximum A-MPDU Length * Exponent field in its HT Capabilities, VHT Capabilities, * and HE 6 GHz Band Capabilities elements (if present) and the * Maximum AMPDU Length Exponent Extension field in its HE * Capabilities element */ if (sta->deflink.he_cap.has_he) agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3], IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); /* Limit to max A-MPDU supported by FW */ if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT)) agg_size = (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); 
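/*
 * Worked example (added for illustration; the peer capability values are
 * hypothetical): agg_size holds the 802.11 "maximum A-MPDU length exponent",
 * where an exponent of N permits A-MPDUs of up to 2^(13 + N) - 1 octets.
 * A VHT peer advertising exponent 7 can receive roughly 1 MiB A-MPDUs; if
 * its HE MAC capabilities add an extension of 3, the sum (10) would allow
 * roughly 8 MiB, exceeding the firmware ceiling expressed by
 * STA_FLG_MAX_AGG_SIZE_4M (4 MiB, i.e. exponent 9 under the same
 * 2^(13 + N) scheme), so the clamp above reduces it back to 9 before it is
 * shifted into station_flags.
 */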
add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) add_sta_cmd.uapsd_acs |= BIT(AC_BK); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) add_sta_cmd.uapsd_acs |= BIT(AC_BE); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) add_sta_cmd.uapsd_acs |= BIT(AC_VI); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) add_sta_cmd.uapsd_acs |= BIT(AC_VO); add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; default: ret = -EIO; IWL_ERR(mvm, "ADD_STA failed\n"); break; } return ret; } static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) { struct iwl_mvm_baid_data *data = from_timer(data, t, session_timer); struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr; struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; unsigned long timeout; rcu_read_lock(); ba_data = rcu_dereference(*rcu_ptr); if (WARN_ON(!ba_data)) goto unlock; if (!ba_data->timeout) goto unlock; timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); if (time_is_after_jiffies(timeout)) { mod_timer(&ba_data->session_timer, timeout); goto unlock; } /* Timer expired */ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); /* * sta should be valid unless the following happens: * The firmware asserts which triggers a reconfig flow, but * the reconfig fails before we set the pointer to sta into * the fw_id_to_mac_id pointer table. Mac80211 can't stop * A-MDPU and hence the timer continues to run. Then, the * timer expires and sta is NULL. 
*/ if (!sta) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid); unlock: rcu_read_unlock(); } /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, bool remove_queue) { struct iwl_mvm_add_sta_cmd cmd = {}; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u32 status; u8 sta_id; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return -EINVAL; } mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_disable_agg |= disable_agg_tids; cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_QUEUES; if (disable_agg_tids) cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; if (remove_queue) cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); rcu_read_unlock(); /* Notify FW of queue removal from the STA queues */ status = ADD_STA_SUCCESS; return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u16 *queueptr, u8 tid) { int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) { if (mvm->sta_remove_requires_queue_remove) { u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); struct iwl_scd_queue_cfg_cmd remove_cmd = { .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), .u.remove.queue = cpu_to_le32(queue), }; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(remove_cmd), &remove_cmd); } else { ret = 0; } iwl_trans_txq_free(mvm->trans, queue); *queueptr = IWL_MVM_INVALID_QUEUE; return ret; } if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) return 0; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); cmd.action = mvm->queue_info[queue].tid_bitmap ? 
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) return 0; cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); return ret; } static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return -EINVAL; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) agg_tids |= BIT(tid); } spin_unlock_bh(&mvmsta->lock); return agg_tids; } /* * Remove a queue from a station's resources. * Note that this only marks as free. It DOESN'T delete a BA agreement, and * doesn't disable the queue */ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long disable_agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return 0; } mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ spin_unlock_bh(&mvmsta->lock); rcu_read_unlock(); /* * The TX path may have been using this TXQ_ID from the tid_data, * so make sure it's no longer running so that we can safely reuse * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE * above, but nothing guarantees we've stopped using them. 
Thus, * without this, we could get to iwl_mvm_disable_txq() and remove * the queue while still sending frames to it. */ synchronize_net(); return disable_agg_tids; } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; u16 queue_tmp = queue; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; same_sta = sta_id == new_sta_id; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (WARN_ON(!mvmsta)) return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret); return ret; } /* If TXQ is allocated to another STA, update removal in FW */ if (!same_sta) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); return 0; } static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, unsigned long tfd_queue_mask, u8 ac) { int queue = 0; u8 ac_to_queue[IEEE80211_NUM_ACS]; int i; /* * This protects us against grabbing a queue that's being reconfigured * by the inactivity checker. */ lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); /* See what ACs the existing queues for this STA have */ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { /* Only DATA queues can be shared */ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } /* * The queue to share is chosen only from DATA queues as follows (in * descending priority): * 1. An AC_BE queue * 2. Same AC queue * 3. Highest AC queue that is lower than new AC * 4. 
Any existing AC (there always is at least 1 DATA queue) */ /* Priority 1: An AC_BE queue */ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BE]; /* Priority 2: Same AC queue */ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[ac]; /* Priority 3a: If new AC is VO and VI exists - use VI */ else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 3b: No BE so only AC less than the new one is BK */ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BK]; /* Priority 4a: No BE nor BK - use VI if exists */ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 4b: No BE, BK nor VI - use VO if exists */ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VO]; /* Make sure queue found (or not) is legal */ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { IWL_ERR(mvm, "No DATA queues available to share\n"); return -ENOSPC; } return queue; } /* Re-configure the SCD for a queue that has already been configured */ static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = frame_limit, .sta_id = sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = fifo, .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), .tid = tid, }; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) return -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret); return ret; } /* * If a given queue has a higher AC than the TID stream that is being compared * to, the queue needs to be redirected to the lower AC. This function does that * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. */ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, int ac, int ssn, unsigned int wdg_timeout, bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. Check if this is needed * here. * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with * value 3 and VO with value 0, so to check if ac X is lower than ac Y * we need to check if the numerical value of X is LARGER than of Y. 
*/ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue); return 0; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ txq->stopped = true; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); ret = -EIO; goto out; } /* Before redirecting the queue we need to de-activate it */ iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret); /* Make sure the SCD wrptr is correctly set before reconfiguring */ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); /* Update the TID "owner" of the queue */ mvm->queue_info[queue].txq_tid = tid; /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* Redirect to lower AC */ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); /* Update AC marking of the queue */ mvm->queue_info[queue].mac80211_ac = ac; /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: /* Continue using the queue */ txq->stopped = false; return ret; } static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) { int i; lockdep_assert_held(&mvm->mutex); if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, "max queue %d >= num_of_queues (%d)", maxq, mvm->trans->trans_cfg->base_params->num_of_queues)) maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; return -ENOSPC; } static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { int queue, size; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } else { struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* this queue isn't used for traffic (cab_queue) */ if (IS_ERR_OR_NULL(sta)) { size = IWL_MGMT_QUEUE_SIZE; } else if (sta->deflink.he_cap.has_he) { /* support for 256 ba size */ size = IWL_DEFAULT_QUEUE_SIZE_HE; } else { size = IWL_DEFAULT_QUEUE_SIZE; } rcu_read_unlock(); } /* take the min with bc tbl entries allowed */ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); /* size needs to be power of 2 values for calculating read/write pointers */ size = rounddown_pow_of_two(size); do { queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), tid, size, timeout); if (queue < 0) IWL_DEBUG_TX_QUEUES(mvm, "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", size, sta_id, tid, queue); size /= 2; } while (queue < 0 && size >= 16); if 
(queue < 0) return queue; IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); return queue; } static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid); queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); if (queue < 0) return queue; mvmtxq->txq_id = queue; mvm->tvqm_info[queue].txq_tid = tid; mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; spin_unlock_bh(&mvmsta->lock); return 0; } static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u8 sta_id, u8 tid) { bool enable_queue = true; /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid); return false; } /* Update mappings and refcounts */ if (mvm->queue_info[queue].tid_bitmap) enable_queue = false; mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; mvm->queue_info[queue].txq_tid = tid; } if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = queue; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = cfg->frame_limit, .sta_id = cfg->sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = cfg->fifo, .aggregate = cfg->aggregate, .tid = cfg->tid, }; bool inc_ssn; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Send the enabling command if we need to */ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); if (inc_ssn) le16_add_cpu(&cmd.ssn, 1); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); return inc_ssn; } static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; int tid; unsigned long tid_bitmap; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; tid_bitmap = mvm->queue_info[queue].tid_bitmap; if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) return; /* Find any TID for queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); cmd.tid = tid; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) { IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, ret); return; } 
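/* Firmware accepted the new TID owner - mirror it in the driver's queue state */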
mvm->queue_info[queue].txq_tid = tid; IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid); } static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid = -1; unsigned long tid_bitmap; unsigned int wdg_timeout; int ssn; int ret = true; /* queue sharing is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; lockdep_assert_held(&mvm->mutex); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* Find TID for queue, and make sure it is the only one on the queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); if (tid_bitmap != BIT(tid)) { IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap); return; } IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true, iwl_mvm_txq_from_tid(sta, tid)); if (ret) { IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; } /* If aggs should be turned back on - do it */ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { struct iwl_mvm_add_sta_cmd cmd = {0}; mvmsta->tid_disable_agg &= ~BIT(tid); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (!ret) { IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue); /* Mark queue internally as aggregating again */ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; } /* * Remove inactive TIDs of a given queue. * If all queue TIDs are inactive - mark the queue as inactive * If only some of the queue TIDs are inactive - unmap them from the queue * * Returns %true if all TIDs were removed and the queue could be reused. */ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int queue, unsigned long tid_bitmap, unsigned long *unshare_queues, unsigned long *changetid_queues) { int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) tid_bitmap &= ~BIT(tid); /* Don't mark as inactive any TID that has an active BA */ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) tid_bitmap &= ~BIT(tid); } /* If all TIDs in the queue are inactive - return it can be reused */ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); return true; } /* * If we are here, this is a shared queue and not all TIDs timed-out. * Remove the ones that did.
*/ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { u16 q_tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); q_tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* * We need to take into account a situation in which a TXQ was * allocated to TID x, and then turned shared by adding TIDs y * and z. If TID x becomes inactive and is removed from the TXQ, * ownership must be given to one of the remaining TIDs. * This is mainly because if TID x continues - a new queue can't * be allocated for it as long as it is an owner of another TXQ. * * Mark this queue in the right bitmap, we'll send the command * to the firmware later. */ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) set_bit(queue, changetid_queues); IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue); } IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* * There may be different TIDs with the same mac queues, so make * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue); set_bit(queue, unshare_queues); } return false; } /* * Check for inactivity - this includes checking if any queue * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. * * Returns the queue number, or -ENOSPC. */ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) { unsigned long now = jiffies; unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return -ENOSPC; rcu_read_lock(); /* we skip the CMD queue below by starting at 1 */ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid; unsigned long inactive_tid_bitmap = 0; unsigned long queue_tid_bitmap; queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; if (!queue_tid_bitmap) continue; /* If TXQ isn't in active use anyway - nothing to do here... */ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) continue; /* Check to see if there are inactive TIDs on this queue */ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) continue; inactive_tid_bitmap |= BIT(tid); } /* If all TIDs are active - finish check on this queue */ if (!inactive_tid_bitmap) continue; /* * If we are here - the queue hadn't been served recently and is * in use */ sta_id = mvm->queue_info[i].ra_sta_id; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * If the STA doesn't exist anymore, it isn't an error. It could * be that it was removed since getting the queues, and in this * case it should've inactivated its queues anyway. 
*/ if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues, &changetid_queues); if (ret && free_queue < 0) { queue_owner = sta; free_queue = i; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } /* Reconfigure queues requiring reconfiguration */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i); for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); rcu_read_unlock(); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) return ret; } return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; u16 queue_tmp; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; int ssn; unsigned long tfd_queue_mask; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); if (tid == IWL_MAX_TID_COUNT) { queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue); /* If no such queue is found, we'll use a DATA queue instead */ } if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) { queue = mvmsta->reserved_queue; mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); } if (queue < 0) queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try harder - perhaps kill an inactive queue */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); } /* No free queue - we'll have to share */ if (queue <= 0) { queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); if (queue > 0) { shared_queue = true; mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; } } /* * Mark TXQ as ready, even though it hasn't been fully configured yet, * to make sure no one else takes it. * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; /* This shouldn't happen - out of queues */ if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id); return queue; } /* * Actual en/disablement of aggregations is through the ADD_STA HCMD, * but for configuring the SCD to send A-MPDUs we need to mark the queue * as aggregatable.
* Mark all DATA queues as allowing to be aggregated at some point */ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, mvmsta->sta_id, tid); if (shared_queue) { /* Disable any open aggs on this queue */ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); if (disable_agg_tids) { IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue); iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); } } inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); spin_lock_bh(&mvmsta->lock); /* * This looks racy, but it is not. We have only one packet for * this ra/tid in our Tx path since we stop the Qdisc when we * need to allocate a new TFD queue. */ if (inc_ssn) { mvmsta->tid_data[tid].seq_number += 0x10; ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; } mvmsta->tid_data[tid].txq_id = queue; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; if (mvmsta->reserved_queue == queue) mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); if (!shared_queue) { ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); if (ret) goto out_err; /* If we need to re-enable aggregations... */ if (queue_state == IWL_AGG_ON) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) goto out_err; } } else { /* Redirect queue, if needed */ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false, iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } return 0; out_err: queue_tmp = queue; iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); return ret; } void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; struct ieee80211_txq *txq; u8 tid; mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list); txq = container_of((void *)mvmtxq, struct ieee80211_txq, drv_priv); tid = txq->tid; if (tid == IEEE80211_NUM_TIDS) tid = IWL_MAX_TID_COUNT; /* * We can't really do much here, but if this fails we can't * transmit anyway - so just don't transmit the frame etc. * and let them back up ... we've tried our best to allocate * a queue in the function itself. 
*/ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { list_del_init(&mvmtxq->list); continue; } list_del_init(&mvmtxq->list); local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_iftype vif_type) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; /* run the general cleanup/unsharing of queues */ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try again - this time kick out a queue if needed */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); return 0; } /* * In DQA mode, after a HW restart the queues should be allocated as before, in * order to avoid race conditions when there are shared queues. This function * does the re-mapping and queue allocation. * * Note that re-enabling aggregations isn't done in this function. */ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->sta_id, .frame_limit = IWL_FRAME_LIMIT, }; /* Make sure reserved queue is still marked as such (if allocated) */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED; for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = tid_to_mac80211_ac[i]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i); txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg); /* * on failures, just set it to IWL_MVM_INVALID_QUEUE * to try again later, we have no other good way of * failing here */ if (txq_id < 0) txq_id = IWL_MVM_INVALID_QUEUE; tid_data->txq_id = txq_id; /* * Since we don't set the seq number after reset, and HW * sets it now, FW reset will cause the seq num to start * at 0 again, so driver will need to update it * internally as well, so it keeps in sync with real val */ tid_data->seq_number = 0; } else { u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i, txq_id); iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } } static int 
iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, u16 mac_id, u16 color) { struct iwl_mvm_add_sta_cmd cmd; int ret; u32 status = ADD_STA_SUCCESS; lockdep_assert_held(&mvm->mutex); memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && sta->type == IWL_STA_AUX_ACTIVITY) cmd.mac_id_n_color = cpu_to_le32(mac_id); else cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) cmd.station_type = sta->type; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) memcpy(cmd.addr, addr, ETH_ALEN); ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; default: ret = -EIO; IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status); break; } return ret; } int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; bool sta_update = false; unsigned int sta_flags = 0; lockdep_assert_held(&mvm->mutex); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif)); else sta_id = mvm_sta->sta_id; if (sta_id == IWL_MVM_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); /* if this is a HW restart re-alloc existing queues */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_int_sta tmp_sta = { .sta_id = sta_id, .type = mvm_sta->sta_type, }; /* * First add an empty station since allocating * a queue requires a valid station */ ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, mvmvif->id, mvmvif->color); if (ret) goto err; iwl_mvm_realloc_queues_after_restart(mvm, sta); sta_update = true; sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; } mvm_sta->sta_id = sta_id; mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); mvm_sta->vif = vif; if (!mvm->trans->trans_cfg->gen2) mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; INIT_LIST_HEAD(&mvmtxq->list); atomic_set(&mvmtxq->tx_request, 0); } mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { int q; dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; /* * Initialize all the last_seq values to 0xffff which can never * compare equal to the frame's seq_ctrl in the check in * iwl_mvm_is_dup() since the lower 4 bits are the fragment * number and fragmented packets don't reach that function. * * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. */ for (q = 0; q < mvm->trans->num_rx_queues; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mvm_sta->dup_data = dup_data; } if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) goto err; } /* * if rs is registered with mac80211, then "add station" will be handled * via the corresponding ops, otherwise need to notify rate scaling here */ if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); else spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); mvmvif->ap_sta_id = sta_id; } else { WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); } } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); return 0; err: return ret; } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain) { struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id); break; default: ret = -EIO; #if defined(__linux__) IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", mvmsta->sta_id); #elif defined(__FreeBSD__) IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n", mvmsta->sta_id, status); #endif break; } return ret; } /* * Remove a station from the FW table. Before sending the command to remove * the station validate that the station is indeed known to the driver (sanity * only). 
*/ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { .sta_id = sta_id, }; int ret; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* Note: internal stations are marked as error values */ if (!sta) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd); if (ret) { IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); return ret; } return 0; } static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; spin_unlock_bh(&mvm_sta->lock); if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) return ret; } return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); u8 sta_id = mvm_sta->sta_id; int ret; lockdep_assert_held(&mvm->mutex); #if defined(__linux__) if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); #elif defined(__FreeBSD__) if (iwl_mvm_has_new_rx_api(mvm)) { kfree(mvm_sta->dup_data); mvm_sta->dup_data = NULL; } #endif ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; /* flush its queues here since we are freeing mvm_sta */ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); if (ret) return ret; if (iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); } else { u32 q_mask = mvm_sta->tfd_queue_msk; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, q_mask); } if (ret) return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); iwl_mvm_disable_sta_queues(mvm, vif, sta); /* If there is a TXQ still marked as reserved - free it */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { u8 reserved_txq = mvm_sta->reserved_queue; enum iwl_mvm_queue_status *status; /* * If no traffic has gone through the reserved TXQ - it * is still marked as IWL_MVM_QUEUE_RESERVED, and * should be manually marked as free again */ status = &mvm->queue_info[reserved_txq].status; if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", sta_id, reserved_txq, *status)) return -EINVAL; *status = IWL_MVM_QUEUE_FREE; } if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; /* unassoc - go ahead - remove the AP STA now */ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. 
*/ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } /* * Make sure that the tx response code sees the station as -EBUSY and * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { int ret = iwl_mvm_rm_sta_common(mvm, sta_id); lockdep_assert_held(&mvm->mutex); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); return ret; } int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } sta->tfd_queue_msk = qmask; sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); sta->sta_id = IWL_MVM_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); } static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, int maccolor, u8 *addr, struct iwl_mvm_int_sta *sta, u16 *queue, int fifo) { int ret; /* Map queue to fifo - needs to happen before adding station */ if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT); return ret; } /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { int txq; txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); if (txq < 0) { iwl_mvm_rm_sta_common(mvm, sta->sta_id); return txq; } *queue = txq; } return 0; } int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) { int ret; lockdep_assert_held(&mvm->mutex); /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY); if (ret) return ret; /* * In CDB NICs we need to specify which lmac to use for aux activity * using the mac_id argument place to send lmac_id to the function */ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, &mvm->aux_sta, &mvm->aux_queue, 
IWL_MVM_TX_FIFO_MCAST); if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, NULL, &mvm->snif_sta, &mvm->snif_queue, IWL_MVM_TX_FIFO_BE); } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; int queue; int ret; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, .sta_id = mvmvif->bcast_sta.sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; lockdep_assert_held(&mvm->mutex); if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { queue = mvm->probe_queue; } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { queue = mvm->p2p_dev_queue; } else { WARN(1, "Missing required TXQ for adding bcast STA\n"); return -EINVAL; } bsta->tfd_queue_msk |= BIT(queue); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color); if (ret) return ret; /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); if (queue < 0) { iwl_mvm_rm_sta_common(mvm, bsta->sta_id); return queue; } if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) mvm->probe_queue = queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_dev_queue = queue; } return 0; } static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, 
&mvmvif->bcast_sta, true); switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: queueptr = &mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: queueptr = &mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", vif->type); return; } queue = *queueptr; iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); if (iwl_mvm_has_new_tx_api(mvm)) return; WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); } /* Send the FW a request to remove the station from its internal data * structures, but DO NOT remove the entry from the local data structures. */ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); } /* * Send the FW a request to remove the station from its internal data * structures, and in addition remove it from the local data structure. */ int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_dealloc_bcast_sta(mvm, vif); return ret; } /* * Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. * * @mvm: the mvm component * @vif: the interface to which the multicast station is added */ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { .fifo = vif->type == NL80211_IFTYPE_AP ?
IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, .sta_id = msta->sta_id, .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; /* * In IBSS, ieee80211_check_queues() sets the cab_queue to be * invalid, so make sure we use the queue we want. * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. */ if (vif->type == NL80211_IFTYPE_ADHOC) mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); if (ret) goto err; /* * Enable cab queue after the ADD_STA command is sent. * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG * command with unknown station id, and for FW that doesn't support * station API since the cab queue is not included in the * tfd_queue_mask. */ if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); if (queue < 0) { ret = queue; goto err; } mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); return 0; err: iwl_mvm_dealloc_int_sta(mvm, msta); return ret; } static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; int ret, size; u32 status; /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); /* * The fields assigned here are in the same location at the start * of the command, so we can do this union trick. */ u.cmd.common.key_flags = key_flags; u.cmd.common.key_offset = keyconf->hw_key_idx; u.cmd.common.sta_id = sta_id; size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); break; } return ret; } /* * Send the FW a request to remove the station from its internal data * structures, and in addition remove it from the local data structure.
*/ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { struct iwl_mvm_delba_data notif = { .baid = baid, }; iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, ¬if, sizeof(notif)); }; static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data) { int i; iwl_mvm_sync_rxq_del_ba(mvm, data->baid); for (i = 0; i < mvm->trans->num_rx_queues; i++) { int j; struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; spin_lock_bh(&reorder_buf->lock); if (likely(!reorder_buf->num_stored)) { spin_unlock_bh(&reorder_buf->lock); continue; } /* * This shouldn't happen in regular DELBA since the internal * delBA notification should trigger a release of all frames in * the reorder buffer. */ WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_purge(&entries[j].e.frames); /* * Prevent timer re-arm. This prevents a very far fetched case * where we timed out on the notification. There may be prior * RX frames pending in the RX queue before the notification * that might get processed between now and the actual deletion * and we would re-arm the timer although we are deleting the * reorder buffer. */ reorder_buf->removed = true; spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); } } static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, u16 ssn, u16 buf_size) { int i; for (i = 0; i < mvm->trans->num_rx_queues; i++) { struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; int j; reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; /* rx reorder timer */ timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&entries[j].e.frames); } } static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size) { struct iwl_mvm_add_sta_cmd cmd = { .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .sta_id = mvm_sta->sta_id, .add_modify = STA_MODE_MODIFY, }; u32 status; int ret; if (start) { cmd.add_immediate_ba_tid = tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); cmd.rx_ba_window = cpu_to_le16(buf_size); cmd.modify_mask = STA_MODIFY_ADD_BA_TID; } else { cmd.remove_immediate_ba_tid = tid; cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? 
"start" : "stopp"); if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && !(status & IWL_ADD_STA_BAID_VALID_MASK))) return -EINVAL; return u32_get_bits(status, IWL_ADD_STA_BAID_MASK); case ADD_STA_IMMEDIATE_BA_FAILURE: IWL_WARN(mvm, "RX BA Session refused by fw\n"); return -ENOSPC; default: IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); return -EIO; } } static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { struct iwl_rx_baid_cfg_cmd cmd = { .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); if (start) { cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); cmd.alloc.tid = tid; cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); cmd.remove.tid = cpu_to_le32(tid); } ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), &cmd, &baid); if (ret) return ret; if (!start) { /* ignore firmware baid on remove */ baid = 0; } IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) return -EINVAL; return baid; } static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, tid, ssn, buf_size, baid); return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, tid, ssn, buf_size); } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_baid_data *baid_data = NULL; int ret, baid; u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID : IWL_MAX_BAID_OLD; lockdep_assert_held(&mvm->mutex); if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); return -ENOSPC; } if (iwl_mvm_has_new_rx_api(mvm) && start) { u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ /* * The division below will be OK if either the cache line size * can be divided by the entry size (ALIGN will round up) or if * if the entry size can be divided by the cache line size, in * which case the ALIGN() will do nothing. */ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); #endif /* * Upward align the reorder buffer size to fill an entire cache * line for each queue, to avoid sharing cache lines between * different queues. */ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); /* * Allocate here so if allocation fails we can bail out early * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL); if (!baid_data) return -ENOMEM; /* * This division is why we need the above BUILD_BUG_ON(), * if that doesn't hold then this will not be right. 
*/ baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]); } if (iwl_mvm_has_new_rx_api(mvm) && !start) { baid = mvm_sta->tid_to_baid[tid]; } else { /* we don't really need it in this case */ baid = -1; } /* Don't send command to remove (start=0) BAID during restart */ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, baid); if (baid < 0) { ret = baid; goto out_free; } if (start) { mvm->rx_ba_sessions++; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; baid_data->rcu_ptr = &mvm->baid_map[baid]; timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0); baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_id = mvm_sta->sta_id; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's * supposed to happen) and we will free the session data while * RX is being processed in parallel */ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", mvm_sta->sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { baid = mvm_sta->tid_to_baid[tid]; if (mvm->rx_ba_sessions > 0) /* check that restart flow didn't zero the counter */ mvm->rx_ba_sessions--; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) return -EINVAL; baid_data = rcu_access_pointer(mvm->baid_map[baid]); if (WARN_ON(!baid_data)) return -EINVAL; /* synchronize all rx queues so we can safely delete */ iwl_mvm_free_reorder(mvm, baid_data); del_timer_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); /* * After we've deleted it, do another queue sync * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently * running it won't find a new session in the old * BAID. It can find the NULL pointer for the BAID, * but we must not have it find a different session. */ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); } return 0; out_free: kfree(baid_data); return ret; } int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); if (start) { mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ mvm_sta->tid_disable_agg |= BIT(tid); } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.modify_mask = STA_MODIFY_QUEUES; cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: ret = -EIO; IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? 
"start" : "stopp", status); break; } return ret; } const u8 tid_to_mac80211_ac[] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ }; static const u8 tid_to_ucode_ac[] = { AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO, }; int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && mvmsta->tid_data[tid].state != IWL_AGG_OFF) { IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", mvmsta->tid_data[tid].state); return -ENXIO; } lockdep_assert_held(&mvm->mutex); if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) { u8 ac = tid_to_mac80211_ac[tid]; ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); if (ret) return ret; } spin_lock_bh(&mvmsta->lock); /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed * 2. The TXQ hasn't yet been enabled, so find a free one and mark * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } txq_id = ret; /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { ret = -ENXIO; IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1); goto out; } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto out; } IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id); tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; *ssn = tid_data->ssn; IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", mvmsta->sta_id, tid, txq_id, tid_data->ssn, tid_data->next_reclaimed); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { tid_data->state = IWL_AGG_STARTING; ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; } else { tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; } out: spin_unlock_bh(&mvmsta->lock); return ret; } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = buf_size, .aggregate = true, }; /* * When FW supports TLC_OFFLOAD, it also implements the Tx aggregation * manager, so this function should never be called in this case. */ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); spin_lock_bh(&mvmsta->lock); ssn = tid_data->ssn; queue = tid_data->txq_id; tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); if (iwl_mvm_has_new_tx_api(mvm)) { /* * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() * would have failed, so if we are here there is no need to * allocate a queue. * However, if aggregation size is different than the default * size, the scheduler should be reconfigured. * We cannot do this with the new TX API, so return unsupported * for now, until it is offloaded to firmware. * Note that if the SCD default value changes - this condition * should be updated as well. */ if (buf_size < IWL_FRAME_LIMIT) return -ENOTSUPP; ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; goto out; } cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; queue_status = mvm->queue_info[queue].status; /* Maybe there is no need to even alloc a queue... */ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) alloc_queue = false; /* * Only reconfig the SCD for the queue if the window size has * changed from current (become smaller) */ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { /* * If reconfiguring an existing queue, it first must be * drained */ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); return ret; } ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id, tid, buf_size, ssn); if (ret) { IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue); return ret; } } if (alloc_queue) iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ if (queue_status != IWL_MVM_QUEUE_SHARED) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; } /* No need to mark as reserved */ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, * our ucode doesn't allow for that and has a global limit * for each station. Therefore, use the minimum of all the * aggregation sessions and our default value.
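* (taking the min below means the stored per-station limit can only shrink as more sessions are opened, never grow back)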
*/ mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size); mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct iwl_mvm_tid_data *tid_data) { u16 txq_id = tid_data->txq_id; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return; /* * The TXQ is marked as reserved only if no traffic came through yet * This means no traffic has been sent on this TID (agg'd or not), so * we no longer have use for the queue. Since it hasn't even been * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. */ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; int err; /* * If mac80211 is cleaning its state, then say that we finished since * our state has been cleared anyway. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); mvmsta->agg_tids &= ~BIT(tid); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: /* * The agg session has been stopped before it was set up. This * can happen when the AddBA timer times out for example. */ /* No barriers since we are under mutex */ lockdep_assert_held(&mvm->mutex); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); tid_data->state = IWL_AGG_OFF; err = 0; break; default: IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n", mvmsta->sta_id, tid, tid_data->state); IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id); err = -EINVAL; } spin_unlock_bh(&mvmsta->lock); return err; } int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; enum iwl_mvm_agg_state old_state; /* * First set the agg state to OFF to avoid calling * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
*/ spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_has_new_tx_api(mvm)) { if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, BIT(tid))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_txq_empty(mvm->trans, txq_id); } else { if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); } iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); } return 0; } static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); /* Pick the unused key offset with the highest 'deleted' * counter. Every time a key is deleted, all the counters * are incremented and the one that was just deleted is * reset to zero. Thus, the highest counter is the one * that was deleted longest ago. Pick that one. */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (test_bit(i, mvm->fw_key_table)) continue; if (mvm->fw_key_deleted[i] > max) { max = mvm->fw_key_deleted[i]; max_offs = i; } } if (max_offs < 0) return STA_KEY_IDX_INVALID; return max_offs; } static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return iwl_mvm_sta_from_mac80211(sta); /* * The device expects GTKs for station interfaces to be * installed as GTKs for the AP station. If we have no * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then * be the AP ID, and no station was passed by mac80211. */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } return NULL; } static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) { int i; for (i = len - 1; i >= 0; i--) { if (pn1[i] > pn2[i]) return 1; if (pn1[i] < pn2[i]) return -1; } return 0; } static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; u64 pn = 0; int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); if (api_ver >= 2) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); memcpy((void *)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); pn = atomic64_read(&key->tx_pn); } else { u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; for (i = 0; i < 5; i++) u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); } memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); fallthrough; case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); fallthrough; case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); if (mfp) key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) i = 0; else i = -1; for (; i < IEEE80211_NUM_TIDS; i++) { struct ieee80211_key_seq seq = {}; u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; int rx_pn_len = 8; /* there's a hole at 2/3 in FW format depending on version */ int hole = api_ver >= 3 ? 
0 : 2; ieee80211_get_key_rx_seq(key, i, &seq); if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { rx_pn[0] = seq.tkip.iv16; rx_pn[1] = seq.tkip.iv16 >> 8; rx_pn[2 + hole] = seq.tkip.iv32; rx_pn[3 + hole] = seq.tkip.iv32 >> 8; rx_pn[4 + hole] = seq.tkip.iv32 >> 16; rx_pn[5 + hole] = seq.tkip.iv32 >> 24; } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { rx_pn = seq.hw.seq; rx_pn_len = seq.hw.seq_len; } else { rx_pn[0] = seq.ccmp.pn[0]; rx_pn[1] = seq.ccmp.pn[1]; rx_pn[2 + hole] = seq.ccmp.pn[2]; rx_pn[3 + hole] = seq.ccmp.pn[3]; rx_pn[4 + hole] = seq.ccmp.pn[4]; rx_pn[5 + hole] = seq.ccmp.pn[5]; } if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, rx_pn_len) > 0) memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, rx_pn_len); } if (api_ver >= 2) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { size = sizeof(u.cmd_v1); } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd); else ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); break; } return ret; } static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf, u8 sta_id, bool remove_key) { struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; /* verify the key details match the required command's expectations */ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || (keyconf->keyidx != 4 && keyconf->keyidx != 5 && keyconf->keyidx != 6 && keyconf->keyidx != 7) || (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) return -EINVAL; if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) return -EINVAL; igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { /* This is a valid situation for IGTK */ if (sta_id == IWL_MVM_INVALID_STA) return 0; igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; const u8 *pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); break; default: return -EINVAL; } memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | ((u64) pn[4] << 8) | ((u64) pn[3] << 16) | ((u64) pn[2] << 24) | ((u64) pn[1] << 32) | ((u64) pn[0] << 40)); } IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n", remove_key ? "removing" : "installing", keyconf->keyidx >= 6 ? 
"B" : "", keyconf->keyidx, igtk_cmd.sta_id); if (!iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { .ctrl_flags = igtk_cmd.ctrl_flags, .key_id = igtk_cmd.key_id, .sta_id = igtk_cmd.sta_id, .receive_seq_cnt = igtk_cmd.receive_seq_cnt }; memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk)); return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1); } return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd); } static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); return sta->addr; } return NULL; } static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset, bool mcast) { const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); sta_id = mvm_sta->sta_id; mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } else { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; } if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset, mfp); } return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp); } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); if (vif->type != NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { /* Get the station id from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (!mvm_sta) { IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } sta_id = mvm_sta->sta_id; /* * It is possible that the 'sta' parameter is NULL, and thus * there is a need to retrieve the sta from the local station * table. */ if (!sta) { sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } } if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; } else { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); goto end; } /* If the key_offset is not pre-assigned, we need to find a * new offset to use. 
In normal cases, the offset is not * pre-assigned, but during HW_RESTART we want to reuse the * same indices, so we pass them when this function is called. * * In D3 entry, we need to hardcoded the indices (because the * firmware hardcodes the PTK offset to 0). In this case, we * need to make sure we don't overwrite the hw_key_idx in the * keyconf structure, because otherwise we cannot configure * the original ones back when resuming. */ if (key_offset == STA_KEY_IDX_INVALID) { key_offset = iwl_mvm_set_fw_key_idx(mvm); if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; keyconf->hw_key_idx = key_offset; } ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) goto end; /* * For WEP, the same key is used for multicast and unicast. Upload it * again, using the same key offset, and now pointing the other one * to the same key slot (offset). * If this fails, remove the original as well. */ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) && sta) { ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast); if (ret) { __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); goto end; } } __set_bit(key_offset, mvm->fw_key_table); end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta ? sta->addr : zero_addr, ret); return ret; } int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (mvm_sta) sta_id = mvm_sta->sta_id; else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx); return -ENOENT; } /* track which key was deleted last */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (mvm->fw_key_deleted[i] < U8_MAX) mvm->fw_key_deleted[i]++; } mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; if (sta && !mvm_sta) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; } ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; /* delete WEP key twice to get rid of (now useless) offset */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); return ret; } void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); bool mfp = sta ? 
sta->mfp : false; rcu_read_lock(); mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (WARN_ON_ONCE(!mvm_sta)) goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, mfp); unlock: rcu_read_unlock(); } void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, .sleep_tx_count = cpu_to_le16(cnt), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int tid, ret; unsigned long _tids = tids; /* convert TIDs to ACs - we don't support TSPEC so that's OK * Note that this field is reserved and unused by firmware not * supporting GO uAPSD, so it's safe to always do this. */ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); /* If we're releasing frames from aggregation or dqa queues then check * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { struct iwl_mvm_tid_data *tid_data; u16 n_queued; tid_data = &mvmsta->tid_data[tid]; n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { more_data = true; remaining = 0; break; } remaining -= n_queued; } sleep_tx_count = cnt - remaining; if (reason == IEEE80211_FRAME_RELEASE_UAPSD) mvmsta->sleep_tx_count = sleep_tx_count; spin_unlock_bh(&mvmsta->lock); cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); if (WARN_ON(cnt - remaining == 0)) { ieee80211_sta_eosp(sta); return; } } /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ iwl_trans_block_txq_ptrs(mvm->trans, true); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; u32 sta_id = le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = 
rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable) { struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvm_sta->lock); if (mvm_sta->disable_tx == disable) { spin_unlock_bh(&mvm_sta->lock); return; } mvm_sta->disable_tx = disable; /* * If sta PS state is handled by mac80211, tell it to start/stop * queuing tx for this station. */ if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS)) ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); spin_unlock_bh(&mvm_sta->lock); } static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_int_sta *sta, bool disable) { u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = sta->sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; int i; rcu_read_lock(); /* Block/unblock all the stations of the given mvmvif */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); if (IS_ERR_OR_NULL(sta)) continue; mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) continue; iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } rcu_read_unlock(); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return; /* Need to block/unblock also multicast station */ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->mcast_sta, disable); /* * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->bcast_sta, disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); if (mvmsta) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); rcu_read_unlock(); } u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) { u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
*/ if (mvm->trans->trans_cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } +#if defined(__linux__) int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len) { int ret; u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_key_conf *keyconf; ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, NL80211_IFTYPE_UNSPECIFIED, IWL_STA_LINK); if (ret) return ret; ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, addr, sta, &queue, IWL_MVM_TX_FIFO_BE); if (ret) goto out; keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); if (!keyconf) { ret = -ENOBUFS; goto out; } keyconf->cipher = cipher; memcpy(keyconf->key, key, key_len); keyconf->keylen = key_len; ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, 0, NULL, 0, 0, true); kfree(keyconf); return 0; out: iwl_mvm_dealloc_int_sta(mvm, sta); return ret; } +#endif void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 mac_id) { struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = { .mac_id = cpu_to_le32(mac_id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD), CMD_ASYNC, sizeof(cancel_channel_switch_cmd), &cancel_channel_switch_cmd); if (ret) IWL_ERR(mvm, "Failed to cancel the channel switch\n"); } diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.h b/sys/contrib/dev/iwlwifi/mvm/sta.h index f1a4fc3e4038..d72f52230f22 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sta.h +++ b/sys/contrib/dev/iwlwifi/mvm/sta.h @@ -1,554 +1,556 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #ifndef __sta_h__ #define __sta_h__ #include #include #include #include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */ #include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */ #include "rs.h" struct iwl_mvm; struct iwl_mvm_vif; /** * DOC: DQA - Dynamic Queue Allocation -introduction * * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi * driver to allow dynamic allocation of queues on-demand, rather than allocate * them statically ahead of time. Ideally, we would like to allocate one queue * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2 * even if it also needs to send traffic to a sleeping STA1, without being * blocked by the sleeping station. * * Although the queues in DQA mode are dynamically allocated, there are still * some queues that are statically allocated: * TXQ #0 - command queue * TXQ #1 - aux frames * TXQ #2 - P2P device frames * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames * TXQ #4 - BSS DATA frames queue * TXQ #5-8 - Non-QoS and MGMT frames queue pool * TXQ #9 - P2P GO/SoftAP probe responses * TXQ #10-31 - DATA frames queue pool * The queues are dynamically taken from either the MGMT frames queue pool or * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every * queue. * * When a frame for a previously unseen RA/TID comes in, it needs to be deferred * until a queue is allocated for it, and only then can be TXed. Therefore, it * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames. * * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT * queues in the pool. 
If there is no longer a free MGMT queue to allocate, a * queue will be allocated from the DATA pool instead. Since QoS NDPs can create * a problem for aggregations, they too will use a MGMT queue. * * When adding a STA, a DATA queue is reserved for it so that it can TX from * it. If no such free queue exists for reserving, the STA addition will fail. * * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a * new RA/TID comes in for an existing STA, one of the STA's queues will become * shared and will serve more than the single TID (but always for the same RA!). * * When a RA/TID needs to become aggregated, no new queue is required to be * allocated, only mark the queue as aggregated via the ADD_STA command. Note, * however, that a shared queue cannot be aggregated, and only after the other * TIDs become inactive and are removed - only then can the queue be * reconfigured and become aggregated. * * When removing a station, its queues are returned to the pool for reuse. Here * we also need to make sure that we are synced with the worker thread that TXes * the deferred frames so we don't get into a situation where the queues are * removed and then the worker puts deferred frames onto the released queues or * tries to allocate new queues for a STA we don't need anymore. */ /** * DOC: station table - introduction * * The station table is a list of data structures that represent the stations. * In STA/P2P client mode, the driver will hold one station for the AP / GO. * In GO/AP mode, the driver will have as many stations as associated clients. * All these stations are reflected in the fw's station table. The driver * keeps the fw's station table up to date with the ADD_STA command. Stations * can be removed by the REMOVE_STA command. * * All the data related to a station is held in the structure %iwl_mvm_sta * which is embedded in the mac80211's %ieee80211_sta (in the drv_priv) area. * This data includes the index of the station in the fw, per tid information * (sequence numbers, Block-ack state machine, etc...). The stations are * created and deleted by the %sta_state callback from %ieee80211_ops. * * The driver holds a map: %fw_id_to_mac_id that allows fetching a * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw * station index. That way, the driver is able to get the tid related data in * O(1) in time sensitive paths (Tx / Tx response / BA notification). These * paths are triggered by the fw, and the driver needs to get a pointer to the * %ieee80211 structure. This map helps to get that pointer quickly. */ /** * DOC: station table - locking * * As stated before, the station is created / deleted by mac80211's %sta_state * callback from %ieee80211_ops which can sleep. The next paragraph explains * the locking of a single station, the next ones relate to the station * table. * * The station holds the sequence number per tid. So this data needs to be * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack * information (the state machine / and the logic that checks if the queues * were drained), so it also needs to be accessible from the Tx response flow. * In short, the station needs to be accessed from sleepable context as well as * from tasklets, so the station itself needs a spinlock. * * The writers of the %fw_id_to_mac_id map are serialized by the global mutex of * the mvm op_mode. This is possible since %sta_state can sleep.
* The pointers in this map are RCU protected, hence we won't replace the * station while we have Tx / Tx response / BA notification running. * * If a station is deleted while it still has packets in its A-MPDU queues, * then the reclaim flow will notice that there is no station in the map for * sta_id and it will dump the responses. */ /** * DOC: station table - internal stations * * The FW needs a few internal stations that are not reflected in * mac80211, such as broadcast station in AP / GO mode, or AUX sta for * scanning and P2P device (during the GO negotiation). * For these kinds of stations we have %iwl_mvm_int_sta struct which holds the * data relevant for them from both %iwl_mvm_sta and %ieee80211_sta. * Usually the data for these stations is static, so no locking is required, * and no TID data as this is also not needed. * One thing to note is that these stations have an ID in the fw, but not * in mac80211. In order to "reserve" them a sta_id in %fw_id_to_mac_id * we fill ERR_PTR(EINVAL) in this mapping and all other dereferencing of * pointers from this mapping need to check that the value is not error * or NULL. * * Currently there is only one auxiliary station for scanning, initialized * on init. */ /** * DOC: station table - AP Station in STA mode * * %iwl_mvm_vif includes the index of the AP station in the fw's STA table: * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta, * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove * the AP station from the fw before setting the MAC context as unassociated. * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is * removed by mac80211, but the station won't be removed in the fw until the * VIF is set as unassociated. Then, %ap_sta_id will be invalidated. */ /** * DOC: station table - Drain vs. Flush * * Flush means that all the frames in the SCD queue are dumped regardless of the * station to which they were sent. We do that when we disassociate and before * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. * This is useful when a client (if we are IBSS / GO or AP) disassociates. */ /** * DOC: station table - fw restart * * When the fw asserts, or we have any other issue that requires us to reset the * driver, we require mac80211 to reconfigure the driver. Since the private * data of the stations is embedded in mac80211's %ieee80211_sta, that data will * not be zeroed and needs to be reinitialized manually. * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and that will hint us * that we must not allocate a new sta_id but reuse the previous one. This * means that the stations being re-added after the reset will have the same * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id * map, since the stations aren't in the fw any more. Internal stations that * are not added by mac80211 will be re-added in the init flow that is called * after the restart: mac80211 calls %iwl_mvm_mac_start which calls * %iwl_mvm_up. */ /** * DOC: AP mode - PS * * When a station is asleep, the fw will set it as "asleep". All frames on * shared queues (i.e. non-aggregation queues) to that station will be dropped * by the fw (%TX_STATUS_FAIL_DEST_PS failure code). * * AMPDUs are in a separate queue that is stopped by the fw. We just need to * let mac80211 know when there are frames in these queues so that it can * properly handle trigger frames.
* * When a trigger frame is received, mac80211 tells the driver to send frames * from the AMPDU queues or sends frames to non-aggregation queues itself, * depending on which ACs are delivery-enabled and what TID has frames to * transmit. Note that mac80211 has all the knowledge since all the non-agg * frames are buffered / filtered, and the driver tells mac80211 about agg * frames. The driver needs to tell the fw to let frames out even if the * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count. * * When we receive a frame from that station with PM bit unset, the driver * needs to let the fw know that this station isn't asleep any more. This is * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the * station's wakeup. * * For a GO, the Service Period might be cut short due to an absence period * of the GO. In this (and all other cases) the firmware notifies us with the * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we * already sent to the device will be rejected again. * * See also "AP support for powersaving clients" in mac80211.h. */ /** * enum iwl_mvm_agg_state * * The state machine of the BA agreement establishment / tear down. * These states relate to a specific RA / TID. * * @IWL_AGG_OFF: aggregation is not used * @IWL_AGG_QUEUED: aggregation start work has been queued * @IWL_AGG_STARTING: aggregation is starting (between start and oper) * @IWL_AGG_ON: aggregation session is up * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the * HW queue to be empty of packets for this RA / TID. * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the * HW queue to be empty of packets for this RA / TID. */ enum iwl_mvm_agg_state { IWL_AGG_OFF = 0, IWL_AGG_QUEUED, IWL_AGG_STARTING, IWL_AGG_ON, IWL_EMPTYING_HW_QUEUE_ADDBA, IWL_EMPTYING_HW_QUEUE_DELBA, }; /** * struct iwl_mvm_tid_data - holds the states for each RA / TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @lq_color: the color of the LQ command as it appears in tx response. * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. * @txq_id: Tx queue used by the BA session / DQA * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or * the first packet to be sent in legacy HW queue in Tx AGG stop flow. * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU * @tpt_meas_start: time of the throughput measurements start, is reset every HZ * @tx_count_last: number of frames transmitted during the last second * @tx_count: counts the number of frames transmitted since the last reset of * tpt_meas_start */ struct iwl_mvm_tid_data { u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ u32 rate_n_flags; u8 lq_color; bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; u16 tx_time; unsigned long tpt_meas_start; u32 tx_count_last; u32 tx_count; }; struct iwl_mvm_key_pn { struct rcu_head rcu_head; struct { u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN]; } ____cacheline_aligned_in_smp q[]; }; /** * enum iwl_mvm_rxq_notif_type - Internal message identifier * * @IWL_MVM_RXQ_EMPTY: empty sync notification * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, IWL_MVM_RXQ_NSSN_SYNC, }; /** * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent * in &iwl_rxq_sync_cmd. Should be DWORD aligned. * FW is agnostic to the payload, so there are no endianness requirements. * * @type: value from &iwl_mvm_rxq_notif_type * @sync: ctrl path is waiting for all notifications to be received * @cookie: internal cookie to identify old notifications * @data: payload */ struct iwl_mvm_internal_rxq_notif { u16 type; u16 sync; u32 cookie; u8 data[]; } __packed; struct iwl_mvm_delba_data { u32 baid; } __packed; struct iwl_mvm_nssn_sync_data { u32 baid; u32 nssn; } __packed; /** * struct iwl_mvm_rxq_dup_data - per station per rx queue data * @last_seq: last sequence per tid for duplicate packet detection * @last_sub_frame: last subframe packet */ struct iwl_mvm_rxq_dup_data { __le16 last_seq[IWL_MAX_TID_COUNT + 1]; u8 last_sub_frame[IWL_MAX_TID_COUNT + 1]; } ____cacheline_aligned_in_smp; /** * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. * @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP * @lock: lock to protect the whole struct. Since %tid_data is accessed from Tx * and from Tx response flow, it needs a spinlock. * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. * @tid_to_baid: a simple map of TID to baid * @lq_sta: holds rate scaling data, either for the case when RS is done in * the driver - %rs_drv or in the FW - %rs_fw. * @reserved_queue: the queue reserved for this STA for DQA purposes. * Every STA is given one reserved queue to allow it to operate. If no * such queue can be guaranteed, the STA addition will fail. * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: did thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. * In case TLC offload is not active it is either 0xFFFF or 0.
* @max_amsdu_len: max AMSDU length * @orig_amsdu_len: used to save the original amsdu_len when it is changed via * debugfs. If it's set to 0, it means that it is it's not set via * debugfs. * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. This is useful in case the queue * gets empty before all the frames were sent, which can happen when * we are sending frames from an AMPDU queue and there was a hole in * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). * @pairwise_cipher: used to feed iwlmei upon authorization * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that * space. * */ struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; u16 max_agg_bufsize; enum iwl_sta_type sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; u8 tid_to_baid[IWL_MAX_TID_COUNT]; union { struct iwl_lq_sta_rs_fw rs_fw; struct iwl_lq_sta rs_drv; } lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; u8 reserved_queue; /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; bool tt_tx_protection; bool disable_tx; u16 amsdu_enabled; u16 max_amsdu_len; u16 orig_amsdu_len; bool sleeping; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; u8 tx_ant; u32 pairwise_cipher; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); static inline struct iwl_mvm_sta * iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) { return (void *)sta->drv_priv; } /** * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or * broadcast) * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @type: station type * @tfd_queue_msk: the tfd queues used by the station */ struct iwl_mvm_int_sta { u32 sta_id; enum iwl_sta_type type; u32 tfd_queue_msk; }; /** * Send the STA info to the FW. * * @mvm: the iwl_mvm* to use * @sta: the STA * @update: this is true if the FW is being updated about a STA it already knows * about. Otherwise (if this is a new STA), this should be false. * @flags: if update==true, this marks what is being changed via ORs of values * from enum iwl_sta_modify_flag. Otherwise, this is ignored. 
*/ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta); int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id); int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset); int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf); void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id); int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue); int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain); void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable); void iwl_mvm_sta_modify_disable_tx_ap(struct 
iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable); void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); +#if defined(__linux__) int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len); +#endif void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 mac_id); #endif /* __sta_h__ */
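
For illustration only, and not part of the patch above: because iwl_mvm_add_pasn_sta() is now declared and compiled only under #if defined(__linux__), any shared code that wants to call it has to carry the same guard. The sketch below shows a hypothetical caller (example_start_pasn_responder is not a real driver function) that uses the helper on Linux builds and falls back to -EOPNOTSUPP on FreeBSD builds where the helper is compiled out.

/* Hypothetical caller sketch; the function name is illustrative only. */
static int example_start_pasn_responder(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct iwl_mvm_int_sta *sta,
					u8 *addr, u32 cipher,
					u8 *key, u32 key_len)
{
#if defined(__linux__)
	/* Linux build: the PASN helper declared in sta.h is available. */
	return iwl_mvm_add_pasn_sta(mvm, vif, sta, addr, cipher, key, key_len);
#else
	/* FreeBSD build: the helper is compiled out, so report no support. */
	return -EOPNOTSUPP;
#endif
}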