diff --git a/sys/contrib/dev/iwlwifi/fw/dbg.h b/sys/contrib/dev/iwlwifi/fw/dbg.h index be7806407de8..e240ef2e1c96 100644 --- a/sys/contrib/dev/iwlwifi/fw/dbg.h +++ b/sys/contrib/dev/iwlwifi/fw/dbg.h @@ -1,330 +1,334 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_dbg_h__ #define __iwl_fw_dbg_h__ #include #include #include "runtime.h" #include "iwl-prph.h" #include "iwl-io.h" #include "file.h" #include "error-dump.h" #include "api/commands.h" #include "api/dbg-tlv.h" #include "api/alive.h" /** * struct iwl_fw_dump_desc - describes the dump * @len: length of trig_desc->data * @trig_desc: the description of the dump */ struct iwl_fw_dump_desc { size_t len; /* must be last */ struct iwl_fw_error_dump_trigger_desc trig_desc; }; /** * struct iwl_fw_dbg_params - register values to restore * @in_sample: DBGC_IN_SAMPLE value * @out_ctrl: DBGC_OUT_CTRL value */ struct iwl_fw_dbg_params { u32 in_sample; u32 out_ctrl; }; extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay); int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig_type); int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, struct iwl_fwrt_dump_data *dump_data, bool sync); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
__printf(3, 4); int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); #define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \ unlikely(__dbg_trigger); \ }) static inline struct iwl_fw_dbg_trigger_tlv* _iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) { return fw->dbg.trigger_tlv[id]; } #define iwl_fw_dbg_get_trigger(fw, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_get_trigger((fw), (id)); \ }) static inline bool iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, struct wireless_dev *wdev) { u32 trig_vif = le32_to_cpu(trig->vif_type); return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || wdev->iftype == trig_vif; } static inline bool iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trig) { return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && (fwrt->dump.conf == FW_DBG_INVALID || (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); } static inline bool iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec) { unsigned long wind_jiff = usecs_to_jiffies(dis_usec); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, jiffies))) return true; fwrt->dump.non_collect_ts_start[id] = jiffies; return false; } static inline bool iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC; if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; } return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); } static inline struct iwl_fw_dbg_trigger_tlv* 
_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, const enum iwl_fw_dbg_trigger id) { struct iwl_fw_dbg_trigger_tlv *trig; if (iwl_trans_dbg_ini_valid(fwrt->trans)) return NULL; if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) return NULL; trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id); if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig)) return NULL; return trig; } #define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \ BUILD_BUG_ON(!__builtin_constant_p(id)); \ BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ }) static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trigger) return; if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) return; +#if defined(__linux__) iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); +#elif defined(__FreeBSD__) + iwl_fw_dbg_collect_trig(fwrt, trigger, ""); +#endif } #define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop); #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) { if (fwrt->cur_fw_img == IWL_UCODE_REGULAR && (fwrt->fw->dbg.dest_tlv || fwrt->trans->dbg.ini_dest != IWL_FW_INI_LOCATION_INVALID)) fwrt->trans->dbg.rec_on = true; } #endif static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) { fwrt->dump.conf = FW_DBG_INVALID; } void iwl_fw_error_dump_wk(struct work_struct *work); static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type) { return (fwrt->fw->dbg.dump_mask & BIT(type)); } static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && fwrt->trans->cfg->d3_debug_data_length 
&& fwrt->ops && fwrt->ops->d3_debug_enable && fwrt->ops->d3_debug_enable(fwrt->ops_ctx) && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); } static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->trans_cfg->gen2 && fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt) { int i; iwl_dbg_tlv_del_timers(fwrt->trans); for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) flush_delayed_work(&fwrt->dump.wks[i].wk); } #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) { fwrt->timestamp.delay = 0; cancel_delayed_work_sync(&fwrt->timestamp.wk); } void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) { cancel_delayed_work_sync(&fwrt->timestamp.wk); } static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) { if (!fwrt->timestamp.delay) return; schedule_delayed_work(&fwrt->timestamp.wk, round_jiffies_relative(fwrt->timestamp.delay)); } #else static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay) {} static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt); static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans, u32 lmac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_LMAC1) || WARN_ON(trans->dbg.lmac_error_event_table[0] != lmac_error_event_table)) 
trans->dbg.lmac_error_event_table[0] = lmac_error_event_table; } static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, u32 umac_error_event_table) { if (!(trans->dbg.error_event_table_tlv_status & IWL_ERROR_EVENT_TABLE_UMAC) || WARN_ON(trans->dbg.umac_error_event_table != umac_error_event_table)) trans->dbg.umac_error_event_table = umac_error_event_table; } static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync) { enum iwl_fw_ini_time_point tp_id; if (!iwl_trans_dbg_ini_valid(fwrt->trans)) { iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0); return; } if (fwrt->trans->dbg.hw_error) { tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR; fwrt->trans->dbg.hw_error = false; } else { tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT; } _iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync); } void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt); static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, struct iwl_lmac_alive *lmac, struct iwl_umac_alive *umac) { if (lmac) { fwrt->dump.fw_ver.type = lmac->ver_type; fwrt->dump.fw_ver.subtype = lmac->ver_subtype; fwrt->dump.fw_ver.lmac_major = le32_to_cpu(lmac->ucode_major); fwrt->dump.fw_ver.lmac_minor = le32_to_cpu(lmac->ucode_minor); } if (umac) { fwrt->dump.fw_ver.umac_major = le32_to_cpu(umac->umac_major); fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor); } } void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); #endif /* __iwl_fw_dbg_h__ */ diff --git a/sys/contrib/dev/iwlwifi/iwl-debug.c b/sys/contrib/dev/iwlwifi/iwl-debug.c index 5a9c8bc70168..010ac7b4294f 100644 --- a/sys/contrib/dev/iwlwifi/iwl-debug.c +++ b/sys/contrib/dev/iwlwifi/iwl-debug.c @@ -1,169 +1,169 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2005-2011, 2021 Intel Corporation */ #include #include #include #if defined(CONFIG_IWLWIFI_DEBUG) #include 
#endif #include "iwl-drv.h" #include "iwl-debug.h" #if defined(__FreeBSD__) #include "iwl-modparams.h" #endif #include "iwl-devtrace.h" #if defined(__FreeBSD__) #if defined(CONFIG_IWLWIFI_DEBUG) #include /* hexdump(9) */ #include #endif #endif #if defined(__linux__) #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf = { \ .fmt = fmt, \ }; \ va_list args; \ \ va_start(args, fmt); \ vaf.va = &args; \ dev_ ##fn(dev, "%pV", &vaf); \ trace_iwlwifi_ ##fn(&vaf); \ va_end(args); \ } #elif defined(__FreeBSD__) #define __iwl_fn(fn) \ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf = { \ .fmt = fmt, \ }; \ va_list args; \ char *str; \ \ va_start(args, fmt); \ vaf.va = &args; \ - vasprintf(&str, M_KMALLOC, fmt, args); \ + vasprintf(&str, M_KMALLOC, vaf.fmt, args); \ dev_ ##fn(dev, "%s", str); \ trace_iwlwifi_ ##fn(&vaf); \ free(str, M_KMALLOC); \ va_end(args); \ } #endif __iwl_fn(warn) IWL_EXPORT_SYMBOL(__iwl_warn); __iwl_fn(info) IWL_EXPORT_SYMBOL(__iwl_info); __iwl_fn(crit) IWL_EXPORT_SYMBOL(__iwl_crit); void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args, args2; va_start(args, fmt); switch (mode) { case IWL_ERR_MODE_RATELIMIT: if (net_ratelimit()) break; fallthrough; case IWL_ERR_MODE_REGULAR: case IWL_ERR_MODE_RFKILL: va_copy(args2, args); vaf.va = &args2; #if defined(__linux_) if (mode == IWL_ERR_MODE_RFKILL) dev_err(dev, "(RFKILL) %pV", &vaf); else dev_err(dev, "%pV", &vaf); #elif defined(__FreeBSD__) char *str; - vasprintf(&str, M_KMALLOC, fmt, args2); + vasprintf(&str, M_KMALLOC, vaf.fmt, args2); dev_err(dev, "%s%s", (mode == IWL_ERR_MODE_RFKILL) ? 
"(RFKILL)" : "", str); free(str, M_KMALLOC); #endif va_end(args2); break; default: break; } trace_iwlwifi_err(&vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_err); #if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) #ifdef CONFIG_IWLWIFI_DEBUG bool iwl_have_debug_level(enum iwl_dl level) { return (iwlwifi_mod_params.debug_level & level || level == IWL_DL_ANY); } /* Passing the iwl_drv * in seems pointless. */ void iwl_print_hex_dump(void *drv __unused, enum iwl_dl level, #if defined(__linux__) const char *prefix, uint8_t *data, size_t len) #elif defined(__FreeBSD__) const char *prefix, const uint8_t *data, size_t len) #endif { /* Given we have a level, check for it. */ if (!iwl_have_debug_level(level)) return; #if defined(__linux_) /* XXX I am cluseless in my editor. pcie/trans.c to the rescue. */ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, data, len, 0); #elif defined(__FreeBSD__) hexdump(data, len, prefix, 0); #endif } #endif void __iwl_dbg(struct device *dev, u32 level, bool limit, const char *function, const char *fmt, ...) 
{ struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_have_debug_level(level) && (!limit || net_ratelimit())) { #if defined(__linux_) dev_printk(KERN_DEBUG, dev, "%s %pV", function, &vaf); #elif defined(__FreeBSD__) char *str; - vasprintf(&str, M_KMALLOC, fmt, args); + vasprintf(&str, M_KMALLOC, vaf.fmt, args); dev_printk(KERN_DEBUG, dev, "%d %u %s %s", curthread->td_tid, (unsigned int)ticks, function, str); free(str, M_KMALLOC); #endif } #endif trace_iwlwifi_dbg(level, function, &vaf); va_end(args); } IWL_EXPORT_SYMBOL(__iwl_dbg); #endif diff --git a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c index 88893fade5cd..99624505c132 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c @@ -1,1673 +1,1677 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-io.h" #include "iwl-prph.h" #include "fw-api.h" #include "mvm.h" #include "time-event.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, IWL_MVM_TX_FIFO_VI, IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_BK, }; const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_EDCA_TX_FIFO_VO, IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK, IWL_GEN2_TRIG_TX_FIFO_VO, IWL_GEN2_TRIG_TX_FIFO_VI, IWL_GEN2_TRIG_TX_FIFO_BE, IWL_GEN2_TRIG_TX_FIFO_BK, }; struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; enum iwl_tsf_id preferred_tsf; bool found_vif; }; static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; 
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 min_bi; /* Skip the interface for which we are trying to assign a tsf_id */ if (vif == data->vif) return; /* * The TSF is a hardware/firmware resource, there are 4 and * the driver should assign and free them as needed. However, * there are cases where 2 MACs should share the same TSF ID * for the purpose of clock sync, an optimization to avoid * clock drift causing overlapping TBTTs/DTIMs for a GO and * client in the system. * * The firmware will decide according to the MAC type which * will be the leader and follower. Clients that need to sync * with a remote station will be the leader, and an AP or GO * will be the follower. * * Depending on the new interface type it can be following * or become the leader of an existing interface. */ switch (data->vif->type) { case NL80211_IFTYPE_STATION: /* * The new interface is a client, so if the one we're iterating * is an AP, and the beacon interval of the AP is a multiple or * divisor of the beacon interval of the client, the same TSF * should be used to avoid drift between the new client and * existing AP. The existing AP will get drift updates from the * new client context in this case. */ if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; case NL80211_IFTYPE_AP: /* * The new interface is AP/GO, so if its beacon interval is a * multiple or a divisor of the beacon interval of an existing * interface, it should get drift updates from an existing * client or use the same TSF as an existing GO. There's no * drift between TSFs internally but if they used different * TSFs then a new client MAC could update one of them and * cause drift that way. 
*/ if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; default: /* * For all other interface types there's no need to * take drift into account. Either they're exclusive * like IBSS and monitor, or we don't care much about * their TSF (like P2P Device), but we won't be able * to share the TSF resource. */ break; } /* * Unless we exited above, we can't share the TSF resource * that the virtual interface we're iterating over is using * with the new one, so clear the available bit and if this * was the preferred one, reset that as well. */ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); if (data->preferred_tsf == mvmvif->tsf_id) data->preferred_tsf = NUM_TSF_IDS; } static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { data->found_vif = true; return; } /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not * compatible with the new interface type. * No locking or atomic bit operations are needed since the * data is on the stack of the caller function. 
*/ __clear_bit(mvmvif->id, data->available_mac_ids); /* find a suitable tsf_id */ iwl_mvm_mac_tsf_id_iter(_data, mac, vif); } void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, }; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_tsf_id_iter, &data); if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids)) mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); } int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { .mvm = mvm, .vif = vif, .available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 }, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; int ret, i; lockdep_assert_held(&mvm->mutex); /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. */ /* * Before the iterator, we start with all MAC IDs and TSFs available. * * During iteration, all MAC IDs are cleared that are in use by other * virtual interfaces, and all TSF IDs are cleared that can't be used * by this new virtual interface because they're used by an interface * that can't share it with the new one. * At the same time, we check if there's a preferred TSF in the case * that we should share it with another interface. 
*/ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ switch (vif->type) { case NL80211_IFTYPE_ADHOC: break; case NL80211_IFTYPE_STATION: if (!vif->p2p) break; fallthrough; default: __clear_bit(0, data.available_mac_ids); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find * the vif being added already. * We don't want to reassign any IDs in either case since doing * so would probably assign different IDs (as interfaces aren't * necessarily added in the same order), but the old IDs were * preserved anyway, so skip ID assignment for both resume and * recovery. */ if (data.found_vif) return 0; /* Therefore, in recovery, we can't get here */ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return -EBUSY; mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER); if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); ret = -EIO; goto exit_fail; } if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); if (mvmvif->tsf_id == NUM_TSF_IDS) { IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); ret = -EIO; goto exit_fail; } mvmvif->color = 0; INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). 
*/ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; return 0; exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); return ret; } static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; unsigned long basic = vif->bss_conf.basic_rates; int lowest_present_ofdm = 100; int lowest_present_cck = 100; u8 cck = 0; u8 ofdm = 0; int i; sband = mvm->hw->wiphy->bands[band]; for_each_set_bit(i, &basic, BITS_PER_LONG) { int hw = sband->bitrates[i].hw_value; if (hw >= IWL_FIRST_OFDM_RATE) { ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); if (lowest_present_ofdm > hw) lowest_present_ofdm = hw; } else { BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); cck |= BIT(hw); if (lowest_present_cck > hw) lowest_present_cck = hw; } } /* * Now we've got the basic rates as bitmaps in the ofdm and cck * variables. This isn't sufficient though, as there might not * be all the right rates in the bitmap. E.g. if the only basic * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps * and 6 Mbps because the 802.11-2007 standard says in 9.6: * * [...] a STA responding to a received frame shall transmit * its Control Response frame [...] at the highest rate in the * BSSBasicRateSet parameter that is less than or equal to the * rate of the immediately previous frame in the frame exchange * sequence ([...]) and that is of the same modulation class * ([...]) as the received frame. 
If no rate contained in the * BSSBasicRateSet parameter meets these conditions, then the * control frame sent in response to a received frame shall be * transmitted at the highest mandatory rate of the PHY that is * less than or equal to the rate of the received frame, and * that is of the same modulation class as the received frame. * * As a consequence, we need to add all mandatory rates that are * lower than all of the basic rates to these bitmaps. */ if (IWL_RATE_24M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; if (IWL_RATE_12M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; /* 6M already there or needed so always add */ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; /* * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. * Note, however: * - if no CCK rates are basic, it must be ERP since there must * be some basic rates at all, so they're OFDM => ERP PHY * (or we're in 5 GHz, and the cck bitmap will never be used) * - if 11M is a basic rate, it must be ERP as well, so add 5.5M * - if 5.5M is basic, 1M and 2M are mandatory * - if 2M is basic, 1M is mandatory * - if 1M is basic, that's the only valid ACK rate. * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. 
*/ if (IWL_RATE_11M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_5M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_2M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; *cck_rates = cck; *ofdm_rates = ofdm; } static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd) { /* for both sta and ap, ht_operation_mode hold the protection_mode */ u8 protection_mode = vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; /* The fw does not distinguish between ht and fat */ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); /* * See section 9.23.3.1 of IEEE 80211-2012. * Nongreenfield HT STAs Present is not supported. */ switch (protection_mode) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: cmd->protection_flags |= cpu_to_le32(ht_flag); break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) cmd->protection_flags |= cpu_to_le32(ht_flag); break; default: IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode); break; } } static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, const u8 *bssid_override, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *chanctx; bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); u8 cck_ack_rates, ofdm_ack_rates; const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; int i; cmd->id_and_color = 
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cmd->action = cpu_to_le32(action); switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->p2p) cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); else cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); break; case NL80211_IFTYPE_AP: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); break; case NL80211_IFTYPE_MONITOR: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); break; case NL80211_IFTYPE_P2P_DEVICE: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); break; case NL80211_IFTYPE_ADHOC: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); break; default: WARN_ON_ONCE(1); } cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); if (bssid) memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); rcu_read_lock(); chanctx = rcu_dereference(vif->chanctx_conf); iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band : NL80211_BAND_2GHZ, &cck_ack_rates, &ofdm_ack_rates); rcu_read_unlock(); cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); cmd->cck_short_preamble = cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0); cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? 
MAC_FLG_SHORT_SLOT : 0); cmd->filter_flags = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) { u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); cmd->ac[ucode_ac].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); cmd->ac[ucode_ac].cw_max = cpu_to_le16(mvmvif->queue_params[i].cw_max); cmd->ac[ucode_ac].edca_txop = cpu_to_le16(mvmvif->queue_params[i].txop * 32); cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs; cmd->ac[ucode_ac].fifos_mask = BIT(txf); } if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); if (vif->bss_conf.use_cts_prot) cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", vif->bss_conf.use_cts_prot, vif->bss_conf.ht_operation_mode); if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); if (ht_enabled) iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); } static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_ctx_cmd *cmd) { int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd); if (ret) IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", le32_to_cpu(cmd->action), ret); return ret; } static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, const u8 *bssid_override) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_data_sta *ctxt_sta; WARN_ON(vif->type != NL80211_IFTYPE_STATION); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); if (vif->p2p) { struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); ctxt_sta = &cmd.p2p_sta.sta; } else { ctxt_sta = &cmd.sta; } /* We need the dtim_period to set the MAC as associated */ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 
!force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* * The DTIM count counts down, so when it is N that means N * more beacon intervals happen until the DTIM TBTT. Therefore * add this to the current time. If that ends up being in the * future, the firmware will handle it. * * Also note that the system_timestamp (which we get here as * "sync_device_ts") and TSF timestamp aren't at exactly the * same offset in the frame -- the TSF is at the first symbol * of the TSF, the system timestamp is at signal acquisition * time. This means there's an offset between them of at most * a few hundred microseconds (24 * 8 bits + PLCP time gives * 384us in the longest case), this is currently not relevant * as the firmware wakes up around 2ms before the TBTT. */ dtim_offs = vif->bss_conf.sync_dtim_count * vif->bss_conf.beacon_int; /* convert TU to usecs */ dtim_offs *= 1024; ctxt_sta->dtim_tsf = cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); ctxt_sta->dtim_time = cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); ctxt_sta->assoc_beacon_arrive_time = cpu_to_le32(vif->bss_conf.sync_device_ts); IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", le64_to_cpu(ctxt_sta->dtim_tsf), le32_to_cpu(ctxt_sta->dtim_time), dtim_offs); ctxt_sta->is_assoc = cpu_to_le32(1); if (!mvmvif->authorized && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) ctxt_sta->data_policy |= cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); /* * allow multicast data frames only as long as the station is * authorized, i.e., GTK keys are already installed (if needed) */ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (!IS_ERR_OR_NULL(sta)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state == IEEE80211_STA_AUTHORIZED) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); } 
rcu_read_unlock();
		}
	} else {
		ctxt_sta->is_assoc = cpu_to_le32(0);

		/* Allow beacons to pass through as long as we are not
		 * associated, or we do not have dtim period information.
		 */
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
	}

	ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					      vif->bss_conf.dtim_period);

	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);

	/* P2P clients registered for probe requests get them forwarded */
	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	/* HE / TWT policy bits, only when 11ax is not administratively off */
	if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
		if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
			ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
		if (vif->bss_conf.twt_protected)
			ctxt_sta->data_policy |=
				cpu_to_le32(PROTECTED_TWT_SUPPORTED);
		if (vif->bss_conf.twt_broadcast)
			ctxt_sta->data_policy |=
				cpu_to_le32(BROADCAST_TWT_SUPPORTED);
	}

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

/*
 * Build and send the MAC context command for a monitor (sniffer)
 * interface: open the RX filters wide (promisc, control/mgmt, beacons,
 * probe requests, CRC32, multicast) and allocate the internal sniffer
 * station before sending the command to the firmware.
 * Returns 0 on success or a negative error from station allocation /
 * command submission.
 */
static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	u32 tfd_queue_msk = BIT(mvm->snif_queue);
	int ret;

	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
				       MAC_FILTER_IN_CONTROL_AND_MGMT |
				       MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST |
				       MAC_FILTER_IN_CRC32 |
				       MAC_FILTER_ACCEPT_GRP);
	/* MAC_FILTER_IN_CRC32 means frames arrive with FCS attached */
	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);

	/* Allocate sniffer station */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
				       vif->type, IWL_STA_GENERAL_PURPOSE);
	if (ret)
		return ret;

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     u32 action)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct
iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_ACCEPT_GRP); /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); /* TODO: Assumes that the beacon id == mac context id */ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } struct iwl_mvm_go_iterator_data { bool go_active; }; static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_go_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_ibss_active) data->go_active = true; } static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mvm_go_iterator_data data = {}; WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Override the filter flags to accept only probe requests */ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); /* * This flag should be set to true when the P2P Device is * discoverable and there is at least another active P2P GO. Settings * this flag will allow the P2P Device to be discoverable on other * channels in addition to its listen channel. * Note that this flag should not be set in other cases as it opens the * Rx filters on all MAC and increases the number of interrupts. */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_go_iterator, &data); cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 
1 : 0);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

/*
 * Locate the TIM (Traffic Indication Map) element in a beacon frame
 * and fill the firmware command fields describing it.
 * @tim_index: out - little-endian offset of the TIM element from the
 *	start of the frame; left untouched if no TIM element is found
 * @tim_size: out - little-endian length of the TIM element body
 * @beacon: the full beacon frame
 * @frame_size: length of @beacon in bytes
 */
void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index,
			      __le32 *tim_size, u8 *beacon, u32 frame_size)
{
	u32 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/* The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		/* skip this IE: 1 byte ID + 1 byte length + payload */
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		*tim_index = cpu_to_le32(tim_idx);
		*tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
	} else {
		IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
	}
}

/*
 * Return the offset (from frame start) of the first IE with ID @eid in
 * the variable part of the beacon, or 0 if the element is not present
 * (0 can never be a valid IE offset, so it doubles as "not found").
 */
static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
	struct ieee80211_mgmt *mgmt = (void *)beacon;
	const u8 *ie;

	/* frame too short to even contain the fixed beacon fields */
	if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
		return 0;

	frame_size -= mgmt->u.beacon.variable - beacon;

	ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
	if (!ie)
		return 0;

	return ie - beacon;
}

/*
 * Pick the lowest mandatory TX rate for the band: first CCK rate on
 * 2.4 GHz (except P2P, which must not use CCK), first OFDM rate
 * otherwise.
 */
u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
				    struct ieee80211_vif *vif)
{
	u8 rate;

	if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
		rate = IWL_FIRST_CCK_RATE;
	else
		rate = IWL_FIRST_OFDM_RATE;

	return rate;
}

/*
 * Compute the beacon-template flags for @rate_idx; BEACON_TEMPLATE_CMD
 * versions > 10 use a different CCK flag bit than older firmware.
 */
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
	u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
	bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;

	if (rate_idx <= IWL_FIRST_CCK_RATE)
		flags |= is_new_rate ?
IWL_MAC_BEACON_CCK : IWL_MAC_BEACON_CCK_V1; return flags; } static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info; u8 rate; u32 tx_flags; info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ tx->len = cpu_to_le16((u16)beacon->len); tx->sta_id = mvmvif->bcast_sta.sta_id; tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate)); if (rate == IWL_FIRST_CCK_RATE) tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1); } int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len) { struct iwl_host_cmd cmd = { .id = BEACON_TEMPLATE_CMD, .flags = CMD_ASYNC, }; cmd.len[0] = len; cmd.data[0] = data; cmd.dataflags[0] = 0; cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); return 
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); u16 flags; struct ieee80211_chanctx_conf *ctx; int channel; flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate); /* Enable FILS on PSC channels only */ rcu_read_lock(); ctx = rcu_dereference(vif->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && !IWL_MVM_DISABLE_AP_FILS) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? 
IWL_MAC_BEACON_FILS : IWL_MAC_BEACON_FILS_V1; beacon_cmd.short_ssid = cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, vif->bss_conf.ssid_len)); } rcu_read_unlock(); beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { if (WARN_ON(!beacon)) return -EINVAL; if (IWL_MVM_NON_TRANSMITTING_AP) return 0; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); } /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct sk_buff *beacon; int ret; WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC); beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); if (!beacon) return -ENOMEM; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->beacon_inject_active) { dev_kfree_skb(beacon); return -EBUSY; } #endif ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); dev_kfree_skb(beacon); return ret; } struct iwl_mvm_mac_ap_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; u32 beacon_device_ts; u16 beacon_int; }; /* Find the 
beacon_device_ts and beacon_int for a managed interface */ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_ap_iterator_data *data = _data; if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) return; /* Station client has higher priority over P2P client*/ if (vif->p2p && data->beacon_device_ts) return; data->beacon_device_ts = vif->bss_conf.sync_device_ts; data->beacon_int = vif->bss_conf.beacon_int; } /* * Fill the specific data for mac context of type AP of P2P GO */ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, struct iwl_mac_data_ap *ctxt_ap, bool add) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_ap_iterator_data data = { .mvm = mvm, .vif = vif, .beacon_device_ts = 0 }; /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); /* * in AP mode, pass probe requests and beacons from other APs * (needed for ht protection); when there're no any associated * station don't ask FW to pass beacons to prevent unnecessary * wake-ups. */ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); } else { IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); } ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue); /* * Only set the beacon time when the MAC is being added, when we * just modify the MAC then we should keep the time -- the firmware * can otherwise have a "jumping" TBTT. 
*/ if (add) { /* * If there is a station/P2P client interface which is * associated, set the AP's TBTT far enough from the station's * TBTT. Otherwise, set it to the current system time */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_ap_iterator, &data); if (data.beacon_device_ts) { u32 rand = (prandom_u32() % (64 - 36)) + 36; mvmvif->ap_beacon_time = data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100); } else { mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm); } } ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); ctxt_ap->beacon_tsf = 0; /* unused */ /* TODO: Assume that the beacon id == mac context id */ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); } static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for ap mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for GO mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, 
struct ieee80211_vif *vif,
				u32 action, bool force_assoc_off,
				const u8 *bssid_override)
{
	/* Dispatch to the interface-type-specific MAC context builder */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
						force_assoc_off,
						bssid_override);
	case NL80211_IFTYPE_AP:
		if (!vif->p2p)
			return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
		else
			return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
	case NL80211_IFTYPE_MONITOR:
		return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
	case NL80211_IFTYPE_P2P_DEVICE:
		return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
	case NL80211_IFTYPE_ADHOC:
		return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/*
 * Upload a new MAC context for @vif to the firmware.  The context is
 * always added as unassociated (force_assoc_off = true); association
 * state is applied later via iwl_mvm_mac_ctxt_changed().
 * Returns 0 on success, -EIO if the context was already uploaded, or
 * a negative error from the firmware command.
 */
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
				   true, NULL);
	if (ret)
		return ret;

	/* will only do anything at resume from D3 time */
	iwl_mvm_set_last_nonqos_seq(mvm, vif);

	mvmvif->uploaded = true;
	return 0;
}

/*
 * Modify an already-uploaded MAC context (association state, BSSID
 * override, etc.).  Returns -EIO if the context was never uploaded.
 */
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     bool force_assoc_off, const u8 *bssid_override)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
				    force_assoc_off, bssid_override);
}

/*
 * Remove the MAC context for @vif from the firmware and clear the
 * uploaded flag; for monitor interfaces also undo the sniffer setup.
 */
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_ctx_cmd cmd;
	int ret;

	if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	memset(&cmd, 0, sizeof(cmd));

	/* only id_and_color and action are needed for a remove */
	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							   mvmvif->color));
	cmd.action =
cpu_to_le32(FW_CTXT_ACTION_REMOVE); ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd); if (ret) { IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); return ret; } mvmvif->uploaded = false; if (vif->type == NL80211_IFTYPE_MONITOR) { __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); iwl_mvm_dealloc_snif_sta(mvm); } return 0; } static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, struct ieee80211_vif *csa_vif, u32 gp2, bool tx_success) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); /* Don't start to countdown from a failed beacon */ if (!tx_success && !mvmvif->csa_countdown) return; mvmvif->csa_countdown = true; if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { int c = ieee80211_beacon_update_cntdwn(csa_vif); iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) { u32 rel_time = (c + 1) * csa_vif->bss_conf.beacon_int - IWL_MVM_CHANNEL_SWITCH_TIME_GO; u32 apply_time = gp2 + rel_time * 1024; iwl_mvm_schedule_csa_period(mvm, csa_vif, IWL_MVM_CHANNEL_SWITCH_TIME_GO - IWL_MVM_CHANNEL_SWITCH_MARGIN, apply_time); } } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) { /* we don't have CSA NoA scheduled yet, switch now */ ieee80211_csa_finish(csa_vif); RCU_INIT_POINTER(mvm->csa_vif, NULL); } } void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; struct agg_tx_status *agg_status; u16 status; lockdep_assert_held(&mvm->mutex); mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { struct iwl_mvm_tx_resp *beacon_notify_hdr = &beacon_v5->beacon_notify_hdr; if 
(unlikely(pkt_len < sizeof(*beacon_v5))) return; mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0; agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", status, beacon_notify_hdr->failure_frame, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2, le32_to_cpu(beacon_notify_hdr->initial_rate)); } else { if (unlikely(pkt_len < sizeof(*beacon))) return; mvm->ibss_manager = beacon->ibss_mgr_status != 0; status = le32_to_cpu(beacon->status) & TX_STATUS_MSK; IWL_DEBUG_RX(mvm, "beacon status %#x tsf:0x%016llX gp2:0x%X\n", status, le64_to_cpu(beacon->tsf), mvm->ap_last_beacon_gp2); } csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (unlikely(csa_vif && csa_vif->csa_active)) iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, (status == TX_STATUS_SUCCESS)); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (unlikely(tx_blocked_vif)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); /* * The channel switch is started and we have blocked the * stations. 
If this is the first beacon (the timeout wasn't * set), set the unblock timeout, otherwise countdown */ if (!mvm->csa_tx_block_bcn_timeout) mvm->csa_tx_block_bcn_timeout = IWL_MVM_CS_UNBLOCK_TX_TIMEOUT; else mvm->csa_tx_block_bcn_timeout--; /* Check if the timeout is expired, and unblock tx */ if (mvm->csa_tx_block_bcn_timeout == 0) { iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); } } } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; struct ieee80211_vif *vif; u32 id = le32_to_cpu(mb->mac_id); union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; IWL_DEBUG_INFO(mvm, "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", le32_to_cpu(mb->mac_id), le32_to_cpu(mb->consec_missed_beacons), le32_to_cpu(mb->consec_missed_beacons_since_last_rx), le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons)); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) goto out; rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = le32_to_cpu(mb->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. 
*/ if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) iwl_mvm_connection_loss(mvm, vif, "missed beacons"); else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data); trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) goto out; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); stop_trig_missed_bcon_since_rx = le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx); /* TODO: implement start trigger */ if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) +#if defined(__linux__) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); +#elif defined(__FreeBSD__) + iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, ""); +#endif out: rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data; struct ieee80211_rx_status rx_status; struct sk_buff *skb; u8 *data; u32 size = le32_to_cpu(sb->byte_count); int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF), 0); if (size == 0) return; /* handle per-version differences */ if (ver <= 2) { struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data; if (pkt_len < struct_size(sb_v2, data, size)) return; data = sb_v2->data; } else { struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data; if (pkt_len < struct_size(sb_v3, data, size)) return; data = sb_v3->data; } skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } /* update rx_status according to the notification's metadata */ memset(&rx_status, 0, sizeof(rx_status)); rx_status.mactime = 
le64_to_cpu(sb->tsf); /* TSF as indicated by the firmware is at INA time */ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; rx_status.freq = ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), rx_status.band); /* copy the data */ skb_put_data(skb, data, size); memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); /* pass it as regular rx to mac80211 */ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; struct iwl_probe_resp_data *old_data, *new_data; u32 id = le32_to_cpu(notif->mac_id); struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", notif->noa_active, notif->csa_counter); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); if (!vif) return; mvmvif = iwl_mvm_vif_from_mac80211(vif); new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); if (!new_data) return; memcpy(&new_data->notif, notif, sizeof(new_data->notif)); /* noa_attr contains 1 reserved byte, need to substruct it */ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + sizeof(new_data->notif.noa_attr) - 1; /* * If it's a one time NoA, only one descriptor is needed, * adjust the length according to len_low. 
*/ if (new_data->notif.noa_attr.len_low == sizeof(struct ieee80211_p2p_noa_desc) + 2) new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc); old_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvmvif->mvm->mutex)); rcu_assign_pointer(mvmvif->probe_resp_data, new_data); if (old_data) kfree_rcu(old_data, rcu_head); if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && notif->csa_counter >= 1) ieee80211_beacon_set_cntdwn(vif, notif->csa_counter); } void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; struct ieee80211_vif *csa_vif, *vif; struct iwl_mvm_vif *mvmvif; u32 id_n_color, csa_id, mac_id; id_n_color = le32_to_cpu(notif->id_and_color); mac_id = id_n_color & FW_CTXT_ID_MSK; if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER)) return; rcu_read_lock(); vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]); mvmvif = iwl_mvm_vif_from_mac80211(vif); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference(mvm->csa_vif); if (WARN_ON(!csa_vif || !csa_vif->csa_active || csa_vif != vif)) goto out_unlock; csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); if (WARN(csa_id != id_n_color, "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", csa_id, id_n_color)) goto out_unlock; IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n"); schedule_delayed_work(&mvm->cs_tx_unblock_dwork, msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * csa_vif->bss_conf.beacon_int)); ieee80211_csa_finish(csa_vif); rcu_read_unlock(); RCU_INIT_POINTER(mvm->csa_vif, NULL); return; case NL80211_IFTYPE_STATION: /* * if we don't know about an ongoing channel switch, * make sure FW cancels it */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0) && !vif->csa_active) { IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n"); 
iwl_mvm_cancel_channel_switch(mvm, vif, mac_id); break; } iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(vif, true); break; default: /* should never happen */ WARN_ON_ONCE(1); break; } out_unlock: rcu_read_unlock(); } void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_channel_switch_error_notif *notif = (void *)pkt->data; struct ieee80211_vif *vif; u32 id = le32_to_cpu(notif->mac_id); u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) { rcu_read_unlock(); return; } IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n", id, csa_err_mask); if (csa_err_mask & (CS_ERR_COUNT_ERROR | CS_ERR_LONG_DELAY_AFTER_CS | CS_ERR_TX_BLOCK_TIMER_EXPIRED)) ieee80211_channel_switch_disconnect(vif, true); rcu_read_unlock(); } void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_vap_notif *mb = (void *)pkt->data; struct ieee80211_vif *vif; u32 id = le32_to_cpu(mb->mac_id); IWL_DEBUG_INFO(mvm, "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n", le32_to_cpu(mb->mac_id), mb->num_beacon_intervals_elapsed, mb->profile_periodicity); rcu_read_lock(); vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (vif) iwl_mvm_connection_loss(mvm, vif, "missed vap beacon"); rcu_read_unlock(); } diff --git a/sys/contrib/dev/iwlwifi/mvm/rx.c b/sys/contrib/dev/iwlwifi/mvm/rx.c index 72d03c5260dc..ee9249a0c5b0 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rx.c +++ b/sys/contrib/dev/iwlwifi/mvm/rx.c @@ -1,1056 +1,1064 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland 
GmbH */
/*
 * NOTE(review): the header names after the next three #include directives
 * appear to have been lost in extraction (likely <linux/...> system
 * headers such as skbuff/etherdevice) — restore from the upstream rx.c.
 */
#include
#include
#include
#include "iwl-trans.h"
#include "mvm.h"
#include "fw-api.h"

/*
 * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
 *
 * Copies the phy information in mvm->last_phy_info, it will be used when the
 * actual data will come from the fw in the next packet.
 */
void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);

	/* too-short notifications are silently ignored */
	if (unlikely(pkt_len < sizeof(mvm->last_phy_info)))
		return;

	memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
	/* each PHY notification starts a new A-MPDU reference */
	mvm->ampdu_ref++;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
		spin_lock(&mvm->drv_stats_lock);
		mvm->drv_rx_stats.ampdu_count++;
		spin_unlock(&mvm->drv_stats_lock);
	}
#endif
}

/*
 * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
 *
 * Adds the rxb to a new skb and give it to mac80211
 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct ieee80211_sta *sta,
					    struct napi_struct *napi,
					    struct sk_buff *skb,
					    struct ieee80211_hdr *hdr, u16 len,
					    u8 crypt_len,
					    struct iwl_rx_cmd_buffer *rxb)
{
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	unsigned int fraglen;

	/*
	 * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
	 * but those are all multiples of 4 long) all goes away, but we
	 * want the *end* of it, which is going to be the start of the IP
	 * header, to be aligned when it gets pulled in.
	 * The beginning of the skb->data is aligned on at least a 4-byte
	 * boundary after allocation. Everything here is aligned at least
	 * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
	 * the result.
	 */
	skb_reserve(skb, hdrlen & 3);

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;

	skb_put_data(skb, hdr, hdrlen);
	fraglen = len - hdrlen;

	if (fraglen) {
		/* rest of the frame stays in the (stolen) rxb page */
		int offset = (u8 *)hdr + hdrlen -
			     (u8 *)rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}

/*
 * iwl_mvm_get_signal_strength - use new rx PHY INFO API
 * values are reported by the fw as positive values - need to negate
 * to obtain their dBM. Account for missing antennas by replacing 0
 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
 */
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct iwl_rx_phy_info *phy_info,
					struct ieee80211_rx_status *rx_status)
{
	int energy_a, energy_b, max_energy;
	u32 val;

	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
			IWL_RX_INFO_ENERGY_ANT_A_POS;
	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
			IWL_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : S8_MIN;
	max_energy = max(energy_a, energy_b);

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d , and max %d\n",
			energy_a, energy_b, max_energy);

	rx_status->signal = max_energy;
	rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
				RX_RES_PHY_FLAGS_ANTENNA)
					>> RX_RES_PHY_FLAGS_ANTENNA_POS;
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
}

/*
 * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
 * @mvm: the mvm object
 * @hdr: 80211 header
 * @stats: status in mac80211's format
 * @rx_pkt_status: status coming from fw
 *
 * returns non 0 value if the packet should be dropped
 */
static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
					struct ieee80211_hdr *hdr,
					struct ieee80211_rx_status *stats,
					u32 rx_pkt_status, u8 *crypt_len)
{
	/* unprotected (or not-encrypted-per-fw) frames need no handling */
	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
			     RX_MPDU_RES_STATUS_SEC_NO_ENC)
		return 0;

	/* packet was encrypted with unknown alg */
	if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
					RX_MPDU_RES_STATUS_SEC_ENC_ERR)
		return 0;

	switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
	case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
		/* alg is CCM: check MIC only */
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;

	case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;
		*crypt_len = IEEE80211_TKIP_IV_LEN;
		fallthrough;

	case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		/* WEP reaches here either directly or via TKIP fallthrough;
		 * only set the WEP IV length in the genuine WEP case */
		if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
				RX_MPDU_RES_STATUS_SEC_WEP_ENC)
			*crypt_len = IEEE80211_WEP_IV_LEN;
		return 0;

	case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;

	default:
		/* Expected in monitor (not having the keys) */
#if defined(__linux__)
		if (!mvm->monitor_on)
			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
#elif defined(__FreeBSD__)
		if (!mvm->monitor_on && net_ratelimit())
			IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n",
			    __func__, rx_pkt_status);
#endif
	}

	return 0;
}

/*
 * Update traffic-load (TCM) accounting for a received frame: per-AC packet
 * counters, per-A-MPDU airtime, and the U-APSD non-aggregation detector's
 * byte/rate tracking (HT/VHT data frames from our AP only).
 */
static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  struct ieee80211_hdr *hdr, u32 len,
				  struct iwl_rx_phy_info *phy_info,
				  u32 rate_n_flags)
{
	struct iwl_mvm_sta *mvmsta;
	struct iwl_mvm_tcm_mac *mdata;
	int mac;
	int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
	struct iwl_mvm_vif *mvmvif;
	/* expected throughput in 100Kbps, single stream, 20 MHz */
	static const u8 thresh_tpt[] = {
		9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
	};
	u16 thr;

	if (ieee80211_is_data_qos(hdr->frame_control))
		ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;

	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
		schedule_delayed_work(&mvm->tcm.work, 0);
	mdata = &mvm->tcm.data[mac];
	mdata->rx.pkts[ac]++;

	/* count the airtime only once for each ampdu */
	if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
		mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
		mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
	}

	/* rate tracking below only applies to HT/VHT frames */
	if (!(rate_n_flags & (RATE_MCS_HT_MSK_V1 | RATE_MCS_VHT_MSK_V1)))
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	if (mdata->opened_rx_ba_sessions ||
	    mdata->uapsd_nonagg_detect.detected ||
	    (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
	     !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
	     !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
	     !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
	    mvmsta->sta_id != mvmvif->ap_sta_id)
		return;

	if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
		thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1];
		thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK_V1) >>
					RATE_HT_MCS_NSS_POS_V1);
	} else {
		if (WARN_ON((rate_n_flags &
			     RATE_VHT_MCS_RATE_CODE_MSK) >=
				ARRAY_SIZE(thresh_tpt)))
			return;
		thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
		thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
					RATE_VHT_MCS_NSS_POS);
	}

	/* scale by channel width (20/40/80/160 MHz) */
	thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >>
				RATE_MCS_CHAN_WIDTH_POS);

	mdata->uapsd_nonagg_detect.rx_bytes += len;
	ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
}

/*
 * Mark the skb as checksum-verified when the vif requested RX checksum
 * offload and the firmware reports the checksum as done and OK.
 */
static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    u32 status)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	if (mvmvif->features & NETIF_F_RXCSUM &&
	    status & RX_MPDU_RES_STATUS_CSUM_DONE &&
	    status & RX_MPDU_RES_STATUS_CSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/*
 * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
 *
 * Handles the actual data of the Rx packet from the fw
 */
void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
			struct iwl_rx_cmd_buffer *rxb)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_rx_status *rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_info *phy_info;
	struct iwl_rx_mpdu_res_start *rx_res;
	struct ieee80211_sta *sta = NULL;
	struct sk_buff *skb;
	u32 len, pkt_len = iwl_rx_packet_payload_len(pkt);
	u32 rate_n_flags;
	u32 rx_pkt_status;
	u8 crypt_len = 0;

	if (unlikely(pkt_len < sizeof(*rx_res))) {
		IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n");
		return;
	}

	/* PHY info was stashed by the preceding REPLY_RX_PHY_CMD */
	phy_info = &mvm->last_phy_info;
	rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
	hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
	len = le16_to_cpu(rx_res->byte_count);

	/* validate the fw-reported length against the real payload size */
	if (unlikely(len + sizeof(*rx_res) + sizeof(__le32) > pkt_len)) {
		IWL_DEBUG_DROP(mvm, "FW lied about packet len\n");
		return;
	}

	/* the per-MPDU status word trails the frame data */
	rx_pkt_status = get_unaligned_le32((__le32 *)
		(pkt->data + sizeof(*rx_res) + len));

	/* Dont use dev_alloc_skb(), we'll have enough headroom once
	 * ieee80211_hdr pulled.
	 */
	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);

	/*
	 * drop the packet if it has failed being decrypted by HW
	 */
	if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
					 &crypt_len)) {
		IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
			       rx_pkt_status);
		kfree_skb(skb);
		return;
	}

	/*
	 * Keep packets with CRC errors (and with overrun) for monitor mode
	 * (otherwise the firmware discards them) but mark them as bad.
	 */
	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status->mactime = le64_to_cpu(phy_info->timestamp);
	rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
	rx_status->band = (phy_info->phy_flags &
			   cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	rx_status->freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
					       rx_status->band);

	/* TSF as indicated by the firmware is at INA time */
	rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;

	iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);

	IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
			      (unsigned long long)rx_status->mactime);

	rcu_read_lock();
	if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
		u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;

		id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;

		if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
			if (IS_ERR(sta))
				sta = NULL;
		}
	} else if (!is_multicast_ether_addr(hdr->addr2)) {
		/* This is fine since we prevent two stations with the same
		 * address from being added.
		 */
		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_vif *tx_blocked_vif =
			rcu_dereference(mvm->csa_tx_blocked_vif);
		struct iwl_fw_dbg_trigger_tlv *trig;
		struct ieee80211_vif *vif = mvmsta->vif;

		/* We have tx blocked stations (with CS bit). If we heard
		 * frames from a blocked station on a new channel we can
		 * TX to it again.
		 */
		if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
			struct iwl_mvm_vif *mvmvif =
				iwl_mvm_vif_from_mac80211(tx_blocked_vif);

			if (mvmvif->csa_target_freq == rx_status->freq)
				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
								 false);
		}

		rs_update_last_rssi(mvm, mvmsta, rx_status);

		trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
					     ieee80211_vif_to_wdev(vif),
					     FW_DBG_TRIGGER_RSSI);

		if (trig && ieee80211_is_beacon(hdr->frame_control)) {
			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
			s32 rssi;

			rssi_trig = (void *)trig->data;
			rssi = le32_to_cpu(rssi_trig->rssi);

			/* NOTE(review): stray diff '+' markers below look
			 * like an extraction artifact of an unapplied
			 * patch hunk — likely these lines belong in the
			 * file without the leading '+'; verify upstream. */
			if (rx_status->signal < rssi)
				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+#if defined(__linux__)
							NULL);
+#elif defined(__FreeBSD__)
+							"");
+#endif
		}

		if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
		    !is_multicast_ether_addr(hdr->addr1) &&
		    ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
					      rate_n_flags);

		if (ieee80211_is_data(hdr->frame_control))
			iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
	}
	rcu_read_unlock();

	/* set the preamble flag if appropriate */
	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;

	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
		/*
		 * We know which subframes of an A-MPDU belong
		 * together since we get a single PHY response
		 * from the firmware for all of them
		 */
		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
		rx_status->ampdu_reference = mvm->ampdu_ref;
	}

	/* Set up the HT phy flags */
	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) {
	case RATE_MCS_CHAN_WIDTH_20:
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rx_status->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rx_status->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rx_status->bw = RATE_INFO_BW_160;
		break;
	}
	if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) &&
	    rate_n_flags & RATE_MCS_SGI_MSK_V1)
		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
	if (rate_n_flags & RATE_MCS_LDPC_MSK_V1)
		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
	if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
				RATE_MCS_STBC_POS;
		rx_status->encoding = RX_ENC_HT;
		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
	} else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
				RATE_MCS_STBC_POS;
		rx_status->nss =
			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
						RATE_VHT_MCS_NSS_POS) + 1;
		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
		rx_status->encoding = RX_ENC_VHT;
		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
		if (rate_n_flags & RATE_MCS_BF_MSK)
			rx_status->enc_flags |= RX_ENC_FLAG_BF;
	} else {
		/* legacy (non-HT/VHT) rate */
		int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
							       rx_status->band);

		if (WARN(rate < 0 || rate > 0xFF,
			 "Invalid rate flags 0x%x, band %d,\n",
			 rate_n_flags, rx_status->band)) {
			kfree_skb(skb);
			return;
		}
		rx_status->rate_idx = rate;
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif

	if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
		      ieee80211_is_probe_resp(hdr->frame_control)) &&
		     mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;

	if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
		     ieee80211_is_probe_resp(hdr->frame_control)))
		rx_status->boottime_ns = ktime_get_boottime_ns();

	iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
					crypt_len, rxb);
}

/* per-vif statistics context for the pre-v14 notification format */
struct iwl_mvm_stat_data {
	struct iwl_mvm *mvm;
	__le32 flags;
	__le32 mac_id;
	u8 beacon_filter_average_energy;
	__le32 *beacon_counter;
	u8 *beacon_average_energy;
};

/* per-vif statistics context for the all-MACs (v15+) notification format */
struct iwl_mvm_stat_data_all_macs {
	struct iwl_mvm *mvm;
	__le32 flags;
	struct iwl_statistics_ntfy_per_mac *per_mac_stats;
};

/*
 * Process a new average beacon signal value for a vif: feed the BT-coex
 * high/low RSSI events and the mac80211 CQM threshold notifications.
 * A value of 0 means "no measurement" and is skipped.
 */
static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	int thold = vif->bss_conf.cqm_rssi_thold;
	int hyst = vif->bss_conf.cqm_rssi_hyst;
	int last_event;

	if (sig == 0) {
		IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
		return;
	}

	mvmvif->bf_data.ave_beacon_signal = sig;

	/* BT Coex */
	if (mvmvif->bf_data.bt_coex_min_thold !=
	    mvmvif->bf_data.bt_coex_max_thold) {
		last_event = mvmvif->bf_data.last_bt_coex_event;
		if (sig > mvmvif->bf_data.bt_coex_max_thold &&
		    (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
		     last_event == 0)) {
			mvmvif->bf_data.last_bt_coex_event = sig;
			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
				     sig);
			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
		} else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
			   (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
			    last_event == 0)) {
			mvmvif->bf_data.last_bt_coex_event = sig;
			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
				     sig);
			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
		}
	}

	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
		return;

	/* CQM Notification */
	last_event = mvmvif->bf_data.last_cqm_event;
	if (thold && sig < thold && (last_event == 0 ||
				     sig < last_event - hyst)) {
		mvmvif->bf_data.last_cqm_event = sig;
		IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n", sig);
		ieee80211_cqm_rssi_notify(
			vif,
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
			sig,
			GFP_KERNEL);
	} else if (sig > thold &&
		   (last_event == 0 || sig > last_event + hyst)) {
		mvmvif->bf_data.last_cqm_event = sig;
		IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n", sig);
		ieee80211_cqm_rssi_notify(
			vif,
			NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
			sig,
			GFP_KERNEL);
	}
}

/* Interface iterator applying pre-v14 statistics to a station vif. */
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct iwl_mvm_stat_data *data = _data;
	int sig = -data->beacon_filter_average_energy;
	u16 id = le32_to_cpu(data->mac_id);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 vif_id = mvmvif->id;

	/* This doesn't need the MAC ID check since it's not taking the
	 * data copied into the "data" struct, but rather the data from
	 * the notification directly.
	 */
	mvmvif->beacon_stats.num_beacons =
		le32_to_cpu(data->beacon_counter[vif_id]);
	mvmvif->beacon_stats.avg_signal =
		-data->beacon_average_energy[vif_id];

	if (mvmvif->id != id)
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* make sure that beacon statistics don't go backwards with TCM
	 * request to clear statistics
	 */
	if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	iwl_mvm_update_vif_sig(vif, sig);
}

/* Interface iterator applying v15+ per-MAC statistics to a station vif. */
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	struct iwl_mvm_stat_data_all_macs *data = _data;
	struct iwl_statistics_ntfy_per_mac *mac_stats;
	int sig;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 vif_id = mvmvif->id;

	if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id))
		return;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	mac_stats = &data->per_mac_stats[vif_id];

	mvmvif->beacon_stats.num_beacons =
		le32_to_cpu(mac_stats->beacon_counter);
	mvmvif->beacon_stats.avg_signal =
		-le32_to_cpu(mac_stats->beacon_average_energy);

	/* make sure that beacon statistics don't go backwards with TCM
	 * request to clear statistics
	 */
	if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
	iwl_mvm_update_vif_sig(vif, sig);
}

/*
 * Fire the FW_DBG_TRIGGER_STATS debug trigger when the configured
 * statistics field (at stop_offset) reaches its stop_threshold.
 */
static inline void
iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_stats *trig_stats;
	u32 trig_offset, trig_thold;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
	if (!trig)
		return;

	trig_stats = (void *)trig->data;
	trig_offset = le32_to_cpu(trig_stats->stop_offset);
	trig_thold = le32_to_cpu(trig_stats->stop_threshold);

	if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
		return;

	if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
		return;

/* NOTE(review): stray diff '+' markers below look like an extraction
 * artifact of an unapplied patch hunk; verify against the upstream file. */
+#if defined(__linux__)
	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+#elif defined(__FreeBSD__)
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "");
+#endif
}

/* Station iterator: copy the fw-reported average energy per station id. */
static void iwl_mvm_stats_energy_iter(void *_data,
				      struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	u8 *energy = _data;
	u32 sta_id = mvmsta->sta_id;

	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d",
		      sta_id, IWL_MVM_STATION_COUNT_MAX))
		return;

	if (energy[sta_id])
		mvmsta->avg_energy = energy[sta_id];
}

/* Fold per-MAC airtime/byte counters from a statistics ntfy into TCM. */
static void
iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
			      __le32 *rx_bytes_le)
{
	int i;

	spin_lock(&mvm->tcm.lock);
	for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
		u32 rx_bytes = le32_to_cpu(rx_bytes_le[i]);
		u32 airtime = le32_to_cpu(air_time_le[i]);

		mdata->rx.airtime += airtime;
		mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
		if (airtime) {
			/* re-init every time to store rate from FW */
			ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
			ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
				      rx_bytes * 8 / airtime);
		}
	}
	spin_unlock(&mvm->tcm.lock);
}

/* Apply a v15 operational statistics notification to all station vifs. */
static void iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
				 struct iwl_statistics_operational_ntfy *stats)
{
	struct iwl_mvm_stat_data_all_macs data = {
		.mvm = mvm,
		.flags = stats->flags,
		.per_mac_stats = stats->per_mac_stats,
	};

	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_stat_iterator_all_macs,
					    &data);
}

/* Apply a v14 operational statistics notification to all station vifs. */
static void iwl_mvm_stats_ver_14(struct iwl_mvm *mvm,
				 struct iwl_statistics_operational_ntfy_ver_14 *stats)
{
	struct iwl_mvm_stat_data data = {
		.mvm = mvm,
	};
	u8 beacon_average_energy[MAC_INDEX_AUX];
	__le32 flags;
	int i;

	flags = stats->flags;

	data.mac_id = stats->mac_id;
	data.beacon_filter_average_energy =
		le32_to_cpu(stats->beacon_filter_average_energy);
	data.flags = flags;
	data.beacon_counter = stats->beacon_counter;

	/* narrow the fw's __le32 energies into the u8 array the
	 * iterator expects */
	for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
		beacon_average_energy[i] =
			le32_to_cpu(stats->beacon_average_energy[i]);

	data.beacon_average_energy = beacon_average_energy;

	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_stat_iterator,
					    &data);
}

/*
 * Validate a TLV-format statistics packet: payload size, header type,
 * notification version and the size recorded in the header itself.
 * Returns true when the packet may be parsed.
 */
static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt,
				     u32 expected_size)
{
	struct iwl_statistics_ntfy_hdr *hdr;

	if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
		      "received invalid statistics size (%d)!, expected_size: %d\n",
		      iwl_rx_packet_payload_len(pkt), expected_size))
		return false;

	hdr = (void *)&pkt->data;
	if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) !=
		      FW_STATISTICS_OPERATIONAL ||
		      hdr->version !=
		      iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					      STATISTICS_NOTIFICATION, 0),
		      "received unsupported hdr type %d, version %d\n",
		      hdr->type, hdr->version))
		return false;

	if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size,
		      "received invalid statistics size in header (%d)!, expected_size: %d\n",
		      le16_to_cpu(hdr->size), expected_size))
		return false;

	return true;
}

/* Handle the TLV statistics format (notification version 14 or 15). */
static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
				 struct iwl_rx_packet *pkt)
{
	u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
	__le32 air_time[MAC_INDEX_AUX];
	__le32 rx_bytes[MAC_INDEX_AUX];
	__le32 flags = 0;
	int i;
	u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
						STATISTICS_NOTIFICATION, 0);

	if (WARN_ONCE(notif_ver > 15,
		      "invalid statistics version id: %d\n", notif_ver))
		return;

	if (notif_ver == 14) {
		struct iwl_statistics_operational_ntfy_ver_14 *stats =
			(void *)pkt->data;

		if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
			return;

		iwl_mvm_stats_ver_14(mvm, stats);

		flags = stats->flags;
		mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
		mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
		mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
		mvm->radio_stats.on_time_scan =
			le64_to_cpu(stats->on_time_scan);

		for (i = 0; i < ARRAY_SIZE(average_energy); i++)
			average_energy[i] =
				le32_to_cpu(stats->average_energy[i]);
		for (i = 0; i < ARRAY_SIZE(air_time); i++) {
			air_time[i] = stats->air_time[i];
			rx_bytes[i] = stats->rx_bytes[i];
		}
	}

	if (notif_ver == 15) {
		struct iwl_statistics_operational_ntfy *stats =
			(void *)pkt->data;

		if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats)))
			return;

		iwl_mvm_stats_ver_15(mvm, stats);

		flags = stats->flags;
		mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
		mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
		mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
		mvm->radio_stats.on_time_scan =
			le64_to_cpu(stats->on_time_scan);

		for (i = 0; i < ARRAY_SIZE(average_energy); i++)
			average_energy[i] =
				le32_to_cpu(stats->per_sta_stats[i].average_energy);
		for (i = 0; i < ARRAY_SIZE(air_time); i++) {
			air_time[i] = stats->per_mac_stats[i].air_time;
			rx_bytes[i] = stats->per_mac_stats[i].rx_bytes;
		}
	}

	iwl_mvm_rx_stats_check_trigger(mvm, pkt);

	ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
					  average_energy);

	/*
	 * Don't update in case the statistics are not cleared, since
	 * we will end up counting twice the same airtime, once in TCM
	 * request and once in statistics notification.
	 */
	if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
		iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes);
}

/*
 * Dispatch a STATISTICS_NOTIFICATION: TLV format for version >= 14,
 * otherwise one of the fixed-layout v10/v11/current structures.
 */
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_mvm_stat_data data = {
		.mvm = mvm,
	};
	__le32 *bytes, *air_time, flags;
	int expected_size;
	u8 *energy;

	/* From ver 14 and up we use TLV statistics format */
	if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
				    STATISTICS_NOTIFICATION, 0) >= 14)
		return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);

	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
		if (iwl_mvm_has_new_rx_api(mvm))
			expected_size = sizeof(struct iwl_notif_statistics_v11);
		else
			expected_size = sizeof(struct iwl_notif_statistics_v10);
	} else {
		expected_size = sizeof(struct iwl_notif_statistics);
	}

	if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
		      "received invalid statistics size (%d)!\n",
		      iwl_rx_packet_payload_len(pkt)))
		return;

	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
		struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;

		data.mac_id = stats->rx.general.mac_id;
		data.beacon_filter_average_energy =
			stats->general.common.beacon_filter_average_energy;

		mvm->rx_stats_v3 = stats->rx;

		mvm->radio_stats.rx_time =
			le64_to_cpu(stats->general.common.rx_time);
		mvm->radio_stats.tx_time =
			le64_to_cpu(stats->general.common.tx_time);
		mvm->radio_stats.on_time_rf =
			le64_to_cpu(stats->general.common.on_time_rf);
		mvm->radio_stats.on_time_scan =
			le64_to_cpu(stats->general.common.on_time_scan);

		data.beacon_counter = stats->general.beacon_counter;
		data.beacon_average_energy =
			stats->general.beacon_average_energy;
		flags = stats->flag;
	} else {
		struct iwl_notif_statistics *stats = (void *)&pkt->data;

		data.mac_id = stats->rx.general.mac_id;
		data.beacon_filter_average_energy =
			stats->general.common.beacon_filter_average_energy;

		mvm->rx_stats = stats->rx;

		mvm->radio_stats.rx_time =
			le64_to_cpu(stats->general.common.rx_time);
		mvm->radio_stats.tx_time =
			le64_to_cpu(stats->general.common.tx_time);
		mvm->radio_stats.on_time_rf =
			le64_to_cpu(stats->general.common.on_time_rf);
		mvm->radio_stats.on_time_scan =
			le64_to_cpu(stats->general.common.on_time_scan);

		data.beacon_counter = stats->general.beacon_counter;
		data.beacon_average_energy =
			stats->general.beacon_average_energy;
		flags = stats->flag;
	}
	data.flags = flags;

	iwl_mvm_rx_stats_check_trigger(mvm, pkt);

	ieee80211_iterate_active_interfaces(mvm->hw,
					    IEEE80211_IFACE_ITER_NORMAL,
					    iwl_mvm_stat_iterator,
					    &data);

	/* load stats (energy/bytes/airtime) only exist with the new RX API */
	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
		struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;

		energy = (void *)&v11->load_stats.avg_energy;
		bytes = (void *)&v11->load_stats.byte_count;
		air_time = (void *)&v11->load_stats.air_time;
	} else {
		struct iwl_notif_statistics *stats = (void *)&pkt->data;

		energy = (void *)&stats->load_stats.avg_energy;
		bytes = (void *)&stats->load_stats.byte_count;
		air_time = (void *)&stats->load_stats.air_time;
	}
	ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
					  energy);

	/*
	 * Don't update in case the statistics are not cleared, since
	 * we will end up counting twice the same airtime, once in TCM
	 * request and once in statistics notification.
	 */
	if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
		iwl_mvm_update_tcm_from_stats(mvm, air_time, bytes);
}

/* Thin rx-buffer wrapper around iwl_mvm_handle_rx_statistics(). */
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
}

/*
 * BA window status notification: forward the firmware's per-stream
 * reorder-window bitmaps to mac80211 for BA-filtered frames.
 */
void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX);

	rcu_read_lock();
	for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
		struct ieee80211_sta *sta;
		u8 sta_id, tid;
		u64 bitmap;
		u32 ssn;
		u16 ratid;
		u16 received_mpdu;

		ratid = le16_to_cpu(notif->ra_tid[i]);
		/* check that this TID is valid */
		if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
			continue;

		received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
		if (received_mpdu == 0)
			continue;

		tid = ratid & BA_WINDOW_STATUS_TID_MSK;
		/* get the station */
		sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
			 >> BA_WINDOW_STATUS_STA_ID_POS;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
		if (IS_ERR_OR_NULL(sta))
			continue;
		bitmap = le64_to_cpu(notif->bitmap[i]);
		ssn = le32_to_cpu(notif->start_seq_num[i]);

		/* update mac80211 with the bitmap for the reordering buffer */
		ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
						     received_mpdu);
	}
	rcu_read_unlock();
}
/* NOTE(review): the following lines are a raw diff hunk header from the
 * concatenated patch (start of mvm/rxmq.c) — an extraction artifact that
 * does not belong in a compilable C file. */
diff --git a/sys/contrib/dev/iwlwifi/mvm/rxmq.c b/sys/contrib/dev/iwlwifi/mvm/rxmq.c
index 7325e9d44273..872a45ea15ca 100644
--- a/sys/contrib/dev/iwlwifi/mvm/rxmq.c
+++ b/sys/contrib/dev/iwlwifi/mvm/rxmq.c
@@ -1,2271 +1,2275 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017
Intel Deutschland GmbH */ #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); u8 *data = skb->data; /* Alignment concerns */ BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4); BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4); if (rx_status->flag & RX_FLAG_RADIOTAP_HE) data += sizeof(struct ieee80211_radiotap_he); if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) data += sizeof(struct ieee80211_radiotap_he_mu); if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG) data += sizeof(struct ieee80211_radiotap_lsig); if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { struct ieee80211_vendor_radiotap *radiotap = (void *)data; data += sizeof(*radiotap) + radiotap->len + radiotap->pad; } return data; } static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta; struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; int res; u8 tid, keyidx; u8 pn[IEEE80211_CCMP_PN_LEN]; u8 *extiv; /* do PN checking */ /* multicast and non-data only arrives on default queue */ if (!ieee80211_is_data(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) return 0; /* do not check PN for open AP */ if (!(stats->flag & RX_FLAG_DECRYPTED)) return 0; /* * avoid checking for default queue - we don't want to replicate * all the logic that's necessary for checking the PN on fragmented * frames, leave that to mac80211 */ if (queue == 0) return 0; /* if we are here - this for sure is either CCMP or GCMP */ if (IS_ERR_OR_NULL(sta)) { IWL_DEBUG_DROP(mvm, "expected hw-decrypted unicast frame for station\n"); return -1; } 
	/*
	 * Tail of iwl_mvm_check_pn() -- the function's opening lines are
	 * before this chunk.  Validates the CCMP/GCMP packet number (PN)
	 * carried in the extended IV against the per-queue replay state.
	 */
	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
	/* key index lives in bits 6-7 of the 4th byte of the extended IV */
	keyidx = extiv[3] >> 6;
	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
	if (!ptk_pn)
		return -1;

	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	else
		tid = 0;

	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
	if (tid >= IWL_MAX_TID_COUNT)
		return -1;

	/* load pn */
	pn[0] = extiv[7];
	pn[1] = extiv[6];
	pn[2] = extiv[5];
	pn[3] = extiv[4];
	pn[4] = extiv[1];
	pn[5] = extiv[0];

	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
	if (res < 0)
		return -1;
	/* equal PN is only acceptable for later A-MSDU subframes */
	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
		return -1;

	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
	stats->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

/* iwl_mvm_create_skb Adds the rxb to a new skb */
static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
			      struct ieee80211_hdr *hdr, u16 len, u8 crypt_len,
			      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
	unsigned int headlen, fraglen, pad_len = 0;
	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* MIC/CRC length is reported in 2-byte units, hence the shift */
	u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
				     IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;

	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
		len -= 2;
		pad_len = 2;
	}

	/*
	 * For non monitor interface strip the bytes the RADA might not have
	 * removed. As monitor interface cannot exist with other interfaces
	 * this removal is safe.
	 */
	if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) {
		u32 pkt_flags = le32_to_cpu(pkt->len_n_flags);

		/*
		 * If RADA was not enabled then decryption was not performed so
		 * the MIC cannot be removed.
		 */
		if (!(pkt_flags & FH_RSCSR_RADA_EN)) {
			if (WARN_ON(crypt_len > mic_crc_len))
				return -EINVAL;
			mic_crc_len -= crypt_len;
		}

		if (WARN_ON(mic_crc_len > len))
			return -EINVAL;

		len -= mic_crc_len;
	}

	/* If frame is small enough to fit in skb->head, pull it completely.
	 * If not, only pull ieee80211_hdr (including crypto if present, and
	 * an additional 8 bytes for SNAP/ethertype, see below) so that
	 * splice() or TCP coalesce are more efficient.
	 *
	 * Since, in addition, ieee80211_data_to_8023() always pull in at
	 * least 8 bytes (possibly more for mesh) we can do the same here
	 * to save the cost of doing it later. That still doesn't pull in
	 * the actual IP header since the typical case has a SNAP header.
	 * If the latter changes (there are efforts in the standards group
	 * to do so) we should revisit this and ieee80211_data_to_8023().
	 */
	headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;

	/* The firmware may align the packet to DWORD.
	 * The padding is inserted after the IV.
	 * After copying the header + IV skip the padding if
	 * present before copying packet data.
	 */
	hdrlen += crypt_len;

	if (unlikely(headlen < hdrlen))
		return -EINVAL;

	skb_put_data(skb, hdr, hdrlen);
	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);

	/*
	 * If we did CHECKSUM_COMPLETE, the hardware only does it right for
	 * certain cases and starts the checksum after the SNAP. Check if
	 * this is the case - it's easier to just bail out to CHECKSUM_NONE
	 * in the cases the hardware didn't handle, since it's rare to see
	 * such packets, even though the hardware did calculate the checksum
	 * in this case, just starting after the MAC header instead.
	 *
	 * Starting from Bz hardware, it calculates starting directly after
	 * the MAC header, so that matches mac80211's expectation.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
		struct {
			u8 hdr[6];
			__be16 type;
		} __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);

		if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
			     !ether_addr_equal(shdr->hdr, rfc1042_header) ||
			     (shdr->type != htons(ETH_P_IP) &&
			      shdr->type != htons(ETH_P_ARP) &&
			      shdr->type != htons(ETH_P_IPV6) &&
			      shdr->type != htons(ETH_P_8021Q) &&
			      shdr->type != htons(ETH_P_PAE) &&
			      shdr->type != htons(ETH_P_TDLS))))
			skb->ip_summed = CHECKSUM_NONE;
		else
			/* mac80211 assumes full CSUM including SNAP header */
			skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
	}

	fraglen = len - headlen;

	if (fraglen) {
		/* remainder of the frame stays in the rxb page as a frag */
		int offset = (u8 *)hdr + headlen + pad_len -
			     (u8 *)rxb_addr(rxb) + rxb_offset(rxb);

		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
				fraglen, rxb->truesize);
	}

	return 0;
}

/*
 * Append an Intel vendor radiotap namespace carrying the current AID to
 * the skb (sniffer mode); no-op when no AID is recorded.
 */
static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
					    struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_vendor_radiotap *radiotap;
	const int size = sizeof(*radiotap) + sizeof(__le16);

	if (!mvm->cur_aid)
		return;

	/* ensure alignment */
	BUILD_BUG_ON((size + 2) % 4);

	radiotap = skb_put(skb, size + 2);
	radiotap->align = 1;
	/* Intel OUI */
	radiotap->oui[0] = 0xf6;
	radiotap->oui[1] = 0x54;
	radiotap->oui[2] = 0x25;
	/* radiotap sniffer config sub-namespace */
	radiotap->subns = 1;
	radiotap->present = 0x1;
	radiotap->len = size - sizeof(*radiotap);
	radiotap->pad = 2;

	/* fill the data now */
	memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid));
	/* and clear the padding */
	memset(radiotap->data + sizeof(__le16), 0, radiotap->pad);

	rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
}

/* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
					    struct napi_struct *napi,
					    struct sk_buff *skb, int queue,
					    struct ieee80211_sta *sta)
{
	/* drop the frame if PN (replay) validation fails */
	if (iwl_mvm_check_pn(mvm, skb, queue, sta))
		kfree_skb(skb);
	else
		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}

/*
 * Fill rx_status signal/chain info from the per-antenna energy values
 * reported by the firmware (reported as positive attenuation; 0 means
 * "no measurement" and maps to S8_MIN).
 */
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
					struct ieee80211_rx_status *rx_status,
					u32 rate_n_flags, int energy_a,
					int energy_b)
{
	int max_energy;
	u32 rate_flags = rate_n_flags;

	energy_a = energy_a ? -energy_a : S8_MIN;
	energy_b = energy_b ? -energy_b : S8_MIN;
	max_energy = max(energy_a, energy_b);

	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
			energy_a, energy_b, max_energy);

	rx_status->signal = max_energy;
	rx_status->chains = (rate_flags & RATE_MCS_ANT_AB_MSK)
				>> RATE_MCS_ANT_POS;
	rx_status->chain_signal[0] = energy_a;
	rx_status->chain_signal[1] = energy_b;
}

/*
 * Handle firmware security status for protected management frames
 * (beacon protection).  Returns 0 to accept the frame, -1 to drop it;
 * on MIC/replay failure the error is reported to mac80211 via the
 * beacon-protection key before dropping.
 */
static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
				struct ieee80211_hdr *hdr,
				struct iwl_rx_mpdu_desc *desc,
				u32 status)
{
	struct iwl_mvm_sta *mvmsta;
	struct iwl_mvm_vif *mvmvif;
	u8 keyid;
	struct ieee80211_key_conf *key;
	u32 len = le16_to_cpu(desc->mpdu_len);
	const u8 *frame = (void *)hdr;

	if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/*
	 * For non-beacon, we don't really care. But beacons may
	 * be filtered out, and we thus need the firmware's replay
	 * detection, otherwise beacons the firmware previously
	 * filtered could be replayed, or something like that, and
	 * it can filter a lot - though usually only if nothing has
	 * changed.
	 */
	if (!ieee80211_is_beacon(hdr->frame_control))
		return 0;

	/* key mismatch - will also report !MIC_OK but we shouldn't count it */
	if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
		return -1;

	/* good cases */
	if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
		   !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
		return 0;

	if (!sta)
		return -1;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

	/*
	 * both keys will have the same cipher and MIC length, use
	 * whichever one is available
	 */
	key = rcu_dereference(mvmvif->bcn_prot.keys[0]);
	if (!key) {
		key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
		if (!key)
			return -1;
	}

	if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
		return -1;

	/* get the real key ID */
	keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
	/* and if that's the other key, look it up */
	if (keyid != key->keyidx) {
		/*
		 * shouldn't happen since firmware checked, but be safe
		 * in case the MIC length is wrong too, for example
		 */
		if (keyid != 6 && keyid != 7)
			return -1;
		key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
		if (!key)
			return -1;
	}

	/* Report status to mac80211 */
	if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
		ieee80211_key_mic_failure(key);
	else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
		ieee80211_key_replay(key);

	return -1;
}

/*
 * Translate the firmware's decryption status into mac80211 RX flags and
 * the crypto header length.  Returns 0 to accept the frame, -1 to drop.
 */
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			     struct ieee80211_hdr *hdr,
			     struct ieee80211_rx_status *stats, u16 phy_info,
			     struct iwl_rx_mpdu_desc *desc,
			     u32 pkt_flags, int queue, u8 *crypt_len)
{
	u32 status = le32_to_cpu(desc->status);

	/*
	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
	 * (where we don't have the keys).
	 * We limit this to aggregation because in TKIP this is a valid
	 * scenario, since we may not have the (correct) TTAK (phase 1
	 * key) in the firmware.
	 */
	if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
		return -1;

	/* unprotected management frames go through beacon-protection checks */
	if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
		     !ieee80211_has_protected(hdr->frame_control)))
		return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);

	if (!ieee80211_has_protected(hdr->frame_control) ||
	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_NONE)
		return 0;

	/* TODO: handle packets encrypted with unknown alg */
#if defined(__FreeBSD__)
	/* XXX-BZ do similar to rx.c for now as these are plenty. */
	if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
	    IWL_RX_MPDU_STATUS_SEC_ENC_ERR)
		return (0);
#endif

	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
	case IWL_RX_MPDU_STATUS_SEC_CCM:
	case IWL_RX_MPDU_STATUS_SEC_GCM:
		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
		/* alg is CCM: check MIC only */
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if (pkt_flags & FH_RSCSR_RADA_EN)
			stats->flag |= RX_FLAG_MIC_STRIPPED;
		*crypt_len = IEEE80211_CCMP_HDR_LEN;
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_TKIP:
		/* Don't drop the frame and decrypt it in SW */
		if (!fw_has_api(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
			return 0;

		if (mvm->trans->trans_cfg->gen2 &&
		    !(status & RX_MPDU_RES_STATUS_MIC_OK))
			stats->flag |= RX_FLAG_MMIC_ERROR;

		*crypt_len = IEEE80211_TKIP_IV_LEN;
		fallthrough;
	case IWL_RX_MPDU_STATUS_SEC_WEP:
		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
			return -1;

		stats->flag |= RX_FLAG_DECRYPTED;
		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
				IWL_RX_MPDU_STATUS_SEC_WEP)
			*crypt_len = IEEE80211_WEP_IV_LEN;

		if (pkt_flags & FH_RSCSR_RADA_EN) {
			stats->flag |= RX_FLAG_ICV_STRIPPED;
			if (mvm->trans->trans_cfg->gen2)
				stats->flag |= RX_FLAG_MMIC_STRIPPED;
		}
		return 0;
	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
			return -1;
		stats->flag |= RX_FLAG_DECRYPTED;
		return 0;
	case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
		break;
	default:
		/*
		 * Sometimes we can get frames that were not decrypted
		 * because the firmware didn't have the keys yet. This can
		 * happen after connection where we can get multicast frames
		 * before the GTK is installed.
		 * Silently drop those frames.
		 * Also drop un-decrypted frames in monitor mode.
		 */
		if (!is_multicast_ether_addr(hdr->addr1) &&
		    !mvm->monitor_on && net_ratelimit())
#if defined(__linux__)
			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
#elif defined(__FreeBSD__)
			IWL_ERR(mvm, "%s: Unhandled alg: 0x%x\n",
			    __func__, status);
#endif
	}

	return 0;
}

/*
 * Propagate the hardware RX checksum result into the skb.  AX210+
 * reports a raw complete checksum; older families only give per-flag
 * "checksum ok" indications (CHECKSUM_UNNECESSARY).
 */
static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_packet *pkt)
{
	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
			u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);

			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = csum_unfold(~(__force __sum16)hwsum);
		}
	} else {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_vif *mvmvif;
		u16 flags = le16_to_cpu(desc->l3l4_flags);
		u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
				  IWL_RX_L3_PROTO_POS);

		mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);

		if (mvmvif->features & NETIF_F_RXCSUM &&
		    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
		    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
		     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

/*
 * returns true if a packet is a duplicate and should be dropped.
* Updates AMSDU PN tracking info */ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, struct ieee80211_rx_status *rx_status, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc) { struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_rxq_dup_data *dup_data; u8 tid, sub_frame_idx; if (WARN_ON(IS_ERR_OR_NULL(sta))) return false; mvm_sta = iwl_mvm_sta_from_mac80211(sta); #if defined(__FreeBSD__) if (WARN_ON(mvm_sta->dup_data == NULL)) return false; #endif dup_data = &mvm_sta->dup_data[queue]; /* * Drop duplicate 802.11 retransmissions * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") */ if (ieee80211_is_ctl(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } if (ieee80211_is_data_qos(hdr->frame_control)) /* frame has qos control */ tid = ieee80211_get_tid(hdr); else tid = IWL_MAX_TID_COUNT; /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; if (unlikely(ieee80211_has_retry(hdr->frame_control) && dup_data->last_seq[tid] == hdr->seq_ctrl && dup_data->last_sub_frame[tid] >= sub_frame_idx)) return true; /* Allow same PN as the first subframe for following sub frames */ if (dup_data->last_seq[tid] == hdr->seq_ctrl && sub_frame_idx > dup_data->last_sub_frame[tid] && desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; dup_data->last_seq[tid] = hdr->seq_ctrl; dup_data->last_sub_frame[tid] = sub_frame_idx; rx_status->flag |= RX_FLAG_DUP_VALIDATED; return false; } /* * Returns true if sn2 - buffer_size < sn1 < sn2. * To be used only in order to compare reorder buffer head with NSSN. * We fully trust NSSN unless it is behind us due to reorder timeout. * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN. 
*/ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) { return ieee80211_sn_less(sn1, sn2) && !ieee80211_sn_less(sn1, sn2 - buffer_size); } static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { if (IWL_MVM_USE_NSSN_SYNC) { struct iwl_mvm_nssn_sync_data notif = { .baid = baid, .nssn = nssn, }; iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false, ¬if, sizeof(notif)); } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) enum iwl_mvm_release_flags { IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), }; static void iwl_mvm_release_frames(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct napi_struct *napi, struct iwl_mvm_baid_data *baid_data, struct iwl_mvm_reorder_buffer *reorder_buf, u16 nssn, u32 flags) { struct iwl_mvm_reorder_buf_entry *entries = &baid_data->entries[reorder_buf->queue * baid_data->entries_per_queue]; u16 ssn = reorder_buf->head_sn; lockdep_assert_held(&reorder_buf->lock); /* * We keep the NSSN not too far behind, if we are sync'ing it and it * is more than 2048 ahead of us, it must be behind us. Discard it. * This can happen if the queue that hit the 0 / 2048 seqno was lagging * behind and this queue already processed packets. The next if * would have caught cases where this queue would have processed less * than 64 packets, but it may have processed more than 64 packets. */ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && ieee80211_sn_less(nssn, ssn)) goto set_timer; /* ignore nssn smaller than head sn - this can happen due to timeout */ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) goto set_timer; while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) { int index = ssn % reorder_buf->buf_size; struct sk_buff_head *skb_list = &entries[index].e.frames; struct sk_buff *skb; ssn = ieee80211_sn_inc(ssn); if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && (ssn == 2048 || ssn == 0)) iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); /* * Empty the list. 
 Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;

set_timer:
	if (reorder_buf->num_stored && !reorder_buf->removed) {
		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;

		while (skb_queue_empty(&entries[index].e.frames))
			index = (index + 1) % reorder_buf->buf_size;
		/* modify timer to match next frame's expiration time */
		mod_timer(&reorder_buf->reorder_timer,
			  entries[index].e.reorder_time + 1 +
			  RX_REORDER_BUF_TIMEOUT_MQ);
	} else {
		del_timer(&reorder_buf->reorder_timer);
	}
}

/*
 * Reorder-buffer timer callback: release frames whose hole has timed
 * out, or re-arm the timer for the first still-unexpired frame.
 */
void iwl_mvm_reorder_timer_expired(struct timer_list *t)
{
	struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
	struct iwl_mvm_baid_data *baid_data =
		iwl_mvm_baid_data_from_reorder_buf(buf);
	struct iwl_mvm_reorder_buf_entry *entries =
		&baid_data->entries[buf->queue * baid_data->entries_per_queue];
	int i;
	u16 sn = 0, index = 0;
	bool expired = false;
	bool cont = false;

	spin_lock(&buf->lock);

	if (!buf->num_stored || buf->removed) {
		spin_unlock(&buf->lock);
		return;
	}

	for (i = 0; i < buf->buf_size ; i++) {
		index = (buf->head_sn + i) % buf->buf_size;
		if (skb_queue_empty(&entries[index].e.frames)) {
			/*
			 * If there is a hole and the next frame didn't expire
			 * we want to break and not advance SN
			 */
			cont = false;
			continue;
		}
		if (!cont &&
		    !time_after(jiffies, entries[index].e.reorder_time +
					 RX_REORDER_BUF_TIMEOUT_MQ))
			break;

		expired = true;
		/* continue until next hole after this expired frames */
		cont = true;
		sn = ieee80211_sn_add(buf->head_sn, i + 1);
	}

	if (expired) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id = baid_data->sta_id;

		rcu_read_lock();
		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/* SN is set to the last expired frame + 1 */
		IWL_DEBUG_HT(buf->mvm,
			     "Releasing expired frames for sta %u, sn %d\n",
			     sta_id, sn);
		iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
						     sta, baid_data->tid);
		iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
				       buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
		rcu_read_unlock();
	} else {
		/*
		 * If no frame expired and there are stored frames, index is now
		 * pointing to the first unexpired frame - modify timer
		 * accordingly to this frame.
		 */
		mod_timer(&buf->reorder_timer,
			  entries[index].e.reorder_time +
			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
	}
	spin_unlock(&buf->lock);
}

/*
 * Handle a delBA notification on this RX queue: flush the whole reorder
 * buffer for the BAID to the stack and stop its timer.
 */
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
			   struct iwl_mvm_delba_data *data)
{
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	u8 baid = data->baid;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out;

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
			       ieee80211_sn_add(reorder_buf->head_sn,
						reorder_buf->buf_size),
			       0);
	spin_unlock_bh(&reorder_buf->lock);
	del_timer_sync(&reorder_buf->reorder_timer);

out:
	rcu_read_unlock();
}

/*
 * Release frames for a BAID up to nssn on this queue, triggered by a
 * firmware frame-release notification or an internal RSS sync message.
 */
static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue,
					      u32 flags)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_reorder_buffer *reorder_buf;
	struct iwl_mvm_baid_data *ba_data;

	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mvm->baid_map)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mvm->baid_map[baid]);
	if (!ba_data) {
		WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC),
		     "BAID %d not found in map\n", baid);
		goto out;
	}

	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		goto out;

	reorder_buf = &ba_data->reorder_buf[queue];

	spin_lock_bh(&reorder_buf->lock);
	iwl_mvm_release_frames(mvm, sta, napi, ba_data,
			       reorder_buf, nssn, flags);
	spin_unlock_bh(&reorder_buf->lock);

out:
	rcu_read_unlock();
}

/* Apply an NSSN sync message received from another RX queue. */
static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
			      struct napi_struct *napi, int queue,
			      const struct iwl_mvm_nssn_sync_data *data)
{
	iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
					  data->nssn, queue,
					  IWL_MVM_RELEASE_FROM_RSS_SYNC);
}

/*
 * Dispatch an internal RX-queue sync notification (empty ping, delBA,
 * or NSSN sync) and acknowledge the sync cookie when requested.
 */
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
			    struct iwl_rx_cmd_buffer *rxb, int queue)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxq_sync_notification *notif;
	struct iwl_mvm_internal_rxq_notif *internal_notif;
	u32 len = iwl_rx_packet_payload_len(pkt);

	notif = (void *)pkt->data;
	internal_notif = (void *)notif->payload;

	if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif),
		      "invalid notification size %d (%d)",
		      len, (int)(sizeof(*notif) + sizeof(*internal_notif))))
		return;

	len -= sizeof(*notif) + sizeof(*internal_notif);

	if (internal_notif->sync &&
	    mvm->queue_sync_cookie != internal_notif->cookie) {
		WARN_ONCE(1, "Received expired RX queue sync message\n");
		return;
	}

	switch (internal_notif->type) {
	case IWL_MVM_RXQ_EMPTY:
		WARN_ONCE(len, "invalid empty notification size %d", len);
		break;
	case IWL_MVM_RXQ_NOTIF_DEL_BA:
		if (WARN_ONCE(len != sizeof(struct iwl_mvm_delba_data),
			      "invalid delba notification size %d (%d)",
			      len, (int)sizeof(struct iwl_mvm_delba_data)))
			break;
		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
		break;
	case IWL_MVM_RXQ_NSSN_SYNC:
		if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data),
			      "invalid nssn sync notification size %d (%d)",
			      len, (int)sizeof(struct iwl_mvm_nssn_sync_data)))
			break;
		iwl_mvm_nssn_sync(mvm, napi, queue,
				  (void *)internal_notif->data);
		break;
	default:
		WARN_ONCE(1,
			  "Invalid identifier %d", internal_notif->type);
	}

	if (internal_notif->sync) {
		WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state),
			  "queue sync: queue %d responded a second time!\n",
			  queue);
		if (READ_ONCE(mvm->queue_sync_state) == 0)
			wake_up(&mvm->rx_sync_waitq);
	}
}

/*
 * Track consecutive "old SN" drops per A-MPDU (distinguished by GP2
 * timestamp); after IWL_MVM_AMPDU_CONSEC_DROPS_DELBA such A-MPDUs,
 * tear down the BA session to recover from a peer that ignored delBA.
 */
static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm,
				     struct ieee80211_sta *sta, int tid,
				     struct iwl_mvm_reorder_buffer *buffer,
				     u32 reorder, u32 gp2, int queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
		/* we have a new (A-)MPDU ... */

		/*
		 * reset counter to 0 if we didn't have any oldsn in
		 * the last A-MPDU (as detected by GP2 being identical)
		 */
		if (!buffer->consec_oldsn_prev_drop)
			buffer->consec_oldsn_drops = 0;

		/* either way, update our tracking state */
		buffer->consec_oldsn_ampdu_gp2 = gp2;
	} else if (buffer->consec_oldsn_prev_drop) {
		/*
		 * tracking state didn't change, and we had an old SN
		 * indication before - do nothing in this case, we
		 * already noted this one down and are waiting for the
		 * next A-MPDU (by GP2)
		 */
		return;
	}

	/* return unless this MPDU has old SN */
	if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN))
		return;

	/* update state */
	buffer->consec_oldsn_prev_drop = 1;
	buffer->consec_oldsn_drops++;

	/* if limit is reached, send del BA and reset state */
	if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) {
		IWL_WARN(mvm,
			 "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n",
			 IWL_MVM_AMPDU_CONSEC_DROPS_DELBA,
			 sta->addr, queue, tid);
		ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr);
		buffer->consec_oldsn_prev_drop = 0;
		buffer->consec_oldsn_drops = 0;
	}
}

/*
 * Returns true if the MPDU was buffered\dropped, false if it should be passed
 * to upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	struct sk_buff *tail;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
#if defined(__linux__)
	u8 tid = ieee80211_get_tid(hdr);
#elif defined(__FreeBSD__)
	u8 tid;
#endif
	u8 sub_frame_idx = desc->amsdu_info &
			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	struct iwl_mvm_reorder_buf_entry *entries;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	/*
	 * This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	/* no sta yet */
	if (WARN_ONCE(IS_ERR_OR_NULL(sta),
		      "Got valid BAID without a valid station assigned\n"))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	/* not a data packet or a bar */
	if (!ieee80211_is_back_req(hdr->frame_control) &&
	    (!ieee80211_is_data_qos(hdr->frame_control) ||
	     is_multicast_ether_addr(hdr->addr1)))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (!baid_data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder);
		return false;
	}

#if defined(__FreeBSD__)
	tid = ieee80211_get_tid(hdr);
#endif
	if (WARN(tid != baid_data->tid ||
		 mvm_sta->sta_id != baid_data->sta_id,
		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
		 tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	spin_lock_bh(&buffer->lock);

	if (!buffer->valid) {
		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
			spin_unlock_bh(&buffer->lock);
			return false;
		}
		buffer->valid = true;
	}

	if (ieee80211_is_back_req(hdr->frame_control)) {
		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
				       buffer, nssn, 0);
		goto drop;
	}

	/*
	 * If there was a significant jump in the nssn - adjust.
	 * If the SN is smaller than the NSSN it might need to first go into
	 * the reorder buffer, in which case we just release up to it and the
	 * rest of the function will take care of storing it and releasing up to
	 * the nssn.
	 * This should not happen. This queue has been lagging and it should
	 * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
	 * and update the other queues.
	 */
	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
				buffer->buf_size) ||
	    !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
				       min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
	}

	iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder,
				 rx_status->device_timestamp, queue);

	/* drop any outdated packets */
	if (ieee80211_sn_less(sn, buffer->head_sn))
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
				       buffer->buf_size) &&
		   (!amsdu || last_subframe)) {
			/*
			 * If we crossed the 2048 or 0 SN, notify all the
			 * queues. This is done in order to avoid having a
			 * head_sn that lags behind for too long. When that
			 * happens, we can get to a situation where the head_sn
			 * is within the interval [nssn - buf_size : nssn]
			 * which will make us think that the nssn is a packet
			 * that we already freed because of the reordering
			 * buffer and we will ignore it. So maintain the
			 * head_sn somewhat updated across all the queues:
			 * when it crosses 0 and 2048.
			 */
			if (sn == 2048 || sn == 0)
				iwl_mvm_sync_nssn(mvm, baid, sn);
			buffer->head_sn = nssn;
		}
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	/*
	 * release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe) {
			if (sn == 2048 || sn == 0)
				iwl_mvm_sync_nssn(mvm, baid, sn);
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		}
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	index = sn % buffer->buf_size;

	/*
	 * Check if we already stored this frame
	 * As AMSDU is either received or not as whole, logic is simple:
	 * If we have frames in that position in the buffer and the last frame
	 * originated from AMSDU had a different SN then it is a retransmission.
	 * If it is the same SN then if the subframe index is incrementing it
	 * is the same AMSDU - otherwise it is a retransmission.
	 */
	tail = skb_peek_tail(&entries[index].e.frames);
	if (tail && !amsdu)
		goto drop;
	else if (tail && (sn != buffer->last_amsdu ||
			  buffer->last_sub_index >= sub_frame_idx))
		goto drop;

	/* put in reorder buffer */
	__skb_queue_tail(&entries[index].e.frames, skb);
	buffer->num_stored++;
	entries[index].e.reorder_time = jiffies;

	if (amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = sub_frame_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
				       buffer, nssn,
				       IWL_MVM_RELEASE_SEND_RSS_SYNC);

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}

/*
 * Refresh the BA session's last-RX timestamp (used for session expiry),
 * rate-limited to once per session timeout to avoid cache bouncing.
 */
static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
				    u32 reorder_data, u8 baid)
{
	unsigned long now = jiffies;
	unsigned long timeout;
	struct iwl_mvm_baid_data *data;

	rcu_read_lock();

	data = rcu_dereference(mvm->baid_map[baid]);
	if (!data) {
		IWL_DEBUG_RX(mvm,
			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
			     baid, reorder_data);
		goto out;
	}

	if (!data->timeout)
		goto out;

	timeout = data->timeout;
	/*
	 * Do not update last rx all the time to avoid cache bouncing
	 * between the rx queues.
	 * Update it every timeout. Worst case is the session will
	 * expire after ~ 2 * timeout, which doesn't matter that much.
	 */
	if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
		/* Update is atomic */
		data->last_rx = now;

out:
	rcu_read_unlock();
}

/* Reverse the byte order of a MAC address in place. */
static void iwl_mvm_flip_address(u8 *addr)
{
	int i;
	u8 mac_addr[ETH_ALEN];

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = addr[ETH_ALEN - i - 1];
	ether_addr_copy(addr, mac_addr);
}

/* PHY data words extracted from the RX descriptor for radiotap decoding */
struct iwl_mvm_rx_phy_data {
	enum iwl_rx_phy_info_type info_type;
	__le32 d0, d1, d2, d3;
	__le16 d4;
};

/*
 * Decode HE-MU extended PHY info (per-channel RU allocations) into the
 * radiotap HE-MU structure; channel 2 fields only apply above 20 MHz.
 */
static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
				     struct iwl_mvm_rx_phy_data *phy_data,
				     u32 rate_n_flags,
				     struct ieee80211_radiotap_he_mu *he_mu)
{
	u32 phy_data2 = le32_to_cpu(phy_data->d2);
	u32 phy_data3 = le32_to_cpu(phy_data->d3);
	u16 phy_data4 = le16_to_cpu(phy_data->d4);

	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);

		he_mu->flags1 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);

		he_mu->ru_ch1[0] =
			FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
				  phy_data2);
		he_mu->ru_ch1[1] =
			FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
				  phy_data3);
		he_mu->ru_ch1[2] =
			FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
				  phy_data2);
		he_mu->ru_ch1[3] =
			FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
				  phy_data3);
	}

	if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
	    (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) !=
	    RATE_MCS_CHAN_WIDTH_20) {
		he_mu->flags1 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
				    IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);

		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
						   phy_data4),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);

		he_mu->ru_ch2[0] =
			FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
				  phy_data2);
		he_mu->ru_ch2[1] =
			FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
				  phy_data3);
		he_mu->ru_ch2[2] =
			FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
				  phy_data2);
		he_mu->ru_ch2[3] =
			FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
				  phy_data3);
	}
}

/*
 * Translate the firmware RU allocation index into the mac80211/radiotap
 * RU allocation, offset within that allocation, and bandwidth fields.
 */
static void
iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
			       u32 rate_n_flags,
			       struct ieee80211_radiotap_he *he,
			       struct ieee80211_radiotap_he_mu *he_mu,
			       struct ieee80211_rx_status *rx_status)
{
	/*
	 * Unfortunately, we have to leave the mac80211 data
	 * incorrect for the case that we receive an HE-MU
	 * transmission and *don't* have the HE phy data (due
	 * to the bits being used for TSF). This shouldn't
	 * happen though as management frames where we need
	 * the TSF/timers are not be transmitted in HE-MU.
	 */
	u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
	u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
	u8 offs = 0;

	rx_status->bw = RATE_INFO_BW_HE_RU;

	he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);

	switch (ru) {
	case 0 ... 36:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}
	he->data2 |= le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
	he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
				 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
	if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
		he->data2 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);

	/* compile-time check that fw and radiotap BW encodings agree */
#define CHECK_BW(bw) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
		     RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
	CHECK_BW(20);
	CHECK_BW(40);
	CHECK_BW(80);
	CHECK_BW(160);

	if (he_mu)
		he_mu->flags2 |=
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
	else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
		he->data6 |=
			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
			le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
						   rate_n_flags),
					 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}

/*
 * Decode firmware HE PHY data into radiotap HE/HE-MU fields.
 * (Definition continues past the end of this chunk.)
 */
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
				       struct iwl_mvm_rx_phy_data *phy_data,
				       struct ieee80211_radiotap_he *he,
				       struct ieee80211_radiotap_he_mu *he_mu,
				       struct ieee80211_rx_status *rx_status,
				       u32 rate_n_flags, int queue)
{
	switch (phy_data->info_type) {
	case IWL_RX_PHY_INFO_TYPE_NONE:
	case IWL_RX_PHY_INFO_TYPE_CCK:
	case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
	case IWL_RX_PHY_INFO_TYPE_HT:
	case IWL_RX_PHY_INFO_TYPE_VHT_SU:
	case IWL_RX_PHY_INFO_TYPE_VHT_MU:
		return;
	case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
		he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: /* HE common */ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_UPLINK), IEEE80211_RADIOTAP_HE_DATA3_UL_DL); } he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), 
IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), IEEE80211_RADIOTAP_HE_DATA6_TXOP); he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_DOPPLER), IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); break; default: /* nothing here */ break; } switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); he_mu->flags1 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); he_mu->flags2 |= le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_TB: case 
IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, he, he_mu, rx_status); break; case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_BEAM_CHNG), IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); break; default: /* nothing */ break; } } static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data, u32 rate_n_flags, u16 phy_info, int queue) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 stbc, ltf; static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), }; static const struct ieee80211_radiotap_he_mu mu_known = { .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; he = skb_put_data(skb, &known, sizeof(known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE; if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } /* report the AMPDU-EOF bit on single frames */ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->flag |= RX_FLAG_AMPDU_DETAILS; 
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status, rate_n_flags, queue); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; /* toggle is switched whenever new aggregation starts */ if (toggle_bit != mvm->ampdu_toggle) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } } if (he_type == RATE_MCS_HE_TYPE_EXT_SU && rate_n_flags & RATE_MCS_HE_106T_MSK) { rx_status->bw = RATE_INFO_BW_HE_RU; rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; } /* actually data is filled in mac80211 */ if (he_type == RATE_MCS_HE_TYPE_SU || he_type == RATE_MCS_HE_TYPE_EXT_SU) he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_HE; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; rx_status->he_dcm = !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); #define CHECK_TYPE(F) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) CHECK_TYPE(SU); CHECK_TYPE(EXT_SU); CHECK_TYPE(MU); CHECK_TYPE(TRIG); he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); if (rate_n_flags & RATE_MCS_BF_MSK) he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> RATE_MCS_HE_GI_LTF_POS) { case 0: if 
(he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; if (he_type == RATE_MCS_HE_TYPE_MU) ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; else ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; break; case 1: if (he_type == RATE_MCS_HE_TYPE_TRIG) rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; break; case 2: if (he_type == RATE_MCS_HE_TYPE_TRIG) { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; } else { rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; } break; case 3: rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; case 4: rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; break; default: ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; } he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); } static void iwl_mvm_decode_lsig(struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_lsig *lsig; switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: lsig = skb_put(skb, sizeof(*lsig)); lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_LSIG_LEN_MASK), IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; break; default: break; } } static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band) { switch (phy_band) { case 
PHY_BAND_24: return NL80211_BAND_2GHZ; case PHY_BAND_5: return NL80211_BAND_5GHZ; case PHY_BAND_6: return NL80211_BAND_6GHZ; default: WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); return NL80211_BAND_5GHZ; } } struct iwl_rx_sta_csa { bool all_sta_unblocked; struct ieee80211_vif *vif; }; static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_rx_sta_csa *rx_sta_csa = data; if (mvmsta->vif != rx_sta_csa->vif) return; if (mvmsta->disable_tx) rx_sta_csa->all_sta_unblocked = false; } void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; struct ieee80211_hdr *hdr; u32 len; u32 pkt_len = iwl_rx_packet_payload_len(pkt); u32 rate_n_flags, gp2_on_air_rise; u16 phy_info; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0, channel, energy_a, energy_b; size_t desc_size; struct iwl_mvm_rx_phy_data phy_data = { .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; u32 format; bool is_sgi; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) desc_size = sizeof(*desc); else desc_size = IWL_RX_DESC_SIZE_V1; if (unlikely(pkt_len < desc_size)) { IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n"); return; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); channel = desc->v3.channel; gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); energy_a = desc->v3.energy_a; energy_b = desc->v3.energy_b; phy_data.d0 = desc->v3.phy_data0; phy_data.d1 = desc->v3.phy_data1; phy_data.d2 = desc->v3.phy_data2; phy_data.d3 = desc->v3.phy_data3; } else { rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); channel = desc->v1.channel; 
gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); energy_a = desc->v1.energy_a; energy_b = desc->v1.energy_b; phy_data.d0 = desc->v1.phy_data0; phy_data.d1 = desc->v1.phy_data1; phy_data.d2 = desc->v1.phy_data2; phy_data.d3 = desc->v1.phy_data3; } if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, REPLY_RX_MPDU_CMD, 0) < 4) { rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n", rate_n_flags); } format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; len = le16_to_cpu(desc->mpdu_len); if (unlikely(len + desc_size > pkt_len)) { IWL_DEBUG_DROP(mvm, "FW lied about packet len\n"); return; } phy_info = le16_to_cpu(desc->phy_info); phy_data.d4 = desc->phy_data4; if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) phy_data.info_type = le32_get_bits(phy_data.d1, IWL_RX_PHY_DATA1_INFO_TYPE_MASK); hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { /* * If the device inserted padding it means that (it thought) * the 802.11 header wasn't a multiple of 4 bytes long. In * this case, reserve two bytes at the start of the SKB to * align the payload properly in case we end up copying it. 
*/ skb_reserve(skb, 2); } rx_status = IEEE80211_SKB_RXCB(skb); /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (format == RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); iwl_mvm_decode_lsig(skb, &phy_data); /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. */ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) || !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) { IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ if (format == RATE_MCS_CCK_MSK && phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); else tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } rx_status->device_timestamp = gp2_on_air_rise; if (iwl_mvm_is_band_in_rx_supported(mvm)) { u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx); rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band); } else { rx_status->band = channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; } rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, energy_b); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; /* * Toggle is switched whenever new aggregation starts. Make * sure ampdu_reference is never 0 so we can later use it to * see if the frame was really part of an A-MPDU or not. */ if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; if (mvm->ampdu_ref == 0) mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; } rx_status->ampdu_reference = mvm->ampdu_ref; } if (unlikely(mvm->monitor_on)) iwl_mvm_add_rtap_sniffer_config(mvm, skb); rcu_read_lock(); if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) { sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* * This is fine since we prevent two stations with the same * address from being added. 
*/ sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); goto out; } if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); struct iwl_fw_dbg_trigger_tlv *trig; struct ieee80211_vif *vif = mvmsta->vif; if (!mvm->tcm.paused && len >= sizeof(*hdr) && !is_multicast_ether_addr(hdr->addr1) && ieee80211_is_data(hdr->frame_control) && time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); /* * We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. */ if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); struct iwl_rx_sta_csa rx_sta_csa = { .all_sta_unblocked = true, .vif = tx_blocked_vif, }; if (mvmvif->csa_target_freq == rx_status->freq) iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_rx_get_sta_block_tx, &rx_sta_csa); if (rx_sta_csa.all_sta_unblocked) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); /* Unblock BCAST / MCAST station */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); } } rs_update_last_rssi(mvm, mvmsta, rx_status); trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_RSSI); if (trig && ieee80211_is_beacon(hdr->frame_control)) { struct iwl_fw_dbg_trigger_low_rssi *rssi_trig; s32 rssi; rssi_trig = (void *)trig->data; rssi = le32_to_cpu(rssi_trig->rssi); if (rx_status->signal < rssi) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, +#if defined(__linux__) NULL); +#elif 
defined(__FreeBSD__) + ""); +#endif } if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(mvm, sta, skb, pkt); if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { kfree_skb(skb); goto out; } /* * Our hardware de-aggregates AMSDUs but copies the mac header * as it to the de-aggregated MPDUs. We need to turn off the * AMSDU bit in the QoS control ourselves. * In addition, HW reverses addr3 and addr4 - reverse it back. */ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { u8 *qc = ieee80211_get_qos_ctl(hdr); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { iwl_mvm_flip_address(hdr->addr3); if (ieee80211_has_a4(hdr->frame_control)) iwl_mvm_flip_address(hdr->addr4); } } if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) { u32 reorder_data = le32_to_cpu(desc->reorder_data); iwl_mvm_agg_rx_received(mvm, reorder_data, baid); } } is_sgi = format == RATE_MCS_HE_MSK ? iwl_he_is_sgi(rate_n_flags) : rate_n_flags & RATE_MCS_SGI_MSK; if (!(format == RATE_MCS_CCK_MSK) && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (format == RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; } else if (!(format == RATE_MCS_HE_MSK)) { int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, 
rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); goto out; } rx_status->rate_idx = rate; } /* management stuff on default queue */ if (!queue) { if (unlikely((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; if (unlikely(ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control))) rx_status->boottime_ns = ktime_get_boottime_ns(); } if (iwl_mvm_create_skb(mvm, skb, hdr, len, crypt_len, rxb)) { kfree_skb(skb); goto out; } if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); out: rcu_read_unlock(); } void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_no_data *desc = (void *)pkt->data; u32 rate_n_flags = le32_to_cpu(desc->rate); u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); u32 rssi = le32_to_cpu(desc->rssi); u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 channel, energy_a, energy_b; u32 format; struct iwl_mvm_rx_phy_data phy_data = { .info_type = le32_get_bits(desc->phy_info[1], IWL_RX_PHY_DATA1_INFO_TYPE_MASK), .d0 = desc->phy_info[0], .d1 = desc->phy_info[1], }; bool is_sgi; if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, RX_NO_DATA_NOTIF, 0) < 2) { IWL_DEBUG_DROP(mvm, "Got an old rate format. 
Old rate: 0x%x\n", rate_n_flags); rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n", rate_n_flags); } format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc))) return; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. */ skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); return; } rx_status = IEEE80211_SKB_RXCB(skb); /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; switch (info_type) { case RX_NO_DATA_INFO_TYPE_NDP: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; break; case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; break; default: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; break; } /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; case RATE_MCS_CHAN_WIDTH_40: rx_status->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rx_status->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rx_status->bw = RATE_INFO_BW_160; break; } if (format == RATE_MCS_HE_MSK) iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, phy_info, queue); iwl_mvm_decode_lsig(skb, &phy_data); rx_status->device_timestamp = gp2_on_air_rise; rx_status->band = channel > 14 ? 
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, energy_b); rcu_read_lock(); is_sgi = format == RATE_MCS_HE_MSK ? iwl_he_is_sgi(rate_n_flags) : rate_n_flags & RATE_MCS_SGI_MSK; if (!(format == RATE_MCS_CCK_MSK) && is_sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate_n_flags & RATE_MCS_LDPC_MSK) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; if (format == RATE_MCS_HT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->encoding = RX_ENC_HT; rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; /* * take the nss from the rx_vec since the rate_n_flags has * only 2 bits for the nss which gives a max of 4 ss but * there may be up to 8 spatial streams */ rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; } else if (format == RATE_MCS_HE_MSK) { rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; } else { int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, rx_status->band); if (WARN(rate < 0 || rate > 0xFF, "Invalid rate flags 0x%x, band %d,\n", rate_n_flags, rx_status->band)) { kfree_skb(skb); goto out; } rx_status->rate_idx = rate; } ieee80211_rx_napi(mvm->hw, sta, skb, napi); out: rcu_read_unlock(); } void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_frame_release *release = (void *)pkt->data; if 
(unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release))) return; iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, le16_to_cpu(release->nssn), queue, 0); } void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bar_frame_release *release = (void *)pkt->data; unsigned int baid = le32_get_bits(release->ba_info, IWL_BAR_FRAME_RELEASE_BAID_MASK); unsigned int nssn = le32_get_bits(release->ba_info, IWL_BAR_FRAME_RELEASE_NSSN_MASK); unsigned int sta_id = le32_get_bits(release->sta_tid, IWL_BAR_FRAME_RELEASE_STA_MASK); unsigned int tid = le32_get_bits(release->sta_tid, IWL_BAR_FRAME_RELEASE_TID_MASK); struct iwl_mvm_baid_data *baid_data; if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release))) return; if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || baid >= ARRAY_SIZE(mvm->baid_map))) return; rcu_read_lock(); baid_data = rcu_dereference(mvm->baid_map[baid]); if (!baid_data) { IWL_DEBUG_RX(mvm, "Got valid BAID %d but not allocated, invalid BAR release!\n", baid); goto out; } if (WARN(tid != baid_data->tid || sta_id != baid_data->sta_id, "baid 0x%x is mapped to sta:%d tid:%d, but BAR release received for sta:%d tid:%d\n", baid, baid_data->sta_id, baid_data->tid, sta_id, tid)) goto out; iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0); out: rcu_read_unlock(); } diff --git a/sys/contrib/dev/rtw88/debug.c b/sys/contrib/dev/rtw88/debug.c index 7b6319c07b65..fbbd11be5eef 100644 --- a/sys/contrib/dev/rtw88/debug.c +++ b/sys/contrib/dev/rtw88/debug.c @@ -1,1294 +1,1294 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #include #include #include "main.h" #include "coex.h" #include "sec.h" #include "fw.h" #include "debug.h" #include "phy.h" #include "reg.h" #include "ps.h" #include "regd.h" #ifdef CONFIG_RTW88_DEBUGFS struct rtw_debugfs_priv { struct 
rtw_dev *rtwdev; int (*cb_read)(struct seq_file *m, void *v); ssize_t (*cb_write)(struct file *filp, const char __user *buffer, size_t count, loff_t *loff); union { u32 cb_data; u8 *buf; struct { u32 page_offset; u32 page_num; } rsvd_page; struct { u8 rf_path; u32 rf_addr; u32 rf_mask; }; struct { u32 addr; u32 len; } read_reg; struct { u8 bit; } dm_cap; }; }; static const char * const rtw_dm_cap_strs[] = { [RTW_DM_CAP_NA] = "NA", [RTW_DM_CAP_TXGAPK] = "TXGAPK", }; static int rtw_debugfs_single_show(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; return debugfs_priv->cb_read(m, v); } static ssize_t rtw_debugfs_common_write(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct rtw_debugfs_priv *debugfs_priv = filp->private_data; return debugfs_priv->cb_write(filp, buffer, count, loff); } static ssize_t rtw_debugfs_single_write(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; return debugfs_priv->cb_write(filp, buffer, count, loff); } static int rtw_debugfs_single_open_rw(struct inode *inode, struct file *filp) { return single_open(filp, rtw_debugfs_single_show, inode->i_private); } static int rtw_debugfs_close(struct inode *inode, struct file *filp) { return 0; } static const struct file_operations file_ops_single_r = { .owner = THIS_MODULE, .open = rtw_debugfs_single_open_rw, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations file_ops_single_rw = { .owner = THIS_MODULE, .open = rtw_debugfs_single_open_rw, .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = rtw_debugfs_single_write, }; static const struct file_operations file_ops_common_write = { .owner = THIS_MODULE, .write = rtw_debugfs_common_write, .open = simple_open, .release = rtw_debugfs_close, }; static int 
rtw_debugfs_get_read_reg(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 val, len, addr; len = debugfs_priv->read_reg.len; addr = debugfs_priv->read_reg.addr; switch (len) { case 1: val = rtw_read8(rtwdev, addr); seq_printf(m, "reg 0x%03x: 0x%02x\n", addr, val); break; case 2: val = rtw_read16(rtwdev, addr); seq_printf(m, "reg 0x%03x: 0x%04x\n", addr, val); break; case 4: val = rtw_read32(rtwdev, addr); seq_printf(m, "reg 0x%03x: 0x%08x\n", addr, val); break; } return 0; } static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 val, addr, mask; u8 path; path = debugfs_priv->rf_path; addr = debugfs_priv->rf_addr; mask = debugfs_priv->rf_mask; val = rtw_read_rf(rtwdev, path, addr, mask); seq_printf(m, "rf_read path:%d addr:0x%08x mask:0x%08x val=0x%08x\n", path, addr, mask, val); return 0; } static int rtw_debugfs_get_fix_rate(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; u8 fix_rate = dm_info->fix_rate; if (fix_rate >= DESC_RATE_MAX) { seq_printf(m, "Fix rate disabled, fix_rate = %u\n", fix_rate); return 0; } seq_printf(m, "Data frames fixed at desc rate %u\n", fix_rate); return 0; } static int rtw_debugfs_copy_from_user(char tmp[], int size, const char __user *buffer, size_t count, int num) { int tmp_len; memset(tmp, 0, size); if (count < num) return -EFAULT; tmp_len = (count > size - 1 ? 
size - 1 : count); if (!buffer || copy_from_user(tmp, buffer, tmp_len)) return count; tmp[tmp_len] = '\0'; return 0; } static ssize_t rtw_debugfs_set_read_reg(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 addr, len; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2); num = sscanf(tmp, "%x %x", &addr, &len); if (num != 2) return count; if (len != 1 && len != 2 && len != 4) { rtw_warn(rtwdev, "read reg setting wrong len\n"); return -EINVAL; } debugfs_priv->read_reg.addr = addr; debugfs_priv->read_reg.len = len; return count; } static int rtw_debugfs_get_dump_cam(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 val, command; u32 hw_key_idx = debugfs_priv->cb_data << RTW_SEC_CAM_ENTRY_SHIFT; u32 read_cmd = RTW_SEC_CMD_POLLING; int i; seq_printf(m, "cam entry%d\n", debugfs_priv->cb_data); seq_puts(m, "0x0 0x1 0x2 0x3 "); seq_puts(m, "0x4 0x5\n"); mutex_lock(&rtwdev->mutex); for (i = 0; i <= 5; i++) { command = read_cmd | (hw_key_idx + i); rtw_write32(rtwdev, RTW_SEC_CMD_REG, command); val = rtw_read32(rtwdev, RTW_SEC_READ_REG); seq_printf(m, "%8.8x", val); if (i < 2) seq_puts(m, " "); } seq_puts(m, "\n"); mutex_unlock(&rtwdev->mutex); return 0; } static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u8 page_size = rtwdev->chip->page_size; u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size; u32 offset = debugfs_priv->rsvd_page.page_offset * page_size; u8 *buf; int i; int ret; buf = vzalloc(buf_size); if (!buf) return -ENOMEM; ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, offset, buf_size, (u32 *)buf); if (ret) { 
rtw_err(rtwdev, "failed to dump rsvd page\n"); vfree(buf); return ret; } for (i = 0 ; i < buf_size ; i += 8) { if (i % page_size == 0) seq_printf(m, "PAGE %d\n", (i + offset) / page_size); seq_printf(m, "%8ph\n", buf + i); } vfree(buf); return 0; } static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 offset, page_num; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2); num = sscanf(tmp, "%d %d", &offset, &page_num); if (num != 2) { rtw_warn(rtwdev, "invalid arguments\n"); return -EINVAL; } debugfs_priv->rsvd_page.page_offset = offset; debugfs_priv->rsvd_page.page_num = page_num; return count; } static ssize_t rtw_debugfs_set_single_input(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 input; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); num = kstrtoint(tmp, 0, &input); if (num) { rtw_warn(rtwdev, "kstrtoint failed\n"); return num; } debugfs_priv->cb_data = input; return count; } static ssize_t rtw_debugfs_set_write_reg(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct rtw_debugfs_priv *debugfs_priv = filp->private_data; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 addr, val, len; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); /* write BB/MAC register */ num = sscanf(tmp, "%x %x %x", &addr, &val, &len); if (num != 3) return count; switch (len) { case 1: rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, "reg write8 0x%03x: 0x%08x\n", addr, val); rtw_write8(rtwdev, addr, (u8)val); break; 
case 2: rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, "reg write16 0x%03x: 0x%08x\n", addr, val); rtw_write16(rtwdev, addr, (u16)val); break; case 4: rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, "reg write32 0x%03x: 0x%08x\n", addr, val); rtw_write32(rtwdev, addr, (u32)val); break; default: rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, "error write length = %d\n", len); break; } return count; } static ssize_t rtw_debugfs_set_h2c(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct rtw_debugfs_priv *debugfs_priv = filp->private_data; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u8 param[8]; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx", ¶m[0], ¶m[1], ¶m[2], ¶m[3], ¶m[4], ¶m[5], ¶m[6], ¶m[7]); if (num != 8) { rtw_warn(rtwdev, "invalid H2C command format for debug\n"); return -EINVAL; } rtw_fw_h2c_cmd_dbg(rtwdev, param); return count; } static ssize_t rtw_debugfs_set_rf_write(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct rtw_debugfs_priv *debugfs_priv = filp->private_data; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 path, addr, mask, val; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4); num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val); if (num != 4) { rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n"); return count; } rtw_write_rf(rtwdev, path, addr, mask, val); rtw_dbg(rtwdev, RTW_DBG_DEBUGFS, "write_rf path:%d addr:0x%08x mask:0x%08x, val:0x%08x\n", path, addr, mask, val); return count; } static ssize_t rtw_debugfs_set_rf_read(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; u32 path, addr, mask; int num; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), 
buffer, count, 3); num = sscanf(tmp, "%x %x %x", &path, &addr, &mask); if (num != 3) { rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n"); return count; } debugfs_priv->rf_path = path; debugfs_priv->rf_addr = addr; debugfs_priv->rf_mask = mask; return count; } static ssize_t rtw_debugfs_set_fix_rate(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; u8 fix_rate; char tmp[32 + 1]; int ret; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); ret = kstrtou8(tmp, 0, &fix_rate); if (ret) { rtw_warn(rtwdev, "invalid args, [rate]\n"); return ret; } dm_info->fix_rate = fix_rate; return count; } static int rtw_debug_get_mac_page(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 page = debugfs_priv->cb_data; int i, n; int max = 0xff; rtw_read32(rtwdev, debugfs_priv->cb_data); for (n = 0; n <= max; ) { seq_printf(m, "\n%8.8x ", n + page); for (i = 0; i < 4 && n <= max; i++, n += 4) seq_printf(m, "%8.8x ", rtw_read32(rtwdev, (page | n))); } seq_puts(m, "\n"); return 0; } static int rtw_debug_get_bb_page(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 page = debugfs_priv->cb_data; int i, n; int max = 0xff; rtw_read32(rtwdev, debugfs_priv->cb_data); for (n = 0; n <= max; ) { seq_printf(m, "\n%8.8x ", n + page); for (i = 0; i < 4 && n <= max; i++, n += 4) seq_printf(m, "%8.8x ", rtw_read32(rtwdev, (page | n))); } seq_puts(m, "\n"); return 0; } static int rtw_debug_get_rf_dump(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; u32 addr, offset, data; u8 path; for (path = 0; 
path < rtwdev->hal.rf_path_num; path++) { seq_printf(m, "RF path:%d\n", path); for (addr = 0; addr < 0x100; addr += 4) { seq_printf(m, "%8.8x ", addr); for (offset = 0; offset < 4; offset++) { data = rtw_read_rf(rtwdev, path, addr + offset, 0xffffffff); seq_printf(m, "%8.8x ", data); } seq_puts(m, "\n"); } seq_puts(m, "\n"); } return 0; } static void rtw_print_cck_rate_txt(struct seq_file *m, u8 rate) { static const char * const cck_rate[] = {"1M", "2M", "5.5M", "11M"}; u8 idx = rate - DESC_RATE1M; seq_printf(m, " CCK_%-5s", cck_rate[idx]); } static void rtw_print_ofdm_rate_txt(struct seq_file *m, u8 rate) { static const char * const ofdm_rate[] = {"6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M"}; u8 idx = rate - DESC_RATE6M; seq_printf(m, " OFDM_%-4s", ofdm_rate[idx]); } static void rtw_print_ht_rate_txt(struct seq_file *m, u8 rate) { u8 mcs_n = rate - DESC_RATEMCS0; seq_printf(m, " MCS%-6u", mcs_n); } static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate) { u8 idx = rate - DESC_RATEVHT1SS_MCS0; u8 n_ss, mcs_n; /* n spatial stream */ n_ss = 1 + idx / 10; /* MCS n */ mcs_n = idx % 10; seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n); } static void rtw_print_rate(struct seq_file *m, u8 rate) { switch (rate) { case DESC_RATE1M...DESC_RATE11M: rtw_print_cck_rate_txt(m, rate); break; case DESC_RATE6M...DESC_RATE54M: rtw_print_ofdm_rate_txt(m, rate); break; case DESC_RATEMCS0...DESC_RATEMCS15: rtw_print_ht_rate_txt(m, rate); break; case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9: rtw_print_vht_rate_txt(m, rate); break; default: seq_printf(m, " Unknown rate=0x%x\n", rate); break; } } #define case_REGD(src) \ case RTW_REGD_##src: return #src static const char *rtw_get_regd_string(u8 regd) { switch (regd) { case_REGD(FCC); case_REGD(MKK); case_REGD(ETSI); case_REGD(IC); case_REGD(KCC); case_REGD(ACMA); case_REGD(CHILE); case_REGD(UKRAINE); case_REGD(MEXICO); case_REGD(CN); case_REGD(WW); default: return "Unknown"; } } static int rtw_debugfs_get_tx_pwr_tbl(struct 
seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_hal *hal = &rtwdev->hal; u8 path, rate; struct rtw_power_params pwr_param = {0}; u8 bw = hal->current_band_width; u8 ch = hal->current_channel; u8 regd = rtw_regd_get(rtwdev); seq_printf(m, "channel: %u\n", ch); seq_printf(m, "bandwidth: %u\n", bw); seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd)); seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n", "path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem"); mutex_lock(&hal->tx_power_mutex); for (path = RF_PATH_A; path <= RF_PATH_B; path++) { /* there is no CCK rates used in 5G */ if (hal->current_band_type == RTW_BAND_5G) rate = DESC_RATE6M; else rate = DESC_RATE1M; /* now, not support vht 3ss and vht 4ss*/ for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) { /* now, not support ht 3ss and ht 4ss*/ if (rate > DESC_RATEMCS15 && rate < DESC_RATEVHT1SS_MCS0) continue; rtw_get_tx_power_params(rtwdev, path, rate, bw, ch, regd, &pwr_param); seq_printf(m, "%4c ", path + 'A'); rtw_print_rate(m, rate); seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d %4d) %4d\n", hal->tx_pwr_tbl[path][rate], hal->tx_pwr_tbl[path][rate], pwr_param.pwr_base, min3(pwr_param.pwr_offset, pwr_param.pwr_limit, pwr_param.pwr_sar), pwr_param.pwr_offset, pwr_param.pwr_limit, pwr_param.pwr_sar, pwr_param.pwr_remnant); } } mutex_unlock(&hal->tx_power_mutex); return 0; } void rtw_debugfs_get_simple_phy_info(struct seq_file *m) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_hal *hal = &rtwdev->hal; struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_traffic_stats *stats = &rtwdev->stats; seq_printf(m, "%-40s = %ddBm/ %d\n", "RSSI/ STA Channel", dm_info->rssi[RF_PATH_A] - 100, hal->current_channel); seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n", stats->tx_throughput, stats->rx_throughput); seq_puts(m, "[Tx Rate] = "); rtw_print_rate(m, 
dm_info->tx_rate); seq_printf(m, "(0x%x)\n", dm_info->tx_rate); seq_puts(m, "[Rx Rate] = "); rtw_print_rate(m, dm_info->curr_rx_rate); seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate); } static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_pkt_count *last_cnt = &dm_info->last_pkt_count; struct rtw_efuse *efuse = &rtwdev->efuse; struct ewma_evm *ewma_evm = dm_info->ewma_evm; struct ewma_snr *ewma_snr = dm_info->ewma_snr; u8 ss, rate_id; seq_puts(m, "==========[Common Info]========\n"); seq_printf(m, "Is link = %c\n", rtw_is_assoc(rtwdev) ? 'Y' : 'N'); seq_printf(m, "Current CH(fc) = %u\n", rtwdev->hal.current_channel); seq_printf(m, "Current BW = %u\n", rtwdev->hal.current_band_width); seq_printf(m, "Current IGI = 0x%x\n", dm_info->igi_history[0]); seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n", stats->tx_throughput, stats->rx_throughput); seq_printf(m, "1SS for TX and RX = %c\n\n", rtwdev->hal.txrx_1ss ? 
'Y' : 'N'); seq_puts(m, "==========[Tx Phy Info]========\n"); seq_puts(m, "[Tx Rate] = "); rtw_print_rate(m, dm_info->tx_rate); seq_printf(m, "(0x%x)\n\n", dm_info->tx_rate); seq_puts(m, "==========[Rx Phy Info]========\n"); seq_printf(m, "[Rx Beacon Count] = %u\n", last_cnt->num_bcn_pkt); seq_puts(m, "[Rx Rate] = "); rtw_print_rate(m, dm_info->curr_rx_rate); seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate); seq_puts(m, "[Rx Rate Count]:\n"); seq_printf(m, " * CCK = {%u, %u, %u, %u}\n", last_cnt->num_qry_pkt[DESC_RATE1M], last_cnt->num_qry_pkt[DESC_RATE2M], last_cnt->num_qry_pkt[DESC_RATE5_5M], last_cnt->num_qry_pkt[DESC_RATE11M]); seq_printf(m, " * OFDM = {%u, %u, %u, %u, %u, %u, %u, %u}\n", last_cnt->num_qry_pkt[DESC_RATE6M], last_cnt->num_qry_pkt[DESC_RATE9M], last_cnt->num_qry_pkt[DESC_RATE12M], last_cnt->num_qry_pkt[DESC_RATE18M], last_cnt->num_qry_pkt[DESC_RATE24M], last_cnt->num_qry_pkt[DESC_RATE36M], last_cnt->num_qry_pkt[DESC_RATE48M], last_cnt->num_qry_pkt[DESC_RATE54M]); for (ss = 0; ss < efuse->hw_cap.nss; ss++) { rate_id = DESC_RATEMCS0 + ss * 8; seq_printf(m, " * HT_MCS[%u:%u] = {%u, %u, %u, %u, %u, %u, %u, %u}\n", ss * 8, ss * 8 + 7, last_cnt->num_qry_pkt[rate_id], last_cnt->num_qry_pkt[rate_id + 1], last_cnt->num_qry_pkt[rate_id + 2], last_cnt->num_qry_pkt[rate_id + 3], last_cnt->num_qry_pkt[rate_id + 4], last_cnt->num_qry_pkt[rate_id + 5], last_cnt->num_qry_pkt[rate_id + 6], last_cnt->num_qry_pkt[rate_id + 7]); } for (ss = 0; ss < efuse->hw_cap.nss; ss++) { rate_id = DESC_RATEVHT1SS_MCS0 + ss * 10; seq_printf(m, " * VHT_MCS-%uss MCS[0:9] = {%u, %u, %u, %u, %u, %u, %u, %u, %u, %u}\n", ss + 1, last_cnt->num_qry_pkt[rate_id], last_cnt->num_qry_pkt[rate_id + 1], last_cnt->num_qry_pkt[rate_id + 2], last_cnt->num_qry_pkt[rate_id + 3], last_cnt->num_qry_pkt[rate_id + 4], last_cnt->num_qry_pkt[rate_id + 5], last_cnt->num_qry_pkt[rate_id + 6], last_cnt->num_qry_pkt[rate_id + 7], last_cnt->num_qry_pkt[rate_id + 8], last_cnt->num_qry_pkt[rate_id + 9]); } 
seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n", dm_info->rssi[RF_PATH_A] - 100, dm_info->rssi[RF_PATH_B] - 100); seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n", dm_info->rx_evm_dbm[RF_PATH_A], dm_info->rx_evm_dbm[RF_PATH_B]); seq_printf(m, "[Rx SNR] = {%d, %d}\n", dm_info->rx_snr[RF_PATH_A], dm_info->rx_snr[RF_PATH_B]); seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n", dm_info->cfo_tail[RF_PATH_A], dm_info->cfo_tail[RF_PATH_B]); if (dm_info->curr_rx_rate >= DESC_RATE11M) { seq_puts(m, "[Rx Average Status]:\n"); seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n", (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_OFDM_A])); seq_printf(m, " * 1SS, EVM: {-%d}, SNR: {%d}\n", (u8)ewma_evm_read(&ewma_evm[RTW_EVM_1SS]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_1SS_A])); seq_printf(m, " * 2SS, EVM: {-%d, -%d}, SNR: {%d, %d}\n", (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_A]), (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B])); } seq_puts(m, "[Rx Counter]:\n"); seq_printf(m, " * CCA (CCK, OFDM, Total) = (%u, %u, %u)\n", dm_info->cck_cca_cnt, dm_info->ofdm_cca_cnt, dm_info->total_cca_cnt); seq_printf(m, " * False Alarm (CCK, OFDM, Total) = (%u, %u, %u)\n", dm_info->cck_fa_cnt, dm_info->ofdm_fa_cnt, dm_info->total_fa_cnt); seq_printf(m, " * CCK cnt (ok, err) = (%u, %u)\n", dm_info->cck_ok_cnt, dm_info->cck_err_cnt); seq_printf(m, " * OFDM cnt (ok, err) = (%u, %u)\n", dm_info->ofdm_ok_cnt, dm_info->ofdm_err_cnt); seq_printf(m, " * HT cnt (ok, err) = (%u, %u)\n", dm_info->ht_ok_cnt, dm_info->ht_err_cnt); seq_printf(m, " * VHT cnt (ok, err) = (%u, %u)\n", dm_info->vht_ok_cnt, dm_info->vht_err_cnt); return 0; } static int rtw_debugfs_get_coex_info(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; rtw_coex_display_coex_info(rtwdev, m); return 0; } static ssize_t rtw_debugfs_set_coex_enable(struct file *filp, const 
char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_coex *coex = &rtwdev->coex; char tmp[32 + 1]; bool enable; int ret; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); ret = kstrtobool(tmp, &enable); if (ret) { rtw_warn(rtwdev, "invalid arguments\n"); return ret; } mutex_lock(&rtwdev->mutex); coex->manual_control = !enable; mutex_unlock(&rtwdev->mutex); return count; } static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_coex *coex = &rtwdev->coex; seq_printf(m, "coex mechanism %s\n", coex->manual_control ? "disabled" : "enabled"); return 0; } static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; bool input; int err; err = kstrtobool_from_user(buffer, count, &input); if (err) return err; rtw_edcca_enabled = input; rtw_phy_adaptivity_set_mode(rtwdev); return count; } static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; seq_printf(m, "EDCCA %s: EDCCA mode %d\n", rtw_edcca_enabled ? 
"enabled" : "disabled", dm_info->edcca_mode); return 0; } static ssize_t rtw_debugfs_set_fw_crash(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; char tmp[32 + 1]; bool input; int ret; rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1); ret = kstrtobool(tmp, &input); if (ret) return -EINVAL; if (!input) return -EINVAL; if (test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) return -EINPROGRESS; mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); rtw_write8(rtwdev, REG_HRCV_MSG, 1); mutex_unlock(&rtwdev->mutex); return count; } static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) || test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)); return 0; } static ssize_t rtw_debugfs_set_force_lowest_basic_rate(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; bool input; int err; err = kstrtobool_from_user(buffer, count, &input); if (err) return err; if (input) set_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); else clear_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); return count; } static int rtw_debugfs_get_force_lowest_basic_rate(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; seq_printf(m, "force lowest basic rate: %d\n", test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags)); return 0; } static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, const char __user 
*buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; int bit; bool en; if (kstrtoint_from_user(buffer, count, 10, &bit)) return -EINVAL; en = bit > 0; bit = abs(bit); if (bit >= RTW_DM_CAP_NUM) { rtw_warn(rtwdev, "unknown DM CAP %d\n", bit); return -EINVAL; } if (en) dm_info->dm_flags &= ~BIT(bit); else dm_info->dm_flags |= BIT(bit); debugfs_priv->dm_cap.bit = bit; return count; } static void dump_gapk_status(struct rtw_dev *rtwdev, struct seq_file *m) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; int i, path; u32 val; seq_printf(m, "\n(%2d) %c%s\n\n", RTW_DM_CAP_TXGAPK, dm_info->dm_flags & BIT(RTW_DM_CAP_TXGAPK) ? '-' : '+', rtw_dm_cap_strs[RTW_DM_CAP_TXGAPK]); for (path = 0; path < rtwdev->hal.rf_path_num; path++) { val = rtw_read_rf(rtwdev, path, RF_GAINTX, RFREG_MASK); seq_printf(m, "path %d:\n0x%x = 0x%x\n", path, RF_GAINTX, val); for (i = 0; i < RF_HW_OFFSET_NUM; i++) seq_printf(m, "[TXGAPK] offset %d %d\n", txgapk->rf3f_fs[path][i], i); seq_puts(m, "\n"); } } static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; struct rtw_dm_info *dm_info = &rtwdev->dm_info; int i; switch (debugfs_priv->dm_cap.bit) { case RTW_DM_CAP_TXGAPK: dump_gapk_status(rtwdev, m); break; default: for (i = 1; i < RTW_DM_CAP_NUM; i++) { seq_printf(m, "(%2d) %c%s\n", i, dm_info->dm_flags & BIT(i) ? 
'-' : '+', rtw_dm_cap_strs[i]); } break; } debugfs_priv->dm_cap.bit = RTW_DM_CAP_NA; return 0; } #define rtw_debug_impl_mac(page, addr) \ static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ .cb_read = rtw_debug_get_mac_page, \ .cb_data = addr, \ } rtw_debug_impl_mac(0, 0x0000); rtw_debug_impl_mac(1, 0x0100); rtw_debug_impl_mac(2, 0x0200); rtw_debug_impl_mac(3, 0x0300); rtw_debug_impl_mac(4, 0x0400); rtw_debug_impl_mac(5, 0x0500); rtw_debug_impl_mac(6, 0x0600); rtw_debug_impl_mac(7, 0x0700); rtw_debug_impl_mac(10, 0x1000); rtw_debug_impl_mac(11, 0x1100); rtw_debug_impl_mac(12, 0x1200); rtw_debug_impl_mac(13, 0x1300); rtw_debug_impl_mac(14, 0x1400); rtw_debug_impl_mac(15, 0x1500); rtw_debug_impl_mac(16, 0x1600); rtw_debug_impl_mac(17, 0x1700); #define rtw_debug_impl_bb(page, addr) \ static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \ .cb_read = rtw_debug_get_bb_page, \ .cb_data = addr, \ } rtw_debug_impl_bb(8, 0x0800); rtw_debug_impl_bb(9, 0x0900); rtw_debug_impl_bb(a, 0x0a00); rtw_debug_impl_bb(b, 0x0b00); rtw_debug_impl_bb(c, 0x0c00); rtw_debug_impl_bb(d, 0x0d00); rtw_debug_impl_bb(e, 0x0e00); rtw_debug_impl_bb(f, 0x0f00); rtw_debug_impl_bb(18, 0x1800); rtw_debug_impl_bb(19, 0x1900); rtw_debug_impl_bb(1a, 0x1a00); rtw_debug_impl_bb(1b, 0x1b00); rtw_debug_impl_bb(1c, 0x1c00); rtw_debug_impl_bb(1d, 0x1d00); rtw_debug_impl_bb(1e, 0x1e00); rtw_debug_impl_bb(1f, 0x1f00); rtw_debug_impl_bb(2c, 0x2c00); rtw_debug_impl_bb(2d, 0x2d00); rtw_debug_impl_bb(40, 0x4000); rtw_debug_impl_bb(41, 0x4100); static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = { .cb_read = rtw_debug_get_rf_dump, }; static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = { .cb_read = rtw_debugfs_get_tx_pwr_tbl, }; static struct rtw_debugfs_priv rtw_debug_priv_write_reg = { .cb_write = rtw_debugfs_set_write_reg, }; static struct rtw_debugfs_priv rtw_debug_priv_h2c = { .cb_write = rtw_debugfs_set_h2c, }; static struct rtw_debugfs_priv rtw_debug_priv_rf_write = { .cb_write = 
rtw_debugfs_set_rf_write, }; static struct rtw_debugfs_priv rtw_debug_priv_rf_read = { .cb_write = rtw_debugfs_set_rf_read, .cb_read = rtw_debugfs_get_rf_read, }; static struct rtw_debugfs_priv rtw_debug_priv_read_reg = { .cb_write = rtw_debugfs_set_read_reg, .cb_read = rtw_debugfs_get_read_reg, }; static struct rtw_debugfs_priv rtw_debug_priv_fix_rate = { .cb_write = rtw_debugfs_set_fix_rate, .cb_read = rtw_debugfs_get_fix_rate, }; static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = { .cb_write = rtw_debugfs_set_single_input, .cb_read = rtw_debugfs_get_dump_cam, }; static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = { .cb_write = rtw_debugfs_set_rsvd_page, .cb_read = rtw_debugfs_get_rsvd_page, }; static struct rtw_debugfs_priv rtw_debug_priv_phy_info = { .cb_read = rtw_debugfs_get_phy_info, }; static struct rtw_debugfs_priv rtw_debug_priv_coex_enable = { .cb_write = rtw_debugfs_set_coex_enable, .cb_read = rtw_debugfs_get_coex_enable, }; static struct rtw_debugfs_priv rtw_debug_priv_coex_info = { .cb_read = rtw_debugfs_get_coex_info, }; static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = { .cb_write = rtw_debugfs_set_edcca_enable, .cb_read = rtw_debugfs_get_edcca_enable, }; static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = { .cb_write = rtw_debugfs_set_fw_crash, .cb_read = rtw_debugfs_get_fw_crash, }; static struct rtw_debugfs_priv rtw_debug_priv_force_lowest_basic_rate = { .cb_write = rtw_debugfs_set_force_lowest_basic_rate, .cb_read = rtw_debugfs_get_force_lowest_basic_rate, }; static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { .cb_write = rtw_debugfs_set_dm_cap, .cb_read = rtw_debugfs_get_dm_cap, }; #define rtw_debugfs_add_core(name, mode, fopname, parent) \ do { \ rtw_debug_priv_ ##name.rtwdev = rtwdev; \ if (!debugfs_create_file(#name, mode, \ parent, &rtw_debug_priv_ ##name,\ &file_ops_ ##fopname)) \ pr_debug("Unable to initialize debugfs:%s\n", \ #name); \ } while (0) #define rtw_debugfs_add_w(name) \ 
rtw_debugfs_add_core(name, S_IFREG | 0222, common_write, debugfs_topdir) #define rtw_debugfs_add_rw(name) \ rtw_debugfs_add_core(name, S_IFREG | 0666, single_rw, debugfs_topdir) #define rtw_debugfs_add_r(name) \ rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir) void rtw_debugfs_init(struct rtw_dev *rtwdev) { struct dentry *debugfs_topdir; debugfs_topdir = debugfs_create_dir("rtw88", rtwdev->hw->wiphy->debugfsdir); rtw_debugfs_add_w(write_reg); rtw_debugfs_add_rw(read_reg); rtw_debugfs_add_w(rf_write); rtw_debugfs_add_rw(rf_read); rtw_debugfs_add_rw(fix_rate); rtw_debugfs_add_rw(dump_cam); rtw_debugfs_add_rw(rsvd_page); rtw_debugfs_add_r(phy_info); rtw_debugfs_add_r(coex_info); rtw_debugfs_add_rw(coex_enable); rtw_debugfs_add_w(h2c); rtw_debugfs_add_r(mac_0); rtw_debugfs_add_r(mac_1); rtw_debugfs_add_r(mac_2); rtw_debugfs_add_r(mac_3); rtw_debugfs_add_r(mac_4); rtw_debugfs_add_r(mac_5); rtw_debugfs_add_r(mac_6); rtw_debugfs_add_r(mac_7); rtw_debugfs_add_r(bb_8); rtw_debugfs_add_r(bb_9); rtw_debugfs_add_r(bb_a); rtw_debugfs_add_r(bb_b); rtw_debugfs_add_r(bb_c); rtw_debugfs_add_r(bb_d); rtw_debugfs_add_r(bb_e); rtw_debugfs_add_r(bb_f); rtw_debugfs_add_r(mac_10); rtw_debugfs_add_r(mac_11); rtw_debugfs_add_r(mac_12); rtw_debugfs_add_r(mac_13); rtw_debugfs_add_r(mac_14); rtw_debugfs_add_r(mac_15); rtw_debugfs_add_r(mac_16); rtw_debugfs_add_r(mac_17); rtw_debugfs_add_r(bb_18); rtw_debugfs_add_r(bb_19); rtw_debugfs_add_r(bb_1a); rtw_debugfs_add_r(bb_1b); rtw_debugfs_add_r(bb_1c); rtw_debugfs_add_r(bb_1d); rtw_debugfs_add_r(bb_1e); rtw_debugfs_add_r(bb_1f); if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) { rtw_debugfs_add_r(bb_2c); rtw_debugfs_add_r(bb_2d); rtw_debugfs_add_r(bb_40); rtw_debugfs_add_r(bb_41); } rtw_debugfs_add_r(rf_dump); rtw_debugfs_add_r(tx_pwr_tbl); rtw_debugfs_add_rw(edcca_enable); rtw_debugfs_add_rw(fw_crash); rtw_debugfs_add_rw(force_lowest_basic_rate); rtw_debugfs_add_rw(dm_cap); } #endif /* CONFIG_RTW88_DEBUGFS */ #ifdef 
CONFIG_RTW88_DEBUG void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; if (rtw_debug_mask & mask) #if defined(__linux__) dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf); #elif defined(__FreeBSD__) - vlog(LOG_DEBUG, fmt, args); + vlog(LOG_DEBUG, vaf.fmt, args); #endif va_end(args); } EXPORT_SYMBOL(__rtw_dbg); #endif /* CONFIG_RTW88_DEBUG */ diff --git a/sys/contrib/dev/rtw89/debug.c b/sys/contrib/dev/rtw89/debug.c index 1a14633c5ff7..90a15279b8f8 100644 --- a/sys/contrib/dev/rtw89/debug.c +++ b/sys/contrib/dev/rtw89/debug.c @@ -1,2667 +1,2667 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2019-2020 Realtek Corporation */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX rtw89_debug_ #endif #include #include "coex.h" #include "debug.h" #include "fw.h" #include "mac.h" #include "ps.h" #include "reg.h" #include "sar.h" #if defined(__FreeBSD__) #ifdef CONFIG_RTW89_DEBUGFS #include #endif #endif #ifdef CONFIG_RTW89_DEBUGMSG unsigned int rtw89_debug_mask; EXPORT_SYMBOL(rtw89_debug_mask); module_param_named(debug_mask, rtw89_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); #endif #ifdef CONFIG_RTW89_DEBUGFS struct rtw89_debugfs_priv { struct rtw89_dev *rtwdev; int (*cb_read)(struct seq_file *m, void *v); ssize_t (*cb_write)(struct file *filp, const char __user *buffer, size_t count, loff_t *loff); union { u32 cb_data; struct { u32 addr; u8 len; } read_reg; struct { u32 addr; u32 mask; u8 path; } read_rf; struct { u8 ss_dbg:1; u8 dle_dbg:1; u8 dmac_dbg:1; u8 cmac_dbg:1; u8 dbg_port:1; } dbgpkg_en; struct { u32 start; u32 len; u8 sel; } mac_mem; }; }; static int rtw89_debugfs_single_show(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; return debugfs_priv->cb_read(m, v); } static ssize_t rtw89_debugfs_single_write(struct file *filp, const char __user *buffer, 
size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; return debugfs_priv->cb_write(filp, buffer, count, loff); } static ssize_t rtw89_debugfs_seq_file_write(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) { struct seq_file *seqpriv = (struct seq_file *)filp->private_data; struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private; return debugfs_priv->cb_write(filp, buffer, count, loff); } static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp) { return single_open(filp, rtw89_debugfs_single_show, inode->i_private); } static int rtw89_debugfs_close(struct inode *inode, struct file *filp) { return 0; } static const struct file_operations file_ops_single_r = { .owner = THIS_MODULE, .open = rtw89_debugfs_single_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const struct file_operations file_ops_common_rw = { .owner = THIS_MODULE, .open = rtw89_debugfs_single_open, .release = single_release, .read = seq_read, .llseek = seq_lseek, .write = rtw89_debugfs_seq_file_write, }; static const struct file_operations file_ops_single_w = { .owner = THIS_MODULE, .write = rtw89_debugfs_single_write, .open = simple_open, .release = rtw89_debugfs_close, }; static ssize_t rtw89_debug_priv_read_reg_select(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct seq_file *m = (struct seq_file *)filp->private_data; struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; char buf[32]; size_t buf_size; u32 addr, len; int num; buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = '\0'; num = sscanf(buf, "%x %x", &addr, &len); if (num != 2) { rtw89_info(rtwdev, "invalid format: \n"); return -EINVAL; } debugfs_priv->read_reg.addr = addr; debugfs_priv->read_reg.len = len; rtw89_info(rtwdev, "select read %d bytes from 0x%08x\n", len, addr); 
return count; }

/* Seq-file "get": read 1/2/4 bytes from the MAC register selected earlier
 * via the matching *_select write handler (debugfs_priv->read_reg).
 */
static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	u32 addr, data;
	u8 len;

	len = debugfs_priv->read_reg.len;
	addr = debugfs_priv->read_reg.addr;

	/* Width was validated on the select side; re-check defensively. */
	switch (len) {
	case 1:
		data = rtw89_read8(rtwdev, addr);
		break;
	case 2:
		data = rtw89_read16(rtwdev, addr);
		break;
	case 4:
		data = rtw89_read32(rtwdev, addr);
		break;
	default:
		rtw89_info(rtwdev, "invalid read reg len %d\n", len);
		return -EINVAL;
	}

	seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data);

	return 0;
}

/* debugfs write handler: "<addr> <val> <len>" in hex; performs an
 * immediate 1/2/4-byte MAC register write.
 */
static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
					      const char __user *user_buf,
					      size_t count, loff_t *loff)
{
	struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	u32 addr, val, len;
	int num;

	/* Bounded copy; buf is always NUL-terminated below. */
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	num = sscanf(buf, "%x %x %x", &addr, &val, &len);
	if (num != 3) {
		rtw89_info(rtwdev, "invalid format: \n");
		return -EINVAL;
	}

	switch (len) {
	case 1:
		rtw89_info(rtwdev, "reg write8 0x%08x: 0x%02x\n", addr, val);
		rtw89_write8(rtwdev, addr, (u8)val);
		break;
	case 2:
		rtw89_info(rtwdev, "reg write16 0x%08x: 0x%04x\n", addr, val);
		rtw89_write16(rtwdev, addr, (u16)val);
		break;
	case 4:
		rtw89_info(rtwdev, "reg write32 0x%08x: 0x%08x\n", addr, val);
		rtw89_write32(rtwdev, addr, (u32)val);
		break;
	default:
		/* NOTE(review): message says "read write" upstream too;
		 * unlike the read path this only logs, it does not -EINVAL.
		 */
		rtw89_info(rtwdev, "invalid read write len %d\n", len);
		break;
	}

	return count;
}

/* debugfs write handler: "<path> <addr> <mask>" selecting which RF
 * register the companion read_rf_get seq-file handler will read.
 */
static ssize_t rtw89_debug_priv_read_rf_select(struct file *filp,
					       const char __user *user_buf,
					       size_t count, loff_t *loff)
{
	struct seq_file *m = (struct seq_file *)filp->private_data;
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	u32 addr, mask;
	u8 path;
	int num;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask);
	if (num != 3) {
		rtw89_info(rtwdev, "invalid format: \n");
		return -EINVAL;
	}

	/* Path index is bounded by the chip's RF path count. */
	if (path >= rtwdev->chip->rf_path_num) {
		rtw89_info(rtwdev, "wrong rf path\n");
		return -EINVAL;
	}
	debugfs_priv->read_rf.addr = addr;
	debugfs_priv->read_rf.mask = mask;
	debugfs_priv->read_rf.path = path;

	rtw89_info(rtwdev, "select read rf path %d from 0x%08x\n", path, addr);

	return count;
}

/* Seq-file "get": read the RF register chosen by read_rf_select. */
static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	u32 addr, data, mask;
	u8 path;

	addr = debugfs_priv->read_rf.addr;
	mask = debugfs_priv->read_rf.mask;
	path = debugfs_priv->read_rf.path;

	data = rtw89_read_rf(rtwdev, path, addr, mask);

	seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data);

	return 0;
}

/* debugfs write handler: "<path> <addr> <mask> <val>" — immediate masked
 * RF register write on the given path.
 */
static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
					     const char __user *user_buf,
					     size_t count, loff_t *loff)
{
	struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	u32 addr, val, mask;
	u8 path;
	int num;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val);
	if (num != 4) {
		rtw89_info(rtwdev, "invalid format: \n");
		return -EINVAL;
	}

	if (path >= rtwdev->chip->rf_path_num) {
		rtw89_info(rtwdev, "wrong rf path\n");
		return -EINVAL;
	}

	rtw89_info(rtwdev, "path %d, rf register write 0x%08x=0x%08x (mask = 0x%08x)\n",
		   path, addr, val, mask);
	rtw89_write_rf(rtwdev, path, addr, mask, val);

	return count;
}

/* Seq-file "get": dump RF registers 0x00..0xFF (full RFREG_MASK reads)
 * for every RF path the chip has, four values per output row.
 */
static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr, offset, data;
	u8 path;

	for (path = 0; path < chip->rf_path_num; path++) {
		seq_printf(m, "RF path %d:\n\n", path);
		for (addr = 0; addr < 0x100; addr += 4) {
			seq_printf(m, "0x%08x: ", addr);
			for (offset = 0; offset < 4; offset++) {
				data = rtw89_read_rf(rtwdev, path,
						     addr + offset, RFREG_MASK);
				seq_printf(m, "0x%05x  ", data);
			}
			seq_puts(m, "\n");
		}
		seq_puts(m, "\n");
	}

	return 0;
}

/* One row of a TX-power table printout: a label plus how many byte-wide
 * power entries the row consumes.
 */
struct txpwr_ent {
	const char *txt;
	u8 len;
};

/* A contiguous MAC TX-power register range plus its row descriptions. */
struct txpwr_map {
	const struct txpwr_ent *ent;
	u8 size;
	u32 addr_from;
	u32 addr_to;
};

#define __GEN_TXPWR_ENT2(_t, _e0, _e1) \
	{ .len = 2, .txt = _t "\t- " _e0 " " _e1 }

#define __GEN_TXPWR_ENT4(_t, _e0, _e1, _e2, _e3) \
	{ .len = 4, .txt = _t "\t- " _e0 " " _e1 " " _e2 " " _e3 }

#define __GEN_TXPWR_ENT8(_t, _e0, _e1, _e2, _e3, _e4, _e5, _e6, _e7) \
	{ .len = 8, .txt = _t "\t- " \
	  _e0 " " _e1 " " _e2 " " _e3 " " \
	  _e4 " " _e5 " " _e6 " " _e7 }

/* TX power by-rate table rows; each entry is one byte per rate. */
static const struct txpwr_ent __txpwr_ent_byr[] = {
	__GEN_TXPWR_ENT4("CCK       ", "1M   ", "2M   ", "5.5M ", "11M  "),
	__GEN_TXPWR_ENT4("LEGACY    ", "6M   ", "9M   ", "12M  ", "18M  "),
	__GEN_TXPWR_ENT4("LEGACY    ", "24M  ", "36M  ", "48M  ", "54M  "),
	/* 1NSS */
	__GEN_TXPWR_ENT4("MCS_1NSS  ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "),
	__GEN_TXPWR_ENT4("MCS_1NSS  ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "),
	__GEN_TXPWR_ENT4("MCS_1NSS  ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"),
	__GEN_TXPWR_ENT4("HEDCM_1NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "),
	/* 2NSS */
	__GEN_TXPWR_ENT4("MCS_2NSS  ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "),
	__GEN_TXPWR_ENT4("MCS_2NSS  ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "),
	__GEN_TXPWR_ENT4("MCS_2NSS  ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"),
	__GEN_TXPWR_ENT4("HEDCM_2NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "),
};

/* Table must exactly cover the by-rate register range (4 bytes/word). */
#if defined(__linux__)
static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) ==
#elif defined(__FreeBSD__)
rtw89_static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) ==
#endif
	      (R_AX_PWR_BY_RATE_MAX - R_AX_PWR_BY_RATE + 4));

static const struct txpwr_map __txpwr_map_byr = {
	.ent = __txpwr_ent_byr,
	.size = ARRAY_SIZE(__txpwr_ent_byr),
	.addr_from = R_AX_PWR_BY_RATE,
	.addr_to = R_AX_PWR_BY_RATE_MAX,
};

/* TX power limit table rows: one NON_BF/BF pair per rate section. */
static const struct txpwr_ent __txpwr_ent_lmt[] = {
	/* 1TX */
	__GEN_TXPWR_ENT2("CCK_1TX_20M    ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("CCK_1TX_40M    ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("OFDM_1TX       ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_2  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_3  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_4  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_5  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_6  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_20M_7  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_2  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_3  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_80M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_80M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_160M   ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_0p5", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_1TX_40M_2p5", "NON_BF", "BF"),
	/* 2TX */
	__GEN_TXPWR_ENT2("CCK_2TX_20M    ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("CCK_2TX_40M    ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("OFDM_2TX       ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_2  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_3  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_4  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_5  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_6  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_20M_7  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_2  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_3  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_80M_0  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_80M_1  ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_160M   ", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_0p5", "NON_BF", "BF"),
	__GEN_TXPWR_ENT2("MCS_2TX_40M_2p5", "NON_BF", "BF"),
};

/* Table must exactly cover the limit register range (2 bytes/entry). */
#if defined(__linux__)
static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) ==
#elif defined(__FreeBSD__)
rtw89_static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) ==
#endif
	      (R_AX_PWR_LMT_MAX - R_AX_PWR_LMT + 4));

static const struct txpwr_map __txpwr_map_lmt = {
	.ent = __txpwr_ent_lmt,
	.size = ARRAY_SIZE(__txpwr_ent_lmt),
	.addr_from = R_AX_PWR_LMT,
	.addr_to = R_AX_PWR_LMT_MAX,
};

/* TX power RU (resource unit) limit table rows: 8 entries per row. */
static const struct txpwr_ent __txpwr_ent_lmt_ru[] = {
	/* 1TX */
	__GEN_TXPWR_ENT8("1TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3",
			 "RU26__4", "RU26__5", "RU26__6", "RU26__7"),
	__GEN_TXPWR_ENT8("1TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3",
			 "RU52__4", "RU52__5", "RU52__6", "RU52__7"),
	__GEN_TXPWR_ENT8("1TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3",
			 "RU106_4", "RU106_5", "RU106_6", "RU106_7"),
	/* 2TX */
	__GEN_TXPWR_ENT8("2TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3",
			 "RU26__4", "RU26__5", "RU26__6", "RU26__7"),
	__GEN_TXPWR_ENT8("2TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3",
			 "RU52__4", "RU52__5", "RU52__6", "RU52__7"),
	__GEN_TXPWR_ENT8("2TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3",
			 "RU106_4", "RU106_5", "RU106_6", "RU106_7"),
};

/* Table must exactly cover the RU limit register range (8 bytes/row). */
#if defined(__linux__)
static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) ==
#elif defined(__FreeBSD__)
rtw89_static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) ==
#endif
	      (R_AX_PWR_RU_LMT_MAX - R_AX_PWR_RU_LMT + 4));

static const struct txpwr_map __txpwr_map_lmt_ru = {
	.ent = __txpwr_ent_lmt_ru,
	.size = ARRAY_SIZE(__txpwr_ent_lmt_ru),
	.addr_from = R_AX_PWR_RU_LMT,
	.addr_to = R_AX_PWR_RU_LMT_MAX,
};

/* Print one table row from buf starting at index cur; returns how many
 * entries the row consumed so the caller can advance its cursor.
 */
static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent,
			    const u8 *buf, const u8 cur)
{
	char *fmt;

	switch (ent->len) {
	case 2:
		fmt = "%s\t| %3d, %3d,\t\tdBm\n";
		seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]);
		return 2;
	case 4:
		fmt = "%s\t| %3d, %3d, %3d, %3d,\t\tdBm\n";
		seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
			   buf[cur + 2], buf[cur + 3]);
		return 4;
	case 8:
		fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\t\tdBm\n";
		seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
			   buf[cur + 2], buf[cur + 3], buf[cur + 4],
			   buf[cur + 5], buf[cur + 6], buf[cur + 7]);
		return 8;
	default:
		return 0;
	}
}

/* Read the whole register range of a txpwr_map one 32-bit word at a
 * time, unpack each word into four per-byte power values (scaled down
 * by the chip's MAC power factor), then print the table row by row.
 */
static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
			     const struct txpwr_map *map)
{
	u8 fct = rtwdev->chip->txpwr_factor_mac;
	u8 *buf, cur, i;
	u32 val, addr;
	int ret;

	buf = vzalloc(map->addr_to - map->addr_from + 4);
	if (!buf)
		return -ENOMEM;

	for (addr = map->addr_from; addr <= map->addr_to; addr += 4) {
		ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val);
		if (ret)
			val = MASKDWORD;	/* mark unreadable words as all-ones */

		cur = addr - map->addr_from;
		for (i = 0; i < 4; i++, val >>= 8)
			buf[cur + i] = FIELD_GET(MASKBYTE0, val) >> fct;
	}

	for (cur = 0, i = 0; i < map->size; i++)
		cur += __print_txpwr_ent(m, &map->ent[i], buf, cur);

	vfree(buf);
	return 0;
}

#define case_REGD(_regd) \
	case RTW89_ ## _regd: \
		seq_puts(m, #_regd "\n"); \
		break

/* Print the regulatory domain name for the current channel's band. */
static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	u8 band = chan->band_type;
	u8 regd = rtw89_regd_get(rtwdev, band);

	switch (regd) {
	default:
		seq_printf(m, "UNKNOWN: %d\n", regd);
		break;
	case_REGD(WW);
	case_REGD(ETSI);
	case_REGD(FCC);
	case_REGD(MKK);
	case_REGD(NA);
	case_REGD(IC);
	case_REGD(KCC);
	case_REGD(NCC);
	case_REGD(CHILE);
	case_REGD(ACMA);
	case_REGD(MEXICO);
	case_REGD(UKRAINE);
	case_REGD(CN);
	}
}

#undef case_REGD

/* Seq-file "get": full TX-power report — regulatory domain, SAR, then
 * the by-rate, limit, and RU-limit tables. Holds the device mutex and
 * leaves power-save mode so the register reads are valid.
 */
static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	int ret = 0;

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_ps_mode(rtwdev);

	seq_puts(m, "[Regulatory] ");
	__print_regd(m, rtwdev);

	seq_puts(m, "[SAR]\n");
	rtw89_print_sar(m, rtwdev);

	seq_puts(m, "\n[TX power byrate]\n");
	ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr);
	if (ret)
		goto err;

	seq_puts(m, "\n[TX power limit]\n");
	ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt);
	if (ret)
		goto err;

	seq_puts(m, "\n[TX power limit_ru]\n");
	ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt_ru);
	if (ret)
		goto err;

err:
	mutex_unlock(&rtwdev->mutex);
	return ret;
}

/* debugfs write handler: choose which register page group the
 * mac_reg_dump seq-file handler will dump (RTW89_DBG_SEL_*).
 */
static ssize_t rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
						    const char __user *user_buf,
						    size_t count, loff_t *loff)
{
	struct seq_file *m = (struct seq_file *)filp->private_data;
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	int sel;
	int ret;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	ret = kstrtoint(buf, 0, &sel);
	if (ret)
		return ret;

	if (sel < RTW89_DBG_SEL_MAC_00 || sel > RTW89_DBG_SEL_RFC) {
		rtw89_info(rtwdev, "invalid args: %d\n", sel);
		return -EINVAL;
	}

	debugfs_priv->cb_data = sel;
	rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);

	return count;
}

#define RTW89_MAC_PAGE_SIZE		0x100

/* Seq-file "get": dump the selected page range; each page index i maps
 * to MAC address window (i << 8), printed 16 bytes (4 words) per line.
 */
static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data;
	u32 start, end;
	u32 i, j, k, page;
	u32 val;

	switch (reg_sel) {
	case RTW89_DBG_SEL_MAC_00:
		seq_puts(m, "Debug selected MAC page 0x00\n");
		start = 0x000;
		end = 0x014;
		break;
	case RTW89_DBG_SEL_MAC_30:
		seq_puts(m, "Debug selected MAC page 0x30\n");
		start = 0x030;
		end = 0x033;
		break;
	case RTW89_DBG_SEL_MAC_40:
		seq_puts(m, "Debug selected MAC page 0x40\n");
		start = 0x040;
		end = 0x07f;
		break;
	case RTW89_DBG_SEL_MAC_80:
		seq_puts(m, "Debug selected MAC page 0x80\n");
		start = 0x080;
		end = 0x09f;
		break;
	case RTW89_DBG_SEL_MAC_C0:
		seq_puts(m, "Debug selected MAC page 0xc0\n");
		start = 0x0c0;
		end = 0x0df;
		break;
	case RTW89_DBG_SEL_MAC_E0:
		seq_puts(m, "Debug selected MAC page 0xe0\n");
		start = 0x0e0;
		end = 0x0ff;
		break;
	case RTW89_DBG_SEL_BB:
		seq_puts(m, "Debug selected BB register\n");
		start = 0x100;
		end = 0x17f;
		break;
	case RTW89_DBG_SEL_IQK:
		seq_puts(m, "Debug selected IQK register\n");
		start = 0x180;
		end = 0x1bf;
		break;
	case RTW89_DBG_SEL_RFC:
		seq_puts(m, "Debug selected RFC register\n");
		start = 0x1c0;
		end = 0x1ff;
		break;
	default:
		seq_puts(m, "Selected invalid register page\n");
		return -EINVAL;
	}

	for (i = start; i <= end; i++) {
		page = i << 8;
		for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) {
			/* 0x18600000 appears to be the bus base of the MAC
			 * register window — printed for readability only.
			 */
			seq_printf(m, "%08xh : ", 0x18600000 + j);
			for (k = 0; k < 4; k++) {
				val = rtw89_read32(rtwdev, j + (k << 2));
				seq_printf(m, "%08x ", val);
			}
			seq_puts(m, "\n");
		}
	}

	return 0;
}

/* debugfs write handler: "<sel> <start> <len>" (hex) selecting which
 * internal MAC memory region the mac_mem_dump handler will dump.
 */
static ssize_t rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
						    const char __user *user_buf,
						    size_t count, loff_t *loff)
{
	struct seq_file *m = (struct seq_file *)filp->private_data;
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	u32 sel, start_addr, len;
	int num;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	num = sscanf(buf, "%x %x %x", &sel, &start_addr, &len);
	if (num != 3) {
		rtw89_info(rtwdev, "invalid format: \n");
		return -EINVAL;
	}

	debugfs_priv->mac_mem.sel = sel;
	debugfs_priv->mac_mem.start = start_addr;
	debugfs_priv->mac_mem.len = len;

	rtw89_info(rtwdev, "select mem %d start %d len %d\n",
		   sel, start_addr, len);

	return count;
}

/* Dump MAC internal memory through the indirect-access window: each
 * page is mapped in via R_AX_FILTER_MODEL_ADDR, then read as 32-bit
 * words from the R_AX_INDIR_ACCESS_ENTRY aperture.
 * NOTE(review): residue (offset into the first page) is not reset after
 * the first page, and pages = len/PAGE + 1 over-counts when len is an
 * exact multiple — matches the vendored upstream behavior; confirm
 * before changing.
 */
static void rtw89_debug_dump_mac_mem(struct seq_file *m,
				     struct rtw89_dev *rtwdev,
				     u8 sel, u32 start_addr, u32 len)
{
	u32 base_addr, start_page, residue;
	u32 i, j, p, pages;
	u32 dump_len, remain;
	u32 val;

	remain = len;
	pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1;
	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	for (p = 0; p < pages; p++) {
		dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE);
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);
		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + dump_len;) {
			seq_printf(m, "%08xh:", i);
			for (j = 0;
			     j < 4 && i < R_AX_INDIR_ACCESS_ENTRY + dump_len;
			     j++, i += 4) {
				val = rtw89_read32(rtwdev, i);
				seq_printf(m, " %08x", val);
				remain -= 4;
			}
			seq_puts(m, "\n");
		}
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

/* Seq-file "get": dump the memory region chosen by mac_mem_dump_select,
 * under the device mutex and out of power-save mode.
 */
static int
rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_ps_mode(rtwdev);
	rtw89_debug_dump_mac_mem(m, rtwdev,
				 debugfs_priv->mac_mem.sel,
				 debugfs_priv->mac_mem.start,
				 debugfs_priv->mac_mem.len);
	mutex_unlock(&rtwdev->mutex);

	return 0;
}

/* debugfs write handler: "<sel> <set>" enabling/disabling one of the
 * five dbgpkg dump categories (ss/dle/dmac/cmac/dbg_port).
 */
static ssize_t
rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
					  const char __user *user_buf,
					  size_t count, loff_t *loff)
{
	struct seq_file *m = (struct seq_file *)filp->private_data;
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
	char buf[32];
	size_t buf_size;
	int sel, set;
	int num;
	bool enable;

	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	num = sscanf(buf, "%d %d", &sel, &set);
	if (num != 2) {
		rtw89_info(rtwdev, "invalid format: \n");
		return -EINVAL;
	}

	enable = set != 0;
	switch (sel) {
	case 0:
		debugfs_priv->dbgpkg_en.ss_dbg = enable;
		break;
	case 1:
		debugfs_priv->dbgpkg_en.dle_dbg = enable;
		break;
	case 2:
		debugfs_priv->dbgpkg_en.dmac_dbg = enable;
		break;
	case 3:
		debugfs_priv->dbgpkg_en.cmac_dbg = enable;
		break;
	case 4:
		debugfs_priv->dbgpkg_en.dbg_port = enable;
		break;
	default:
		rtw89_info(rtwdev, "invalid args: sel %d set %d\n", sel, set);
		return -EINVAL;
	}

	rtw89_info(rtwdev, "%s debug port dump %d\n",
		   enable ? "Enable" : "Disable", sel);

	return count;
}

/* Spatial-stream debug dump: intentionally empty placeholder. */
static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev,
				       struct seq_file *m)
{
	return 0;
}

/* Dump DLE (data link engine) state: free-page counters and per-quota
 * reserved/used page counts for both WDE and PLE link engines.
 */
static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
					struct seq_file *m)
{
/* Statement-expression macro: selects a DFI target/address, polls until
 * the engine clears the ACTIVE bit, and yields the read-back data word.
 * On poll timeout it returns from the ENCLOSING function — only safe
 * because it is defined and used inside this function body.
 */
#define DLE_DFI_DUMP(__type, __target, __sel) \
	({ \
		u32 __ctrl; \
		u32 __reg_ctrl = R_AX_##__type##_DBG_FUN_INTF_CTL; \
		u32 __reg_data = R_AX_##__type##_DBG_FUN_INTF_DATA; \
		u32 __data, __val32; \
		int __ret; \
		\
		__ctrl = FIELD_PREP(B_AX_##__type##_DFI_TRGSEL_MASK, \
				    DLE_DFI_TYPE_##__target) | \
			 FIELD_PREP(B_AX_##__type##_DFI_ADDR_MASK, __sel) | \
			 B_AX_WDE_DFI_ACTIVE; \
		rtw89_write32(rtwdev, __reg_ctrl, __ctrl); \
		__ret = read_poll_timeout(rtw89_read32, __val32, \
					  !(__val32 & B_AX_##__type##_DFI_ACTIVE), \
					  1000, 50000, false, \
					  rtwdev, __reg_ctrl); \
		if (__ret) { \
			rtw89_err(rtwdev, "failed to dump DLE %s %s %d\n", \
				  #__type, #__target, __sel); \
			return __ret; \
		} \
		\
		__data = rtw89_read32(rtwdev, __reg_data); \
		__data; \
	})

/* Print free-page head/tail and public page count for one engine. */
#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \
	({ \
		u32 __freepg, __pubpg; \
		u32 __freepg_head, __freepg_tail, __pubpg_num; \
		\
		__freepg = DLE_DFI_DUMP(__type, FREEPG, 0); \
		__pubpg = DLE_DFI_DUMP(__type, FREEPG, 1); \
		__freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \
		__freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \
		__pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \
		seq_printf(__m, "[%s] freepg head: %d\n", \
			   #__type, __freepg_head); \
		seq_printf(__m, "[%s] freepg tail: %d\n", \
			   #__type, __freepg_tail); \
		seq_printf(__m, "[%s] pubpg num  : %d\n", \
			   #__type, __pubpg_num); \
	})

/* Expand to a full switch case printing one quota's rsv/use counts. */
#define case_QUOTA(__m, __type, __id) \
	case __type##_QTAID_##__id: \
		val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
		rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \
		use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \
		seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \
			   #__type, #__id, rsv_pgnum); \
		seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \
			   #__type, #__id, use_pgnum); \
		break
	u32 quota_id;
	u32 val32;
	u16 rsv_pgnum, use_pgnum;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
	if (ret) {
		seq_puts(m, "[DLE] : DMAC not enabled\n");
		return ret;
	}

	DLE_DFI_FREE_PAGE_DUMP(m, WDE);
	DLE_DFI_FREE_PAGE_DUMP(m, PLE);
	for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) {
		switch (quota_id) {
		case_QUOTA(m, WDE, HOST_IF);
		case_QUOTA(m, WDE, WLAN_CPU);
		case_QUOTA(m, WDE, DATA_CPU);
		case_QUOTA(m, WDE, PKTIN);
		case_QUOTA(m, WDE, CPUIO);
		}
	}
	for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) {
		switch (quota_id) {
		case_QUOTA(m, PLE, B0_TXPL);
		case_QUOTA(m, PLE, B1_TXPL);
		case_QUOTA(m, PLE, C2H);
		case_QUOTA(m, PLE, H2C);
		case_QUOTA(m, PLE, WLAN_CPU);
		case_QUOTA(m, PLE, MPDU);
		case_QUOTA(m, PLE, CMAC0_RX);
		case_QUOTA(m, PLE, CMAC1_RX);
		case_QUOTA(m, PLE, CMAC1_BBRPT);
		case_QUOTA(m, PLE, WDRLS);
		case_QUOTA(m, PLE, CPUIO);
		}
	}

	return 0;
#undef case_QUOTA
#undef DLE_DFI_DUMP
#undef DLE_DFI_FREE_PAGE_DUMP
}

/* Dump all DMAC (data MAC) error interrupt status registers. */
static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
					 struct seq_file *m)
{
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
	if (ret) {
		seq_puts(m, "[DMAC] : DMAC not enabled\n");
		return ret;
	}

	seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR));
	seq_printf(m, "[0]R_AX_WDRLS_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
	seq_printf(m, "[1]R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_SEC_ERR_IMR_ISR));
	seq_printf(m, "[2.1]R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
	seq_printf(m, "[2.2]R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
	seq_printf(m, "[3]R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
	seq_printf(m, "[4]R_AX_WDE_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
	seq_printf(m, "[5.1]R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
	seq_printf(m, "[5.2]R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
	seq_printf(m, "[6]R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
	seq_printf(m, "[7]R_AX_PKTIN_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
	seq_printf(m, "[8.1]R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
	seq_printf(m, "[8.2]R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
	seq_printf(m, "[8.3]R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
	seq_printf(m, "[10]R_AX_CPUIO_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
	seq_printf(m, "[11.1]R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
	seq_printf(m, "[11.2]R_AX_BBRPT_CHINFO_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR_ISR));
	seq_printf(m, "[11.3]R_AX_BBRPT_DFS_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR_ISR));
	seq_printf(m, "[11.4]R_AX_LA_ERRFLAG=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_LA_ERRFLAG));

	return 0;
}

/* Dump CMAC (control MAC) error interrupt status for CMAC 0, then for
 * CMAC 1 if it is enabled (the *_C1 register bank).
 */
static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
					 struct seq_file *m)
{
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL);
	if (ret) {
		seq_puts(m, "[CMAC] : CMAC 0 not enabled\n");
		return ret;
	}

	seq_printf(m, "R_AX_CMAC_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR));
	seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR));
	seq_printf(m, "[1]R_AX_PTCL_ISR0=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
	seq_printf(m, "[3]R_AX_DLE_CTRL=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DLE_CTRL));
	seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
	seq_printf(m, "[5]R_AX_TXPWR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
	seq_printf(m, "[6]R_AX_RMAC_ERR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR));
	seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));

	ret = rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL);
	if (ret) {
		seq_puts(m, "[CMAC] : CMAC 1 not enabled\n");
		return ret;
	}

	seq_printf(m, "R_AX_CMAC_ERR_ISR_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR_C1));
	seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR_C1));
	seq_printf(m, "[1]R_AX_PTCL_ISR0_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_PTCL_ISR0_C1));
	seq_printf(m, "[3]R_AX_DLE_CTRL_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DLE_CTRL_C1));
	/* NOTE(review): %02x (not %08x) matches the vendored upstream. */
	seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR_C1=0x%02x\n",
		   rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR_C1));
	seq_printf(m, "[5]R_AX_TXPWR_ISR_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TXPWR_ISR_C1));
	seq_printf(m, "[6]R_AX_RMAC_ERR_ISR_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR_C1));
	seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR_C1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR_C1));

	return 0;
}

/* Debug-port descriptors: selector register (sel_addr/sel_byte/sel_msk),
 * selector value range [srt, end], and the data read-back register.
 * PTCL (protocol controller), band 0 / band 1 (_C1).
 */
static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = {
	.sel_addr = R_AX_PTCL_DBG,
	.sel_byte = 1,
	.sel_msk = B_AX_PTCL_DBG_SEL_MASK,
	.srt = 0x00,
	.end = 0x3F,
	.rd_addr = R_AX_PTCL_DBG_INFO,
	.rd_byte = 4,
	.rd_msk = B_AX_PTCL_DBG_INFO_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c1 = {
	.sel_addr = R_AX_PTCL_DBG_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_PTCL_DBG_SEL_MASK,
	.srt = 0x00,
	.end = 0x3F,
	.rd_addr = R_AX_PTCL_DBG_INFO_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_PTCL_DBG_INFO_MASK
};

/* Scheduler debug ports, band 0 / band 1. */
static const struct rtw89_mac_dbg_port_info dbg_port_sch_c0 = {
	.sel_addr = R_AX_SCH_DBG_SEL,
	.sel_byte = 1,
	.sel_msk = B_AX_SCH_DBG_SEL_MASK,
	.srt = 0x00,
	.end = 0x2F,
	.rd_addr = R_AX_SCH_DBG,
	.rd_byte = 4,
	.rd_msk = B_AX_SCHEDULER_DBG_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_sch_c1 = {
	.sel_addr = R_AX_SCH_DBG_SEL_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_SCH_DBG_SEL_MASK,
	.srt = 0x00,
	.end = 0x2F,
	.rd_addr = R_AX_SCH_DBG_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_SCHEDULER_DBG_MASK
};
/* TMAC (TX MAC) debug ports; data is read via the shared debug-state
 * port (R_AX_DBG_PORT_SEL). Band 0 / band 1 (_C1).
 */
static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c0 = {
	.sel_addr = R_AX_MACTX_DBG_SEL_CNT,
	.sel_byte = 1,
	.sel_msk = B_AX_DBGSEL_MACTX_MASK,
	.srt = 0x00,
	.end = 0x19,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c1 = {
	.sel_addr = R_AX_MACTX_DBG_SEL_CNT_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_DBGSEL_MACTX_MASK,
	.srt = 0x00,
	.end = 0x19,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

/* RMAC (RX MAC) debug ports. */
static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c0 = {
	.sel_addr = R_AX_RX_DEBUG_SELECT,
	.sel_byte = 1,
	.sel_msk = B_AX_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x58,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c1 = {
	.sel_addr = R_AX_RX_DEBUG_SELECT_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x58,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

/* RMAC RX state machine monitor; selector and data share one register. */
static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c0 = {
	.sel_addr = R_AX_RX_STATE_MONITOR,
	.sel_byte = 1,
	.sel_msk = B_AX_STATE_SEL_MASK,
	.srt = 0x00,
	.end = 0x17,
	.rd_addr = R_AX_RX_STATE_MONITOR,
	.rd_byte = 4,
	.rd_msk = B_AX_RX_STATE_MONITOR_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c1 = {
	.sel_addr = R_AX_RX_STATE_MONITOR_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_STATE_SEL_MASK,
	.srt = 0x00,
	.end = 0x17,
	.rd_addr = R_AX_RX_STATE_MONITOR_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_RX_STATE_MONITOR_MASK
};

/* RMAC PLCP header monitor; 4-byte selector, shared select/data reg. */
static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c0 = {
	.sel_addr = R_AX_RMAC_PLCP_MON,
	.sel_byte = 4,
	.sel_msk = B_AX_PCLP_MON_SEL_MASK,
	.srt = 0x0,
	.end = 0xF,
	.rd_addr = R_AX_RMAC_PLCP_MON,
	.rd_byte = 4,
	.rd_msk = B_AX_RMAC_PLCP_MON_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c1 = {
	.sel_addr = R_AX_RMAC_PLCP_MON_C1,
	.sel_byte = 4,
	.sel_msk = B_AX_PCLP_MON_SEL_MASK,
	.srt = 0x0,
	.end = 0xF,
	.rd_addr = R_AX_RMAC_PLCP_MON_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_RMAC_PLCP_MON_MASK
};

/* TRX protocol controller debug ports; note selector starts at 0x08. */
static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c0 = {
	.sel_addr = R_AX_DBGSEL_TRXPTCL,
	.sel_byte = 1,
	.sel_msk = B_AX_DBGSEL_TRXPTCL_MASK,
	.srt = 0x08,
	.end = 0x10,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c1 = {
	.sel_addr = R_AX_DBGSEL_TRXPTCL_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_DBGSEL_TRXPTCL_MASK,
	.srt = 0x08,
	.end = 0x10,
	.rd_addr = R_AX_DBG_PORT_SEL,
	.rd_byte = 4,
	.rd_msk = B_AX_DEBUG_ST_MASK
};

/* WMAC TX control-info debug: same selector, low (P0) and high (P1)
 * data words read from INFO0/INFO1 respectively.
 */
static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c0 = {
	.sel_addr = R_AX_WMAC_TX_CTRL_DEBUG,
	.sel_byte = 1,
	.sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x07,
	.rd_addr = R_AX_WMAC_TX_INFO0_DEBUG,
	.rd_byte = 4,
	.rd_msk = B_AX_TX_CTRL_INFO_P0_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c0 = {
	.sel_addr = R_AX_WMAC_TX_CTRL_DEBUG,
	.sel_byte = 1,
	.sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x07,
	.rd_addr = R_AX_WMAC_TX_INFO1_DEBUG,
	.rd_byte = 4,
	.rd_msk = B_AX_TX_CTRL_INFO_P1_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c1 = {
	.sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x07,
	.rd_addr = R_AX_WMAC_TX_INFO0_DEBUG_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_TX_CTRL_INFO_P0_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c1 = {
	.sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
	.srt = 0x00,
	.end = 0x07,
	.rd_addr = R_AX_WMAC_TX_INFO1_DEBUG_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_TX_CTRL_INFO_P1_MASK
};

/* WMAC TX trigger-frame info debug: low (INFO_1) / high (INFO_2) words. */
static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c0 = {
	.sel_addr = R_AX_WMAC_TX_TF_INFO_0,
	.sel_byte = 1,
	.sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
	.srt = 0x00,
	.end = 0x04,
	.rd_addr = R_AX_WMAC_TX_TF_INFO_1,
	.rd_byte = 4,
	.rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c0 = {
	.sel_addr = R_AX_WMAC_TX_TF_INFO_0,
	.sel_byte = 1,
	.sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
	.srt = 0x00,
	.end = 0x04,
	.rd_addr = R_AX_WMAC_TX_TF_INFO_2,
	.rd_byte = 4,
	.rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c1 = {
	.sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
	.srt = 0x00,
	.end = 0x04,
	.rd_addr = R_AX_WMAC_TX_TF_INFO_1_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c1 = {
	.sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1,
	.sel_byte = 1,
	.sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
	.srt = 0x00,
	.end = 0x04,
	.rd_addr = R_AX_WMAC_TX_TF_INFO_2_C1,
	.rd_byte = 4,
	.rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK
};

/* WDE (WiFi descriptor engine) DFI ports: the 4-byte selector encodes
 * target (high bits of srt) and address (low bits).
 */
static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_freepg = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80000000,
	.end = 0x80000001,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_quota = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80010000,
	.end = 0x80010004,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pagellt = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80020000,
	.end = 0x80020FFF,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pktinfo = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80030000,
	.end = 0x80030FFF,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_prepkt = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80040000,
	.end = 0x80040FFF,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_nxtpkt = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80050000,
	.end = 0x80050FFF,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qlnktbl = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80060000,
	.end = 0x80060453,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qempty = {
	.sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_WDE_DFI_DATA_MASK,
	.srt = 0x80070000,
	.end = 0x80070011,
	.rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_WDE_DFI_DATA_MASK
};

/* PLE (payload link engine) DFI ports, same selector encoding as WDE. */
static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_freepg = {
	.sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_PLE_DFI_DATA_MASK,
	.srt = 0x80000000,
	.end = 0x80000001,
	.rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_PLE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_quota = {
	.sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_PLE_DFI_DATA_MASK,
	.srt = 0x80010000,
	.end = 0x8001000A,
	.rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
	.rd_byte = 4,
	.rd_msk = B_AX_PLE_DFI_DATA_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pagellt = {
	.sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
	.sel_byte = 4,
	.sel_msk = B_AX_PLE_DFI_DATA_MASK,
	.srt = 0x80020000,
	.end = 0x80020DBF,
	.rd_addr =
R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pktinfo = { .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_PLE_DFI_DATA_MASK, .srt = 0x80030000, .end = 0x80030DBF, .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_prepkt = { .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_PLE_DFI_DATA_MASK, .srt = 0x80040000, .end = 0x80040DBF, .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_nxtpkt = { .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_PLE_DFI_DATA_MASK, .srt = 0x80050000, .end = 0x80050DBF, .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qlnktbl = { .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_PLE_DFI_DATA_MASK, .srt = 0x80060000, .end = 0x80060041, .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qempty = { .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_PLE_DFI_DATA_MASK, .srt = 0x80070000, .end = 0x80070001, .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_PLE_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_pktinfo = { .sel_addr = R_AX_DBG_FUN_INTF_CTL, .sel_byte = 4, .sel_msk = B_AX_DFI_DATA_MASK, .srt = 0x80000000, .end = 0x8000017f, .rd_addr = R_AX_DBG_FUN_INTF_DATA, .rd_byte = 4, .rd_msk = B_AX_DFI_DATA_MASK }; static const struct rtw89_mac_dbg_port_info dbg_port_pcie_txdma = { .sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2, .sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x03, .rd_addr = 
R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_rxdma = {
	.sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2,
	.sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x04,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cvt = {
	.sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2,
	.sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x01,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cxpl = {
	.sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2,
	.sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x05,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_io = {
	.sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2,
	.sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x05,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc = {
	.sel_addr = R_AX_PCIE_DBG_CTRL, .sel_byte = 2,
	.sel_msk = B_AX_DBG_SEL_MASK, .srt = 0x00, .end = 0x06,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = {
	.sel_addr = R_AX_DBG_CTRL, .sel_byte = 1,
	.sel_msk = B_AX_DBG_SEL0, .srt = 0x34, .end = 0x3C,
	.rd_addr = R_AX_DBG_PORT_SEL, .rd_byte = 4, .rd_msk = B_AX_DEBUG_ST_MASK
};

/*
 * rtw89_debug_mac_dbg_port_sel() - route debug port @sel onto the debug bus.
 *
 * Performs whatever register setup the selected port requires (enable bits,
 * debug-mux selectors) and prints a confirmation line to @m.
 *
 * Return: the descriptor table matching @sel, or NULL for an unknown selector.
 */
static const struct rtw89_mac_dbg_port_info *
rtw89_debug_mac_dbg_port_sel(struct seq_file *m, struct rtw89_dev *rtwdev,
			     u32 sel)
{
	const struct rtw89_mac_dbg_port_info *info;
	u32 val32;
	u16 val16;
	u8 val8;

	switch (sel) {
	case RTW89_DBG_PORT_SEL_PTCL_C0:
		info = &dbg_port_ptcl_c0;
		/* PTCL ports just need the debug-enable bit set */
		val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG);
		val16 |= B_AX_PTCL_DBG_EN;
		rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16);
		seq_puts(m, "Enable PTCL C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_PTCL_C1:
		info = &dbg_port_ptcl_c1;
		val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1);
		val16 |= B_AX_PTCL_DBG_EN;
		rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16);
		seq_puts(m, "Enable PTCL C1 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_SCH_C0:
		info = &dbg_port_sch_c0;
		val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL);
		val32 |= B_AX_SCH_DBG_EN;
		rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32);
		seq_puts(m, "Enable SCH C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_SCH_C1:
		info = &dbg_port_sch_c1;
		val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1);
		val32 |= B_AX_SCH_DBG_EN;
		rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32);
		seq_puts(m, "Enable SCH C1 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_TMAC_C0:
		info = &dbg_port_tmac_c0;
		/* mux TMAC into TRXPTCL, then TMAC C0 into both debug lanes */
		val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL);
		val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC,
					 B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		seq_puts(m, "Enable TMAC C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_TMAC_C1:
		info = &dbg_port_tmac_c1;
		val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
		val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC,
					 B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32);
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		seq_puts(m, "Enable TMAC C1 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_RMAC_C0:
		info = &dbg_port_rmac_c0;
		val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL);
		val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC,
					 B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		/* RMAC additionally narrows the TRXPTCL mux to the CMAC view */
		val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL);
		val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
				       B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8);
		seq_puts(m, "Enable RMAC C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_RMAC_C1:
		info = &dbg_port_rmac_c1;
		val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
		val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC,
					 B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32);
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
		val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
				       B_AX_DBGSEL_TRXPTCL_MASK);
		rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8);
		seq_puts(m, "Enable RMAC C1 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_RMACST_C0:
		/* state-monitor ports need no extra setup */
		info = &dbg_port_rmacst_c0;
		seq_puts(m, "Enable RMAC state C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_RMACST_C1:
		info = &dbg_port_rmacst_c1;
		seq_puts(m, "Enable RMAC state C1 dbgport.\n");
		break;
	case
RTW89_DBG_PORT_SEL_RMAC_PLCP_C0:
		info = &dbg_port_rmac_plcp_c0;
		seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1:
		info = &dbg_port_rmac_plcp_c1;
		seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_TRXPTCL_C0:
		info = &dbg_port_trxptcl_c0;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		seq_puts(m, "Enable TRXPTCL C0 dbgport.\n");
		break;
	case RTW89_DBG_PORT_SEL_TRXPTCL_C1:
		info = &dbg_port_trxptcl_c1;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
		val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
		rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
		seq_puts(m, "Enable TRXPTCL C1 dbgport.\n");
		break;
	/* TX info/TF-info ports all force reads from the TX data FIFO */
	case RTW89_DBG_PORT_SEL_TX_INFOL_C0:
		info = &dbg_port_tx_infol_c0;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1, val32);
		seq_puts(m, "Enable tx infol dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TX_INFOH_C0:
		info = &dbg_port_tx_infoh_c0;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1, val32);
		seq_puts(m, "Enable tx infoh dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TX_INFOL_C1:
		info = &dbg_port_tx_infol_c1;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
		seq_puts(m, "Enable tx infol dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TX_INFOH_C1:
		info = &dbg_port_tx_infoh_c1;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
		seq_puts(m, "Enable tx infoh dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0:
		info = &dbg_port_txtf_infol_c0;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1, val32);
		seq_puts(m, "Enable tx tf infol dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0:
		info = &dbg_port_txtf_infoh_c0;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1, val32);
		seq_puts(m, "Enable tx tf infoh dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1:
		info = &dbg_port_txtf_infol_c1;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
		seq_puts(m, "Enable tx tf infol dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1:
		info = &dbg_port_txtf_infoh_c1;
		val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
		val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
		rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
		seq_puts(m, "Enable tx tf infoh dump.\n");
		break;
	/* DLE function-interface ports need only the descriptor table */
	case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG:
		info = &dbg_port_wde_bufmgn_freepg;
		seq_puts(m, "Enable wde bufmgn freepg dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA:
		info = &dbg_port_wde_bufmgn_quota;
		seq_puts(m, "Enable wde bufmgn quota dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT:
		info = &dbg_port_wde_bufmgn_pagellt;
		seq_puts(m, "Enable wde bufmgn pagellt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO:
		info = &dbg_port_wde_bufmgn_pktinfo;
		seq_puts(m, "Enable wde bufmgn pktinfo dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT:
		info = &dbg_port_wde_quemgn_prepkt;
		seq_puts(m, "Enable wde quemgn prepkt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT:
		info = &dbg_port_wde_quemgn_nxtpkt;
		seq_puts(m, "Enable wde quemgn nxtpkt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL:
		info = &dbg_port_wde_quemgn_qlnktbl;
		seq_puts(m, "Enable wde quemgn qlnktbl dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY:
		info = &dbg_port_wde_quemgn_qempty;
		seq_puts(m, "Enable wde quemgn qempty dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG:
		info = &dbg_port_ple_bufmgn_freepg;
		seq_puts(m, "Enable ple bufmgn freepg dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA:
		info = &dbg_port_ple_bufmgn_quota;
		seq_puts(m, "Enable ple bufmgn quota dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT:
		info = &dbg_port_ple_bufmgn_pagellt;
		seq_puts(m, "Enable ple bufmgn pagellt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO:
		info = &dbg_port_ple_bufmgn_pktinfo;
		seq_puts(m, "Enable ple bufmgn pktinfo dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT:
		info = &dbg_port_ple_quemgn_prepkt;
		seq_puts(m, "Enable ple quemgn prepkt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT:
		info = &dbg_port_ple_quemgn_nxtpkt;
		seq_puts(m, "Enable ple quemgn nxtpkt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL:
		info = &dbg_port_ple_quemgn_qlnktbl;
		seq_puts(m, "Enable ple quemgn qlnktbl dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY:
		info = &dbg_port_ple_quemgn_qempty;
		seq_puts(m, "Enable ple quemgn qempty dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PKTINFO:
		info = &dbg_port_pktinfo;
		seq_puts(m, "Enable pktinfo dump.\n");
		break;
	/* PCIe ports route their selector into both debug lanes */
	case RTW89_DBG_PORT_SEL_PCIE_TXDMA:
		info = &dbg_port_pcie_txdma;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie txdma dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_RXDMA:
		info = &dbg_port_pcie_rxdma;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie rxdma dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_CVT:
		info = &dbg_port_pcie_cvt;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie cvt dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_CXPL:
		info = &dbg_port_pcie_cxpl;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie cxpl dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_IO:
		info = &dbg_port_pcie_io;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie io dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_MISC:
		info = &dbg_port_pcie_misc;
		val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
		val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0);
		val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1);
		rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
		seq_puts(m, "Enable pcie misc dump.\n");
		break;
	case RTW89_DBG_PORT_SEL_PCIE_MISC2:
		info = &dbg_port_pcie_misc2;
		val16 = rtw89_read16(rtwdev, R_AX_PCIE_DBG_CTRL);
		val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL,
					 B_AX_DBG_SEL_MASK);
		rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16);
		seq_puts(m, "Enable pcie misc2 dump.\n");
		break;
	default:
		seq_puts(m, "Dbg port select err\n");
		return NULL;
	}

	return info;
}

/*
 * is_dbg_port_valid() - can debug port @sel be dumped on this device?
 * Filters out PCIe ports on non-PCIe HCIs, second-MAC (C1) ports on
 * RTL8852B, and any port whose owning MAC block is not powered/enabled.
 */
static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
{
	if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE &&
	    sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA && sel
<= RTW89_DBG_PORT_SEL_PCIE_MISC2)
		return false;
	/* RTL8852B has no second MAC: all C1 ports are invalid there */
	if (rtwdev->chip->chip_id == RTL8852B &&
	    sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
	    sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
		return false;
	/* DLE (WDE/PLE/pktinfo) ports require the DMAC to be enabled */
	if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL) &&
	    sel >= RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG &&
	    sel <= RTW89_DBG_PORT_SEL_PKTINFO)
		return false;
	/* C0/C1 MAC ports require the corresponding CMAC to be enabled */
	if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL) &&
	    sel >= RTW89_DBG_PORT_SEL_PTCL_C0 &&
	    sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C0)
		return false;
	if (rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL) &&
	    sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
	    sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
		return false;
	return true;
}

/*
 * rtw89_debug_mac_dbg_port_dump() - dump one debug port into @m.
 *
 * Selects/enables port @sel via rtw89_debug_mac_dbg_port_sel(), then walks
 * the descriptor's [srt, end] index range: each index is written to the
 * select register (1/2/4 bytes wide per sel_byte), and after a 10 us settle
 * delay the sampled value is read back (rd_byte wide) and printed.
 *
 * Return: 0 on success, -EINVAL if @sel has no descriptor.
 */
static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
					 struct seq_file *m, u32 sel)
{
	const struct rtw89_mac_dbg_port_info *info;
	u8 val8;
	u16 val16;
	u32 val32;
	u32 i;

	info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel);
	if (!info) {
		rtw89_err(rtwdev, "failed to select debug port %d\n", sel);
		return -EINVAL;
	}

#define case_DBG_SEL(__sel) \
	case RTW89_DBG_PORT_SEL_##__sel: \
		seq_puts(m, "Dump debug port " #__sel ":\n"); \
		break

	switch (sel) {
	case_DBG_SEL(PTCL_C0);
	case_DBG_SEL(PTCL_C1);
	case_DBG_SEL(SCH_C0);
	case_DBG_SEL(SCH_C1);
	case_DBG_SEL(TMAC_C0);
	case_DBG_SEL(TMAC_C1);
	case_DBG_SEL(RMAC_C0);
	case_DBG_SEL(RMAC_C1);
	case_DBG_SEL(RMACST_C0);
	case_DBG_SEL(RMACST_C1);
	case_DBG_SEL(TRXPTCL_C0);
	case_DBG_SEL(TRXPTCL_C1);
	case_DBG_SEL(TX_INFOL_C0);
	case_DBG_SEL(TX_INFOH_C0);
	case_DBG_SEL(TX_INFOL_C1);
	case_DBG_SEL(TX_INFOH_C1);
	case_DBG_SEL(TXTF_INFOL_C0);
	case_DBG_SEL(TXTF_INFOH_C0);
	case_DBG_SEL(TXTF_INFOL_C1);
	case_DBG_SEL(TXTF_INFOH_C1);
	case_DBG_SEL(WDE_BUFMGN_FREEPG);
	case_DBG_SEL(WDE_BUFMGN_QUOTA);
	case_DBG_SEL(WDE_BUFMGN_PAGELLT);
	case_DBG_SEL(WDE_BUFMGN_PKTINFO);
	case_DBG_SEL(WDE_QUEMGN_PREPKT);
	case_DBG_SEL(WDE_QUEMGN_NXTPKT);
	case_DBG_SEL(WDE_QUEMGN_QLNKTBL);
	case_DBG_SEL(WDE_QUEMGN_QEMPTY);
	case_DBG_SEL(PLE_BUFMGN_FREEPG);
	case_DBG_SEL(PLE_BUFMGN_QUOTA);
	case_DBG_SEL(PLE_BUFMGN_PAGELLT);
	case_DBG_SEL(PLE_BUFMGN_PKTINFO);
	case_DBG_SEL(PLE_QUEMGN_PREPKT);
	case_DBG_SEL(PLE_QUEMGN_NXTPKT);
	case_DBG_SEL(PLE_QUEMGN_QLNKTBL);
	case_DBG_SEL(PLE_QUEMGN_QEMPTY);
	case_DBG_SEL(PKTINFO);
	case_DBG_SEL(PCIE_TXDMA);
	case_DBG_SEL(PCIE_RXDMA);
	case_DBG_SEL(PCIE_CVT);
	case_DBG_SEL(PCIE_CXPL);
	case_DBG_SEL(PCIE_IO);
	case_DBG_SEL(PCIE_MISC);
	case_DBG_SEL(PCIE_MISC2);
	}

#undef case_DBG_SEL

	seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr);
	seq_printf(m, "Read addr = 0x%X\n", info->rd_addr);

	for (i = info->srt; i <= info->end; i++) {
		/* write the index with the access width the port requires */
		switch (info->sel_byte) {
		case 1:
		default:
			rtw89_write8_mask(rtwdev, info->sel_addr,
					  info->sel_msk, i);
			seq_printf(m, "0x%02X: ", i);
			break;
		case 2:
			rtw89_write16_mask(rtwdev, info->sel_addr,
					   info->sel_msk, i);
			seq_printf(m, "0x%04X: ", i);
			break;
		case 4:
			rtw89_write32_mask(rtwdev, info->sel_addr,
					   info->sel_msk, i);
			seq_printf(m, "0x%04X: ", i);
			break;
		}

		udelay(10); /* let the debug mux settle before sampling */

		switch (info->rd_byte) {
		case 1:
		default:
			val8 = rtw89_read8_mask(rtwdev,
						info->rd_addr, info->rd_msk);
			seq_printf(m, "0x%02X\n", val8);
			break;
		case 2:
			val16 = rtw89_read16_mask(rtwdev,
						  info->rd_addr, info->rd_msk);
			seq_printf(m, "0x%04X\n", val16);
			break;
		case 4:
			val32 = rtw89_read32_mask(rtwdev,
						  info->rd_addr, info->rd_msk);
			seq_printf(m, "0x%08X\n", val32);
			break;
		}
	}

	return 0;
}

/*
 * rtw89_debug_mac_dump_dbg_port() - dump every valid debug port in order.
 * Stops at the first dump failure and returns its error code.
 */
static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev,
					 struct seq_file *m)
{
	u32 sel;
	int ret = 0;

	for (sel = RTW89_DBG_PORT_SEL_PTCL_C0;
	     sel < RTW89_DBG_PORT_SEL_LAST; sel++) {
		if (!is_dbg_port_valid(rtwdev, sel))
			continue;
		ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel);
		if (ret) {
			rtw89_err(rtwdev,
				  "failed to dump debug port %d\n", sel);
			break;
		}
	}

	return ret;
}

/* debugfs read: dump the debug-package sections enabled in dbgpkg_en */
static int
rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v)
{
	struct rtw89_debugfs_priv *debugfs_priv = m->private;
	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;

	if (debugfs_priv->dbgpkg_en.ss_dbg)
		rtw89_debug_mac_dump_ss_dbg(rtwdev, m);
	if (debugfs_priv->dbgpkg_en.dle_dbg)
rtw89_debug_mac_dump_dle_dbg(rtwdev, m); if (debugfs_priv->dbgpkg_en.dmac_dbg) rtw89_debug_mac_dump_dmac_dbg(rtwdev, m); if (debugfs_priv->dbgpkg_en.cmac_dbg) rtw89_debug_mac_dump_cmac_dbg(rtwdev, m); if (debugfs_priv->dbgpkg_en.dbg_port) rtw89_debug_mac_dump_dbg_port(rtwdev, m); return 0; }; static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev, const char __user *user_buf, size_t count) { char *buf; u8 *bin; int num; int err = 0; buf = memdup_user(user_buf, count); if (IS_ERR(buf)) return buf; num = count / 2; bin = kmalloc(num, GFP_KERNEL); if (!bin) { err = -EFAULT; goto out; } if (hex2bin(bin, buf, num)) { rtw89_info(rtwdev, "valid format: H1H2H3...\n"); kfree(bin); err = -EINVAL; } out: kfree(buf); return err ? ERR_PTR(err) : bin; } static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; u8 *h2c; u16 h2c_len = count / 2; h2c = rtw89_hex2bin_user(rtwdev, user_buf, count); if (IS_ERR(h2c)) return -EFAULT; rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len); kfree(h2c); return count; } static int rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_early_h2c *early_h2c; int seq = 0; mutex_lock(&rtwdev->mutex); list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c); mutex_unlock(&rtwdev->mutex); return 0; } static ssize_t rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct seq_file *m = (struct seq_file *)filp->private_data; struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_early_h2c *early_h2c; u8 *h2c; u16 h2c_len = count / 2; h2c = rtw89_hex2bin_user(rtwdev, user_buf, 
count); if (IS_ERR(h2c)) return -EFAULT; if (h2c_len >= 2 && h2c[0] == 0x00 && h2c[1] == 0x00) { kfree(h2c); rtw89_fw_free_all_early_h2c(rtwdev); goto out; } early_h2c = kmalloc(sizeof(*early_h2c), GFP_KERNEL); if (!early_h2c) { kfree(h2c); return -EFAULT; } early_h2c->h2c = h2c; early_h2c->h2c_len = h2c_len; mutex_lock(&rtwdev->mutex); list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list); mutex_unlock(&rtwdev->mutex); out: return count; } static int rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; seq_printf(m, "%d\n", test_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags)); return 0; } static ssize_t rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct seq_file *m = (struct seq_file *)filp->private_data; struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; bool fw_crash; int ret; if (!RTW89_CHK_FW_FEATURE(CRASH_TRIGGER, &rtwdev->fw)) return -EOPNOTSUPP; ret = kstrtobool_from_user(user_buf, count, &fw_crash); if (ret) return -EINVAL; if (!fw_crash) return -EINVAL; mutex_lock(&rtwdev->mutex); set_bit(RTW89_FLAG_RESTART_TRIGGER, rtwdev->flags); ret = rtw89_fw_h2c_trigger_cpu_exception(rtwdev); mutex_unlock(&rtwdev->mutex); if (ret) return ret; return count; } static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; rtw89_btc_dump_info(rtwdev, m); return 0; } static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_btc *btc = &rtwdev->btc; bool btc_manual; if (kstrtobool_from_user(user_buf, count, &btc_manual)) goto out; btc->ctrl.manual = 
btc_manual; out: return count; } static ssize_t rtw89_debug_fw_log_btc_manual_set(struct file *filp, const char __user *user_buf, size_t count, loff_t *loff) { struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_fw_info *fw_info = &rtwdev->fw; bool fw_log_manual; if (kstrtobool_from_user(user_buf, count, &fw_log_manual)) goto out; mutex_lock(&rtwdev->mutex); fw_info->fw_log_enable = fw_log_manual; rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual); mutex_unlock(&rtwdev->mutex); out: return count; } static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) { static const char * const he_gi_str[] = { [NL80211_RATE_INFO_HE_GI_0_8] = "0.8", [NL80211_RATE_INFO_HE_GI_1_6] = "1.6", [NL80211_RATE_INFO_HE_GI_3_2] = "3.2", }; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct rate_info *rate = &rtwsta->ra_report.txrate; struct ieee80211_rx_status *status = &rtwsta->rx_status; struct seq_file *m = (struct seq_file *)data; u8 rssi; seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id); if (rate->flags & RATE_INFO_FLAGS_MCS) seq_printf(m, "HT MCS-%d%s", rate->mcs, rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs, rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs, rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); else seq_printf(m, "Legacy %d", rate->legacy); seq_printf(m, "%s", rtwsta->ra_report.might_fallback_legacy ? 
" FB_G" : ""); seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate); seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait, sta->max_rc_amsdu_len); seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id); switch (status->encoding) { case RX_ENC_LEGACY: seq_printf(m, "Legacy %d", status->rate_idx + (status->band != NL80211_BAND_2GHZ ? 4 : 0)); break; case RX_ENC_HT: seq_printf(m, "HT MCS-%d%s", status->rate_idx, status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : ""); break; case RX_ENC_VHT: seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx, status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : ""); break; case RX_ENC_HE: seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx, status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? he_gi_str[rate->he_gi] : "N/A"); break; } seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate); rssi = ewma_rssi_read(&rtwsta->avg_rssi); seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n", RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi); } static void rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat, enum rtw89_hw_rate first_rate, int len) { int i; for (i = 0; i < len; i++) seq_printf(m, "%s%u", i == 0 ? 
"" : ", ", pkt_stat->rx_rate_cnt[first_rate + i]); } static const struct rtw89_rx_rate_cnt_info { enum rtw89_hw_rate first_rate; int len; int ext; const char *rate_mode; } rtw89_rx_rate_cnt_infos[] = { {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"}, {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"}, {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"}, {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"}, {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"}, {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"}, {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"}, {RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2ss:"}, }; static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_traffic_stats *stats = &rtwdev->stats; struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; const struct rtw89_rx_rate_cnt_info *info; int i; seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n", stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv, stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv); seq_printf(m, "Beacon: %u, TF: %u\n", pkt_stat->beacon_nr, stats->rx_tf_periodic); seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len, stats->rx_avg_len); seq_puts(m, "RX count:\n"); for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) { info = &rtw89_rx_rate_cnt_infos[i]; seq_printf(m, "%10s [", info->rate_mode); rtw89_debug_append_rx_rate(m, pkt_stat, info->first_rate, info->len); if (info->ext) { seq_puts(m, "]["); rtw89_debug_append_rx_rate(m, pkt_stat, info->first_rate + info->len, info->ext); } seq_puts(m, "]\n"); } ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m); return 0; } static void rtw89_dump_addr_cam(struct seq_file *m, struct rtw89_addr_cam_entry *addr_cam) { struct rtw89_sec_cam_entry *sec_entry; int i; seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx); seq_printf(m, "\t-> bssid_cam_idx=%u\n", 
addr_cam->bssid_cam_idx); seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map), addr_cam->sec_cam_map); for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) { sec_entry = addr_cam->sec_entries[i]; if (!sec_entry) continue; seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx); if (sec_entry->ext_key) seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1); seq_puts(m, "\n"); } } static void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; struct seq_file *m = (struct seq_file *)data; struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr); seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx); rtw89_dump_addr_cam(m, &rtwvif->addr_cam); } static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta) { struct rtw89_vif *rtwvif = rtwsta->rtwvif; struct rtw89_dev *rtwdev = rtwvif->rtwdev; struct rtw89_ba_cam_entry *entry; bool first = true; list_for_each_entry(entry, &rtwsta->ba_cam_list, list) { if (first) { seq_puts(m, "\tba_cam "); first = false; } else { seq_puts(m, ", "); } seq_printf(m, "tid[%u]=%d", entry->tid, (int)(entry - rtwdev->cam_info.ba_cam_entry)); } seq_puts(m, "\n"); } static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta) { struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; struct seq_file *m = (struct seq_file *)data; seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr, sta->tdls ? 
"(TDLS)" : ""); rtw89_dump_addr_cam(m, &rtwsta->addr_cam); rtw89_dump_ba_cam(m, rtwsta); } static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_cam_info *cam_info = &rtwdev->cam_info; mutex_lock(&rtwdev->mutex); seq_puts(m, "map:\n"); seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map), rtwdev->mac_id_map); seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map), cam_info->addr_cam_map); seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map), cam_info->bssid_cam_map); seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map), cam_info->sec_cam_map); seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map), cam_info->ba_cam_map); ieee80211_iterate_active_interfaces_atomic(rtwdev->hw, IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m); ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m); mutex_unlock(&rtwdev->mutex); return 0; } static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = { .cb_read = rtw89_debug_priv_read_reg_get, .cb_write = rtw89_debug_priv_read_reg_select, }; static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = { .cb_write = rtw89_debug_priv_write_reg_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = { .cb_read = rtw89_debug_priv_read_rf_get, .cb_write = rtw89_debug_priv_read_rf_select, }; static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = { .cb_write = rtw89_debug_priv_write_rf_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = { .cb_read = rtw89_debug_priv_rf_reg_dump_get, }; static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = { .cb_read = rtw89_debug_priv_txpwr_table_get, }; static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = { .cb_read = rtw89_debug_priv_mac_reg_dump_get, .cb_write = rtw89_debug_priv_mac_reg_dump_select, }; static 
struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = { .cb_read = rtw89_debug_priv_mac_mem_dump_get, .cb_write = rtw89_debug_priv_mac_mem_dump_select, }; static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = { .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get, .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select, }; static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = { .cb_write = rtw89_debug_priv_send_h2c_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = { .cb_read = rtw89_debug_priv_early_h2c_get, .cb_write = rtw89_debug_priv_early_h2c_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_fw_crash = { .cb_read = rtw89_debug_priv_fw_crash_get, .cb_write = rtw89_debug_priv_fw_crash_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = { .cb_read = rtw89_debug_priv_btc_info_get, }; static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = { .cb_write = rtw89_debug_priv_btc_manual_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = { .cb_write = rtw89_debug_fw_log_btc_manual_set, }; static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = { .cb_read = rtw89_debug_priv_phy_info_get, }; static struct rtw89_debugfs_priv rtw89_debug_priv_stations = { .cb_read = rtw89_debug_priv_stations_get, }; #define rtw89_debugfs_add(name, mode, fopname, parent) \ do { \ rtw89_debug_priv_ ##name.rtwdev = rtwdev; \ if (!debugfs_create_file(#name, mode, \ parent, &rtw89_debug_priv_ ##name, \ &file_ops_ ##fopname)) \ pr_debug("Unable to initialize debugfs:%s\n", #name); \ } while (0) #define rtw89_debugfs_add_w(name) \ rtw89_debugfs_add(name, S_IFREG | 0222, single_w, debugfs_topdir) #define rtw89_debugfs_add_rw(name) \ rtw89_debugfs_add(name, S_IFREG | 0666, common_rw, debugfs_topdir) #define rtw89_debugfs_add_r(name) \ rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir) void rtw89_debugfs_init(struct rtw89_dev *rtwdev) { struct dentry *debugfs_topdir; debugfs_topdir = 
debugfs_create_dir("rtw89", rtwdev->hw->wiphy->debugfsdir); rtw89_debugfs_add_rw(read_reg); rtw89_debugfs_add_w(write_reg); rtw89_debugfs_add_rw(read_rf); rtw89_debugfs_add_w(write_rf); rtw89_debugfs_add_r(rf_reg_dump); rtw89_debugfs_add_r(txpwr_table); rtw89_debugfs_add_rw(mac_reg_dump); rtw89_debugfs_add_rw(mac_mem_dump); rtw89_debugfs_add_rw(mac_dbg_port_dump); rtw89_debugfs_add_w(send_h2c); rtw89_debugfs_add_rw(early_h2c); rtw89_debugfs_add_rw(fw_crash); rtw89_debugfs_add_r(btc_info); rtw89_debugfs_add_w(btc_manual); rtw89_debugfs_add_w(fw_log_manual); rtw89_debugfs_add_r(phy_info); rtw89_debugfs_add_r(stations); } #endif #ifdef CONFIG_RTW89_DEBUGMSG void __rtw89_debug(struct rtw89_dev *rtwdev, enum rtw89_debug_mask mask, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; if (rtw89_debug_mask & mask) #if defined(__linux__) dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf); #elif defined(__FreeBSD__) { char *str; - vasprintf(&str, M_KMALLOC, fmt, args); + vasprintf(&str, M_KMALLOC, vaf.fmt, args); dev_printk(KERN_DEBUG, rtwdev->dev, "%s", str); free(str, M_KMALLOC); } #endif va_end(args); } EXPORT_SYMBOL(__rtw89_debug); #endif diff --git a/sys/modules/iwlwifi/Makefile b/sys/modules/iwlwifi/Makefile index 39e60cb452af..831fe97847a5 100644 --- a/sys/modules/iwlwifi/Makefile +++ b/sys/modules/iwlwifi/Makefile @@ -1,72 +1,66 @@ # $FreeBSD$ DEVIWLWIFIDIR= ${SRCTOP}/sys/contrib/dev/iwlwifi .PATH: ${DEVIWLWIFIDIR} WITH_DEBUGFS= 0 KMOD= if_iwlwifi SRCS= iwl-drv.c SRCS+= iwl-dbg-tlv.c iwl-debug.c SRCS+= iwl-eeprom-parse.c iwl-eeprom-read.c SRCS+= iwl-io.c iwl-nvm-parse.c iwl-phy-db.c iwl-trans.c SRCS+= cfg/7000.c cfg/8000.c cfg/9000.c cfg/22000.c SRCS+= fw/dbg.c fw/dump.c fw/img.c fw/notif-wait.c SRCS+= fw/paging.c fw/pnvm.c fw/rs.c fw/smem.c fw/init.c #SRCS+= fw/uefi.c SRCS+= mvm/rs.c mvm/binding.c mvm/coex.c mvm/ftm-initiator.c SRCS+= mvm/ftm-responder.c mvm/fw.c mvm/mac-ctxt.c SRCS+= mvm/mac80211.c 
mvm/nvm.c mvm/offloading.c mvm/ops.c SRCS+= mvm/phy-ctxt.c mvm/power.c mvm/quota.c mvm/rs-fw.c mvm/rfi.c SRCS+= mvm/rx.c mvm/rxmq.c mvm/scan.c mvm/sf.c mvm/sta.c mvm/tdls.c SRCS+= mvm/time-event.c mvm/tt.c mvm/tx.c mvm/utils.c #SRCS+= mvm/led.c SRCS+= pcie/ctxt-info-gen3.c pcie/ctxt-info.c SRCS+= pcie/drv.c pcie/rx.c pcie/trans-gen2.c pcie/trans.c SRCS+= pcie/tx-gen2.c pcie/tx.c SRCS+= queue/tx.c .if defined(WITH_DEBUGFS) && ${WITH_DEBUGFS} > 0 SRCS+= fw/debugfs.c mvm/debugfs.c mvm/debugfs-vif.c CFLAGS+= -DCONFIG_IWLWIFI_DEBUGFS=${WITH_DEBUGFS} CFLAGS+= -DCONFIG_MAC80211_DEBUGFS=${WITH_DEBUGFS} .endif SRCS+= iwl-devtrace.c # Other SRCS+= ${LINUXKPI_GENSRCS} SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h CFLAGS+= -DKBUILD_MODNAME='"iwlwifi"' CFLAGS+= -I${DEVIWLWIFIDIR} CFLAGS+= ${LINUXKPI_INCLUDES} CFLAGS+= -DCONFIG_IWLDVM=0 CFLAGS+= -DCONFIG_IWLMVM=1 # Helpful after fresh imports. #CFLAGS+= -ferror-limit=0 #CFLAGS+= -DCONFIG_ACPI=1 #CFLAGS+= -DCONFIG_INET=1 # Need LKPI TSO implementation. #CFLAGS+= -DCONFIG_IPV6=1 CFLAGS+= -DCONFIG_IWLWIFI_DEBUG=1 #CFLAGS+= -DCONFIG_IWLWIFI_LEDS=1 #CFLAGS+= -DCONFIG_IWLWIFI_OPMODE_MODULAR=1 CFLAGS+= -DCONFIG_IWLWIFI_DEVICE_TRACING=1 #CFLAGS+= -DCONFIG_LOCKDEP=1 #CFLAGS+= -DCONFIG_NL80211_TESTMODE=1 #CFLAGS+= -DCONFIG_PM=1 #CFLAGS+= -DCONFIG_PM_SLEEP=1 #CFLAGS+= -DCONFIG_THERMAL=1 #CFLAGS+= -DCONFIG_EFI=1 -# GCC warns about NULL format strings passed to iwl_fw_dbg_collect_trig -CWARNFLAGS.gcc+= -Wno-format - -# GCC warns about set but unused vaf variables -CWARNFLAGS.iwl-debug.c+= ${NO_WUNUSED_BUT_SET_VARIABLE} - .include <bsd.kmod.mk> diff --git a/sys/modules/rtw88/Makefile b/sys/modules/rtw88/Makefile index 930438a04f2c..2df00358821c 100644 --- a/sys/modules/rtw88/Makefile +++ b/sys/modules/rtw88/Makefile @@ -1,46 +1,43 @@ # $FreeBSD$ DEVRTW88DIR= ${SRCTOP}/sys/contrib/dev/rtw88 .PATH: ${DEVRTW88DIR} WITH_CONFIG_PM= 0 KMOD= if_rtw88 # Core parts.
SRCS= main.c SRCS+= bf.c coex.c debug.c efuse.c fw.c mac.c mac80211.c SRCS+= phy.c ps.c regd.c SRCS+= rx.c sar.c sec.c tx.c util.c .if defined(WITH_CONFIG_PM) && ${WITH_CONFIG_PM} > 0 SRCS+= wow.c CFLAGS+= -DCONFIG_PM=${WITH_CONFIG_PM} .endif # PCI parts. SRCS+= pci.c SRCS+= rtw8723d.c rtw8723d_table.c rtw8723de.c # 11n SRCS+= rtw8821c.c rtw8821c_table.c rtw8821ce.c # 11ac SRCS+= rtw8822b.c rtw8822b_table.c rtw8822be.c # 11ac SRCS+= rtw8822c.c rtw8822c_table.c rtw8822ce.c # 11ac # Other SRCS+= ${LINUXKPI_GENSRCS} SRCS+= opt_wlan.h opt_inet6.h opt_inet.h # Helpful after fresh imports. #CFLAGS+= -ferror-limit=0 CFLAGS+= -DKBUILD_MODNAME='"rtw88"' CFLAGS+= -I${DEVRTW88DIR} CFLAGS+= ${LINUXKPI_INCLUDES} CFLAGS+= -DCONFIG_RTW88_DEBUG #CFLAGS+= -DCONFIG_RTW88_DEBUGFS -# GCC warns about set but unused vaf variables -CWARNFLAGS.debug.c+= ${NO_WUNUSED_BUT_SET_VARIABLE} - .include <bsd.kmod.mk>