diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h index b5ee9467591d..71107c407c25 100644 --- a/sys/compat/linuxkpi/common/include/net/mac80211.h +++ b/sys/compat/linuxkpi/common/include/net/mac80211.h @@ -1,2121 +1,2122 @@ /*- * Copyright (c) 2020-2021 The FreeBSD Foundation * Copyright (c) 2020-2022 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _LINUXKPI_NET_MAC80211_H #define _LINUXKPI_NET_MAC80211_H #include #include #include #include #include #include #include #include #include #define ARPHRD_IEEE80211_RADIOTAP __LINE__ /* XXX TODO brcmfmac */ #define WLAN_OUI_MICROSOFT (0x0050F2) #define WLAN_OUI_TYPE_MICROSOFT_WPA (1) #define WLAN_OUI_TYPE_MICROSOFT_TPC (8) #define WLAN_OUI_TYPE_WFA_P2P (9) #define WLAN_OUI_WFA (0x506F9A) /* hw->conf.flags */ enum ieee80211_hw_conf_flags { IEEE80211_CONF_IDLE = BIT(0), IEEE80211_CONF_PS = BIT(1), IEEE80211_CONF_MONITOR = BIT(2), }; /* (*ops->config()) */ enum ieee80211_hw_conf_changed_flags { IEEE80211_CONF_CHANGE_CHANNEL = BIT(0), IEEE80211_CONF_CHANGE_IDLE = BIT(1), IEEE80211_CONF_CHANGE_PS = BIT(2), IEEE80211_CONF_CHANGE_MONITOR = BIT(3), }; #define CFG80211_TESTMODE_CMD(_x) /* XXX TODO */ #define FCS_LEN 4 /* ops.configure_filter() */ enum mcast_filter_flags { FIF_ALLMULTI = BIT(0), FIF_PROBE_REQ = BIT(1), FIF_BCN_PRBRESP_PROMISC = BIT(2), FIF_FCSFAIL = BIT(3), FIF_OTHER_BSS = BIT(4), FIF_PSPOLL = BIT(5), FIF_CONTROL = BIT(6), }; enum ieee80211_bss_changed { BSS_CHANGED_ARP_FILTER = BIT(0), BSS_CHANGED_ASSOC = BIT(1), BSS_CHANGED_BANDWIDTH = BIT(2), BSS_CHANGED_BEACON = BIT(3), BSS_CHANGED_BEACON_ENABLED = BIT(4), BSS_CHANGED_BEACON_INFO = BIT(5), BSS_CHANGED_BEACON_INT = BIT(6), BSS_CHANGED_BSSID = BIT(7), BSS_CHANGED_CQM = BIT(8), BSS_CHANGED_ERP_CTS_PROT = BIT(9), BSS_CHANGED_ERP_SLOT = BIT(10), BSS_CHANGED_FTM_RESPONDER = BIT(11), BSS_CHANGED_HT = BIT(12), BSS_CHANGED_IDLE = BIT(13), BSS_CHANGED_MU_GROUPS = BIT(14), BSS_CHANGED_P2P_PS = BIT(15), BSS_CHANGED_PS = BIT(16), BSS_CHANGED_QOS = BIT(17), BSS_CHANGED_TXPOWER = BIT(18), BSS_CHANGED_HE_BSS_COLOR = BIT(19), BSS_CHANGED_AP_PROBE_RESP = BIT(20), BSS_CHANGED_BASIC_RATES = BIT(21), BSS_CHANGED_ERP_PREAMBLE = BIT(22), BSS_CHANGED_IBSS = BIT(23), BSS_CHANGED_MCAST_RATE = BIT(24), BSS_CHANGED_SSID = BIT(25), }; /* 802.11 Figure 9-256 Suite selector format. 
[OUI(3), SUITE TYPE(1)] */ #define WLAN_CIPHER_SUITE_OUI(_oui, _x) (((_oui) << 8) | ((_x) & 0xff)) /* 802.11 Table 9-131 Cipher suite selectors. */ /* 802.1x suite B 11 */ #define WLAN_CIPHER_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x) /* Use group 0 */ #define WLAN_CIPHER_SUITE_WEP40 WLAN_CIPHER_SUITE(1) #define WLAN_CIPHER_SUITE_TKIP WLAN_CIPHER_SUITE(2) /* Reserved 3 */ #define WLAN_CIPHER_SUITE_CCMP WLAN_CIPHER_SUITE(4) /* CCMP-128 */ #define WLAN_CIPHER_SUITE_WEP104 WLAN_CIPHER_SUITE(5) #define WLAN_CIPHER_SUITE_AES_CMAC WLAN_CIPHER_SUITE(6) /* BIP-CMAC-128 */ /* Group addressed traffic not allowed 7 */ #define WLAN_CIPHER_SUITE_GCMP WLAN_CIPHER_SUITE(8) #define WLAN_CIPHER_SUITE_GCMP_256 WLAN_CIPHER_SUITE(9) #define WLAN_CIPHER_SUITE_CCMP_256 WLAN_CIPHER_SUITE(10) #define WLAN_CIPHER_SUITE_BIP_GMAC_128 WLAN_CIPHER_SUITE(11) #define WLAN_CIPHER_SUITE_BIP_GMAC_256 WLAN_CIPHER_SUITE(12) #define WLAN_CIPHER_SUITE_BIP_CMAC_256 WLAN_CIPHER_SUITE(13) /* Reserved 14-255 */ /* 802.11 Table 9-133 AKM suite selectors. */ #define WLAN_AKM_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x) /* Reserved 0 */ #define WLAN_AKM_SUITE_8021X WLAN_AKM_SUITE(1) #define WLAN_AKM_SUITE_PSK WLAN_AKM_SUITE(2) #define WLAN_AKM_SUITE_FT_8021X WLAN_AKM_SUITE(3) #define WLAN_AKM_SUITE_FT_PSK WLAN_AKM_SUITE(4) #define WLAN_AKM_SUITE_8021X_SHA256 WLAN_AKM_SUITE(5) #define WLAN_AKM_SUITE_PSK_SHA256 WLAN_AKM_SUITE(6) /* TDLS 7 */ #define WLAN_AKM_SUITE_SAE WLAN_AKM_SUITE(8) /* FToSAE 9 */ /* AP peer key 10 */ /* 802.1x suite B 11 */ /* 802.1x suite B 384 12 */ /* FTo802.1x 384 13 */ /* Reserved 14-255 */ /* Apparently 11ax defines more. Seen (19,20) mentioned. */ struct ieee80211_sta; struct ieee80211_ampdu_params { /* TODO FIXME */ struct ieee80211_sta *sta; uint8_t tid; uint16_t ssn; int action, amsdu, buf_size, timeout; }; struct ieee80211_bar { /* TODO FIXME */ int control, start_seq_num; uint8_t *ra; uint16_t frame_control; }; struct ieee80211_p2p_noa_desc { uint32_t count; /* uint8_t ? 
*/ uint32_t duration; uint32_t interval; uint32_t start_time; }; struct ieee80211_p2p_noa_attr { uint8_t index; uint8_t oppps_ctwindow; struct ieee80211_p2p_noa_desc desc[4]; }; struct ieee80211_mutable_offsets { /* TODO FIXME */ uint16_t tim_offset; int cntdwn_counter_offs; }; #define WLAN_MEMBERSHIP_LEN (8) #define WLAN_USER_POSITION_LEN (16) struct ieee80211_bss_conf { /* TODO FIXME */ uint8_t bssid[ETH_ALEN]; uint8_t transmitter_bssid[ETH_ALEN]; struct ieee80211_ftm_responder_params *ftmr_params; struct ieee80211_p2p_noa_attr p2p_noa_attr; struct cfg80211_chan_def chandef; __be32 arp_addr_list[1]; /* XXX TODO */ struct ieee80211_rate *beacon_rate; struct { uint8_t membership[WLAN_MEMBERSHIP_LEN]; uint8_t position[WLAN_USER_POSITION_LEN]; } mu_group; struct { int color; } he_bss_color; size_t ssid_len; uint8_t ssid[IEEE80211_NWID_LEN]; uint16_t aid; uint16_t ht_operation_mode; int arp_addr_cnt; uint8_t dtim_period; bool assoc; bool idle; bool qos; bool ps; bool twt_broadcast; bool use_cts_prot; bool use_short_preamble; bool use_short_slot; uint16_t beacon_int; uint32_t sync_device_ts; uint64_t sync_tsf; uint8_t sync_dtim_count; int16_t txpower; int mcast_rate[NUM_NL80211_BANDS]; int ack_enabled, bssid_index, bssid_indicator, cqm_rssi_hyst, cqm_rssi_thold, ema_ap, frame_time_rts_th, ftm_responder; int htc_trig_based_pkt_ext; int multi_sta_back_32bit, nontransmitted; int profile_periodicity; int twt_requester, uora_exists, uora_ocw_range; int assoc_capability, enable_beacon, hidden_ssid, ibss_joined, twt_protected; int fils_discovery, he_obss_pd, he_oper, twt_responder, unsol_bcast_probe_resp_interval; unsigned long basic_rates; bool he_support; }; struct ieee80211_chanctx_conf { /* TODO FIXME */ int rx_chains_dynamic, rx_chains_static; bool radar_enabled; struct cfg80211_chan_def def; struct cfg80211_chan_def min_def; /* Must stay last. 
*/ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_channel_switch { /* TODO FIXME */ int block_tx, count, delay, device_timestamp, timestamp; struct cfg80211_chan_def chandef; }; struct ieee80211_cipher_scheme { uint32_t cipher; uint8_t iftype; /* We do not know the size of this. */ uint8_t hdr_len; uint8_t pn_len; uint8_t pn_off; uint8_t key_idx_off; uint8_t key_idx_mask; uint8_t key_idx_shift; uint8_t mic_len; }; enum ieee80211_event_type { BA_FRAME_TIMEOUT, BAR_RX_EVENT, MLME_EVENT, RSSI_EVENT, }; enum ieee80211_rssi_event_data { RSSI_EVENT_LOW, RSSI_EVENT_HIGH, }; enum ieee80211_mlme_event_data { ASSOC_EVENT, AUTH_EVENT, DEAUTH_RX_EVENT, DEAUTH_TX_EVENT, }; enum ieee80211_mlme_event_status { MLME_DENIED, MLME_TIMEOUT, }; struct ieee80211_mlme_event { enum ieee80211_mlme_event_data data; enum ieee80211_mlme_event_status status; int reason; }; struct ieee80211_event { /* TODO FIXME */ enum ieee80211_event_type type; union { struct { int ssn; struct ieee80211_sta *sta; uint8_t tid; } ba; struct ieee80211_mlme_event mlme; } u; }; struct ieee80211_ftm_responder_params { /* TODO FIXME */ uint8_t *lci; uint8_t *civicloc; int lci_len; int civicloc_len; }; struct ieee80211_he_mu_edca_param_ac_rec { /* TODO FIXME */ int aifsn, ecw_min_max, mu_edca_timer; }; struct ieee80211_conf { int dynamic_ps_timeout; uint32_t listen_interval; enum ieee80211_hw_conf_flags flags; struct cfg80211_chan_def chandef; }; enum ieee80211_hw_flags { IEEE80211_HW_AMPDU_AGGREGATION, IEEE80211_HW_AP_LINK_PS, IEEE80211_HW_BUFF_MMPDU_TXQ, IEEE80211_HW_CHANCTX_STA_CSA, IEEE80211_HW_CONNECTION_MONITOR, IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_HAS_RATE_CONTROL, IEEE80211_HW_MFP_CAPABLE, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, IEEE80211_HW_REPORTS_TX_ACK_STATUS, IEEE80211_HW_RX_INCLUDES_FCS, IEEE80211_HW_SIGNAL_DBM, IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, IEEE80211_HW_SPECTRUM_MGMT, IEEE80211_HW_STA_MMPDU_TXQ, IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, 
IEEE80211_HW_SUPPORTS_CLONED_SKBS, IEEE80211_HW_SUPPORTS_DYNAMIC_PS, IEEE80211_HW_SUPPORTS_MULTI_BSSID, IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID, IEEE80211_HW_SUPPORTS_PS, IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, IEEE80211_HW_SUPPORT_FAST_XMIT, IEEE80211_HW_TDLS_WIDER_BW, IEEE80211_HW_TIMING_BEACON_ONLY, IEEE80211_HW_TX_AMPDU_SETUP_IN_HW, IEEE80211_HW_TX_AMSDU, IEEE80211_HW_TX_FRAG_LIST, IEEE80211_HW_USES_RSS, IEEE80211_HW_WANT_MONITOR_VIF, IEEE80211_HW_SW_CRYPTO_CONTROL, IEEE80211_HW_SUPPORTS_TX_FRAG, IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, IEEE80211_HW_SUPPORTS_PER_STA_GTK, IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_QUEUE_CONTROL, /* Keep last. */ NUM_IEEE80211_HW_FLAGS }; struct ieee80211_hw { struct wiphy *wiphy; /* TODO FIXME */ int max_rx_aggregation_subframes, max_tx_aggregation_subframes; int extra_tx_headroom, weight_multiplier; int max_rate_tries, max_rates, max_report_rates; struct ieee80211_cipher_scheme *cipher_schemes; int n_cipher_schemes; const char *rate_control_algorithm; struct { uint16_t units_pos; /* radiotap "spec" is .. inconsistent. */ uint16_t accuracy; } radiotap_timestamp; size_t sta_data_size; size_t vif_data_size; size_t chanctx_data_size; size_t txq_data_size; uint16_t radiotap_mcs_details; uint16_t radiotap_vht_details; uint16_t queues; uint16_t offchannel_tx_hw_queue; uint16_t uapsd_max_sp_len; uint16_t uapsd_queues; uint16_t max_tx_fragments; uint16_t max_listen_interval; netdev_features_t netdev_features; unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)]; struct ieee80211_conf conf; #if 0 /* leave here for documentation purposes. This does NOT work. */ /* Must stay last. 
*/ uint8_t priv[0] __aligned(CACHE_LINE_SIZE); #else void *priv; #endif }; enum ieee802111_key_flag { IEEE80211_KEY_FLAG_GENERATE_IV = BIT(0), IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(1), IEEE80211_KEY_FLAG_PAIRWISE = BIT(2), IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(3), IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(4), IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(5), IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(6), }; struct ieee80211_key_conf { atomic64_t tx_pn; uint32_t cipher; uint8_t icv_len; /* __unused nowadays? */ uint8_t iv_len; uint8_t hw_key_idx; /* Set by drv. */ uint8_t keyidx; uint16_t flags; uint8_t keylen; uint8_t key[0]; }; struct ieee80211_key_seq { /* TODO FIXME */ union { struct { uint8_t seq[IEEE80211_MAX_PN_LEN]; uint8_t seq_len; } hw; struct { uint8_t pn[IEEE80211_CCMP_PN_LEN]; } ccmp; struct { uint8_t pn[IEEE80211_CCMP_PN_LEN]; } aes_cmac; struct { uint32_t iv32; uint16_t iv16; } tkip; }; }; enum ieee80211_rx_status_flags { RX_FLAG_ALLOW_SAME_PN = BIT(0), RX_FLAG_AMPDU_DETAILS = BIT(1), RX_FLAG_AMPDU_EOF_BIT = BIT(2), RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(3), RX_FLAG_DECRYPTED = BIT(4), RX_FLAG_DUP_VALIDATED = BIT(5), RX_FLAG_FAILED_FCS_CRC = BIT(6), RX_FLAG_ICV_STRIPPED = BIT(7), RX_FLAG_MACTIME_PLCP_START = BIT(8), RX_FLAG_MACTIME_START = BIT(9), RX_FLAG_MIC_STRIPPED = BIT(10), RX_FLAG_MMIC_ERROR = BIT(11), RX_FLAG_MMIC_STRIPPED = BIT(12), RX_FLAG_NO_PSDU = BIT(13), RX_FLAG_PN_VALIDATED = BIT(14), RX_FLAG_RADIOTAP_HE = BIT(15), RX_FLAG_RADIOTAP_HE_MU = BIT(16), RX_FLAG_RADIOTAP_LSIG = BIT(17), RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(18), RX_FLAG_NO_SIGNAL_VAL = BIT(19), RX_FLAG_IV_STRIPPED = BIT(20), RX_FLAG_AMPDU_IS_LAST = BIT(21), RX_FLAG_AMPDU_LAST_KNOWN = BIT(22), RX_FLAG_AMSDU_MORE = BIT(23), RX_FLAG_MACTIME_END = BIT(24), RX_FLAG_ONLY_MONITOR = BIT(25), RX_FLAG_SKIP_MONITOR = BIT(26), }; struct ieee80211_rx_status { /* TODO FIXME, this is too large. Over-reduce types to u8 where possible. 
*/ uint64_t boottime_ns; uint64_t mactime; uint32_t device_timestamp; enum ieee80211_rx_status_flags flag; uint16_t freq; uint8_t bw; #define RATE_INFO_BW_20 0x01 #define RATE_INFO_BW_40 0x02 #define RATE_INFO_BW_80 0x04 #define RATE_INFO_BW_160 0x08 #define RATE_INFO_BW_HE_RU 0x10 uint8_t encoding; #define RX_ENC_LEGACY 0x00 #define RX_ENC_HE 0x01 #define RX_ENC_HT 0x02 #define RX_ENC_VHT 0x04 uint8_t ampdu_reference; uint8_t band; uint8_t chains; int8_t chain_signal[IEEE80211_MAX_CHAINS]; int8_t signal; uint8_t enc_flags; uint8_t he_dcm; uint8_t he_gi; uint8_t he_ru; uint8_t zero_length_psdu_type; uint8_t nss; uint8_t rate_idx; }; struct ieee80211_scan_ies { /* TODO FIXME */ int common_ie_len; int len[NUM_NL80211_BANDS]; uint8_t *common_ies; uint8_t *ies[NUM_NL80211_BANDS]; }; struct ieee80211_scan_request { struct ieee80211_scan_ies ies; struct cfg80211_scan_request req; }; struct ieee80211_txq { struct ieee80211_sta *sta; struct ieee80211_vif *vif; int ac; uint8_t tid; /* Must stay last. */ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_sta_rates { /* XXX TODO */ /* XXX some _rcu thing */ struct { int idx; int flags; } rate[1]; /* XXX what is the real number? 
*/ }; struct ieee80211_sta_txpwr { /* XXX TODO */ enum nl80211_tx_power_setting type; short power; }; struct ieee80211_link_sta { uint32_t supp_rates[NUM_NL80211_BANDS]; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_he_cap he_cap; struct ieee80211_sta_he_6ghz_capa he_6ghz_capa; uint8_t rx_nss; enum ieee80211_sta_rx_bw bandwidth; struct ieee80211_sta_txpwr txpwr; }; #define IEEE80211_NUM_TIDS 16 /* net80211::WME_NUM_TID */ struct ieee80211_sta { /* TODO FIXME */ int max_amsdu_len, max_amsdu_subframes, max_rc_amsdu_len, max_sp; int mfp, smps_mode, tdls, tdls_initiator, uapsd_queues, wme; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; /* iwlwifi: 8 and adds +1 to tid_data, net80211::IEEE80211_TID_SIZE */ struct ieee80211_sta_rates *rates; /* some rcu thing? */ uint32_t max_tid_amsdu_len[IEEE80211_NUM_TIDS]; uint8_t addr[ETH_ALEN]; uint16_t aid; struct ieee80211_link_sta deflink; /* Must stay last. */ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_tdls_ch_sw_params { /* TODO FIXME */ int action_code, ch_sw_tm_ie, status, switch_time, switch_timeout, timestamp; struct ieee80211_sta *sta; struct cfg80211_chan_def *chandef; struct sk_buff *tmpl_skb; }; struct ieee80211_tx_control { /* TODO FIXME */ struct ieee80211_sta *sta; }; struct ieee80211_tx_queue_params { /* These types are based on iwlwifi FW structs. 
*/ uint16_t cw_min; uint16_t cw_max; uint16_t txop; uint8_t aifs; /* TODO FIXME */ int acm, mu_edca, uapsd; struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec; }; struct ieee80211_tx_rate { uint8_t idx; uint16_t count:5, flags:11; }; enum ieee80211_vif_driver_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), }; struct ieee80211_vif { /* TODO FIXME */ enum nl80211_iftype type; int csa_active, mu_mimo_owner; int cab_queue; int color_change_active, offload_flags; enum ieee80211_vif_driver_flags driver_flags; bool p2p; bool probe_req_reg; uint8_t addr[ETH_ALEN]; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_txq *txq; struct ieee80211_bss_conf bss_conf; uint8_t hw_queue[IEEE80211_NUM_ACS]; /* Must stay last. */ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_vif_chanctx_switch { struct ieee80211_chanctx_conf *old_ctx, *new_ctx; struct ieee80211_vif *vif; }; struct ieee80211_prep_tx_info { u16 duration; bool success; }; /* XXX-BZ too big, over-reduce size to u8, and array sizes to minuimum to fit in skb->cb. */ /* Also warning: some sizes change by pointer size! This is 64bit only. */ struct ieee80211_tx_info { enum ieee80211_tx_info_flags flags; /* TODO FIXME */ u8 band; u8 hw_queue; bool tx_time_est; union { struct { struct ieee80211_tx_rate rates[4]; bool use_rts; struct ieee80211_vif *vif; struct ieee80211_key_conf *hw_key; enum ieee80211_tx_control_flags flags; } control; struct { struct ieee80211_tx_rate rates[4]; uint32_t ack_signal; uint8_t ampdu_ack_len; uint8_t ampdu_len; uint8_t antenna; uint16_t tx_time; bool is_valid_ack_signal; void *status_driver_data[16 / sizeof(void *)]; /* XXX TODO */ } status; #define IEEE80211_TX_INFO_DRIVER_DATA_SIZE (5 * sizeof(void *)) /* XXX TODO 5? 
*/ void *driver_data[IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)]; }; }; /* net80211 conflict */ struct linuxkpi_ieee80211_tim_ie { uint8_t dtim_count; uint8_t dtim_period; uint8_t bitmap_ctrl; uint8_t *virtual_map; }; #define ieee80211_tim_ie linuxkpi_ieee80211_tim_ie struct survey_info { /* net80211::struct ieee80211_channel_survey */ /* TODO FIXME */ uint32_t filled; #define SURVEY_INFO_TIME 0x0001 #define SURVEY_INFO_TIME_RX 0x0002 #define SURVEY_INFO_TIME_SCAN 0x0004 #define SURVEY_INFO_TIME_TX 0x0008 #define SURVEY_INFO_TIME_BSS_RX 0x0010 #define SURVEY_INFO_TIME_BUSY 0x0020 #define SURVEY_INFO_IN_USE 0x0040 #define SURVEY_INFO_NOISE_DBM 0x0080 uint32_t noise; uint64_t time; uint64_t time_bss_rx; uint64_t time_busy; uint64_t time_rx; uint64_t time_scan; uint64_t time_tx; struct ieee80211_channel *channel; }; enum ieee80211_iface_iter { IEEE80211_IFACE_ITER_NORMAL = BIT(0), IEEE80211_IFACE_ITER_RESUME_ALL = BIT(1), IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2), /* seems to be an iter flag */ /* Internal flags only. */ /* ieee80211_iterate_active_interfaces*(). 
*/ IEEE80211_IFACE_ITER__ATOMIC = BIT(6), IEEE80211_IFACE_ITER__ACTIVE = BIT(7), }; enum set_key_cmd { SET_KEY, DISABLE_KEY, }; enum rx_enc_flags { RX_ENC_FLAG_SHORTPRE = BIT(0), RX_ENC_FLAG_SHORT_GI = BIT(1), RX_ENC_FLAG_HT_GF = BIT(2), RX_ENC_FLAG_LDPC = BIT(3), RX_ENC_FLAG_BF = BIT(4), #define RX_ENC_FLAG_STBC_SHIFT 6 }; enum sta_notify_cmd { STA_NOTIFY_AWAKE, STA_NOTIFY_SLEEP, }; struct ieee80211_ops { /* TODO FIXME */ int (*start)(struct ieee80211_hw *); void (*stop)(struct ieee80211_hw *); int (*config)(struct ieee80211_hw *, u32); void (*reconfig_complete)(struct ieee80211_hw *, enum ieee80211_reconfig_type); int (*add_interface)(struct ieee80211_hw *, struct ieee80211_vif *); void (*remove_interface)(struct ieee80211_hw *, struct ieee80211_vif *); int (*change_interface)(struct ieee80211_hw *, struct ieee80211_vif *, enum nl80211_iftype, bool); void (*sw_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *); void (*sw_scan_complete)(struct ieee80211_hw *, struct ieee80211_vif *); int (*sched_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_sched_scan_request *, struct ieee80211_scan_ies *); int (*sched_scan_stop)(struct ieee80211_hw *, struct ieee80211_vif *); int (*hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *); void (*cancel_hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *); int (*conf_tx)(struct ieee80211_hw *, struct ieee80211_vif *, u16, const struct ieee80211_tx_queue_params *); void (*tx)(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *); int (*tx_last_beacon)(struct ieee80211_hw *); void (*wake_tx_queue)(struct ieee80211_hw *, struct ieee80211_txq *); void (*mgd_prepare_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void (*mgd_complete_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *, struct ieee80211_vif *); 
void (*flush)(struct ieee80211_hw *, struct ieee80211_vif *, u32, bool); int (*set_frag_threshold)(struct ieee80211_hw *, u32); void (*sync_rx_queues)(struct ieee80211_hw *); void (*allow_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); void (*release_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); int (*sta_add)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_set_txpwr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_statistics)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct station_info *); void (*sta_pre_rcu_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_state)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state, enum ieee80211_sta_state); void (*sta_notify)(struct ieee80211_hw *, struct ieee80211_vif *, enum sta_notify_cmd, struct ieee80211_sta *); void (*sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u32); void (*sta_rate_tbl_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_set_4addr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool); u64 (*prepare_multicast)(struct ieee80211_hw *, struct netdev_hw_addr_list *); int (*ampdu_action)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_ampdu_params *); bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *, struct sk_buff *, struct sk_buff *); int (*pre_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*post_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *); void (*channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, 
struct ieee80211_channel_switch *); void (*abort_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *); void (*channel_switch_rx_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*tdls_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8, struct cfg80211_chan_def *, struct sk_buff *, u32); void (*tdls_cancel_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*tdls_recv_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_tdls_ch_sw_params *); int (*add_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*remove_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*change_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, u32); int (*assign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); void (*unassign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); int (*switch_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode); int (*get_antenna)(struct ieee80211_hw *, u32 *, u32 *); int (*set_antenna)(struct ieee80211_hw *, u32, u32); int (*remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel *, int, enum ieee80211_roc_type); int (*cancel_remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *); void (*configure_filter)(struct ieee80211_hw *, unsigned int, unsigned int *, u64); void (*config_iface_filter)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, unsigned int); - void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u32); + void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u64); int (*set_rts_threshold)(struct ieee80211_hw *, u32); void 
(*event_callback)(struct ieee80211_hw *, struct ieee80211_vif *, const struct ieee80211_event *); int (*get_survey)(struct ieee80211_hw *, int, struct survey_info *); int (*get_ftm_responder_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_ftm_responder_stats *); void (*offset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, s64); int (*set_bitrate_mask)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_bitrate_mask *); void (*set_coverage_class)(struct ieee80211_hw *, s16); int (*set_tim)(struct ieee80211_hw *, struct ieee80211_sta *, bool); int (*set_key)(struct ieee80211_hw *, enum set_key_cmd, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *); void (*set_default_unicast_key)(struct ieee80211_hw *, struct ieee80211_vif *, int); void (*update_tkip_key)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32, u16 *); int (*start_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); void (*abort_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); int (*start_ap)(struct ieee80211_hw *, struct ieee80211_vif *); void (*stop_ap)(struct ieee80211_hw *, struct ieee80211_vif *); int (*join_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); void (*leave_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); int (*set_sar_specs)(struct ieee80211_hw *, const struct cfg80211_sar_specs *); int (*set_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct cfg80211_tid_config *); int (*reset_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8); int (*get_et_sset_count)(struct ieee80211_hw *, struct ieee80211_vif *, int); void (*get_et_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct ethtool_stats *, u64 *); void (*get_et_strings)(struct ieee80211_hw *, struct ieee80211_vif *, u32, u8 *); void 
(*update_vif_offload)(struct ieee80211_hw *, struct ieee80211_vif *); }; /* -------------------------------------------------------------------------- */ /* linux_80211.c */ extern const struct cfg80211_ops linuxkpi_mac80211cfgops; struct ieee80211_hw *linuxkpi_ieee80211_alloc_hw(size_t, const struct ieee80211_ops *); void linuxkpi_ieee80211_iffree(struct ieee80211_hw *); void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *, char *); int linuxkpi_ieee80211_ifattach(struct ieee80211_hw *); void linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *); struct ieee80211_hw * linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *); void linuxkpi_ieee80211_iterate_interfaces( struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *); void linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *, struct ieee80211_vif *, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *); void linuxkpi_ieee80211_iterate_chan_contexts(struct ieee80211_hw *, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *), void *); void linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *, void (*iterfunc)(void *, struct ieee80211_sta *), void *); void linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *, struct cfg80211_scan_info *); void linuxkpi_ieee80211_rx(struct ieee80211_hw *, struct sk_buff *, struct ieee80211_sta *, struct napi_struct *); uint8_t linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *); struct ieee80211_sta *linuxkpi_ieee80211_find_sta(struct ieee80211_vif *, const u8 *); struct ieee80211_sta *linuxkpi_ieee80211_find_sta_by_ifaddr( struct ieee80211_hw *, const uint8_t *, const uint8_t *); struct sk_buff *linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *, struct ieee80211_txq *); bool linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8, const u8 *, size_t); bool linuxkpi_ieee80211_ie_advance(size_t *, const u8 *, 
size_t); void linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *, struct sk_buff *, int); void linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *, struct delayed_work *, int); void linuxkpi_ieee80211_queue_work(struct ieee80211_hw *, struct work_struct *); struct sk_buff *linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *, struct ieee80211_vif *); struct sk_buff *linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *, struct ieee80211_vif *, bool); void linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *, unsigned long *, unsigned long *); struct wireless_dev *linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *); void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *); void linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *); struct sk_buff *linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *, uint8_t *, uint8_t *, size_t, size_t); void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *, struct sk_buff *); /* -------------------------------------------------------------------------- */ static __inline void _ieee80211_hw_set(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag) { set_bit(flag, hw->flags); } static __inline bool __ieee80211_hw_check(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag) { return (test_bit(flag, hw->flags)); } /* They pass in shortened flag names; how confusingly inconsistent. */ #define ieee80211_hw_set(_hw, _flag) \ _ieee80211_hw_set(_hw, IEEE80211_HW_ ## _flag) #define ieee80211_hw_check(_hw, _flag) \ __ieee80211_hw_check(_hw, IEEE80211_HW_ ## _flag) /* XXX-BZ add CTASSERTS that size of struct is <= sizeof skb->cb. 
*/ CTASSERT(sizeof(struct ieee80211_tx_info) <= sizeof(((struct sk_buff *)0)->cb)); #define IEEE80211_SKB_CB(_skb) \ ((struct ieee80211_tx_info *)((_skb)->cb)) CTASSERT(sizeof(struct ieee80211_rx_status) <= sizeof(((struct sk_buff *)0)->cb)); #define IEEE80211_SKB_RXCB(_skb) \ ((struct ieee80211_rx_status *)((_skb)->cb)) static __inline void ieee80211_free_hw(struct ieee80211_hw *hw) { linuxkpi_ieee80211_iffree(hw); if (hw->wiphy != NULL) wiphy_free(hw->wiphy); /* Note that *hw is not valid any longer after this. */ IMPROVE(); } static __inline struct ieee80211_hw * ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops) { return (linuxkpi_ieee80211_alloc_hw(priv_len, ops)); } static __inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev) { set_wiphy_dev(hw->wiphy, dev); linuxkpi_set_ieee80211_dev(hw, dev_name(dev)); IMPROVE(); } static __inline int ieee80211_register_hw(struct ieee80211_hw *hw) { int error; error = wiphy_register(hw->wiphy); if (error != 0) return (error); /* * At this point the driver has set all the options, flags, bands, * ciphers, hw address(es), ... basically mac80211/cfg80211 hw/wiphy * setup is done. * We need to replicate a lot of information from here into net80211. 
*/ error = linuxkpi_ieee80211_ifattach(hw); IMPROVE(); return (error); } static __inline void ieee80211_unregister_hw(struct ieee80211_hw *hw) { wiphy_unregister(hw->wiphy); linuxkpi_ieee80211_ifdetach(hw); IMPROVE(); } static __inline struct ieee80211_hw * wiphy_to_ieee80211_hw(struct wiphy *wiphy) { return (linuxkpi_wiphy_to_ieee80211_hw(wiphy)); } /* -------------------------------------------------------------------------- */ static __inline bool ieee80211_is_action(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ACTION | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_probe_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_RESP | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_auth(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_AUTH | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_assoc_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_assoc_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_RESP | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_reassoc_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_reassoc_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_RESP | 
IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_disassoc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_DISASSOC | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_data_present(__le16 fc) { __le16 v; /* If it is a data frame and NODATA is not present. */ fc &= htole16(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_NODATA); v = htole16(IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_deauth(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_DEAUTH | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_beacon(__le16 fc) { __le16 v; /* * For as much as I get it this comes in LE and unlike FreeBSD * where we get the entire frame header and u8[], here we get the * 9.2.4.1 Frame Control field only. Mask and compare. */ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_BEACON | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_probe_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_has_protected(__le16 fc) { return (fc & htole16(IEEE80211_FC1_PROTECTED << 8)); } static __inline bool ieee80211_is_back_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_BAR | IEEE80211_FC0_TYPE_CTL); return (fc == v); } static __inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) { /* 11.2.2 Bufferable MMPDUs, 80211-2020. */ /* XXX we do not care about IBSS yet. */ if (!ieee80211_is_mgmt(fc)) return (false); if (ieee80211_is_action(fc)) /* XXX FTM? 
*/ return (true); if (ieee80211_is_disassoc(fc)) return (true); if (ieee80211_is_deauth(fc)) return (true); return (false); } static __inline bool ieee80211_is_nullfunc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_qos_nullfunc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_QOS_NULL | IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_any_nullfunc(__le16 fc) { return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); } static __inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) { TODO(); return (false); } static __inline bool ieee80211_has_a4(__le16 fc) { __le16 v; fc &= htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8); v = htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8); return (fc == v); } static __inline bool ieee80211_has_order(__le16 fc) { return (fc & htole16(IEEE80211_FC1_ORDER << 8)); } static __inline bool ieee80211_has_retry(__le16 fc) { return (fc & htole16(IEEE80211_FC1_RETRY << 8)); } static __inline bool ieee80211_has_fromds(__le16 fc) { return (fc & htole16(IEEE80211_FC1_DIR_FROMDS << 8)); } static __inline bool ieee80211_has_tods(__le16 fc) { return (fc & htole16(IEEE80211_FC1_DIR_TODS << 8)); } static __inline uint8_t * ieee80211_get_SA(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return (hdr->addr4); if (ieee80211_has_fromds(hdr->frame_control)) return (hdr->addr3); return (hdr->addr2); } static __inline uint8_t * ieee80211_get_DA(struct ieee80211_hdr *hdr) { if (ieee80211_has_tods(hdr->frame_control)) return (hdr->addr3); return (hdr->addr1); } static __inline bool ieee80211_has_morefrags(__le16 fc) { fc &= htole16(IEEE80211_FC1_MORE_FRAG << 8); return (fc != 0); } static __inline u8 * 
ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return (u8 *)hdr + 30; else return (u8 *)hdr + 24; } /* -------------------------------------------------------------------------- */ /* Receive functions (air/driver to mac80211/net80211). */ static __inline void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct napi_struct *napi) { linuxkpi_ieee80211_rx(hw, skb, sta, napi); } static __inline void ieee80211_rx_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_rx(hw, skb, NULL, NULL); } static __inline void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_rx(hw, skb, NULL, NULL); } /* -------------------------------------------------------------------------- */ static __inline uint8_t ieee80211_get_tid(struct ieee80211_hdr *hdr) { return (linuxkpi_ieee80211_get_tid(hdr)); } static __inline struct sk_buff * ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint16_t *tim_offset, uint16_t *tim_len) { if (tim_offset != NULL) *tim_offset = 0; if (tim_len != NULL) *tim_len = 0; TODO(); return (NULL); } static __inline void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { flags |= IEEE80211_IFACE_ITER__ATOMIC; flags |= IEEE80211_IFACE_ITER__ACTIVE; linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { flags |= IEEE80211_IFACE_ITER__ACTIVE; linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void (*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void 
*arg) { linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg) { linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg); } static __inline void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg) { IMPROVE(); /* "rcu" */ linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg); } static __inline void ieee80211_iter_chan_contexts_atomic(struct ieee80211_hw *hw, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *), void *arg) { linuxkpi_ieee80211_iterate_chan_contexts(hw, iterfunc, arg); } static __inline void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, void (*iterfunc)(void *, struct ieee80211_sta *), void *arg) { linuxkpi_ieee80211_iterate_stations_atomic(hw, iterfunc, arg); } static __inline struct wireless_dev * ieee80211_vif_to_wdev(struct ieee80211_vif *vif) { return (linuxkpi_ieee80211_vif_to_wdev(vif)); } static __inline struct sk_buff * ieee80211_beacon_get_template(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs) + struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs, + int x) { TODO(); return (NULL); } static __inline void ieee80211_beacon_loss(struct ieee80211_vif *vif) { linuxkpi_ieee80211_beacon_loss(vif); } static __inline void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool t) { TODO(); } static __inline bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) { TODO(); return (false); } static __inline void ieee80211_csa_set_counter(struct ieee80211_vif *vif, uint8_t counter) { TODO(); } static __inline int ieee80211_csa_update_counter(struct 
ieee80211_vif *vif) { TODO(); return (-1); } static __inline void ieee80211_csa_finish(struct ieee80211_vif *vif) { TODO(); } static __inline enum nl80211_iftype ieee80211_vif_type_p2p(struct ieee80211_vif *vif) { /* If we are not p2p enabled, just return the type. */ if (!vif->p2p) return (vif->type); /* If we are p2p, depending on side, return type. */ switch (vif->type) { case NL80211_IFTYPE_AP: return (NL80211_IFTYPE_P2P_GO); case NL80211_IFTYPE_STATION: return (NL80211_IFTYPE_P2P_CLIENT); default: fallthrough; } return (vif->type); } static __inline unsigned long ieee80211_tu_to_usec(unsigned long tu) { return (tu * IEEE80211_DUR_TU); } static __inline int ieee80211_action_contains_tpc(struct sk_buff *skb) { TODO(); return (0); } static __inline void ieee80211_connection_loss(struct ieee80211_vif *vif) { linuxkpi_ieee80211_connection_loss(vif); } static __inline struct ieee80211_sta * ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer) { return (linuxkpi_ieee80211_find_sta(vif, peer)); } static __inline struct ieee80211_sta * ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const uint8_t *addr, const uint8_t *ourvifaddr) { return (linuxkpi_ieee80211_find_sta_by_ifaddr(hw, addr, ourvifaddr)); } static __inline void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb_frag, u8 *key) { TODO(); } static __inline void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, const u8 *addr, uint32_t iv32, u16 *p1k) { TODO(); } static __inline size_t ieee80211_ie_split(const u8 *ies, size_t ies_len, const u8 *ie_ids, size_t ie_ids_len, size_t start) { size_t x; x = start; /* XXX FIXME, we need to deal with "Element ID Extension" */ while (x < ies_len) { /* Is this IE[s] one of the ie_ids? 
*/ if (!linuxkpi_ieee80211_is_ie_id_in_ie_buf(ies[x], ie_ids, ie_ids_len)) break; if (!linuxkpi_ieee80211_ie_advance(&x, ies, ies_len)) break; } return (x); } static __inline void ieee80211_request_smps(struct ieee80211_vif *vif, enum ieee80211_smps_mode smps) { TODO(); } static __inline void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, uint8_t *addr, enum nl80211_tdls_operation oper, enum ieee80211_reason_code code, gfp_t gfp) { TODO(); } static __inline void ieee80211_stop_queues(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_wake_queues(struct ieee80211_hw *hw) { TODO(); } static __inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool state) { TODO(); } static __inline void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); /* * This is called on transmit failure. * Use a not-so-random random high status error so we can distinguish * it from normal low values flying around in net80211 ("ETX"). */ linuxkpi_ieee80211_free_txskb(hw, skb, 0x455458); } static __inline void ieee80211_restart_hw(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_ready_on_channel(struct ieee80211_hw *hw) { TODO(); /* XXX-BZ We need to see that. 
*/ } static __inline void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event crte, int sig, gfp_t gfp) { TODO(); } static __inline void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *sta, uint8_t tid, uint32_t ssn, uint64_t bitmap, uint16_t received_mpdu) { TODO(); } static __inline bool ieee80211_sn_less(uint16_t sn1, uint16_t sn2) { TODO(); return (false); } static __inline uint16_t ieee80211_sn_inc(uint16_t sn) { TODO(); return (sn + 1); } static __inline uint16_t ieee80211_sn_add(uint16_t sn, uint16_t a) { TODO(); return (sn + a); } static __inline void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, uint32_t x, uint8_t *addr) { TODO(); } static __inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *r, uint32_t f1, uint32_t f2) { TODO(); } static __inline void ieee80211_reserve_tid(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline void ieee80211_unreserve_tid(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline uint16_t ieee80211_sn_sub(uint16_t sa, uint16_t sb) { return ((sa - sb) & (IEEE80211_SEQ_SEQ_MASK >> IEEE80211_SEQ_SEQ_SHIFT)); } static __inline void ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool disable) { TODO(); } static __inline void ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool sleeping) { TODO(); } static __inline void ieee80211_sta_pspoll(struct ieee80211_sta *sta) { TODO(); } static __inline void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, int ntids) { TODO(); } static __inline void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { 
TODO(); } static __inline void ieee80211_tkip_add_iv(u8 *crypto_hdr, struct ieee80211_key_conf *keyconf, uint64_t pn) { TODO(); } static __inline struct sk_buff * ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { return (linuxkpi_ieee80211_tx_dequeue(hw, txq)); } static __inline void ieee80211_update_mu_groups(struct ieee80211_vif *vif, uint8_t *ms, uint8_t *up) { TODO(); } static __inline void ieee80211_sta_set_buffered(struct ieee80211_sta *sta, uint8_t tid, bool t) { TODO(); } static __inline void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_tx_status(hw, skb); } static __inline void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, uint8_t tid, struct ieee80211_key_seq *seq) { TODO(); } static __inline void ieee80211_sched_scan_results(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_sta_eosp(struct ieee80211_sta *sta) { TODO(); } static __inline void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info) { linuxkpi_ieee80211_scan_completed(hw, info); } static __inline struct sk_buff * ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { TODO(); return (NULL); } static __inline struct sk_buff * ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { /* Only STA needs this. Otherwise return NULL and panic bad drivers. 
*/ if (vif->type != NL80211_IFTYPE_STATION) return (NULL); return (linuxkpi_ieee80211_pspoll_get(hw, vif)); } static __inline struct sk_buff * ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { TODO(); return (NULL); } static __inline struct sk_buff * ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool qos) { /* Only STA needs this. Otherwise return NULL and panic bad drivers. */ if (vif->type != NL80211_IFTYPE_STATION) return (NULL); return (linuxkpi_ieee80211_nullfunc_get(hw, vif, qos)); } static __inline struct sk_buff * ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr, uint8_t *ssid, size_t ssid_len, size_t tailroom) { return (linuxkpi_ieee80211_probereq_get(hw, addr, ssid, ssid_len, tailroom)); } static __inline void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *w, int delay) { linuxkpi_ieee80211_queue_delayed_work(hw, w, delay); } static __inline void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *w) { linuxkpi_ieee80211_queue_work(hw, w); } static __inline void ieee80211_stop_queue(struct ieee80211_hw *hw, uint16_t q) { TODO(); } static __inline void ieee80211_wake_queue(struct ieee80211_hw *hw, uint16_t q) { TODO(); } static __inline void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); ieee80211_tx_status(hw, skb); } static __inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); ieee80211_tx_status(hw, skb); } static __inline int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, uint8_t tid, int x) { TODO(); return (-EINVAL); } static __inline void ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) { int i; /* * Apparently clearing flags and some other fields is not right. * Given the function is called "status" we work on that part of * the union. 
*/ for (i = 0; i < nitems(info->status.rates); i++) info->status.rates[i].count = 0; /* * Unclear if ack_signal should be included or not but we clear the * "valid" bool so this field is no longer valid. */ memset(&info->status.ack_signal, 0, sizeof(*info) - offsetof(struct ieee80211_tx_info, status.ack_signal)); } static __inline void ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt) { if (frame_cnt == NULL && byte_cnt == NULL) return; linuxkpi_ieee80211_txq_get_depth(txq, frame_cnt, byte_cnt); } static __inline int rate_lowest_index(struct ieee80211_supported_band *band, struct ieee80211_sta *sta) { IMPROVE(); return (0); } static __inline void SET_IEEE80211_PERM_ADDR (struct ieee80211_hw *hw, uint8_t *addr) { ether_addr_copy(hw->wiphy->perm_addr, addr); } static __inline uint8_t * ieee80211_bss_get_ie(struct cfg80211_bss *bss, uint32_t x) { TODO(); return (NULL); } static __inline void ieee80211_report_low_ack(struct ieee80211_sta *sta, int x) { TODO(); } static __inline void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline struct sk_buff * ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); return (NULL); } static __inline void ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_tx_info *info) { TODO(); } static __inline bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); return (false); } static __inline struct ieee80211_txq * ieee80211_next_txq(struct ieee80211_hw *hw, uint32_t ac) { TODO(); return (NULL); } static __inline void ieee80211_radar_detected(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_sta_register_airtime(struct ieee80211_sta *sta, uint8_t tid, uint32_t duration, int x) 
{ TODO(); } static __inline void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, bool _t) { TODO(); } static __inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, uint32_t ac) { TODO(); } static __inline void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint32_t ac) { TODO(); } static __inline void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); } static __inline void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter) { TODO(); } static __inline int ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif) { TODO(); return (-1); } static __inline int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *vht_cap, uint32_t chanwidth, int x, bool t, int nss) { TODO(); return (-1); } static __inline bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif) { TODO(); return (true); } static __inline void ieee80211_disconnect(struct ieee80211_vif *vif, bool _x) { TODO(); } static __inline void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool _x) { TODO(); } static __inline const struct ieee80211_sta_he_cap * ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *band, enum nl80211_iftype type) { TODO(); return (NULL); } static __inline void ieee80211_key_mic_failure(struct ieee80211_key_conf *key) { TODO(); } static __inline void ieee80211_key_replay(struct ieee80211_key_conf *key) { TODO(); } #endif /* _LINUXKPI_NET_MAC80211_H */ diff --git a/sys/compat/linuxkpi/common/src/linux_80211.h b/sys/compat/linuxkpi/common/src/linux_80211.h index f7ade2d5e2f9..3c107f76de32 100644 --- a/sys/compat/linuxkpi/common/src/linux_80211.h +++ b/sys/compat/linuxkpi/common/src/linux_80211.h @@ -1,246 +1,246 @@ /*- * Copyright (c) 2020-2022 The FreeBSD Foundation * Copyright (c) 2020-2021 Bjoern A. Zeeb * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Public functions are called linuxkpi_*(). * Internal (static) functions are called lkpi_*(). * * The internal structures holding metadata over public structures are also * called lkpi_xxx (usually with a member at the end called xxx). * Note: we do not replicate the structure names but the general variable names * for these (e.g., struct hw -> struct lkpi_hw, struct sta -> struct lkpi_sta). * There are macros to access one from the other. * We call the internal versions lxxx (e.g., hw -> lhw, sta -> lsta). 
*/ #ifndef _LKPI_SRC_LINUX_80211_H #define _LKPI_SRC_LINUX_80211_H struct lkpi_radiotap_tx_hdr { struct ieee80211_radiotap_header wt_ihdr; uint8_t wt_flags; uint8_t wt_rate; uint16_t wt_chan_freq; uint16_t wt_chan_flags; } __packed; #define LKPI_RTAP_TX_FLAGS_PRESENT \ ((1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL)) struct lkpi_radiotap_rx_hdr { struct ieee80211_radiotap_header wr_ihdr; uint64_t wr_tsft; uint8_t wr_flags; uint8_t wr_rate; uint16_t wr_chan_freq; uint16_t wr_chan_flags; int8_t wr_dbm_antsignal; int8_t wr_dbm_antnoise; } __packed __aligned(8); #define LKPI_RTAP_RX_FLAGS_PRESENT \ ((1 << IEEE80211_RADIOTAP_TSFT) | \ (1 << IEEE80211_RADIOTAP_FLAGS) | \ (1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_CHANNEL) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)) struct lkpi_txq { bool seen_dequeue; struct sk_buff_head skbq; /* Must be last! */ struct ieee80211_txq txq __aligned(CACHE_LINE_SIZE); }; #define TXQ_TO_LTXQ(_txq) container_of(_txq, struct lkpi_txq, txq) struct lkpi_sta { TAILQ_ENTRY(lkpi_sta) lsta_entry; struct ieee80211_node *ni; /* Deferred TX path. */ /* Eventually we might want to migrate this into net80211 entirely. */ /* XXX-BZ can we use sta->txq[] instead directly? */ struct task txq_task; struct mbufq txq; struct mtx txq_mtx; struct ieee80211_key_conf *kc; enum ieee80211_sta_state state; bool added_to_drv; /* Driver knows; i.e. we called ...(). */ bool in_mgd; /* XXX-BZ should this be per-vif? */ /* Must be last! */ struct ieee80211_sta sta __aligned(CACHE_LINE_SIZE); }; #define STA_TO_LSTA(_sta) container_of(_sta, struct lkpi_sta, sta) #define LSTA_TO_STA(_lsta) (&(_lsta)->sta) struct lkpi_vif { TAILQ_ENTRY(lkpi_vif) lvif_entry; struct ieee80211vap iv_vap; struct mtx mtx; struct wireless_dev wdev; /* Other local stuff. 
*/ int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int); struct ieee80211_node * (*iv_update_bss)(struct ieee80211vap *, struct ieee80211_node *); TAILQ_HEAD(, lkpi_sta) lsta_head; bool added_to_drv; /* Driver knows; i.e. we called add_interface(). */ /* Must be last! */ struct ieee80211_vif vif __aligned(CACHE_LINE_SIZE); }; #define VAP_TO_LVIF(_vap) container_of(_vap, struct lkpi_vif, iv_vap) #define LVIF_TO_VAP(_lvif) (&(_lvif)->iv_vap) #define VIF_TO_LVIF(_vif) container_of(_vif, struct lkpi_vif, vif) #define LVIF_TO_VIF(_lvif) (&(_lvif)->vif) struct lkpi_hw { /* name it mac80211_sc? */ const struct ieee80211_ops *ops; struct ieee80211_scan_request *hw_req; struct workqueue_struct *workq; /* FreeBSD specific compat. */ /* Linux device is in hw.wiphy->dev after SET_IEEE80211_DEV(). */ struct ieee80211com *ic; struct lkpi_radiotap_tx_hdr rtap_tx; struct lkpi_radiotap_rx_hdr rtap_rx; TAILQ_HEAD(, lkpi_vif) lvif_head; struct sx lvif_sx; struct mtx mtx; /* Node functions we overload to sync state. */ struct ieee80211_node * (*ic_node_alloc)(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); int (*ic_node_init)(struct ieee80211_node *); void (*ic_node_cleanup)(struct ieee80211_node *); void (*ic_node_free)(struct ieee80211_node *); #define LKPI_MAC80211_DRV_STARTED 0x00000001 uint32_t sc_flags; #define LKPI_SCAN_RUNNING 0x00000001 uint32_t scan_flags; int supbands; /* Number of supported bands. */ int max_rates; /* Maximum number of bitrates supported in any channel. */ int scan_ie_len; /* Length of common per-band scan IEs. */ bool update_mc; /* Must be last! */ struct ieee80211_hw hw __aligned(CACHE_LINE_SIZE); }; #define LHW_TO_HW(_lhw) (&(_lhw)->hw) #define HW_TO_LHW(_hw) container_of(_hw, struct lkpi_hw, hw) struct lkpi_wiphy { const struct cfg80211_ops *ops; /* Must be last! 
*/ struct wiphy wiphy __aligned(CACHE_LINE_SIZE); }; #define WIPHY_TO_LWIPHY(_wiphy) container_of(_wiphy, struct lkpi_wiphy, wiphy) #define LWIPHY_TO_WIPHY(_lwiphy) (&(_lwiphy)->wiphy) #define LKPI_80211_LHW_LOCK(_lhw) mtx_lock(&(_lhw)->mtx) #define LKPI_80211_LHW_UNLOCK(_lhw) mtx_unlock(&(_lhw)->mtx) #define LKPI_80211_LHW_LOCK_ASSERT(_lhw) \ mtx_assert(&(_lhw)->mtx, MA_OWNED) #define LKPI_80211_LHW_UNLOCK_ASSERT(_lhw) \ mtx_assert(&(_lhw)->mtx, MA_NOTOWNED) #define LKPI_80211_LHW_LVIF_LOCK(_lhw) sx_xlock(&(_lhw)->lvif_sx) #define LKPI_80211_LHW_LVIF_UNLOCK(_lhw) sx_xunlock(&(_lhw)->lvif_sx) #define LKPI_80211_LVIF_LOCK(_lvif) mtx_lock(&(_lvif)->mtx) #define LKPI_80211_LVIF_UNLOCK(_lvif) mtx_unlock(&(_lvif)->mtx) #define LKPI_80211_LSTA_LOCK(_lsta) mtx_lock(&(_lsta)->txq_mtx) #define LKPI_80211_LSTA_UNLOCK(_lsta) mtx_unlock(&(_lsta)->txq_mtx) int lkpi_80211_mo_start(struct ieee80211_hw *); void lkpi_80211_mo_stop(struct ieee80211_hw *); int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *, uint32_t); int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *, uint32_t); int lkpi_80211_mo_add_interface(struct ieee80211_hw *, struct ieee80211_vif *); void lkpi_80211_mo_remove_interface(struct ieee80211_hw *, struct ieee80211_vif *); int lkpi_80211_mo_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *); void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *, struct ieee80211_vif *); void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *, struct ieee80211_vif *); void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *); u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *, struct netdev_hw_addr_list *); void lkpi_80211_mo_configure_filter(struct ieee80211_hw *, unsigned int, unsigned int *, u64); int lkpi_80211_mo_sta_state(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state); int lkpi_80211_mo_config(struct ieee80211_hw *, uint32_t); int 
lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf **); int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, uint32_t); void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *, struct ieee80211_vif *, - struct ieee80211_bss_conf *, uint32_t); + struct ieee80211_bss_conf *, uint64_t); int lkpi_80211_mo_conf_tx(struct ieee80211_hw *, struct ieee80211_vif *, uint16_t, const struct ieee80211_tx_queue_params *); void lkpi_80211_mo_flush(struct ieee80211_hw *, struct ieee80211_vif *, uint32_t, bool); void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void lkpi_80211_mo_tx(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *); void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *, struct ieee80211_txq *); void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *); void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int lkpi_80211_mo_set_key(struct ieee80211_hw *, enum set_key_cmd, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *); #endif /* _LKPI_SRC_LINUX_80211_H */ diff --git a/sys/compat/linuxkpi/common/src/linux_80211_macops.c b/sys/compat/linuxkpi/common/src/linux_80211_macops.c index e77aeb9afb67..b3e01780e1ce 100644 --- a/sys/compat/linuxkpi/common/src/linux_80211_macops.c +++ b/sys/compat/linuxkpi/common/src/linux_80211_macops.c @@ -1,621 +1,621 @@ /*- * Copyright (c) 2021-2022 The FreeBSD 
Foundation * * This software was developed by Björn Zeeb under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #define LINUXKPI_NET80211 #include #include "linux_80211.h" int lkpi_80211_mo_start(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->start == NULL) { error = EOPNOTSUPP; goto out; } if ((lhw->sc_flags & LKPI_MAC80211_DRV_STARTED)) { /* Trying to start twice is an error. 
*/ error = EEXIST; goto out; } error = lhw->ops->start(hw); if (error == 0) lhw->sc_flags |= LKPI_MAC80211_DRV_STARTED; out: return (error); } void lkpi_80211_mo_stop(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->stop == NULL) return; lhw->ops->stop(hw); lhw->sc_flags &= ~LKPI_MAC80211_DRV_STARTED; } int lkpi_80211_mo_set_frag_threshold(struct ieee80211_hw *hw, uint32_t frag_th) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->set_frag_threshold == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->set_frag_threshold(hw, frag_th); out: return (error); } int lkpi_80211_mo_set_rts_threshold(struct ieee80211_hw *hw, uint32_t rts_th) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->set_rts_threshold == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->set_rts_threshold(hw, rts_th); out: return (error); } int lkpi_80211_mo_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->add_interface == NULL) { error = EOPNOTSUPP; goto out; } lvif = VIF_TO_LVIF(vif); LKPI_80211_LVIF_LOCK(lvif); if (lvif->added_to_drv) { LKPI_80211_LVIF_UNLOCK(lvif); /* Trying to add twice is an error. 
*/ error = EEXIST; goto out; } LKPI_80211_LVIF_UNLOCK(lvif); error = lhw->ops->add_interface(hw, vif); if (error == 0) { LKPI_80211_LVIF_LOCK(lvif); lvif->added_to_drv = true; LKPI_80211_LVIF_UNLOCK(lvif); } out: return (error); } void lkpi_80211_mo_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; struct lkpi_vif *lvif; lhw = HW_TO_LHW(hw); if (lhw->ops->remove_interface == NULL) return; lvif = VIF_TO_LVIF(vif); LKPI_80211_LVIF_LOCK(lvif); if (!lvif->added_to_drv) { LKPI_80211_LVIF_UNLOCK(lvif); return; } LKPI_80211_LVIF_UNLOCK(lvif); lhw->ops->remove_interface(hw, vif); LKPI_80211_LVIF_LOCK(lvif); lvif->added_to_drv = false; LKPI_80211_LVIF_UNLOCK(lvif); } int lkpi_80211_mo_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *sr) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->hw_scan == NULL) { /* XXX-BZ can we hide other scans like we can for sta_add..? */ error = EOPNOTSUPP; goto out; } lhw->scan_flags |= LKPI_SCAN_RUNNING; error = lhw->ops->hw_scan(hw, vif, sr); if (error != 0) lhw->scan_flags &= ~LKPI_SCAN_RUNNING; out: return (error); } void lkpi_80211_mo_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->cancel_hw_scan == NULL) return; lhw->ops->cancel_hw_scan(hw, vif); } void lkpi_80211_mo_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sw_scan_complete == NULL) return; lhw->ops->sw_scan_complete(hw, vif); lhw->scan_flags &= ~LKPI_SCAN_RUNNING; } void lkpi_80211_mo_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *addr) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sw_scan_start == NULL) return; lhw->ops->sw_scan_start(hw, vif, addr); } /* * We keep the Linux type here; it really is an uintptr_t. 
*/ u64 lkpi_80211_mo_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct lkpi_hw *lhw; u64 ptr; lhw = HW_TO_LHW(hw); if (lhw->ops->prepare_multicast == NULL) return (0); ptr = lhw->ops->prepare_multicast(hw, mc_list); return (ptr); } void lkpi_80211_mo_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 mc_ptr) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->configure_filter == NULL) return; if (mc_ptr == 0) return; lhw->ops->configure_filter(hw, changed_flags, total_flags, mc_ptr); } /* * So far we only called sta_{add,remove} as an alternative to sta_state. * Let's keep the implementation simpler and hide sta_{add,remove} under the * hood here calling them if state_state is not available from mo_sta_state. */ static int lkpi_80211_mo_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_add == NULL) { error = EOPNOTSUPP; goto out; } lsta = STA_TO_LSTA(sta); if (lsta->added_to_drv) { error = EEXIST; goto out; } error = lhw->ops->sta_add(hw, vif, sta); if (error == 0) lsta->added_to_drv = true; out: return error; } static int lkpi_80211_mo_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->sta_remove == NULL) { error = EOPNOTSUPP; goto out; } lsta = STA_TO_LSTA(sta); if (!lsta->added_to_drv) { /* If we never added the sta, do not complain on cleanup. 
*/ error = 0; goto out; } error = lhw->ops->sta_remove(hw, vif, sta); if (error == 0) lsta->added_to_drv = false; out: return error; } int lkpi_80211_mo_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state nstate) { struct lkpi_hw *lhw; struct lkpi_sta *lsta; int error; lhw = HW_TO_LHW(hw); lsta = STA_TO_LSTA(sta); if (lhw->ops->sta_state != NULL) { error = lhw->ops->sta_state(hw, vif, sta, lsta->state, nstate); if (error == 0) { if (nstate == IEEE80211_STA_NOTEXIST) lsta->added_to_drv = false; else lsta->added_to_drv = true; lsta->state = nstate; } goto out; } /* XXX-BZ is the change state AUTH or ASSOC here? */ if (lsta->state < IEEE80211_STA_ASSOC && nstate == IEEE80211_STA_ASSOC) { error = lkpi_80211_mo_sta_add(hw, vif, sta); if (error == 0) lsta->added_to_drv = true; } else if (lsta->state >= IEEE80211_STA_ASSOC && nstate < IEEE80211_STA_ASSOC) { error = lkpi_80211_mo_sta_remove(hw, vif, sta); if (error == 0) lsta->added_to_drv = false; } else /* Nothing to do. */ error = 0; if (error == 0) lsta->state = nstate; out: /* XXX-BZ should we manage state in here? 
*/ return (error); } int lkpi_80211_mo_config(struct ieee80211_hw *hw, uint32_t changed) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->config == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->config(hw, changed); out: return (error); } int lkpi_80211_mo_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->assign_vif_chanctx == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->assign_vif_chanctx(hw, vif, chanctx_conf); if (error == 0) vif->chanctx_conf = chanctx_conf; out: return (error); } void lkpi_80211_mo_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf **chanctx_conf) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->unassign_vif_chanctx == NULL) return; if (*chanctx_conf == NULL) return; lhw->ops->unassign_vif_chanctx(hw, vif, *chanctx_conf); *chanctx_conf = NULL; } int lkpi_80211_mo_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->add_chanctx == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->add_chanctx(hw, chanctx_conf); out: return (error); } void lkpi_80211_mo_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf, uint32_t changed) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->change_chanctx == NULL) return; lhw->ops->change_chanctx(hw, chanctx_conf, changed); } void lkpi_80211_mo_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *chanctx_conf) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->remove_chanctx == NULL) return; lhw->ops->remove_chanctx(hw, chanctx_conf); } void lkpi_80211_mo_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *conf, uint32_t changed) + struct ieee80211_bss_conf *conf, 
uint64_t changed) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->bss_info_changed == NULL) return; lhw->ops->bss_info_changed(hw, vif, conf, changed); } int lkpi_80211_mo_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint16_t ac, const struct ieee80211_tx_queue_params *txqp) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->conf_tx == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->conf_tx(hw, vif, ac, txqp); out: return (error); } void lkpi_80211_mo_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint32_t nqueues, bool drop) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->flush == NULL) return; lhw->ops->flush(hw, vif, nqueues, drop); } void lkpi_80211_mo_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *txinfo) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->mgd_prepare_tx == NULL) return; lhw->ops->mgd_prepare_tx(hw, vif, txinfo); } void lkpi_80211_mo_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *txinfo) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->mgd_complete_tx == NULL) return; lhw->ops->mgd_complete_tx(hw, vif, txinfo); } void lkpi_80211_mo_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *txctrl, struct sk_buff *skb) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->tx == NULL) return; lhw->ops->tx(hw, txctrl, skb); } void lkpi_80211_mo_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->wake_tx_queue == NULL) return; lhw->ops->wake_tx_queue(hw, txq); } void lkpi_80211_mo_sync_rx_queues(struct ieee80211_hw *hw) { struct lkpi_hw *lhw; lhw = HW_TO_LHW(hw); if (lhw->ops->sync_rx_queues == NULL) return; lhw->ops->sync_rx_queues(hw); } void lkpi_80211_mo_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct lkpi_hw *lhw; lhw = 
HW_TO_LHW(hw); if (lhw->ops->sta_pre_rcu_remove == NULL) return; lhw->ops->sta_pre_rcu_remove(hw, vif, sta); } int lkpi_80211_mo_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *kc) { struct lkpi_hw *lhw; int error; lhw = HW_TO_LHW(hw); if (lhw->ops->set_key == NULL) { error = EOPNOTSUPP; goto out; } error = lhw->ops->set_key(hw, cmd, vif, sta, kc); out: return (error); } diff --git a/sys/contrib/dev/iwlwifi/mvm/debugfs.c b/sys/contrib/dev/iwlwifi/mvm/debugfs.c index 49898fd99594..c0bd697b080a 100644 --- a/sys/contrib/dev/iwlwifi/mvm/debugfs.c +++ b/sys/contrib/dev/iwlwifi/mvm/debugfs.c @@ -1,1947 +1,1947 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include "mvm.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" #include "iwl-modparams.h" #include "fw/error-dump.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, budget; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); mutex_unlock(&mvm->mutex); if (budget < 0) return budget; pos = scnprintf(buf, sizeof(buf), "%d\n", budget); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; if (!iwl_mvm_is_ctdp_supported(mvm)) return -EOPNOTSUPP; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); ret = 
iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; iwl_mvm_enter_ctkill(mvm); return count; } static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret; u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) return -EINVAL; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING all tids queues on sta_id = %d\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF) ? : count; mutex_unlock(&mvm->mutex); return ret; } IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", flush_arg); mutex_lock(&mvm->mutex); ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta; int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) return -EINVAL; if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) return -EINVAL; if (drain < 0 || drain > 1) return -EINVAL; mutex_lock(&mvm->mutex); mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) ret = -ENOENT; else ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? 
: count; mutex_unlock(&mvm->mutex); return ret; } static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; const struct fw_img *img; unsigned int ofs, len; size_t ret; u8 *ptr; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; /* default is to dump the entire data segment */ img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; len = img->sec[IWL_UCODE_SECTION_DATA].len; if (mvm->dbgfs_sram_len) { ofs = mvm->dbgfs_sram_offset; len = mvm->dbgfs_sram_len; } ptr = kzalloc(len, GFP_KERNEL); if (!ptr) return -ENOMEM; iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); kfree(ptr); return ret; } static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { const struct fw_img *img; u32 offset, len; u32 img_offset, img_len; if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; if (sscanf(buf, "%x,%x", &offset, &len) == 2) { if ((offset & 0x3) || (len & 0x3)) return -EINVAL; if (offset + len > img_offset + img_len) return -EINVAL; mvm->dbgfs_sram_offset = offset; mvm->dbgfs_sram_len = len; } else { mvm->dbgfs_sram_offset = 0; mvm->dbgfs_sram_len = 0; } return count; } static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos; if (!mvm->temperature_test) pos = scnprintf(buf , sizeof(buf), "disabled\n"); else pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } /* * Set NIC Temperature * Cause the driver to ignore the actual NIC temperature reported by the FW * Enable: any value between 
IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE */ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int temperature; if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) return -EIO; if (kstrtoint(buf, 10, &temperature)) return -EINVAL; /* not a legal temperature */ if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) return -EINVAL; mutex_lock(&mvm->mutex); if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { if (!mvm->temperature_test) goto out; mvm->temperature_test = false; /* Since we can't read the temp while awake, just set * it to zero until we get the next RX stats from the * firmware. */ mvm->temperature = 0; } else { mvm->temperature_test = true; mvm->temperature = temperature; } IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", mvm->temperature_test ? 
"En" : "Dis" , mvm->temperature); /* handle the temperature change */ iwl_mvm_tt_handler(mvm); out: mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[16]; int pos, ret; s32 temp; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); if (ret) return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #ifdef CONFIG_ACPI static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[256]; int pos = 0; int bufsz = sizeof(buf); int tbl_idx; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); if (tbl_idx < 0) { mutex_unlock(&mvm->mutex); return tbl_idx; } if (!tbl_idx) { pos = scnprintf(buf, bufsz, "SAR geographic profile disabled\n"); } else { pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } #endif static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t 
count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct ieee80211_sta *sta; char buf[400]; int i, pos = 0, bufsz = sizeof(buf); mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (!sta) pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); else if (IS_ERR(sta)) pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", PTR_ERR(sta)); else pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", sta->addr); } mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; struct iwl_mvm *mvm = lq_sta->pers.drv; static const size_t bufsz = 2048; char *buff; int desc = 0; ssize_t ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; mutex_lock(&mvm->mutex); desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id); desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n", lq_sta->pers.dbg_fixed_rate); desc += scnprintf(buff + desc, bufsz - desc, "A-MPDU size limit %d\n", lq_sta->pers.dbg_agg_frame_count_lim); desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? 
"ANT_B," : ""); desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X ", lq_sta->last_rate_n_flags); desc += rs_pretty_print_rate(buff + desc, bufsz - desc, lq_sta->last_rate_n_flags); if (desc < bufsz - 1) buff[desc++] = '\n'; mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, char *buf, size_t count, loff_t *ppos) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; u16 amsdu_len; if (kstrtou16(buf, 0, &amsdu_len)) return -EINVAL; /* only change from debug set <-> debug unset */ if (amsdu_len && mvmsta->orig_amsdu_len) return -EBUSY; if (amsdu_len) { mvmsta->orig_amsdu_len = sta->max_amsdu_len; sta->max_amsdu_len = amsdu_len; for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) sta->max_tid_amsdu_len[i] = amsdu_len; } else { sta->max_amsdu_len = mvmsta->orig_amsdu_len; mvmsta->orig_amsdu_len = 0; } return count; } static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); char buf[32]; int pos; pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n", mvmsta->orig_amsdu_len); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char buf[64]; int bufsz = sizeof(buf); int pos = 0; pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", mvm->disable_power_off); pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", mvm->disable_power_off_d3); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, 
char *buf, size_t count, loff_t *ppos) { int ret, val; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!strncmp("disable_power_off_d0=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off = val; } else if (!strncmp("disable_power_off_d3=", buf, 21)) { if (sscanf(buf + 21, "%d", &val) != 1) return -EINVAL; mvm->disable_power_off_d3 = val; } else { return -EINVAL; } mutex_lock(&mvm->mutex); ret = iwl_mvm_power_update_device(mvm); mutex_unlock(&mvm->mutex); return ret ?: count; } static int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); BT_MBOX_PRINT(0, LE_PROF1, false); BT_MBOX_PRINT(0, LE_PROF2, false); BT_MBOX_PRINT(0, LE_PROF_OTHER, false); BT_MBOX_PRINT(0, CHL_SEQ_N, false); BT_MBOX_PRINT(0, INBAND_S, false); BT_MBOX_PRINT(0, LE_MIN_RSSI, false); BT_MBOX_PRINT(0, LE_SCAN, false); BT_MBOX_PRINT(0, LE_ADV, false); BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); BT_MBOX_PRINT(0, OPEN_CON_1, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); BT_MBOX_PRINT(1, IP_SR, false); BT_MBOX_PRINT(1, LE_MSTR, false); BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); BT_MBOX_PRINT(1, MSG_TYPE, false); BT_MBOX_PRINT(1, SSN, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); BT_MBOX_PRINT(2, SNIFF_ACT, false); BT_MBOX_PRINT(2, PAG, false); BT_MBOX_PRINT(2, INQUIRY, false); BT_MBOX_PRINT(2, CONN, false); BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); BT_MBOX_PRINT(2, DISC, false); BT_MBOX_PRINT(2, SCO_TX_ACT, false); BT_MBOX_PRINT(2, SCO_RX_ACT, false); BT_MBOX_PRINT(2, ESCO_RE_TX, false); BT_MBOX_PRINT(2, SCO_DURATION, true); pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); 
BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); BT_MBOX_PRINT(3, OPEN_CON_2, false); BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); BT_MBOX_PRINT(3, UPDATE_REQUEST, true); return pos; } static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; char *buf; int ret, pos = 0, bufsz = sizeof(char) * 1024; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", le32_to_cpu(notif->primary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", le32_to_cpu(notif->secondary_ch_lut)); pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef BT_MBOX_PRINT static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; char buf[256]; int bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition 
CMD\n"); pos += scnprintf(buf + pos, bufsz - pos, "\tPrimary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_primary_ci)); pos += scnprintf(buf + pos, bufsz - pos, "\tSecondary Channel Bitmap 0x%016llx\n", le64_to_cpu(cmd->bt_secondary_ci)); mutex_unlock(&mvm->mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 bt_tx_prio; if (sscanf(buf, "%u", &bt_tx_prio) != 1) return -EINVAL; if (bt_tx_prio > 4) return -EINVAL; mvm->bt_tx_prio = bt_tx_prio; return count; } static ssize_t iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { static const char * const modes_str[BT_FORCE_ANT_MAX] = { [BT_FORCE_ANT_DIS] = "dis", [BT_FORCE_ANT_AUTO] = "auto", [BT_FORCE_ANT_BT] = "bt", [BT_FORCE_ANT_WIFI] = "wifi", }; int ret, bt_force_ant_mode; ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); if (ret < 0) return ret; bt_force_ant_mode = ret; ret = 0; mutex_lock(&mvm->mutex); if (mvm->bt_force_ant_mode == bt_force_ant_mode) goto out; mvm->bt_force_ant_mode = bt_force_ant_mode; IWL_DEBUG_COEX(mvm, "Force mode: %s\n", modes_str[mvm->bt_force_ant_mode]); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_bt_init_conf(mvm); else ret = 0; out: mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; int ret; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", mvm->trans->cfg->fw_name_pre); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", mvm->fwrt.trans->name); pos += scnprintf(pos, endpos - pos, "Bus: %s\n", 
mvm->fwrt.dev->bus->name); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buf; size_t bufsz; int pos; ssize_t ret; bufsz = mvm->fw->phy_integration_ver_len + 2; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len, mvm->fw->phy_integration_ver); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #define PRINT_STATS_LE32(_struct, _memb) \ pos += scnprintf(buf + pos, bufsz - pos, \ fmt_table, #_memb, \ le32_to_cpu(_struct->_memb)) static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; static const char *fmt_table = "\t%-30s %10u\n"; static const char *fmt_header = "%-32s\n"; int pos = 0; char *buf; int ret; size_t bufsz; if (iwl_mvm_has_new_rx_stats_api(mvm)) bufsz = ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) + (4 * 33) + 1; else /* 43 = size of each data line; 33 = size of each header */ bufsz = ((sizeof(struct mvm_statistics_rx_v3) / sizeof(__le32)) * 43) + (4 * 33) + 1; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) iwl_mvm_request_statistics(mvm, false); pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - OFDM"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm; PRINT_STATS_LE32(ofdm, ina_cnt); PRINT_STATS_LE32(ofdm, fina_cnt); PRINT_STATS_LE32(ofdm, plcp_err); PRINT_STATS_LE32(ofdm, crc32_err); PRINT_STATS_LE32(ofdm, overrun_err); PRINT_STATS_LE32(ofdm, early_overrun_err); PRINT_STATS_LE32(ofdm, crc32_good); PRINT_STATS_LE32(ofdm, false_alarm_cnt); PRINT_STATS_LE32(ofdm, 
fina_sync_err_cnt); PRINT_STATS_LE32(ofdm, sfd_timeout); PRINT_STATS_LE32(ofdm, fina_timeout); PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ack_cnt); PRINT_STATS_LE32(ofdm, sent_cts_cnt); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, mh_format_err); PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); PRINT_STATS_LE32(ofdm, reserved); } else { struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm; PRINT_STATS_LE32(ofdm, unresponded_rts); PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); PRINT_STATS_LE32(ofdm, dsp_self_kill); PRINT_STATS_LE32(ofdm, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - CCK"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck; PRINT_STATS_LE32(cck, ina_cnt); PRINT_STATS_LE32(cck, fina_cnt); PRINT_STATS_LE32(cck, plcp_err); PRINT_STATS_LE32(cck, crc32_err); PRINT_STATS_LE32(cck, overrun_err); PRINT_STATS_LE32(cck, early_overrun_err); PRINT_STATS_LE32(cck, crc32_good); PRINT_STATS_LE32(cck, false_alarm_cnt); PRINT_STATS_LE32(cck, fina_sync_err_cnt); PRINT_STATS_LE32(cck, sfd_timeout); PRINT_STATS_LE32(cck, fina_timeout); PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ack_cnt); PRINT_STATS_LE32(cck, sent_cts_cnt); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, mh_format_err); PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); PRINT_STATS_LE32(cck, reserved); } else { struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck; PRINT_STATS_LE32(cck, unresponded_rts); PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); PRINT_STATS_LE32(cck, dsp_self_kill); PRINT_STATS_LE32(cck, reserved); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - 
GENERAL"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_non_phy_v3 *general = &mvm->rx_stats_v3.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_bssid_frames); PRINT_STATS_LE32(general, filtered_frames); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, dsp_false_alarms); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); PRINT_STATS_LE32(general, directed_data_mpdu); } else { struct mvm_statistics_rx_non_phy *general = &mvm->rx_stats.general; PRINT_STATS_LE32(general, bogus_cts); PRINT_STATS_LE32(general, bogus_ack); PRINT_STATS_LE32(general, non_channel_beacons); PRINT_STATS_LE32(general, channel_beacons); PRINT_STATS_LE32(general, num_missed_bcon); PRINT_STATS_LE32(general, adc_rx_saturation_time); PRINT_STATS_LE32(general, ina_detection_search_time); PRINT_STATS_LE32(general, beacon_silence_rssi_a); PRINT_STATS_LE32(general, beacon_silence_rssi_b); PRINT_STATS_LE32(general, beacon_silence_rssi_c); PRINT_STATS_LE32(general, interference_data_flag); PRINT_STATS_LE32(general, channel_load); PRINT_STATS_LE32(general, beacon_rssi_a); PRINT_STATS_LE32(general, beacon_rssi_b); PRINT_STATS_LE32(general, beacon_rssi_c); PRINT_STATS_LE32(general, 
beacon_energy_a); PRINT_STATS_LE32(general, beacon_energy_b); PRINT_STATS_LE32(general, beacon_energy_c); PRINT_STATS_LE32(general, num_bt_kills); PRINT_STATS_LE32(general, mac_id); } pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Rx - HT"); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { struct mvm_statistics_rx_ht_phy_v1 *ht = &mvm->rx_stats_v3.ofdm_ht; PRINT_STATS_LE32(ht, plcp_err); PRINT_STATS_LE32(ht, overrun_err); PRINT_STATS_LE32(ht, early_overrun_err); PRINT_STATS_LE32(ht, crc32_good); PRINT_STATS_LE32(ht, crc32_err); PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_crc32_good); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } else { struct mvm_statistics_rx_ht_phy *ht = &mvm->rx_stats.ofdm_ht; PRINT_STATS_LE32(ht, mh_format_err); PRINT_STATS_LE32(ht, agg_mpdu_cnt); PRINT_STATS_LE32(ht, agg_cnt); PRINT_STATS_LE32(ht, unsupport_mcs); } mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } #undef PRINT_STAT_LE32 static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, char __user *user_buf, size_t count, loff_t *ppos, struct iwl_mvm_frame_stats *stats) { char *buff, *pos, *endpos; int idx, i; int ret; static const size_t bufsz = 1024; buff = kmalloc(bufsz, GFP_KERNEL); if (!buff) return -ENOMEM; spin_lock_bh(&mvm->drv_stats_lock); pos = buff; endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "Legacy/HT/VHT\t:\t%d/%d/%d\n", stats->legacy_frames, stats->ht_frames, stats->vht_frames); pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", stats->bw_20_frames, stats->bw_40_frames, stats->bw_80_frames); pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", stats->ngi_frames, stats->sgi_frames); pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", stats->siso_frames, stats->mimo2_frames); pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", stats->fail_frames, 
stats->success_frames); pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", stats->agg_frames); pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", stats->ampdu_count); pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", stats->ampdu_count > 0 ? (stats->agg_frames / stats->ampdu_count) : 0); pos += scnprintf(pos, endpos - pos, "Last Rates\n"); idx = stats->last_frame_idx - 1; for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); if (stats->last_rates[idx] == 0) continue; pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", (int)(ARRAY_SIZE(stats->last_rates) - i)); pos += rs_pretty_print_rate_v1(pos, endpos - pos, stats->last_rates[idx]); if (pos < endpos - 1) *pos++ = '\n'; } spin_unlock_bh(&mvm->drv_stats_lock); ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); kfree(buff); return ret; } static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, &mvm->drv_rx_stats); } static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int __maybe_unused ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; mutex_lock(&mvm->mutex); /* allow one more restart that we're provoking here */ if (mvm->fw_restart >= 0) mvm->fw_restart++; if (count == 6 && !strcmp(buf, "nolog\n")) { set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); } /* take the return value to make compiler happy - it will fail anyway */ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LONG_GROUP, REPLY_ERROR), 0, 0, NULL); mutex_unlock(&mvm->mutex); return count; } static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (count == 6 && !strcmp(buf, "nolog\n")) 
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); iwl_force_nmi(mvm->trans); return count; } static ssize_t iwl_dbgfs_scan_ant_rxchain_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; int pos = 0; char buf[32]; const size_t bufsz = sizeof(buf); /* print which antennas were set for the scan command by the user */ pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); if (mvm->scan_rx_ant & ANT_A) pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u8 scan_rx_ant; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) return -EINVAL; if (scan_rx_ant > ANT_ABC) return -EINVAL; if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) return -EINVAL; if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } return count; } static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | IWL_RSS_HASH_TYPE_IPV4_UDP | IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | IWL_RSS_HASH_TYPE_IPV6_TCP | IWL_RSS_HASH_TYPE_IPV6_UDP | IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; int ret, i, num_repeats, nbytes = count / 2; ret = hex2bin(cmd.indirection_table, buf, nbytes); if (ret) return ret; /* * The input is the redirection table, partial or full. * Repeat the pattern if needed. 
* For example, input of 01020F will be repeated 42 times, * indirecting RSS hash results to queues 1, 2, 15 (skipping * queues 3 - 14). */ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; for (i = 1; i < num_repeats; i++) memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, nbytes); /* handle cut in the middle pattern for the last places */ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); else ret = 0; mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { struct iwl_op_mode *opmode = container_of((void *)mvm, struct iwl_op_mode, op_mode_specific); struct iwl_rx_cmd_buffer rxb = { ._rx_page_order = 0, .truesize = 0, /* not used */ ._offset = 0, }; struct iwl_rx_packet *pkt; int bin_len = count / 2; int ret = -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; /* supporting only MQ RX */ if (!mvm->trans->trans_cfg->mq_rx_supported) return -ENOTSUPP; rxb._page = alloc_pages(GFP_ATOMIC, 0); if (!rxb._page) return -ENOMEM; pkt = rxb_addr(&rxb); ret = hex2bin(page_address(rxb._page), buf, bin_len); if (ret) goto out; /* avoid invalid memory access and malformed packet */ if (bin_len < sizeof(*pkt) || bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt)) goto out; local_bh_disable(); iwl_mvm_rx_mq(opmode, NULL, &rxb); local_bh_enable(); ret = 0; out: iwl_free_rxb(&rxb); return ret ?: count; } static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) { struct ieee80211_vif *vif; struct iwl_mvm_vif *mvmvif; struct sk_buff *beacon; struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate; int i; len /= 2; /* Element len should be represented by 
u8 */ if (len >= U8_MAX) return -EINVAL; if (!iwl_mvm_firmware_running(mvm)) return -EIO; if (!iwl_mvm_has_new_tx_api(mvm) && !fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return -EINVAL; mutex_lock(&mvm->mutex); for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); if (!vif) continue; if (vif->type == NL80211_IFTYPE_AP) break; } if (i == NUM_MAC_INDEX_DRIVER || !vif) goto out_err; mvm->hw->extra_beacon_tailroom = len; - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); if (!beacon) goto out_err; if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { dev_kfree_skb(beacon); goto out_err; } mvm->beacon_inject_active = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); beacon_cmd.flags = cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); return 0; out_err: mutex_unlock(&mvm->mutex); return -EINVAL; } static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); mvm->hw->extra_beacon_tailroom = 0; return ret ?: count; } static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); mvm->hw->extra_beacon_tailroom = 0; mvm->beacon_inject_active = false; return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { 
struct iwl_mvm *mvm = file->private_data; int conf; char buf[8]; const size_t bufsz = sizeof(buf); int pos = 0; mutex_lock(&mvm->mutex); conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { unsigned int conf_id; int ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; ret = kstrtouint(buf, 0, &conf_id); if (ret) return ret; if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; } static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { if (count == 0) return 0; iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, NULL); iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); return count; } static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { u32 timepoint; if (kstrtou32(buf, 0, &timepoint)) return -EINVAL; if (timepoint == IWL_FW_INI_TIME_POINT_INVALID || timepoint >= IWL_FW_INI_TIME_POINT_NUM) return -EINVAL; iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL); return count; } #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) #define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ debugfs_create_file(alias, mode, parent, mvm, \ &iwl_dbgfs_##name##_ops); \ } while (0) #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) #define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) 
/* Per-station debugfs file-op boilerplate: read/write variant. */
#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)

/* Create a per-station debugfs file; 'sta' must be in scope at the call site. */
#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do {	\
		debugfs_create_file(alias, mode, parent, sta,		\
				    &iwl_dbgfs_##name##_ops);		\
	} while (0)
#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
	MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)

/*
 * Read back the periphery register previously selected via the write
 * handler. Returns -EINVAL until an address has been set (address 0 is
 * treated as "unset").
 */
static ssize_t iwl_dbgfs_prph_reg_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	int pos = 0;
	char buf[32];
	const size_t bufsz = sizeof(buf);

	if (!mvm->dbgfs_prph_reg_addr)
		return -EINVAL;

	pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
		mvm->dbgfs_prph_reg_addr,
		iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

/*
 * Select a periphery register address ("<addr>") or write a value to one
 * ("<addr> <value>"). A single token only latches the address for later
 * reads; two tokens perform the write immediately.
 *
 * NOTE(review): sscanf() returns int (possibly EOF == -1); storing it in
 * a u8 still lands in the -EINVAL path for all non-1/non-2 outcomes, but
 * an int would be the more conventional type — confirm before changing.
 */
static ssize_t iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
					size_t count, loff_t *ppos)
{
	u8 args;
	u32 value;

	args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
	/* if we only want to set the reg address - nothing more to do */
	if (args == 1)
		goto out;

	/* otherwise, make sure we have both address and value */
	if (args != 2)
		return -EINVAL;

	iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);

out:
	return count;
}

/*
 * Fire a no-payload ECHO_CMD at the firmware; a successful round trip is
 * a basic liveness check. Returns the write size on success, else the
 * command error.
 */
static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
					     size_t count, loff_t *ppos)
{
	int ret;

	if (!iwl_mvm_firmware_running(mvm))
		return -EIO;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
	mutex_unlock(&mvm->mutex);

	return ret ?: count;
}

/* Context handed to the sniffer-config notification-wait callback. */
struct iwl_mvm_sniffer_apply {
	struct iwl_mvm *mvm;	/* mvm whose cur_aid/cur_bssid get updated */
	u8 *bssid;		/* BSSID to latch; points into the command */
	u16 aid;		/* AID to latch (host order; converted below) */
};

/*
 * Notification-wait callback: runs in RX sequence when the firmware
 * acks the sniffer config, so cur_aid/cur_bssid flip exactly between
 * "old config" and "new config" frames (see comment in the writer).
 * Always returns true (done after one notification).
 */
static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm_sniffer_apply *apply = data;

	apply->mvm->cur_aid = cpu_to_le16(apply->aid);
	memcpy(apply->mvm->cur_bssid, apply->bssid,
	       sizeof(apply->mvm->cur_bssid));

	return true;
}

/*
 * Configure HE sniffer mode: parse "<aid-hex> <bssid>" from the user,
 * send HE_AIR_SNIFFER_CONFIG_CMD, and update the driver's notion of the
 * current AID/BSSID in RX order via the notification waiter.
 */
static
ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf,
					  size_t count, loff_t *ppos)
{
	struct iwl_notification_wait wait;
	struct iwl_he_monitor_cmd he_mon_cmd = {};
	struct iwl_mvm_sniffer_apply apply = {
		.mvm = mvm,
	};
	u16 wait_cmds[] = {
		WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
	};
	u32 aid;
	int ret;

	if (!iwl_mvm_firmware_running(mvm))
		return -EIO;

	/* Expect exactly seven fields: AID plus six BSSID octets. */
	ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
		     &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
		     &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
		     &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
	if (ret != 7)
		return -EINVAL;

	he_mon_cmd.aid = cpu_to_le16(aid);

	apply.aid = aid;
	apply.bssid = (void *)he_mon_cmd.bssid;

	mutex_lock(&mvm->mutex);

	/*
	 * Use the notification waiter to get our function triggered
	 * in sequence with other RX. This ensures that frames we get
	 * on the RX queue _before_ the new configuration is applied
	 * still have mvm->cur_aid pointing to the old AID, and that
	 * frames on the RX queue _after_ the firmware processed the
	 * new configuration (and sent the response, synchronously)
	 * get mvm->cur_aid correctly set to the new AID.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait,
				   wait_cmds, ARRAY_SIZE(wait_cmds),
				   iwl_mvm_sniffer_apply, &apply);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
				   0, sizeof(he_mon_cmd), &he_mon_cmd);

	/* no need to really wait, we already did anyway */
	iwl_remove_notification(&mvm->notif_wait, &wait);

	mutex_unlock(&mvm->mutex);

	return ret ?: count;
}

/*
 * Report the current sniffer AID/BSSID in the same "<aid> <bssid>"
 * shape the write handler accepts (AID printed in decimal here).
 */
static ssize_t
iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u8 buf[32];
	int len;

	len = scnprintf(buf, sizeof(buf),
			"%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
			le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0],
			mvm->cur_bssid[1], mvm->cur_bssid[2],
			mvm->cur_bssid[3], mvm->cur_bssid[4],
			mvm->cur_bssid[5]);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * Dump the U-APSD no-aggregation BSSID denylist, one MAC address per
 * line. Buffer is sized for IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM entries at
 * "xx:xx:xx:xx:xx:xx\n" (3 chars per octet) plus a terminator.
 * NOTE(review): the loop bound is IWL_MVM_UAPSD_NOAGG_LIST_LEN while the
 * buffer is sized from IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM — presumably the
 * two constants are equal; confirm against their definitions.
 */
static ssize_t
iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
	unsigned int pos = 0;
	size_t bufsz = sizeof(buf);
	int i;

	mutex_lock(&mvm->mutex);

	for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
		pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
				 mvm->uapsd_noagg_bssids[i].addr);

	mutex_unlock(&mvm->mutex);

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

/*
 * Parse seven comma-separated hex values and send them as an
 * LTR_CONFIG command (flags, static_long, static_short, and four
 * ltr_cfg_values).
 *
 * NOTE(review): sscanf("%x") stores host-order u32s directly into what
 * are presumably __le32 command fields — harmless on little-endian,
 * but no cpu_to_le32() conversion is visible here; verify the field
 * types in struct iwl_ltr_config_cmd.
 */
static ssize_t
iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm,
			   char *buf, size_t count, loff_t *ppos)
{
	int ret;
	struct iwl_ltr_config_cmd ltr_config = {0};

	if (!iwl_mvm_firmware_running(mvm))
		return -EIO;

	if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x",
		   &ltr_config.flags,
		   &ltr_config.static_long,
		   &ltr_config.static_short,
		   &ltr_config.ltr_cfg_values[0],
		   &ltr_config.ltr_cfg_values[1],
		   &ltr_config.ltr_cfg_values[2],
		   &ltr_config.ltr_cfg_values[3]) != 7) {
		return -EINVAL;
	}

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config),
				   &ltr_config);
	mutex_unlock(&mvm->mutex);

	if (ret)
		IWL_ERR(mvm, "failed to send ltr configuration cmd\n");

	return ret ?: count;
}

/*
 * RFI frequency-table control: writing the decimal value 0 re-sends the
 * default table to the device; any other operation id is rejected with
 * -EOPNOTSUPP until custom tables are supported.
 */
static ssize_t
iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf, size_t count,
			       loff_t *ppos)
{
	int ret = 0;
	u16 op_id;

	if (kstrtou16(buf, 10, &op_id))
		return -EINVAL;

	/* value zero triggers re-sending the default table to the device */
	if (!op_id) {
		mutex_lock(&mvm->mutex);
		ret = iwl_rfi_send_config_cmd(mvm, NULL);
		mutex_unlock(&mvm->mutex);
	} else {
		ret = -EOPNOTSUPP; /* in the future a new table will be added */
	}

	return ret ?: count;
}

/* The size computation is as follows:
 * each number needs at most 3 characters, number of rows is the size of
 * the table; So, need 5 chars for the "freq: " part and each tuple afterwards
 * needs 6 characters for numbers and 5 for the punctuation around.
 */
#define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\
				(5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5)))

/*
 * Fetch the RFI frequency table from the firmware and dump it as
 * "freq: (channel, band) (channel, band) ..." per row. On a non-OK
 * firmware status only the status value is printed. The response buffer
 * is owned here and freed before returning.
 */
static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file,
					     char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	struct iwl_rfi_freq_table_resp_cmd *resp;
	u32 status;
	char buf[IWL_RFI_BUF_SIZE];
	int i, j, pos = 0;

	resp = iwl_rfi_get_freq_table(mvm);
	if (IS_ERR(resp))
		return PTR_ERR(resp);

	status = le32_to_cpu(resp->status);
	if (status != RFI_FREQ_TABLE_OK) {
		/* NOTE(review): pos stays 0 here, so the status text is
		 * formatted but a zero-length read is returned — presumably
		 * unintended; confirm against upstream behavior. */
		scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(resp->table); i++) {
		pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ",
				 resp->table[i].freq);

		for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++)
			pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos,
					 "(%d, %d) ",
					 resp->table[i].channels[j],
					 resp->table[i].bands[j]);

		pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n");
	}

out:
	kfree(resp);
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);

/* Device wide debugfs entries */
MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16); static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd cmd = {}; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = { .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, .data = { &cmd, }, .len = { sizeof(cmd) }, }; size_t delta; ssize_t ret, len; if 
(!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ); /* Take care of alignment of both the position and the length */ delta = *ppos & 0x3; cmd.addr = cpu_to_le32(*ppos - delta); cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } len = min((size_t)le32_to_cpu(rsp->len) << 2, iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp)); len = min(len - delta, count); if (len < 0) { ret = -EFAULT; goto out; } ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len); *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static ssize_t iwl_dbgfs_mem_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; struct iwl_dbg_mem_access_cmd *cmd; struct iwl_dbg_mem_access_rsp *rsp; struct iwl_host_cmd hcmd = {}; size_t cmd_size; size_t data_size; u32 op, len; ssize_t ret; if (!iwl_mvm_firmware_running(mvm)) return -EIO; hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? 
UMAC_RD_WR : LMAC_RD_WR); if (*ppos & 0x3 || count < 4) { op = DEBUG_MEM_OP_WRITE_BYTES; len = min(count, (size_t)(4 - (*ppos & 0x3))); data_size = len; } else { op = DEBUG_MEM_OP_WRITE; len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS); data_size = len << 2; } cmd_size = sizeof(*cmd) + ALIGN(data_size, 4); cmd = kzalloc(cmd_size, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->op = cpu_to_le32(op); cmd->len = cpu_to_le32(len); cmd->addr = cpu_to_le32(*ppos); if (copy_from_user((void *)cmd->data, user_buf, data_size)) { kfree(cmd); return -EFAULT; } hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, hcmd.data[0] = (void *)cmd; hcmd.len[0] = cmd_size; mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &hcmd); mutex_unlock(&mvm->mutex); kfree(cmd); if (ret < 0) return ret; rsp = (void *)hcmd.resp_pkt->data; if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; goto out; } ret = data_size; *ppos += ret; out: iwl_free_resp(&hcmd); return ret; } static const struct file_operations iwl_dbgfs_mem_ops = { .read = iwl_dbgfs_mem_read, .write = iwl_dbgfs_mem_write, .open = simple_open, .llseek = default_llseek, }; void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) { MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); } MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); } void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) { struct dentry *bcast_dir __maybe_unused; spin_lock_init(&mvm->drv_stats_lock); MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); 
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600); if (mvm->fw->phy_integration_ver) MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400); #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); debugfs_create_bool("enable_scan_iteration_notif", 0600, mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, &mvm->drop_bcn_ap_mode); 
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, &mvm->d3_wake_sysassert); debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, &mvm->last_netdetect_scans); #endif debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, &mvm->ps_disabled); debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, &mvm->nvm_hw_blob); debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, &mvm->nvm_sw_blob); debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, &mvm->nvm_calib_blob); debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, &mvm->nvm_prod_blob); debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob); debugfs_create_blob("nvm_reg", S_IRUSR, mvm->debugfs_dir, &mvm->nvm_reg_blob); debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, &iwl_dbgfs_mem_ops); /* * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) 
*/ if (!IS_ERR(mvm->debugfs_dir)) { char buf[100]; snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); } } diff --git a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c index 5aa4520b70ac..88893fade5cd 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac-ctxt.c @@ -1,1673 +1,1673 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #include #include #include #include "iwl-io.h" #include "iwl-prph.h" #include "fw-api.h" #include "mvm.h" #include "time-event.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, IWL_MVM_TX_FIFO_VI, IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_BK, }; const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { IWL_GEN2_EDCA_TX_FIFO_VO, IWL_GEN2_EDCA_TX_FIFO_VI, IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK, IWL_GEN2_TRIG_TX_FIFO_VO, IWL_GEN2_TRIG_TX_FIFO_VI, IWL_GEN2_TRIG_TX_FIFO_BE, IWL_GEN2_TRIG_TX_FIFO_BK, }; struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; enum iwl_tsf_id preferred_tsf; bool found_vif; }; static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 min_bi; /* Skip the interface for which we are trying to assign a tsf_id */ if (vif == data->vif) return; /* * The TSF is a hardware/firmware resource, there are 4 and * the driver should assign and free them as needed. 
However, * there are cases where 2 MACs should share the same TSF ID * for the purpose of clock sync, an optimization to avoid * clock drift causing overlapping TBTTs/DTIMs for a GO and * client in the system. * * The firmware will decide according to the MAC type which * will be the leader and follower. Clients that need to sync * with a remote station will be the leader, and an AP or GO * will be the follower. * * Depending on the new interface type it can be following * or become the leader of an existing interface. */ switch (data->vif->type) { case NL80211_IFTYPE_STATION: /* * The new interface is a client, so if the one we're iterating * is an AP, and the beacon interval of the AP is a multiple or * divisor of the beacon interval of the client, the same TSF * should be used to avoid drift between the new client and * existing AP. The existing AP will get drift updates from the * new client context in this case. */ if (vif->type != NL80211_IFTYPE_AP || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; case NL80211_IFTYPE_AP: /* * The new interface is AP/GO, so if its beacon interval is a * multiple or a divisor of the beacon interval of an existing * interface, it should get drift updates from an existing * client or use the same TSF as an existing GO. There's no * drift between TSFs internally but if they used different * TSFs then a new client MAC could update one of them and * cause drift that way. 
*/ if ((vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION) || data->preferred_tsf != NUM_TSF_IDS || !test_bit(mvmvif->tsf_id, data->available_tsf_ids)) break; min_bi = min(data->vif->bss_conf.beacon_int, vif->bss_conf.beacon_int); if (!min_bi) break; if ((data->vif->bss_conf.beacon_int - vif->bss_conf.beacon_int) % min_bi == 0) { data->preferred_tsf = mvmvif->tsf_id; return; } break; default: /* * For all other interface types there's no need to * take drift into account. Either they're exclusive * like IBSS and monitor, or we don't care much about * their TSF (like P2P Device), but we won't be able * to share the TSF resource. */ break; } /* * Unless we exited above, we can't share the TSF resource * that the virtual interface we're iterating over is using * with the new one, so clear the available bit and if this * was the preferred one, reset that as well. */ __clear_bit(mvmvif->tsf_id, data->available_tsf_ids); if (data->preferred_tsf == mvmvif->tsf_id) data->preferred_tsf = NUM_TSF_IDS; } static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { data->found_vif = true; return; } /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not * compatible with the new interface type. * No locking or atomic bit operations are needed since the * data is on the stack of the caller function. 
	 */
	__clear_bit(mvmvif->id, data->available_mac_ids);

	/* find a suitable tsf_id */
	iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
}

/*
 * Recompute the TSF ID for @vif: re-run the TSF iterator over all active
 * interfaces and adopt the preferred TSF slot if one was found, otherwise
 * the first slot still marked available (only if the current one became
 * unavailable).
 */
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_iface_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
		/* no preference yet */
		.preferred_tsf = NUM_TSF_IDS,
	};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_mac_tsf_id_iter, &data);

	if (data.preferred_tsf != NUM_TSF_IDS)
		mvmvif->tsf_id = data.preferred_tsf;
	else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
						NUM_TSF_IDS);
}

/*
 * Initialize the MAC context resources (MAC ID, TSF ID, queues) for a new
 * virtual interface. Returns 0 on success or a negative error if no free
 * MAC/TSF ID exists.
 */
int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_iface_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		.available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
		/* no preference yet */
		.preferred_tsf = NUM_TSF_IDS,
		.found_vif = false,
	};
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/*
	 * Allocate a MAC ID and a TSF for this MAC, along with the queues
	 * and other resources.
	 */

	/*
	 * Before the iterator, we start with all MAC IDs and TSFs available.
	 *
	 * During iteration, all MAC IDs are cleared that are in use by other
	 * virtual interfaces, and all TSF IDs are cleared that can't be used
	 * by this new virtual interface because they're used by an interface
	 * that can't share it with the new one.
	 * At the same time, we check if there's a preferred TSF in the case
	 * that we should share it with another interface.
*/ /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ switch (vif->type) { case NL80211_IFTYPE_ADHOC: break; case NL80211_IFTYPE_STATION: if (!vif->p2p) break; fallthrough; default: __clear_bit(0, data.available_mac_ids); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find * the vif being added already. * We don't want to reassign any IDs in either case since doing * so would probably assign different IDs (as interfaces aren't * necessarily added in the same order), but the old IDs were * preserved anyway, so skip ID assignment for both resume and * recovery. */ if (data.found_vif) return 0; /* Therefore, in recovery, we can't get here */ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return -EBUSY; mvmvif->id = find_first_bit(data.available_mac_ids, NUM_MAC_INDEX_DRIVER); if (mvmvif->id == NUM_MAC_INDEX_DRIVER) { IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n"); ret = -EIO; goto exit_fail; } if (data.preferred_tsf != NUM_TSF_IDS) mvmvif->tsf_id = data.preferred_tsf; else mvmvif->tsf_id = find_first_bit(data.available_tsf_ids, NUM_TSF_IDS); if (mvmvif->tsf_id == NUM_TSF_IDS) { IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n"); ret = -EIO; goto exit_fail; } mvmvif->color = 0; INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) return 0; /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). 
*/ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; return 0; exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); return ret; } static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, u8 *cck_rates, u8 *ofdm_rates) { struct ieee80211_supported_band *sband; unsigned long basic = vif->bss_conf.basic_rates; int lowest_present_ofdm = 100; int lowest_present_cck = 100; u8 cck = 0; u8 ofdm = 0; int i; sband = mvm->hw->wiphy->bands[band]; for_each_set_bit(i, &basic, BITS_PER_LONG) { int hw = sband->bitrates[i].hw_value; if (hw >= IWL_FIRST_OFDM_RATE) { ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); if (lowest_present_ofdm > hw) lowest_present_ofdm = hw; } else { BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); cck |= BIT(hw); if (lowest_present_cck > hw) lowest_present_cck = hw; } } /* * Now we've got the basic rates as bitmaps in the ofdm and cck * variables. This isn't sufficient though, as there might not * be all the right rates in the bitmap. E.g. if the only basic * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps * and 6 Mbps because the 802.11-2007 standard says in 9.6: * * [...] a STA responding to a received frame shall transmit * its Control Response frame [...] at the highest rate in the * BSSBasicRateSet parameter that is less than or equal to the * rate of the immediately previous frame in the frame exchange * sequence ([...]) and that is of the same modulation class * ([...]) as the received frame. 
If no rate contained in the * BSSBasicRateSet parameter meets these conditions, then the * control frame sent in response to a received frame shall be * transmitted at the highest mandatory rate of the PHY that is * less than or equal to the rate of the received frame, and * that is of the same modulation class as the received frame. * * As a consequence, we need to add all mandatory rates that are * lower than all of the basic rates to these bitmaps. */ if (IWL_RATE_24M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; if (IWL_RATE_12M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; /* 6M already there or needed so always add */ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; /* * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. * Note, however: * - if no CCK rates are basic, it must be ERP since there must * be some basic rates at all, so they're OFDM => ERP PHY * (or we're in 5 GHz, and the cck bitmap will never be used) * - if 11M is a basic rate, it must be ERP as well, so add 5.5M * - if 5.5M is basic, 1M and 2M are mandatory * - if 2M is basic, 1M is mandatory * - if 1M is basic, that's the only valid ACK rate. * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. 
*/ if (IWL_RATE_11M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_5M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; if (IWL_RATE_2M_INDEX < lowest_present_cck) cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; *cck_rates = cck; *ofdm_rates = ofdm; } static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd) { /* for both sta and ap, ht_operation_mode hold the protection_mode */ u8 protection_mode = vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; /* The fw does not distinguish between ht and fat */ u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); /* * See section 9.23.3.1 of IEEE 80211-2012. * Nongreenfield HT STAs Present is not supported. */ switch (protection_mode) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: cmd->protection_flags |= cpu_to_le32(ht_flag); break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) cmd->protection_flags |= cpu_to_le32(ht_flag); break; default: IWL_ERR(mvm, "Illegal protection mode %d\n", protection_mode); break; } } static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, const u8 *bssid_override, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_chanctx_conf *chanctx; bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); u8 cck_ack_rates, ofdm_ack_rates; const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; int i; cmd->id_and_color = 
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cmd->action = cpu_to_le32(action); switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->p2p) cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); else cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); break; case NL80211_IFTYPE_AP: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); break; case NL80211_IFTYPE_MONITOR: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); break; case NL80211_IFTYPE_P2P_DEVICE: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); break; case NL80211_IFTYPE_ADHOC: cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); break; default: WARN_ON_ONCE(1); } cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); if (bssid) memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); rcu_read_lock(); chanctx = rcu_dereference(vif->chanctx_conf); iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band : NL80211_BAND_2GHZ, &cck_ack_rates, &ofdm_ack_rates); rcu_read_unlock(); cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); cmd->cck_short_preamble = cpu_to_le32(vif->bss_conf.use_short_preamble ? MAC_FLG_SHORT_PREAMBLE : 0); cmd->short_slot = cpu_to_le32(vif->bss_conf.use_short_slot ? 
						 MAC_FLG_SHORT_SLOT : 0);

	cmd->filter_flags = 0;

	/*
	 * Program the per-AC EDCA parameters. The mac80211 AC index is
	 * remapped to the uCode AC order, and txop is scaled by 32
	 * (presumably 32us units -> usec; confirm against mac80211 docs).
	 */
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
		u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i);

		cmd->ac[ucode_ac].cw_min =
			cpu_to_le16(mvmvif->queue_params[i].cw_min);
		cmd->ac[ucode_ac].cw_max =
			cpu_to_le16(mvmvif->queue_params[i].cw_max);
		cmd->ac[ucode_ac].edca_txop =
			cpu_to_le16(mvmvif->queue_params[i].txop * 32);
		cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs;
		cmd->ac[ucode_ac].fifos_mask = BIT(txf);
	}

	if (vif->bss_conf.qos)
		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);

	if (vif->bss_conf.use_cts_prot)
		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);

	IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
		       vif->bss_conf.use_cts_prot,
		       vif->bss_conf.ht_operation_mode);
	if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
	if (ht_enabled)
		iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
}

/* Send a MAC_CONTEXT_CMD to the firmware, logging any failure. */
static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
				     struct iwl_mac_ctx_cmd *cmd)
{
	int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
				       sizeof(*cmd), cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
			le32_to_cpu(cmd->action), ret);
	return ret;
}

/*
 * Build and send the MAC context command for a station (or P2P client)
 * interface.
 */
static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    u32 action, bool force_assoc_off,
				    const u8 *bssid_override)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct iwl_mac_data_sta *ctxt_sta;

	WARN_ON(vif->type != NL80211_IFTYPE_STATION);

	/* Fill the common data for all mac context types */
	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);

	if (vif->p2p) {
		/* P2P clients additionally carry the NoA CT-window */
		struct ieee80211_p2p_noa_attr *noa =
			&vif->bss_conf.p2p_noa_attr;

		cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
		ctxt_sta = &cmd.p2p_sta.sta;
	} else {
		ctxt_sta = &cmd.sta;
	}

	/* We need the dtim_period to set the MAC as associated */
	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
!force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u8 ap_sta_id = mvmvif->ap_sta_id; u32 dtim_offs; /* * The DTIM count counts down, so when it is N that means N * more beacon intervals happen until the DTIM TBTT. Therefore * add this to the current time. If that ends up being in the * future, the firmware will handle it. * * Also note that the system_timestamp (which we get here as * "sync_device_ts") and TSF timestamp aren't at exactly the * same offset in the frame -- the TSF is at the first symbol * of the TSF, the system timestamp is at signal acquisition * time. This means there's an offset between them of at most * a few hundred microseconds (24 * 8 bits + PLCP time gives * 384us in the longest case), this is currently not relevant * as the firmware wakes up around 2ms before the TBTT. */ dtim_offs = vif->bss_conf.sync_dtim_count * vif->bss_conf.beacon_int; /* convert TU to usecs */ dtim_offs *= 1024; ctxt_sta->dtim_tsf = cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); ctxt_sta->dtim_time = cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); ctxt_sta->assoc_beacon_arrive_time = cpu_to_le32(vif->bss_conf.sync_device_ts); IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", le64_to_cpu(ctxt_sta->dtim_tsf), le32_to_cpu(ctxt_sta->dtim_time), dtim_offs); ctxt_sta->is_assoc = cpu_to_le32(1); if (!mvmvif->authorized && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) ctxt_sta->data_policy |= cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); /* * allow multicast data frames only as long as the station is * authorized, i.e., GTK keys are already installed (if needed) */ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (!IS_ERR_OR_NULL(sta)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state == IEEE80211_STA_AUTHORIZED) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); } 
rcu_read_unlock(); } } else { ctxt_sta->is_assoc = cpu_to_le32(0); /* Allow beacons to pass through as long as we are not * associated, or we do not have dtim period information. */ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); } ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); if (vif->bss_conf.twt_protected) ctxt_sta->data_policy |= cpu_to_le32(PROTECTED_TWT_SUPPORTED); if (vif->bss_conf.twt_broadcast) ctxt_sta->data_policy |= cpu_to_le32(BROADCAST_TWT_SUPPORTED); } return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; u32 tfd_queue_msk = BIT(mvm->snif_queue); int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST | MAC_FILTER_IN_CRC32 | MAC_FILTER_ACCEPT_GRP); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); /* Allocate sniffer station */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, vif->type, IWL_STA_GENERAL_PURPOSE); if (ret) return ret; return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct 
	       iwl_mac_ctx_cmd cmd = {};

	WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
				       MAC_FILTER_IN_PROBE_REQUEST |
				       MAC_FILTER_ACCEPT_GRP);

	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);

	/* TODO: Assumes that the beacon id == mac context id */
	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

/* Result of iwl_mvm_go_iterator(): was any active P2P GO found? */
struct iwl_mvm_go_iterator_data {
	bool go_active;
};

/* Interface iterator: flag go_active if this vif is an active P2P GO. */
static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_go_iterator_data *data = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
	    mvmvif->ap_ibss_active)
		data->go_active = true;
}

/* Build and send the MAC context command for the P2P Device interface. */
static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   u32 action)
{
	struct iwl_mac_ctx_cmd cmd = {};
	struct iwl_mvm_go_iterator_data data = {};

	WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);

	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);

	/* Override the filter flags to accept only probe requests */
	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

	/*
	 * This flag should be set to true when the P2P Device is
	 * discoverable and there is at least another active P2P GO. Setting
	 * this flag will allow the P2P Device to be discoverable on other
	 * channels in addition to its listen channel.
	 * Note that this flag should not be set in other cases as it opens the
	 * Rx filters on all MAC and increases the number of interrupts.
	 */
	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		iwl_mvm_go_iterator, &data);

	cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ?
						   1 : 0);

	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

/*
 * Locate the TIM element in @beacon (a beacon frame of @frame_size bytes)
 * and store its offset and length into @tim_index/@tim_size (little-endian,
 * for the firmware beacon template command). Logs a warning if no TIM
 * element is found.
 */
void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, __le32 *tim_index,
			      __le32 *tim_size, u8 *beacon, u32 frame_size)
{
	u32 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/* The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon. */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		*tim_index = cpu_to_le32(tim_idx);
		*tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
	} else {
		IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
	}
}

/*
 * Return the offset (from frame start) of element @eid within the beacon
 * frame, or 0 when the element is absent or the frame has no element area.
 */
static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
	struct ieee80211_mgmt *mgmt = (void *)beacon;
	const u8 *ie;

	if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
		return 0;

	frame_size -= mgmt->u.beacon.variable - beacon;

	ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
	if (!ie)
		return 0;

	return ie - beacon;
}

/*
 * Pick the lowest TX rate for a beacon: first CCK rate on 2.4 GHz for
 * non-P2P interfaces, first OFDM rate otherwise (P2P never uses CCK here;
 * NOTE(review): presumably because P2P disallows CCK rates — confirm).
 */
u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
				    struct ieee80211_vif *vif)
{
	u8 rate;

	if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
		rate = IWL_FIRST_CCK_RATE;
	else
		rate = IWL_FIRST_OFDM_RATE;

	return rate;
}

/*
 * Translate @rate_idx into the beacon template command's rate flags; the
 * CCK flag encoding differs for newer (>v10) BEACON_TEMPLATE_CMD versions.
 */
u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
{
	u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
	bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10;

	if (rate_idx <= IWL_FIRST_CCK_RATE)
		flags |= is_new_rate ?
IWL_MAC_BEACON_CCK : IWL_MAC_BEACON_CCK_V1; return flags; } static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info; u8 rate; u32 tx_flags; info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ tx->len = cpu_to_le16((u16)beacon->len); tx->sta_id = mvmvif->bcast_sta.sta_id; tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate)); if (rate == IWL_FIRST_CCK_RATE) tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1); } int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len) { struct iwl_host_cmd cmd = { .id = BEACON_TEMPLATE_CMD, .flags = CMD_ASYNC, }; cmd.len[0] = len; cmd.data[0] = data; cmd.dataflags[0] = 0; cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); return 
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); u16 flags; struct ieee80211_chanctx_conf *ctx; int channel; flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate); /* Enable FILS on PSC channels only */ rcu_read_lock(); ctx = rcu_dereference(vif->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && !IWL_MVM_DISABLE_AP_FILS) { flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? 
IWL_MAC_BEACON_FILS : IWL_MAC_BEACON_FILS_V1; beacon_cmd.short_ssid = cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, vif->bss_conf.ssid_len)); } rcu_read_unlock(); beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); beacon_cmd.csa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_CHANNEL_SWITCH, beacon->len)); beacon_cmd.ecsa_offset = cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, WLAN_EID_EXT_CHANSWITCH_ANN, beacon->len)); return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { if (WARN_ON(!beacon)) return -EINVAL; if (IWL_MVM_NON_TRANSMITTING_AP) return 0; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); } /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct sk_buff *beacon; int ret; WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC); - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); if (!beacon) return -ENOMEM; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->beacon_inject_active) { dev_kfree_skb(beacon); return -EBUSY; } #endif ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); dev_kfree_skb(beacon); return ret; } struct iwl_mvm_mac_ap_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif 
					       *vif;
	u32 beacon_device_ts;
	u16 beacon_int;
};

/* Find the beacon_device_ts and beacon_int for a managed interface */
static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mvm_mac_ap_iterator_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
		return;

	/* Station client has higher priority over P2P client */
	if (vif->p2p && data->beacon_device_ts)
		return;

	data->beacon_device_ts = vif->bss_conf.sync_device_ts;
	data->beacon_int = vif->bss_conf.beacon_int;
}

/*
 * Fill the specific data for mac context of type AP or P2P GO
 */
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 struct iwl_mac_ctx_cmd *cmd,
					 struct iwl_mac_data_ap *ctxt_ap,
					 bool add)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_mac_ap_iterator_data data = {
		.mvm = mvm,
		.vif = vif,
		.beacon_device_ts = 0
	};

	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
	cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);

	/*
	 * in AP mode, pass probe requests and beacons from other APs
	 * (needed for ht protection); when there is no associated
	 * station don't ask FW to pass beacons to prevent unnecessary
	 * wake-ups.
	 */
	cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
	if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
		cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
		IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
	} else {
		IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
	}

	ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
	ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
					     vif->bss_conf.dtim_period);

	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_STA_TYPE))
		ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);

	/*
	 * Only set the beacon time when the MAC is being added, when we
	 * just modify the MAC then we should keep the time -- the firmware
	 * can otherwise have a "jumping" TBTT.
*/ if (add) { /* * If there is a station/P2P client interface which is * associated, set the AP's TBTT far enough from the station's * TBTT. Otherwise, set it to the current system time */ ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_ap_iterator, &data); if (data.beacon_device_ts) { u32 rand = (prandom_u32() % (64 - 36)) + 36; mvmvif->ap_beacon_time = data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100); } else { mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm); } } ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time); ctxt_ap->beacon_tsf = 0; /* unused */ /* TODO: Assume that the beacon id == mac context id */ ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id); } static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for ap mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap, action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr; WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* Fill the data specific for GO mode */ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap, action == FW_CTXT_ACTION_ADD); cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); cmd.go.opp_ps_enabled = cpu_to_le32(!!(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT)); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, 
				struct ieee80211_vif *vif, u32 action,
				bool force_assoc_off, const u8 *bssid_override)
{
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
						force_assoc_off,
						bssid_override);
	case NL80211_IFTYPE_AP:
		/* non-P2P AP and P2P GO use different command layouts */
		if (!vif->p2p)
			return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
		else
			return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
	case NL80211_IFTYPE_MONITOR:
		return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
	case NL80211_IFTYPE_P2P_DEVICE:
		return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
	case NL80211_IFTYPE_ADHOC:
		return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/*
 * Upload (ADD) the MAC context for @vif to the firmware and mark the vif
 * as uploaded. Returns 0 on success, -EIO if the vif was already uploaded,
 * or the error from sending the command.
 */
int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
				   true, NULL);
	if (ret)
		return ret;

	/* will only do anything at resume from D3 time */
	iwl_mvm_set_last_nonqos_seq(mvm, vif);

	mvmvif->uploaded = true;
	return 0;
}

/*
 * MODIFY an already-uploaded MAC context. Returns -EIO if the vif was
 * never uploaded; otherwise the result of sending the command.
 */
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     bool force_assoc_off, const u8 *bssid_override)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
				    force_assoc_off, bssid_override);
}

/*
 * REMOVE the MAC context for @vif from the firmware and clear the
 * uploaded flag. Monitor vifs additionally drop the FCS-RX hw flag and
 * free the sniffer station.
 */
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mac_ctx_cmd cmd;
	int ret;

	if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
		      vif->addr, ieee80211_vif_type_p2p(vif)))
		return -EIO;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							   mvmvif->color));
	cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);

	ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
				   sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
		return ret;
	}

	mvmvif->uploaded = false;

	if (vif->type == NL80211_IFTYPE_MONITOR) {
		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
		iwl_mvm_dealloc_snif_sta(mvm);
	}

	return 0;
}

/*
 * Advance the CSA (channel switch announcement) countdown on beacon TX
 * status. @gp2 is the firmware GP2 timestamp of the beacon; @tx_success
 * tells whether that beacon was actually transmitted.
 */
static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
				   struct ieee80211_vif *csa_vif, u32 gp2,
				   bool tx_success)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);

	/* Don't start to countdown from a failed beacon */
	if (!tx_success && !mvmvif->csa_countdown)
		return;

	mvmvif->csa_countdown = true;

	if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
		int c = ieee80211_beacon_update_cntdwn(csa_vif);

		iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
		if (csa_vif->p2p &&
		    !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
		    tx_success) {
			/* schedule a NoA covering the actual switch time;
			 * rel_time is in TU, apply_time in usec (TU*1024) */
			u32 rel_time = (c + 1) *
				       csa_vif->bss_conf.beacon_int -
				       IWL_MVM_CHANNEL_SWITCH_TIME_GO;
			u32 apply_time = gp2 + rel_time * 1024;

			iwl_mvm_schedule_csa_period(mvm, csa_vif,
					 IWL_MVM_CHANNEL_SWITCH_TIME_GO -
					 IWL_MVM_CHANNEL_SWITCH_MARGIN,
					 apply_time);
		}
	} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
		/* we don't have CSA NoA scheduled yet, switch now */
		ieee80211_csa_finish(csa_vif);
		RCU_INIT_POINTER(mvm->csa_vif, NULL);
	}
}

/*
 * Handle the (extended) beacon notification from the firmware: record the
 * beacon GP2 time, log TX status, drive the CSA countdown and the
 * TX-unblock timeout for stations blocked during a channel switch.
 * Caller context: mvm->mutex held (asserted below).
 */
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
	struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
	struct ieee80211_vif *csa_vif;
	struct ieee80211_vif *tx_blocked_vif;
	struct agg_tx_status *agg_status;
	u16 status;

	lockdep_assert_held(&mvm->mutex);

	mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);

	if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) {
		/* older firmware: v5 layout with a full TX response header */
		struct iwl_mvm_tx_resp *beacon_notify_hdr =
			&beacon_v5->beacon_notify_hdr;

		if
		   (unlikely(pkt_len < sizeof(*beacon_v5)))
			return;

		mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
		agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
		status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
		IWL_DEBUG_RX(mvm,
			     "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n",
			     status, beacon_notify_hdr->failure_frame,
			     le64_to_cpu(beacon->tsf),
			     mvm->ap_last_beacon_gp2,
			     le32_to_cpu(beacon_notify_hdr->initial_rate));
	} else {
		/* newer firmware: short notification, status is 32-bit */
		if (unlikely(pkt_len < sizeof(*beacon)))
			return;

		mvm->ibss_manager = beacon->ibss_mgr_status != 0;
		status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
		IWL_DEBUG_RX(mvm,
			     "beacon status %#x tsf:0x%016llX gp2:0x%X\n",
			     status, le64_to_cpu(beacon->tsf),
			     mvm->ap_last_beacon_gp2);
	}

	/* mvm->mutex protects these pointers, hence _protected deref */
	csa_vif = rcu_dereference_protected(mvm->csa_vif,
					    lockdep_is_held(&mvm->mutex));
	if (unlikely(csa_vif && csa_vif->csa_active))
		iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
				       (status == TX_STATUS_SUCCESS));

	tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
						   lockdep_is_held(&mvm->mutex));
	if (unlikely(tx_blocked_vif)) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(tx_blocked_vif);

		/*
		 * The channel switch is started and we have blocked the
		 * stations. If this is the first beacon (the timeout wasn't
		 * set), set the unblock timeout, otherwise countdown
		 */
		if (!mvm->csa_tx_block_bcn_timeout)
			mvm->csa_tx_block_bcn_timeout =
				IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
		else
			mvm->csa_tx_block_bcn_timeout--;

		/* Check if the timeout is expired, and unblock tx */
		if (mvm->csa_tx_block_bcn_timeout == 0) {
			iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
		}
	}
}

/*
 * Handle the missed-beacons notification: declare connection loss or
 * beacon loss depending on how many consecutive beacons were missed, and
 * fire the firmware debug trigger when configured thresholds are crossed.
 */
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
				     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
	struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
	struct iwl_fw_dbg_trigger_tlv *trigger;
	u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
	u32 rx_missed_bcon, rx_missed_bcon_since_rx;
	struct ieee80211_vif *vif;
	u32 id = le32_to_cpu(mb->mac_id);
	union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };

	IWL_DEBUG_INFO(mvm,
		       "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
		       le32_to_cpu(mb->mac_id),
		       le32_to_cpu(mb->consec_missed_beacons),
		       le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
		       le32_to_cpu(mb->num_recvd_beacons),
		       le32_to_cpu(mb->num_expected_beacons));

	rcu_read_lock();

	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
	if (!vif)
		goto out;

	rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
	rx_missed_bcon_since_rx =
		le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
		iwl_mvm_connection_loss(mvm, vif, "missed beacons");
	else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_loss(vif);

	iwl_dbg_tlv_time_point(&mvm->fwrt,
			       IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);

	trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
					FW_DBG_TRIGGER_MISSED_BEACONS);
	if (!trigger)
		goto out;

	bcon_trig = (void *)trigger->data;
	stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
	stop_trig_missed_bcon_since_rx =
		le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);

	/* TODO: implement start trigger */

	if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
	    rx_missed_bcon >= stop_trig_missed_bcon)
		iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);

out:
	rcu_read_unlock();
}

/*
 * Handle a stored-beacon notification: rebuild an RX status from the
 * notification metadata and hand the beacon frame to mac80211 as if it
 * had been received normally. Supports v2 and v3 notification layouts.
 */
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
				    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb;
	u8 *data;
	u32 size = le32_to_cpu(sb->byte_count);
	int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
					WIDE_ID(PROT_OFFLOAD_GROUP,
						STORED_BEACON_NTF),
					0);

	if (size == 0)
		return;

	/* handle per-version differences */
	if (ver <= 2) {
		struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data;

		if (pkt_len < struct_size(sb_v2, data, size))
			return;

		data = sb_v2->data;
	} else {
		struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data;

		if (pkt_len < struct_size(sb_v3, data, size))
			return;

		data = sb_v3->data;
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mvm, "alloc_skb failed\n");
		return;
	}

	/* update rx_status according to the notification's metadata */
	memset(&rx_status, 0, sizeof(rx_status));
	rx_status.mactime = le64_to_cpu(sb->tsf);
	/* TSF as indicated by the firmware  is at INA time */
	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
	rx_status.band =
		(sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
			NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
					       rx_status.band);

	/* copy the data */
	skb_put_data(skb, data, size);
	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	/* pass it as regular rx to mac80211 */
	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}

/*
 * Handle the probe-response-data notification: cache the firmware's NoA
 * attribute / CSA counter per vif so it can be inserted into probe
 * responses; the old cached data is freed via RCU.
 */
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
	struct iwl_probe_resp_data *old_data, *new_data;
	u32 id = le32_to_cpu(notif->mac_id);
	struct ieee80211_vif *vif;
	struct iwl_mvm_vif *mvmvif;

	IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
		       notif->noa_active, notif->csa_counter);

	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
	if (!vif)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
	if (!new_data)
		return;

	memcpy(&new_data->notif, notif, sizeof(new_data->notif));

	/* noa_attr contains 1 reserved byte, need to substruct it */
	new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
			    sizeof(new_data->notif.noa_attr) - 1;

	/*
	 * If it's a one time NoA, only one descriptor is needed,
	 * adjust the length according to len_low.
	 */
	if (new_data->notif.noa_attr.len_low ==
	    sizeof(struct ieee80211_p2p_noa_desc) + 2)
		new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);

	old_data = rcu_dereference_protected(mvmvif->probe_resp_data,
					     lockdep_is_held(&mvmvif->mvm->mutex));
	rcu_assign_pointer(mvmvif->probe_resp_data, new_data);

	if (old_data)
		kfree_rcu(old_data, rcu_head);

	if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
	    notif->csa_counter >= 1)
		ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
}

/*
 * Handle the channel-switch-start notification from the firmware.
 * For an AP/GO vif: finish the CSA and schedule the TX unblock work.
 * For a station vif: either cancel an unknown switch in the firmware or
 * complete the client-side switch.
 */
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
					struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *csa_vif, *vif;
	struct iwl_mvm_vif *mvmvif;
	u32 id_n_color, csa_id, mac_id;

	id_n_color = le32_to_cpu(notif->id_and_color);
	mac_id = id_n_color & FW_CTXT_ID_MSK;

	if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER))
		return;

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif = rcu_dereference(mvm->csa_vif);
		if (WARN_ON(!csa_vif || !csa_vif->csa_active ||
			    csa_vif != vif))
			goto out_unlock;

		csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
		if (WARN(csa_id != id_n_color,
			 "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
			 csa_id, id_n_color))
			goto out_unlock;

		IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");

		schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
				      msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
						       csa_vif->bss_conf.beacon_int));

		ieee80211_csa_finish(csa_vif);

		rcu_read_unlock();

		RCU_INIT_POINTER(mvm->csa_vif, NULL);

		return;
	case NL80211_IFTYPE_STATION:
		/*
		 * if we don't know about an ongoing channel switch,
		 * make sure FW cancels it
		 */
		if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
					    CHANNEL_SWITCH_ERROR_NOTIF,
					    0) && !vif->csa_active) {
			IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n");
			iwl_mvm_cancel_channel_switch(mvm, vif, mac_id);
			break;
		}

		iwl_mvm_csa_client_absent(mvm, vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}
out_unlock:
	rcu_read_unlock();
}

/*
 * Handle the channel-switch-error notification: on fatal error bits,
 * tell mac80211 to disconnect.
 */
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
					struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif;
	u32 id = le32_to_cpu(notif->mac_id);
	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);

	rcu_read_lock();
	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
	if (!vif) {
		rcu_read_unlock();
		return;
	}

	IWL_DEBUG_INFO(mvm,
		       "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n",
		       id, csa_err_mask);
	if (csa_err_mask & (CS_ERR_COUNT_ERROR |
			    CS_ERR_LONG_DELAY_AFTER_CS |
			    CS_ERR_TX_BLOCK_TIMER_EXPIRED))
		ieee80211_channel_switch_disconnect(vif, true);
	rcu_read_unlock();
}

/*
 * Handle the missed-VAP notification: the firmware lost the (MBSSID)
 * AP's beacons, so report connection loss for the vif.
 */
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_vap_notif *mb = (void *)pkt->data;
	struct ieee80211_vif *vif;
	u32 id = le32_to_cpu(mb->mac_id);

	IWL_DEBUG_INFO(mvm,
		       "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
		       le32_to_cpu(mb->mac_id),
		       mb->num_beacon_intervals_elapsed,
		       mb->profile_periodicity);

	rcu_read_lock();
	vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
	if (vif)
		iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
	rcu_read_unlock();
}
diff --git a/sys/contrib/dev/iwlwifi/mvm/mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mac80211.c
index 8c1e499fa0f1..4f40a9f64ee3 100644
--- a/sys/contrib/dev/iwlwifi/mvm/mac80211.c
+++ b/sys/contrib/dev/iwlwifi/mvm/mac80211.c
@@ -1,5491 +1,5491 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C)
 2016-2017 Intel Deutschland GmbH
 */
/*
 * NOTE(review): the header names of the angle-bracket includes below were
 * stripped by the text extraction (bare "#include"); the original targets
 * must be restored from the upstream file — do not guess them here.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#if defined(__FreeBSD__)
#include
#endif
#include
#include
#include
#if defined(__FreeBSD__)
#include
#endif
#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
#include "iwl-phy-db.h"
#ifdef CONFIG_NL80211_TESTMODE
#include "testmode.h"
#endif
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"

/* per-type interface limits advertised to cfg80211 */
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

/* allowed concurrent-interface combination: up to 3 vifs on 2 channels */
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = 2,
		.max_interfaces = 3,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};

/* FTM (fine timing measurement) peer-measurement capabilities */
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
	.max_peers = IWL_MVM_TOF_MAX_APS,
	.report_ap_tsf = 1,
	.randomize_mac_addr = 1,
	.ftm = {
		.supported = 1,
		.asap = 1,
		.non_asap = 1,
		.request_lci = 1,
		.request_civicloc = 1,
		.trigger_based = 1,
		.non_trigger_based = 1,
		.max_bursts_exponent = -1, /* all supported */
		.max_ftms_per_burst = 0, /* no limits */
		.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
			      BIT(NL80211_CHAN_WIDTH_20) |
			      BIT(NL80211_CHAN_WIDTH_40) |
			      BIT(NL80211_CHAN_WIDTH_80) |
			      BIT(NL80211_CHAN_WIDTH_160),
		.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
			     BIT(NL80211_PREAMBLE_HT) |
			     BIT(NL80211_PREAMBLE_VHT) |
			     BIT(NL80211_PREAMBLE_HE),
	},
};

static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
				 enum set_key_cmd cmd,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *key);

/* Reset all PHY contexts to unreferenced state with sequential ids */
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
	int i;

	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
	for (i = 0; i < NUM_PHY_CTX; i++) {
		mvm->phy_ctxts[i].id = i;
		mvm->phy_ctxts[i].ref = 0;
	}
}

/*
 * Query the firmware for the regulatory domain of @alpha2 and parse it
 * into an ieee80211_regdomain. On success *changed (if non-NULL) tells
 * whether the channel profile actually changed. Returns NULL or an
 * ERR_PTR-style value on failure; caller owns (kfree) the result.
 * Caller context: mvm->mutex held (asserted below).
 */
struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
						  const char *alpha2,
						  enum iwl_mcc_source src_id,
						  bool *changed)
{
	struct ieee80211_regdomain *regd = NULL;
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcc_update_resp *resp;
	u8 resp_ver;

	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

	lockdep_assert_held(&mvm->mutex);

	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
	if (IS_ERR_OR_NULL(resp)) {
		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
			      PTR_ERR_OR_ZERO(resp));
		resp = NULL;
		goto out;
	}

	if (changed) {
		u32 status = le32_to_cpu(resp->status);

		*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
			    status == MCC_RESP_ILLEGAL);
	}
	resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
					   MCC_UPDATE_CMD, 0);
	IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);

	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
				      __le32_to_cpu(resp->n_channels),
				      resp->channels,
				      __le16_to_cpu(resp->mcc),
				      __le16_to_cpu(resp->geo_info),
				      __le16_to_cpu(resp->cap), resp_ver);
	/* Store the return source id */
	src_id = resp->source_id;
	if (IS_ERR_OR_NULL(regd)) {
		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
			      PTR_ERR_OR_ZERO(regd));
		goto out;
	}

	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
	mvm->lar_regdom_set = true;
	mvm->mcc_src = src_id;
	iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));

out:
	kfree(resp);
	return regd;
}

/* Re-fetch the current regdomain and push it to cfg80211 if it changed */
void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
{
	bool changed;
	struct ieee80211_regdomain *regd;

	if (!iwl_mvm_is_lar_supported(mvm))
		return;

	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
	if (!IS_ERR_OR_NULL(regd)) {
		/* only update the regulatory core if changed */
		if (changed)
			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

		kfree(regd);
	}
}

struct
       ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
							  bool *changed)
{
	/* "ZZ" asks the FW for its currently active (world) regdomain */
	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
				     MCC_SOURCE_GET_CURRENT :
				     MCC_SOURCE_OLD_FW, changed);
}

/*
 * After FW (re)load, replay the last stored MCC/source to the firmware
 * and resync cfg80211 if the resulting regdomain differs.
 * Returns 0, -ENOENT when no regdomain was ever set, or -EIO on failure.
 */
int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
{
	enum iwl_mcc_source used_src;
	struct ieee80211_regdomain *regd;
	int ret;
	bool changed;
	const struct ieee80211_regdomain *r =
		wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd);

	if (!r)
		return -ENOENT;

	/* save the last source in case we overwrite it below */
	used_src = mvm->mcc_src;
	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
		/* Notify the firmware we support wifi location updates */
		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
		if (!IS_ERR_OR_NULL(regd))
			kfree(regd);
	}

	/* Now set our last stored MCC and source */
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
				     &changed);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* update cfg80211 if the regdomain was changed */
	if (changed)
		ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
	else
		ret = 0;

	kfree(regd);
	return ret;
}

/* extended capability bytes advertised for HE-capable station vifs */
static const u8 he_if_types_ext_capa_sta[] = {
	 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
	 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
	 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};

static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
	{
		.iftype = NL80211_IFTYPE_STATION,
		.extended_capabilities = he_if_types_ext_capa_sta,
		.extended_capabilities_mask = he_if_types_ext_capa_sta,
		.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
	},
};

/* mac80211 op: report the currently valid TX/RX antenna bitmaps */
static int
iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	*tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
	*rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
	return 0;
}

/*
 * Configure all mac80211/cfg80211 capabilities from NVM data, firmware
 * capability TLVs and module parameters, then register the hw with
 * mac80211. Returns 0 on success or a negative errno; on registration
 * failure the LEDs set up earlier in this function are torn down.
 */
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
	struct ieee80211_hw *hw = mvm->hw;
	int num_mac, ret, i;
	static const u32 mvm_ciphers[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};
#ifdef CONFIG_PM_SLEEP
	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
#endif

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
	ieee80211_hw_set(hw, STA_MMPDU_TXQ);

	/*
	 * On older devices, enabling TX A-MSDU occasionally leads to
	 * something getting messed up, the command read from the FIFO
	 * gets out of sync and isn't a TX command, so that we have an
	 * assert EDC.
	 *
	 * It's not clear where the bug is, but since we didn't used to
	 * support A-MSDU until moving the mac80211 iTXQs, just leave it
	 * for older devices. We also don't see this issue on any newer
	 * devices.
	 */
	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000)
		ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);

	if (iwl_mvm_has_tlc_offload(mvm)) {
		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	}

	if (iwl_mvm_has_new_rx_api(mvm))
		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
		ieee80211_hw_set(hw, AP_LINK_PS);
	} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
		/*
		 * we absolutely need this for the new TX API since that comes
		 * with many more queues than the current code can deal with
		 * for station powersave
		 */
		return -EINVAL;
	}

	if (mvm->trans->num_rx_queues > 1)
		ieee80211_hw_set(hw, USES_RSS);

	if (mvm->trans->max_skb_frags)
		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;

	hw->queues = IEEE80211_NUM_ACS;
	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
				    IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;

	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	/* this is the case for CCK frames, it's better (only 8) for OFDM */
	hw->radiotap_timestamp.accuracy = 22;

	if (!iwl_mvm_has_tlc_offload(mvm))
		hw->rate_control_algorithm = RS_NAME;

	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
	hw->max_tx_fragments = mvm->trans->max_skb_frags;

	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
	hw->wiphy->cipher_suites = mvm->ciphers;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP_256;
		hw->wiphy->n_cipher_suites++;
	}

	if (iwlwifi_mod_params.swcrypto)
		IWL_ERR(mvm,
			"iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n");
	if (!iwlwifi_mod_params.bt_coex_active)
		IWL_ERR(mvm,
			"iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n");

	ieee80211_hw_set(hw, MFP_CAPABLE);
	mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
	hw->wiphy->n_cipher_suites++;
	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_BIP_GMAC_128;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_BIP_GMAC_256;
		hw->wiphy->n_cipher_suites++;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) {
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
		hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);

	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	hw->wiphy->features |=
		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);
	hw->txq_data_size = sizeof(struct iwl_mvm_txq);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/* The new Tx API does not allow to pass the key or keyid of a MPDU to
	 * the hw, preventing us to control which key(id) to use per MPDU.
	 * Till that's fixed we can't use Extended Key ID for the newer cards.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm))
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_EXT_KEY_ID);
	hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;

	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
	if (iwl_mvm_is_lar_supported(mvm))
		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
	else
		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
					       REGULATORY_DISABLE_BEACON_HINTS;

	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;

	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
	hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(iwl_mvm_iface_combinations);

	hw->wiphy->max_remain_on_channel_duration = 10000;
	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Extract MAC address */
	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
	hw->wiphy->addresses = mvm->addresses;
	hw->wiphy->n_addresses = 1;

	/* Extract additional MAC addresses if available */
	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;

	/* derive extra addresses by incrementing the last octet */
	for (i = 1; i < num_mac; i++) {
		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
		       ETH_ALEN);
		mvm->addresses[i].addr[5]++;
		hw->wiphy->n_addresses++;
	}

	iwl_mvm_reset_phy_ctxts(mvm);

	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
	else
		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;

	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
		hw->wiphy->bands[NL80211_BAND_2GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
		hw->wiphy->bands[NL80211_BAND_5GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];

		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
		    fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
	}
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) &&
	    mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels)
		hw->wiphy->bands[NL80211_BAND_6GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_6GHZ];

	hw->wiphy->hw_version = mvm->trans->hw_id;

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_sched_scan_reqs = 1;
	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
	hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw);
	/* we create the 802.11 header and zero length SSID IE. */
	hw->wiphy->max_sched_scan_ie_len =
		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;

	/*
	 * the firmware uses u8 for num of iterations, but 0xff is saved for
	 * infinite loop, so the maximum number of iterations is actually 254.
	 */
	hw->wiphy->max_sched_scan_plan_iterations = 254;

	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
			       NL80211_FEATURE_P2P_GO_OPPPS |
			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
			       NL80211_FEATURE_DYNAMIC_SMPS |
			       NL80211_FEATURE_STATIC_SMPS |
			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_QUIET;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		hw->wiphy->features |=
			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
				  IWL_FW_CMD_VER_UNKNOWN) == 3)
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_SCAN_START_TIME);
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_BSS_PARENT_TSF);
	}

	if (iwl_mvm_is_oce_supported(mvm)) {
		u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0);

		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);

		/* Old firmware also supports probe deferral and suppression */
		if (scan_ver < 15)
			wiphy_ext_feature_set(hw->wiphy,
				NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
	}

	if (mvm->nvm_data->sku_cap_11ax_enable &&
	    !iwlwifi_mod_params.disable_11ax) {
		hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
		hw->wiphy->num_iftype_ext_capab =
			ARRAY_SIZE(he_iftypes_ext_capa);

		ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
		ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
	}

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
	if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
	    mvm->trans->ops->d3_suspend &&
	    mvm->trans->ops->d3_resume &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
				     WIPHY_WOWLAN_DISCONNECT |
				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
				     WIPHY_WOWLAN_RFKILL_RELEASE |
				     WIPHY_WOWLAN_NET_DETECT;
		mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
				     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
				     WIPHY_WOWLAN_4WAY_HANDSHAKE;

		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
		mvm->wowlan.max_nd_match_sets =
			iwl_umac_scan_get_max_profiles(mvm->fw);
		hw->wiphy->wowlan = &mvm->wowlan;
	}
#endif

	ret = iwl_mvm_leds_init(mvm);
	if (ret)
		return ret;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		ieee80211_hw_set(hw, TDLS_WIDER_BW);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
	}

	hw->netdev_features |= mvm->cfg->features;
	if (!iwl_mvm_is_csum_supported(mvm))
		hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK;

	if (mvm->cfg->vht_mu_mimo_supported)
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
		wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_PROTECTED_TWT);

	iwl_mvm_vendor_cmds_register(mvm);

	hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
	hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);

	/* Hand the fully configured hw to mac80211; roll back LED init on failure. */
	ret = ieee80211_register_hw(mvm->hw);
	if (ret) {
		iwl_mvm_leds_exit(mvm);
	}

	return ret;
}

/*
 * Transmit one frame: use the station TX path when a station is known,
 * the non-station path otherwise.  On failure the skb is handed back to
 * mac80211 via ieee80211_free_txskb() so it is never leaked.
 */
static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
			   struct ieee80211_sta *sta)
{
	if (likely(sta)) {
		if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
			return;
	} else {
		if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
			return;
	}

	ieee80211_free_txskb(mvm->hw, skb);
}

/*
 * mac80211 .tx callback.  Drops frames while the radio is (CT-)killed or
 * when an off-channel frame arrives outside a remain-on-channel session,
 * and routes station-less frames on a STA interface through the AP station.
 */
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
			   struct ieee80211_tx_control *control,
			   struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;

	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
		goto drop;
	}

	/* off-channel TX is only valid while a ROC session is active */
	if (offchannel &&
	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		goto drop;

	/*
	 * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs
	 * so we treat the others as broadcast
	 */
	if (ieee80211_is_mgmt(hdr->frame_control))
		sta = NULL;

	/* If there is no sta, and it's not offchannel - send through AP */
	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
	    !offchannel) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info->control.vif);
		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);

		if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
			/* mac80211 holds rcu read lock */
			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
			if (IS_ERR_OR_NULL(sta))
				goto drop;
		}
	}

	iwl_mvm_tx_skb(mvm, skb, sta);
	return;
drop:
	ieee80211_free_txskb(hw, skb);
}

/* Drain an internal (iTXQ) transmit queue; body continues on the next line. */
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct iwl_mvm *mvm =
IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;

	/*
	 * No need for threads to be pending here, they can leave the first
	 * taker all the work.
	 *
	 * mvmtxq->tx_request logic:
	 *
	 * If 0, no one is currently TXing, set to 1 to indicate current thread
	 * will now start TX and other threads should quit.
	 *
	 * If 1, another thread is currently TXing, set to 2 to indicate to
	 * that thread that there was another request. Since that request may
	 * have raced with the check whether the queue is empty, the TXing
	 * thread should check the queue's status one more time before leaving.
	 * This check is done in order to not leave any TX hanging in the queue
	 * until the next TX invocation (which may not even happen).
	 *
	 * If 2, another thread is currently TXing, and it will already double
	 * check the queue, so do nothing.
	 */
	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
		return;

	rcu_read_lock();
	do {
		/* stop draining when the queue is stopped or we enter D3 */
		while (likely(!mvmtxq->stopped &&
			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
			skb = ieee80211_tx_dequeue(hw, txq);

			if (!skb) {
				if (txq->sta)
					IWL_DEBUG_TX(mvm,
						     "TXQ of sta %pM tid %d is now empty\n",
						     txq->sta->addr,
						     txq->tid);
				break;
			}

			iwl_mvm_tx_skb(mvm, skb, txq->sta);
		}
	} while (atomic_dec_return(&mvmtxq->tx_request));
	rcu_read_unlock();
}

/*
 * mac80211 .wake_tx_queue callback: TX immediately when the firmware queue
 * is already allocated, otherwise defer the TXQ to the allocation worker.
 */
static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
				      struct ieee80211_txq *txq)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

	/*
	 * Please note that racing is handled very carefully here:
	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
	 * deleted afterwards.
	 * This means that if:
	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	queue is allocated and we can TX.
	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	a race, should defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	need to allocate the queue and defer the frame.
* mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	queue is already scheduled for allocation, no need to allocate,
	 *	should defer the frame.
	 */

	/* If the queue is allocated TX and return. */
	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
		/*
		 * Check that list is empty to avoid a race where txq_id is
		 * already updated, but the queue allocation work wasn't
		 * finished
		 */
		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
			return;

		iwl_mvm_mac_itxq_xmit(hw, txq);
		return;
	}

	/* The list is being deleted only after the queue is fully allocated. */
	if (!list_empty(&mvmtxq->list))
		return;

	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
	schedule_work(&mvm->add_stream_wk);
}

/*
 * Fire the firmware BA debug trigger if its TID bitmap includes this TID;
 * otherwise do nothing.
 */
#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
	do {								\
		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))		\
			break;						\
		iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);	\
	} while (0)

/*
 * Collect firmware debug data on A-MPDU state transitions when the
 * FW_DBG_TRIGGER_BA trigger is armed for this vif.
 */
static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
			    enum ieee80211_ampdu_mlme_action action)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL: {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
				 "TX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, tid_data->ssn);
		break;
		}
	case IEEE80211_AMPDU_TX_STOP_CONT:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
				 "TX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_START:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
				 "RX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, rx_ba_ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
"RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; default: break; } } static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; mutex_lock(&mvm->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == iwl_mvm_sta_from_mac80211(sta)->sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; mdata->opened_rx_ba_sessions = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, timeout); break; case IEEE80211_AMPDU_RX_STOP: ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } if (!ret) { u16 rx_ba_ssn = 0; if (action == IEEE80211_AMPDU_RX_START) rx_ba_ssn = *ssn; iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 
rx_ba_ssn, action);
	}
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * Per-interface cleanup run during HW restart: forget uploaded state, the
 * AP station, any pending time event and the beacon-filter/probe-resp data.
 */
static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->uploaded = false;
	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

	spin_lock_bh(&mvm->time_event_lock);
	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
	spin_unlock_bh(&mvm->time_event_lock);

	mvmvif->phy_ctxt = NULL;
	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
	memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data));
}

/* Reset all driver and mac80211-visible state before bringing the FW back up. */
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
	iwl_mvm_stop_device(mvm);

	mvm->cur_aid = 0;

	mvm->scan_status = 0;
	mvm->ps_disabled = false;
	mvm->rfkill_safe_init_done = false;

	/* just in case one was running */
	iwl_mvm_cleanup_roc_te(mvm);
	ieee80211_remain_on_channel_expired(mvm->hw);

	iwl_mvm_ftm_restart(mvm);

	/*
	 * cleanup all interfaces, even inactive ones, as some might have
	 * gone down during the HW restart
	 */
	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

	mvm->p2p_device_vif = NULL;

	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

	ieee80211_wake_queues(mvm->hw);

	mvm->vif_count = 0;
	mvm->rx_ba_sessions = 0;
	mvm->fwrt.dump.conf = FW_DBG_INVALID;
	mvm->monitor_on = false;

	/* keep statistics ticking */
	iwl_mvm_accu_radio_stats(mvm);
}

/* Bring the firmware up; caller must hold mvm->mutex. */
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_mei_get_ownership(mvm);
	if (ret)
		return ret;

	if (mvm->mei_nvm_data) {
		/* We got the NIC, we can now free the MEI NVM data */
		kfree(mvm->mei_nvm_data);
		mvm->mei_nvm_data = NULL;

		/*
		 * We can't free the nvm_data we allocated based on the SAP
		 * data because we registered to cfg80211 with the channels
		 * allocated on mvm->nvm_data.
Keep a pointer in temp_nvm_data
		 * just in order to be able free it later.
		 * NULLify nvm_data so that we will read the NVM from the
		 * firmware this time.
		 */
		mvm->temp_nvm_data = mvm->nvm_data;
		mvm->nvm_data = NULL;
	}

	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
		/*
		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
		 * so later code will - from now on - see that we're doing it.
		 */
		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		/* Clean up some internal and mac80211 state on restart */
		iwl_mvm_restart_cleanup(mvm);
	}

	ret = iwl_mvm_up(mvm);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
			       NULL);
	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
			       NULL);

	mvm->last_reset_or_resume_time_jiffies = jiffies;

	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/* Something went wrong - we need to finish some cleanup
		 * that normally iwl_mvm_mac_restart_complete() below
		 * would do.
*/
		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
	}

	return ret;
}

/*
 * mac80211 .start callback: retry firmware bring-up up to IWL_MAX_INIT_RETRY
 * times when fw_restart is enabled and this is not an error-recovery start.
 */
static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;
	int retry, max_retry = 0;

	mutex_lock(&mvm->mutex);

	/* we are starting the mac not in error flow, and restart is enabled */
	if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
	    iwlwifi_mod_params.fw_restart) {
		max_retry = IWL_MAX_INIT_RETRY;
		/*
		 * This will prevent mac80211 recovery flows to trigger during
		 * init failures
		 */
		set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
	}

	for (retry = 0; retry <= max_retry; retry++) {
		ret = __iwl_mvm_mac_start(mvm);
		if (!ret)
			break;

		IWL_ERR(mvm, "mac start retry %d\n", retry);
	}
	clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);

	mutex_unlock(&mvm->mutex);

	iwl_mvm_mei_set_sw_rfkill_state(mvm);

	return ret;
}

/* Finish a successful HW restart: update quotas and drop TDLS peers. */
static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
	int ret;

	mutex_lock(&mvm->mutex);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	ret = iwl_mvm_update_quotas(mvm, true, NULL);
	if (ret)
		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
			ret);

	iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY);

	/*
	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
	 * of packets the FW sent out, so we must reconnect.
	 */
	iwl_mvm_teardown_tdls_peers(mvm);

	mutex_unlock(&mvm->mutex);
}

/* mac80211 .reconfig_complete callback: only restart needs handling. */
static void
iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
			      enum ieee80211_reconfig_type reconfig_type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (reconfig_type) {
	case IEEE80211_RECONFIG_TYPE_RESTART:
		iwl_mvm_restart_complete(mvm);
		break;
	case IEEE80211_RECONFIG_TYPE_SUSPEND:
		break;
	}
}

/* Stop the firmware and device; caller must hold mvm->mutex. */
void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_ftm_initiator_smooth_stop(mvm);

	/* firmware counters are obviously reset now, but we shouldn't
	 * partially track so also clear the fw_reset_accu counters.
*/
	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

	/* async_handlers_wk is now blocked */

	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12)
		iwl_mvm_rm_aux_sta(mvm);

	iwl_mvm_stop_device(mvm);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/*
	 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
	 * hw (as restart_complete() won't be called in this case) and mac80211
	 * won't execute the restart.
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			       &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

	/* We shouldn't have any UIDs still set. Loop over all the UIDs to
	 * make sure there's nothing left there and warn if any is found.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int i;

		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	}
}

/* mac80211 .stop callback: quiesce all workers, then stop the device. */
static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);

	/*
	 * Lock and clear the firmware running bit here already, so that
	 * new commands coming in elsewhere, e.g. from debugfs, will not
	 * be able to proceed. This is important here because one of those
	 * debugfs files causes the firmware dump to be triggered, and if we
	 * don't stop debugfs accesses before canceling that it could be
	 * retriggered after we flush it but before we've cleared the bit.
*/
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	flush_work(&mvm->roc_done_wk);

	iwl_mvm_mei_set_sw_rfkill_state(mvm);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_mac_stop(mvm);
	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
	 */
	cancel_work_sync(&mvm->async_handlers_wk);
}

/* Return the first unreferenced PHY context, or NULL if all are in use. */
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
	u16 i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < NUM_PHY_CTX; i++)
		if (!mvm->phy_ctxts[i].ref)
			return &mvm->phy_ctxts[i];

	IWL_ERR(mvm, "No available PHY context\n");
	return NULL;
}

/*
 * Send REDUCE_TX_POWER_CMD for a vif, selecting the command layout that
 * matches the firmware's advertised command version.  tx_power is in dBm
 * (firmware units are 1/8 dBm, hence "8 * tx_power");
 * IWL_DEFAULT_MAX_TX_POWER means "no restriction".
 */
static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				s16 tx_power)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	int len;
	struct iwl_dev_tx_power_cmd cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
		.common.mac_context_id =
			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
		.common.pwr_restriction = cpu_to_le16(8 * tx_power),
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					   IWL_FW_CMD_VER_UNKNOWN);

	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
		cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);

	if (cmd_ver == 7)
		len = sizeof(cmd.v7);
	else if (cmd_ver == 6)
		len = sizeof(cmd.v6);
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v3);

	/* all structs have the same common part, add it */
	len += sizeof(cmd.common);

	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}

/* mac80211 .post_channel_switch callback; body continues on the next line. */
static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct
iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	if (vif->type == NL80211_IFTYPE_STATION) {
		struct iwl_mvm_sta *mvmsta;

		mvmvif->csa_bcn_pending = false;
		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
							  mvmvif->ap_sta_id);

		if (WARN_ON(!mvmsta)) {
			ret = -EIO;
			goto out_unlock;
		}

		/* TX towards the AP was disabled during CSA - re-enable it */
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
			if (ret)
				goto out_unlock;

			iwl_mvm_stop_session_protection(mvm, vif);
		}
	}

	mvmvif->ps_disabled = false;

	ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
	if (mvmvif->csa_failed)
		ret = -EIO;
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * mac80211 .abort_channel_switch callback: remove the CSA time event (old
 * firmware) or send the CHANNEL_SWITCH_TIME_EVENT_CMD remove action (new
 * firmware), unless the firmware reports CSA errors by itself.
 */
static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_chan_switch_te_cmd cmd = {
		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							  mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
	};

	/*
	 * In the new flow since FW is in charge of the timing,
	 * if driver has canceled the channel switch he will receive the
	 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it
	 */
	if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
				    CHANNEL_SWITCH_ERROR_NOTIF, 0))
		return;

	IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);

	mutex_lock(&mvm->mutex);
	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
		iwl_mvm_remove_csa_period(mvm, vif);
	else
		WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
					     WIDE_ID(MAC_CONF_GROUP,
						     CHANNEL_SWITCH_TIME_EVENT_CMD),
					     0, sizeof(cmd), &cmd));
	mvmvif->csa_failed = true;
	mutex_unlock(&mvm->mutex);

	iwl_mvm_post_channel_switch(hw, vif);
}

/* Deferred CSA-failure work: force a disconnect; continues on the next line. */
static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
{
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_vif *vif;

	mvmvif = container_of(wk, struct iwl_mvm_vif,
csa_work.work);
	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);

	/* Trigger disconnect (should clear the CSA state) */
	ieee80211_chswitch_done(vif, false);
}

/*
 * mac80211 .add_interface callback: allocate the MAC context and the
 * per-interface-type resources (bcast/mcast stations for AP/IBSS, a PHY
 * context and binding for P2P_DEVICE, beacon filtering for station).
 */
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mvmvif->mvm = mvm;
	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);

	/*
	 * Not much to do here. The stack will not allow interface
	 * types or combinations that we didn't advertise, so we
	 * don't really have to check the types.
	 */

	mutex_lock(&mvm->mutex);

	/* make sure that beacon statistics don't go backwards with FW reset */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	/* Allocate resources for the MAC context, and add it to the fw */
	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
	if (ret)
		goto out_unlock;

	rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);

	/* Counting number of interfaces is needed for legacy PM */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count++;

	/*
	 * The AP binding flow can be done only after the beacon
	 * template is configured (which happens only in the mac80211
	 * start_ap() flow), and adding the broadcast station can happen
	 * only after the binding.
	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay the adding of MAC context to
	 * the point where we can also add the bcast station.
* In short: there's not much we can do at this point, other than
	 * allocating resources :)
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
		if (ret) {
			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
			goto out_release;
		}

		/*
		 * Only queue for this station is the mcast queue,
		 * which shouldn't be in TFD mask anyway
		 */
		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 0,
					       vif->type,
					       IWL_STA_MULTICAST);
		if (ret)
			goto out_release;

		iwl_mvm_vif_dbgfs_register(mvm, vif);
		goto out_unlock;
	}

	mvmvif->features |= hw->netdev_features;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_release;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out_remove_mac;

	/* beacon filtering */
	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	if (ret)
		goto out_remove_mac;

	/* only one (non-p2p station) vif may own beacon filtering/CQM */
	if (!mvm->bf_allowed_vif &&
	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
		mvm->bf_allowed_vif = mvmvif;
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
	}

	/*
	 * P2P_DEVICE interface does not have a channel context assigned to it,
	 * so a dedicated PHY context is allocated to it and the corresponding
	 * MAC context is bound to it at this stage.
*/
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!mvmvif->phy_ctxt) {
			ret = -ENOSPC;
			goto out_free_bf;
		}

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (ret)
			goto out_unref_phy;

		ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
		if (ret)
			goto out_unbind;

		/* Save a pointer to p2p device vif, so it can later be used to
		 * update the p2p device MAC when a GO is started/stopped */
		mvm->p2p_device_vif = vif;
	}

	iwl_mvm_tcm_add_vif(mvm, vif);
	INIT_DELAYED_WORK(&mvmvif->csa_work,
			  iwl_mvm_channel_switch_disconnect_wk);

	if (vif->type == NL80211_IFTYPE_MONITOR)
		mvm->monitor_on = true;

	iwl_mvm_vif_dbgfs_register(mvm, vif);

	/* first non-p2p station vif becomes the CSME (MEI) owner */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
	    !mvm->csme_vif && mvm->mei_registered) {
		iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr);
		iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev);
		mvm->csme_vif = vif;
	}

	goto out_unlock;

out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
out_unref_phy:
	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
out_free_bf:
	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}
out_remove_mac:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;
out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

/* Pre-removal work for a vif; body continues on the next line. */
static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		/*
		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
		 * We assume here that all the packets sent to the OFFCHANNEL
		 * queue are sent in ROC session.
*/
		flush_work(&mvm->roc_done_wk);
	}
}

/*
 * mac80211 .remove_interface callback: release everything that
 * iwl_mvm_mac_add_interface() allocated for this vif.
 */
static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_probe_resp_data *probe_data;

	iwl_mvm_prepare_mac_removal(mvm, vif);

	if (!(vif->type == NL80211_IFTYPE_AP ||
	      vif->type == NL80211_IFTYPE_ADHOC))
		iwl_mvm_tcm_rm_vif(mvm, vif);

	mutex_lock(&mvm->mutex);

	if (vif == mvm->csme_vif) {
		iwl_mei_set_netdev(NULL);
		mvm->csme_vif = NULL;
	}

	probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
					       lockdep_is_held(&mvm->mutex));
	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
	if (probe_data)
		kfree_rcu(probe_data, rcu_head);

	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}

	if (vif->bss_conf.ftm_responder)
		memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));

	iwl_mvm_vif_dbgfs_clean(mvm, vif);

	/*
	 * For AP/GO interface, the tear down of the resources allocated to the
	 * interface is handled as part of the stop_ap flow.
*/ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { #ifdef CONFIG_NL80211_TESTMODE if (vif == mvm->noa_vif) { mvm->noa_vif = NULL; mvm->noa_duration = 0; } #endif iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; out_release: mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; } struct iwl_mvm_mc_iter_data { struct iwl_mvm *mvm; int port_id; }; static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; struct iwl_host_cmd hcmd = { .id = MCAST_FILTER_CMD, .flags = CMD_ASYNC, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret, len; /* if we don't have free ports, mcast frames will be dropped */ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) return; if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) return; cmd->port_id = data->port_id++; memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); hcmd.len[0] = len; hcmd.data[0] = cmd; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); } static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) { struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); /* * Send a (synchronous) ech command so that we wait for the * multiple asynchronous MCAST_FILTER_CMD commands sent by * the interface iterator. Otherwise, we might get here over * and over again (by userspace just sending a lot of these) * and the CPU can send them faster than the firmware can * process them. * Note that the CPU is still faster - but with this we'll * actually send fewer commands overall because the CPU will * not schedule the work in mac80211 as frequently if it's * still running when rescheduled (possibly multiple times). */ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); if (ret) IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; int addr_count; bool pass_all; int len; addr_count = netdev_hw_addr_list_count(mc_list); pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || IWL_MVM_FW_MCAST_FILTER_PASS_ALL; if (pass_all) addr_count = 0; len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); if (!cmd) return 0; if (pass_all) { cmd->pass_all = 1; #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } netdev_hw_addr_list_for_each(addr, mc_list) { #if defined(__linux__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", cmd->count, addr->addr); #elif defined(__FreeBSD__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n", cmd->count, addr->addr, ":"); #endif 
memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
		       addr->addr, ETH_ALEN);
		cmd->count++;
	}

#if defined(__linux__)
	return (u64)(unsigned long)cmd;
#elif defined(__FreeBSD__)
	return (u64)(uintptr_t)cmd;
#endif
}

/*
 * mac80211 .configure_filter callback: adopt the command built by
 * iwl_mvm_prepare_multicast() (taking ownership of the allocation) and
 * push the new multicast configuration to the firmware.
 */
static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
#if defined(__linux__)
	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
#elif defined(__FreeBSD__)
	struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast;
#endif

	mutex_lock(&mvm->mutex);

	/* replace previous configuration */
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = cmd;

	if (!cmd)
		goto out;

	if (changed_flags & FIF_ALLMULTI)
		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);

	if (cmd->pass_all)
		cmd->count = 0;

	iwl_mvm_recalc_multicast(mvm);
out:
	mutex_unlock(&mvm->mutex);
	*total_flags = 0;
}

/*
 * mac80211 .config_iface_filter callback: only the probe-request filter
 * on associated P2P client interfaces is supported.
 */
static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					unsigned int filter_flags,
					unsigned int changed_flags)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* We support only filter for probe requests */
	if (!(changed_flags & FIF_PROBE_REQ))
		return;

	/* Supported only for p2p client interfaces */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    !vif->p2p)
		return;

	mutex_lock(&mvm->mutex);
	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	mutex_unlock(&mvm->mutex);
}

/* Push the vif's current MU-MIMO group membership/position to the FW. */
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif)
{
	struct iwl_mu_group_mgmt_cmd cmd = {};

	memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
	       WLAN_MEMBERSHIP_LEN);
	memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
	       WLAN_USER_POSITION_LEN);

	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(DATA_PATH_GROUP,
					    UPDATE_MU_GROUPS_CMD),
				    0, sizeof(cmd), &cmd);
}

/* Iterator: forward a MU-MIMO group notification to the owning vif. */
static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	if (vif->mu_mimo_owner) {
		struct iwl_mu_group_mgmt_notif
*notif = _data; /* * MU-MIMO Group Id action frame is little endian. We treat * the data received from firmware as if it came from the * action frame, so no conversion is needed. */ ieee80211_update_mu_groups(vif, (u8 *)¬if->membership_status, (u8 *)¬if->user_position); } } void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mu_mimo_iface_iterator, notif); } static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) { u8 byte_num = ppe_pos_bit / 8; u8 bit_num = ppe_pos_bit % 8; u8 residue_bits; u8 res; if (bit_num <= 5) return (ppe[byte_num] >> bit_num) & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); /* * If bit_num > 5, we have to combine bits with next byte. * Calculate how many bits we need to take from current byte (called * here "residue_bits"), and add them to bits from next byte. 
*/
	residue_bits = 8 - bit_num;

	res = (ppe[byte_num + 1] &
	       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
	      residue_bits;
	res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);

	return res;
}

/*
 * Translate the 802.11ax PPE thresholds field into the firmware's
 * pkt_ext_qam_th[nss][bw][low/high] table, walking the RU index bitmap.
 */
static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm,
			      struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
			      u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit)
{
	int i;

	/*
	 * FW currently supports only nss == MAX_HE_SUPP_NSS
	 *
	 * If nss > MAX: we can ignore values we don't support
	 * If nss < MAX: we can set zeros in other streams
	 */
	if (nss > MAX_HE_SUPP_NSS) {
		IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
			 MAX_HE_SUPP_NSS);
		nss = MAX_HE_SUPP_NSS;
	}

	for (i = 0; i < nss; i++) {
		u8 ru_index_tmp = ru_index_bitmap << 1;
		u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
		u8 bw;

		for (bw = 0;
		     bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
		     bw++) {
			ru_index_tmp >>= 1;

			if (!(ru_index_tmp & 1))
				continue;

			/* each RU entry carries a high then a low threshold */
			high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
			low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit);
			ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;

			pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
			pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
		}
	}
}

/* Fill the FW packet-extension table from the station's HE PPE thresholds. */
static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm,
					    struct ieee80211_sta *sta,
					    struct iwl_he_pkt_ext_v2 *pkt_ext)
{
	u8 nss = (sta->deflink.he_cap.ppe_thres[0] &
		  IEEE80211_PPE_THRES_NSS_MASK) + 1;
	u8 *ppe = &sta->deflink.he_cap.ppe_thres[0];
	u8 ru_index_bitmap =
		u8_get_bits(*ppe,
			    IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
	/* Starting after PPE header */
	u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;

	iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe,
			  ppe_pos_bit);
}

/*
 * Derive PPE thresholds from the nominal packet padding capability when no
 * explicit PPE thresholds are present; body continues on the next line.
 */
static void
iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
					 u8 nominal_padding,
					 u32 *flags)
{
	int low_th = -1;
	int high_th = -1;
	int i;

	switch (nominal_padding) {
	case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
		low_th = IWL_HE_PKT_EXT_NONE;
		high_th =
IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } /* Set the PPE thresholds accordingly */ if (low_th >= 0 && high_th >= 0) { for (i = 0; i < MAX_HE_SUPP_NSS; i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } *flags |= STA_CTXT_HE_PACKET_EXT; } } static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.he_bss_color.color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); int size; struct ieee80211_sta *sta; u32 flags; int i; const struct ieee80211_sta_he_cap *own_he_cap = NULL; struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_supported_band *sband; void *cmd; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) ver = 1; switch (ver) { case 1: /* same layout as v2 except some data at the end */ cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v1); break; case 2: cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v2); break; case 3: cmd = &sta_ctxt_cmd; size = sizeof(struct iwl_he_sta_context_cmd_v3); break; default: IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); return; } rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); 
return; } sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; own_he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; } if (!sta->deflink.he_cap.has_he) { rcu_read_unlock(); return; } flags = 0; /* Block 26-tone RU OFDMA transmissions */ if (mvmvif->he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { u8 link_adap = ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); if (link_adap == 2) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); else if (link_adap == 3) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); /* * Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 80211.ax/D3.0. */ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, &sta_ctxt_cmd.pkt_ext); flags |= STA_CTXT_HE_PACKET_EXT; /* PPE Thresholds doesn't exist - set the API PPE values * according to Common Nominal Packet Padding fiels. */ } else { u8 nominal_padding = u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, nominal_padding, &flags); } if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ flags |= STA_CTXT_HE_MU_EDCA_CW; for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = &mvmvif->queue_params[i].mu_edca_param_rec; u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); if (!mvmvif->queue_params[i].mu_edca) { flags &= ~STA_CTXT_HE_MU_EDCA_CW; break; } sta_ctxt_cmd.trig_based_txf[ac].cwmin = cpu_to_le16(mu_edca->ecw_min_max & 0xf); sta_ctxt_cmd.trig_based_txf[ac].cwmax = cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); sta_ctxt_cmd.trig_based_txf[ac].aifsn = cpu_to_le16(mu_edca->aifsn); sta_ctxt_cmd.trig_based_txf[ac].mu_time = cpu_to_le16(mu_edca->mu_edca_timer); } if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; sta_ctxt_cmd.rand_alloc_ecwmin = vif->bss_conf.uora_ocw_range & 0x7; sta_ctxt_cmd.rand_alloc_ecwmax = (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN)) flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; if (vif->bss_conf.nontransmitted) { flags |= STA_CTXT_HE_REF_BSSID_VALID; 
ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, vif->bss_conf.transmitter_bssid); sta_ctxt_cmd.max_bssid_indicator = vif->bss_conf.bssid_indicator; sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; sta_ctxt_cmd.profile_periodicity = vif->bss_conf.profile_periodicity; } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (ver < 3) { /* fields before pkt_ext */ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, offsetof(typeof(sta_ctxt_cmd), pkt_ext)); /* pkt_ext */ for (i = 0; i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); bw++) { BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); } } /* fields after pkt_ext */ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != sizeof(sta_ctxt_cmd_v2) - offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy((u8 *)&sta_ctxt_cmd_v2 + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), (u8 *)&sta_ctxt_cmd + offsetofend(typeof(sta_ctxt_cmd), pkt_ext), sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); sta_ctxt_cmd_v2.reserved3 = 0; } if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration_override) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (duration_override > duration) duration = duration_override; /* Try really hard to protect the session and hear a beacon * The new session protection command allows us to protect the * session 
for a much longer time since the firmware will internally * create two events: a 300TU one with a very high priority that * won't be fragmented which should be enough for 99% of the cases, * and another one (which we configure here to be 900TU long) which * will have a slightly lower priority, but more importantly, can be * fragmented so that it'll allow other activities to run. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, min_duration, false); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; /* * Re-calculate the tsf id, as the leader-follower relations depend * on the beacon interval, which was not known when the station * interface was added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && bss_conf->assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC * branch for disassociation below. 
*/ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); mvmvif->associated = bss_conf->assoc; if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); memset(&mvmvif->beacon_stats, 0, sizeof(mvmvif->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) { IWL_ERR(mvm, "failed to update quotas\n"); return; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with * the AP. It will attempt to synchronise by * itself, but we can make it more reliable by * scheduling a session protection time event. * * The firmware needs to receive a beacon to * catch up with synchronisation, use 110% of * the beacon interval. * * Set a large maximum delay to allow for more * than a single interface. * * For new firmware versions, rely on the * firmware. This is relevant for DCM scenarios * only anyway. */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !vif->bss_conf.dtim_period) { /* * If we're not restarting and still haven't * heard a beacon (dtim period unknown) then * make sure we still have enough minimum time * remaining in the time event, since the auth * might actually have taken quite a while * (especially for SAE) and so the remaining * time could be small without us having heard * a beacon yet. 
*/ iwl_mvm_protect_assoc(mvm, vif, 0); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ ret = iwl_mvm_sf_update(mvm, vif, false); WARN_ONCE(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); /* * If we get an assert during the connection (after the * station has been added, but before the vif is set * to associated), mac80211 will re-add the station and * then configure the vif. Since the vif is not * associated, we would remove the station here and * this would fail the recovery. */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) IWL_ERR(mvm, "failed to update MAC %pM (clear after unassoc)\n", vif->addr); } /* * The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. 
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { ret = iwl_mvm_update_mu_groups(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update VHT MU_MIMO groups\n"); } iwl_mvm_recalc_multicast(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, IEEE80211_SMPS_AUTOMATIC); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon from the associated AP so * remove the session protection. */ iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | /* * Send power command on every beacon change, * because we may have not enabled beacon abort yet. */ BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_CQM) { IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); /* reset cqm events tracking */ mvmvif->bf_data.last_cqm_event = 0; if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) IWL_ERR(mvm, "failed to update CQM thresholds\n"); } } if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_apply_fw_smps_request(vif); } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret, i; mutex_lock(&mvm->mutex); /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); if (ret) goto out_unlock; /* * Re-calculate the tsf id, as the leader-follower relations depend on * the beacon interval, which was not known when the AP interface * was added. 
*/ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); mvmvif->ap_assoc_sta_count = 0; /* Add the mac context */ ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_unlock; /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_remove; /* * This is not very nice, but the simplest: * For older FWs adding the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW so make the order of removal depend on * the TLV */ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) { iwl_mvm_rm_mcast_sta(mvm, vif); goto out_unbind; } } else { /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) { iwl_mvm_send_rm_bcast_sta(mvm, vif); goto out_unbind; } } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; /* send all the early keys to the device now */ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; if (!key) continue; mvmvif->ap_early_keys[i] = NULL; ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); if (ret) goto out_quota_failed; } if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } /* power updated needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_quota_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) 
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); iwl_mvm_ftm_restart_responder(mvm, vif); goto out_unlock; out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_prepare_mac_removal(mvm, vif); mutex_lock(&mvm->mutex); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); mvm->csa_tx_block_bcn_timeout = 0; } mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); } iwl_mvm_bt_coex_vif_change(mvm); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_ftm_responder_clear(mvm, vif); /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW (which will stop beaconing when removing * bcast station). 
* So make the order of removal depend on the TLV */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); mutex_unlock(&mvm->mutex); } static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Changes will be applied when the AP/IBSS is started */ if (!mvmvif->ap_ibss_active) return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_FTM_RESPONDER) { int ret = iwl_mvm_ftm_start_responder(mvm, vif); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", ret); } } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) iwl_mvm_update_mu_groups(mvm, vif); break; default: /* shouldn't happen 
 */
		WARN_ON_ONCE(1);
	}

	if (changes & BSS_CHANGED_TXPOWER) {
		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
				bss_conf->txpower);
		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
	}

	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 hw_scan callback: start a regular scan, rejecting requests
 * whose channel count is zero or exceeds what the firmware supports.
 */
static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_scan_request *hw_req)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (hw_req->req.n_channels == 0 ||
	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
		return -EINVAL;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
	mutex_unlock(&mvm->mutex);

	return ret;
}

/* mac80211 cancel_hw_scan callback: stop the regular scan if running. */
static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);

	/* Due to a race condition, it's possible that mac80211 asks
	 * us to stop a hw_scan when it's already stopped. This can
	 * happen, for instance, if we stopped the scan ourselves,
	 * called ieee80211_scan_completed() and the userspace called
	 * cancel scan before ieee80211_scan_work() could run.
	 * To handle that, simply return if the scan is not running.
	 */
	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 allow_buffered_frames callback: let the firmware release
 * @num_frames buffered frames for @tids to a sleeping station.
 */
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta, u16 tids,
				  int num_frames,
				  enum ieee80211_frame_release_type reason,
				  bool more_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* Called when we need to transmit (a) frame(s) from mac80211 */

	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
					  tids, more_data, false);
}

/*
 * mac80211 release_buffered_frames callback: same as above, but for
 * frames already queued in the aggregation/DQA queues (single_sta_queue
 * argument set to true).
 */
static void
iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
				    struct ieee80211_sta *sta, u16 tids,
				    int num_frames,
				    enum ieee80211_frame_release_type reason,
				    bool more_data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */

	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
					  tids, more_data, true);
}

/*
 * Common sleep/awake handling for a station: collect the station's
 * in-use TX queues and TIDs with pending frames (under mvmsta->lock),
 * then freeze/unfreeze queue timers and update mac80211's buffered-frame
 * state accordingly.
 */
static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
				     enum sta_notify_cmd cmd,
				     struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned long txqs = 0, tids = 0;
	int tid;

	/*
	 * If we have TVQM then we get too high queue numbers - luckily
	 * we really shouldn't get here with that because such hardware
	 * should have firmware supporting buffer station offload.
	 */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvmsta->lock);
	for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) {
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		__set_bit(tid_data->txq_id, &txqs);

		if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
			continue;

		__set_bit(tid, &tids);
	}

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
			ieee80211_sta_set_buffered(sta, tid, true);

		if (txqs)
			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
		/*
		 * The fw updates the STA to be asleep. Tx packets on the Tx
		 * queues to this station will not be transmitted. The fw will
		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
		 */
		break;
	case STA_NOTIFY_AWAKE:
		if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
			break;

		if (txqs)
			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
		iwl_mvm_sta_modify_ps_wake(mvm, sta);
		break;
	default:
		break;
	}
	spin_unlock_bh(&mvmsta->lock);
}

/* mac80211 sta_notify callback: thin wrapper around the helper above. */
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	__iwl_mvm_mac_sta_notify(hw, cmd, sta);
}

/*
 * Rx handler for firmware PM state notifications (AP mode): mirror the
 * firmware's view of a station's power-save state into mac80211, and
 * trigger U-APSD / PS-poll frame releases when requested.
 */
void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	/* any event other than AWAKE means the station is sleeping */
	bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);

	if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
	if (WARN_ON(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	/* this notification is only relevant for stations of our AP */
	if (!mvmsta->vif ||
	    mvmsta->vif->type != NL80211_IFTYPE_AP) {
		rcu_read_unlock();
		return;
	}

	if (mvmsta->sleeping != sleeping) {
		mvmsta->sleeping = sleeping;
		__iwl_mvm_mac_sta_notify(mvm->hw,
			sleeping ?
STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
			sta);
		ieee80211_sta_ps_transition(sta, sleeping);
	}

	if (sleeping) {
		switch (notif->type) {
		case IWL_MVM_PM_EVENT_AWAKE:
		case IWL_MVM_PM_EVENT_ASLEEP:
			break;
		case IWL_MVM_PM_EVENT_UAPSD:
			/* release frames on all U-APSD enabled TIDs */
			ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
			break;
		case IWL_MVM_PM_EVENT_PS_POLL:
			ieee80211_sta_pspoll(sta);
			break;
		default:
			break;
		}
	}

	rcu_read_unlock();
}

/*
 * mac80211 sta_pre_rcu_remove callback: invalidate our fw_id -> sta
 * RCU-protected mapping before mac80211 synchronizes RCU and deletes the
 * station, so no path can look the station up afterwards.
 */
static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	/*
	 * This is called before mac80211 does RCU synchronisation,
	 * so here we already invalidate our internal RCU-protected
	 * station pointer. The rest of the code will thus no longer
	 * be able to find the station this way, and we don't rely
	 * on further RCU synchronisation after the sta_state()
	 * callback deleted the station.
	 */
	mutex_lock(&mvm->mutex);
	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
				   ERR_PTR(-ENOENT));
	mutex_unlock(&mvm->mutex);
}

/*
 * Decide whether U-APSD may be advertised for the connection to @bssid:
 * clears IEEE80211_VIF_SUPPORTS_UAPSD when the firmware, module
 * parameters, P2P constraints, or the no-aggregation denylist forbid it;
 * sets it otherwise.  Also resets per-MAC traffic-load tracking state,
 * except across a HW restart.
 */
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				const u8 *bssid)
{
	int i;

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_tcm_mac *mdata;

		mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
		ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
		mdata->opened_rx_ba_sessions = false;
	}

	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
		return;

	if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
		return;
	}

	if (!vif->p2p &&
	    (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
		return;
	}

	/* APs known to misbehave with U-APSD + aggregation */
	for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
		if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
			vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
			return;
		}
	}

	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
}

/*
 * Fire a firmware debug-data collection if a TDLS debug trigger is
 * configured for this vif and matches the given TDLS @action and peer.
 */
static void iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       u8 *peer_addr,
				       enum nl80211_tdls_operation action)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_tdls *tdls_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_TDLS);
	if (!trig)
		return;

	tdls_trig = (void *)trig->data;

	if (!(tdls_trig->action_bitmap & BIT(action)))
		return;

	/* peer_mode set means the trigger applies to one specific peer */
	if (tdls_trig->peer_mode &&
	    memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"TDLS event occurred, peer %pM, action %d",
				peer_addr, action);
}

/* Accumulator for the OBSS narrow-bandwidth-RU tolerance scan below. */
struct iwl_mvm_he_obss_narrow_bw_ru_data {
	bool tolerated;
};

/*
 * cfg80211_bss_iter callback: clear ->tolerated if this BSS does not
 * advertise narrow-bandwidth RU tolerance in its Extended Capabilities
 * element (bit in byte 10).
 */
static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
						    struct cfg80211_bss *bss,
						    void *_data)
{
	struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
	const struct cfg80211_bss_ies *ies;
	const struct element *elem;

	rcu_read_lock();
	ies = rcu_dereference(bss->ies);
	elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
				  ies->len);

	if (!elem || elem->datalen < 10 ||
	    !(elem->data[10] &
	      WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
		data->tolerated = false;
	}
	rcu_read_unlock();
}

/*
 * Determine whether 26-tone (2 MHz) RU UL OFDMA must be blocked on this
 * vif: only relevant on radar channels, where all overlapping BSSs found
 * in the scan results must tolerate narrow-bandwidth RUs.
 */
static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
					       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = {
		.tolerated = true,
	};

	if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
		mvmvif->he_ru_2mhz_block = false;
		return;
	}

	cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
			  iwl_mvm_check_he_obss_narrow_bw_ru_iter,
			  &iter_data);

	/*
	 * If there is at least one AP on radar channel that cannot
	 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU.
*/ mvmvif->he_ru_2mhz_block = !iter_data.tolerated; } static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; if (vif->type != NL80211_IFTYPE_STATION) return; if (!mvm->cca_40mhz_workaround) return; /* decrement and check that we reached zero */ mvm->cca_40mhz_workaround--; if (mvm->cca_40mhz_workaround) return; sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; he->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } } static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) { #if IS_ENABLED(CONFIG_IWLMEI) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mei_conn_info conn_info = { .ssid_len = vif->bss_conf.ssid_len, .channel = vif->bss_conf.chandef.chan->hw_value, }; if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return; if (!mvm->mei_registered) return; switch (mvm_sta->pairwise_cipher) { case WLAN_CIPHER_SUITE_CCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; break; case WLAN_CIPHER_SUITE_GCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; break; case WLAN_CIPHER_SUITE_GCMP_256: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; break; case 0: /* open profile */ break; default: /* cipher not supported, don't send anything to iwlmei */ return; } switch (mvmvif->rekey_data.akm) { case WLAN_AKM_SUITE_SAE & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; break; case WLAN_AKM_SUITE_PSK & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; break; case WLAN_AKM_SUITE_8021X & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; break; case 0: /* open profile */ 
conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; break; default: /* auth method / AKM not supported */ /* TODO: All the FT vesions of these? */ return; } memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); /* TODO: add support for collocated AP data */ iwl_mei_host_associated(&conn_info, NULL); #endif } static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); /* this would be a mac80211 bug ... but don't crash */ if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; /* * If we are in a STA removal flow and in DQA mode: * * This is after the sync_rcu part, so the queues have already been * flushed. No more TXs on their way in mac80211's path, and no more in * the queues. * Also, we won't be getting any new TX frames for this station. * What we might have are deferred TX frames that need to be taken care * of. * * Drop any still-queued deferred-frame before removing the STA, and * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { flush_work(&mvm->add_stream_wk); /* * No need to make sure deferred TX indication is off since the * worker will already remove it if it was on */ /* * Additionally, reset the 40 MHz capability if we disconnected * from the AP now. 
		 */
		iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
	}

	mutex_lock(&mvm->mutex);
	/* track whether or not the station is associated */
	mvm_sta->sta_state = new_state;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		/*
		 * Firmware bug - it'll crash if the beacon interval is less
		 * than 16. We can't avoid connecting at all, so refuse the
		 * station state change, this will cause mac80211 to abandon
		 * attempts to connect to this AP, and eventually wpa_s will
		 * blocklist the AP...
		 */
		if (vif->type == NL80211_IFTYPE_STATION &&
		    vif->bss_conf.beacon_int < 16) {
			IWL_ERR(mvm,
				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
				sta->addr, vif->bss_conf.beacon_int);
			ret = -EINVAL;
			goto out_unlock;
		}

		if (vif->type == NL80211_IFTYPE_STATION)
			vif->bss_conf.he_support = sta->deflink.he_cap.has_he;

		/* refuse TDLS peers when they can't coexist (P2P, count, DCM) */
		if (sta->tdls &&
		    (vif->p2p ||
		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
						IWL_MVM_TDLS_STA_COUNT ||
		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
			ret = -EBUSY;
			goto out_unlock;
		}

		ret = iwl_mvm_add_sta(mvm, vif, sta);
		if (sta->tdls && ret == 0) {
			iwl_mvm_recalc_tdls_state(mvm, vif, true);
			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
						   NL80211_TDLS_SETUP);
		}

		sta->max_rc_amsdu_len = 1;
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_AUTH) {
		/*
		 * EBS may be disabled due to previous failures reported by FW.
		 * Reset EBS status here assuming environment has been changed.
		 */
		mvm->last_ebs_successful = true;
		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_ASSOC) {
		if (vif->type == NL80211_IFTYPE_AP) {
			vif->bss_conf.he_support = sta->deflink.he_cap.has_he;
			mvmvif->ap_assoc_sta_count++;
			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
			if (vif->bss_conf.he_support &&
			    !iwlwifi_mod_params.disable_11ax)
				iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
		} else if (vif->type == NL80211_IFTYPE_STATION) {
			vif->bss_conf.he_support = sta->deflink.he_cap.has_he;

			mvmvif->he_ru_2mhz_block = false;
			if (sta->deflink.he_cap.has_he)
				iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif);

			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
		}

		iwl_mvm_rs_rate_init(mvm, sta,
				     mvmvif->phy_ctxt->channel->band, false);
		ret = iwl_mvm_update_sta(mvm, vif, sta);
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTHORIZED) {
		ret = 0;

		/* we don't support TDLS during DCM */
		if (iwl_mvm_phy_ctx_count(mvm) > 1)
			iwl_mvm_teardown_tdls_peers(mvm);

		if (sta->tdls) {
			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
						   NL80211_TDLS_ENABLE_LINK);
		} else {
			/* enable beacon filtering */
			WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));

			mvmvif->authorized = 1;

			/*
			 * Now that the station is authorized, i.e., keys were
			 * already installed, need to indicate to the FW that
			 * multicast data frames can be forwarded to the driver
			 */
			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
			iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
		}

		iwl_mvm_rs_rate_init(mvm, sta,
				     mvmvif->phy_ctxt->channel->band, true);
	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
		   new_state == IEEE80211_STA_ASSOC) {
		/* once we move into assoc state, need to update rate scale to
		 * disable using wide bandwidth
		 */
		iwl_mvm_rs_rate_init(mvm, sta,
				     mvmvif->phy_ctxt->channel->band, false);
		if (!sta->tdls) {
			/* Multicast data frames are no longer allowed */
			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

			/*
			 * Set this after the above
			 * iwl_mvm_mac_ctxt_changed()
			 * to avoid sending high prio again for a little time.
			 */
			mvmvif->authorized = 0;

			/* disable beacon filtering */
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
			WARN_ON(ret &&
				!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
					  &mvm->status));
		}
		ret = 0;
	} else if (old_state == IEEE80211_STA_ASSOC &&
		   new_state == IEEE80211_STA_AUTH) {
		if (vif->type == NL80211_IFTYPE_AP) {
			mvmvif->ap_assoc_sta_count--;
			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
		} else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
			iwl_mvm_stop_session_protection(mvm, vif);
		ret = 0;
	} else if (old_state == IEEE80211_STA_AUTH &&
		   new_state == IEEE80211_STA_NONE) {
		ret = 0;
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_NOTEXIST) {
		if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
			iwl_mvm_stop_session_protection(mvm, vif);
		ret = iwl_mvm_rm_sta(mvm, vif, sta);
		if (sta->tdls) {
			iwl_mvm_recalc_tdls_state(mvm, vif, false);
			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
						   NL80211_TDLS_DISABLE_LINK);
		}

		/* removal may legitimately fail while a restart is pending */
		if (unlikely(ret &&
			     test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
				      &mvm->status)))
			ret = 0;
	} else {
		ret = -EIO;
	}
 out_unlock:
	mutex_unlock(&mvm->mutex);

	/* reserve/release the FW TID used for TDLS traffic */
	if (sta->tdls && ret == 0) {
		if (old_state == IEEE80211_STA_NOTEXIST &&
		    new_state == IEEE80211_STA_NONE)
			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
		else if (old_state == IEEE80211_STA_NONE &&
			 new_state == IEEE80211_STA_NOTEXIST)
			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
	}

	return ret;
}

/* mac80211 .set_rts_threshold callback: just cache the value locally. */
static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mvm->rts_threshold = value;

	return 0;
}

/*
 * mac80211 .sta_rc_update callback: re-initialize rate scaling when
 * bandwidth, supported rates or NSS changed; also refresh the smart-FIFO
 * configuration for station interfaces on NSS changes.
 */
static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta, u32 changed)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (changed & (IEEE80211_RC_BW_CHANGED |
		       IEEE80211_RC_SUPP_RATES_CHANGED |
		       IEEE80211_RC_NSS_CHANGED))
		iwl_mvm_rs_rate_init(mvm, sta,
				     mvmvif->phy_ctxt->channel->band, true);

	if (vif->type == NL80211_IFTYPE_STATION &&
	    changed & IEEE80211_RC_NSS_CHANGED)
		iwl_mvm_sf_update(mvm, vif, false);
}

/*
 * mac80211 .conf_tx callback: store the per-AC queue parameters; they are
 * pushed to the firmware later via BSS_CHANGED_QOS, except for P2P_DEVICE
 * which has no bss_info flow and needs an immediate MAC context update.
 */
static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 ac,
			       const struct ieee80211_tx_queue_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->queue_params[ac] = *params;

	/*
	 * No need to update right away, we'll get BSS_CHANGED_QOS
	 * The exception is P2P_DEVICE interface which needs immediate update.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		int ret;

		mutex_lock(&mvm->mutex);
		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
		mutex_unlock(&mvm->mutex);
		return ret;
	}
	return 0;
}

/*
 * mac80211 .mgd_prepare_tx callback: protect the session for the duration
 * of the upcoming auth/assoc frame exchange.
 */
static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_prep_tx_info *info)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	iwl_mvm_protect_assoc(mvm, vif, info->duration);
	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 .mgd_complete_tx callback: drop the session protection again,
 * but only if the exchange failed.
 */
static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_prep_tx_info *info)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* for successful cases (auth/assoc), don't cancel session protection */
	if (info->success)
		return;

	mutex_lock(&mvm->mutex);
	iwl_mvm_stop_session_protection(mvm, vif);
	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 .sched_scan_start callback: start a scheduled scan, refusing
 * while the interface is not idle (i.e. associated or associating).
 */
static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct cfg80211_sched_scan_request *req,
					struct ieee80211_scan_ies *ies)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	int ret;

	mutex_lock(&mvm->mutex);

	if (!vif->bss_conf.idle) {
		ret = -EBUSY;
		goto out;
	}

	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);

out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

/* mac80211 .sched_scan_stop callback. */
static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	/* Due to a race condition, it's possible that mac80211 asks
	 * us to stop a sched_scan when it's already stopped. This
	 * can happen, for instance, if we stopped the scan ourselves,
	 * called ieee80211_sched_scan_stopped() and the userspace called
	 * stop sched scan before ieee80211_sched_scan_stopped_work()
	 * could run. To handle this, simply return if the scan is
	 * not running.
	 */
	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
		mutex_unlock(&mvm->mutex);
		return 0;
	}

	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
	mutex_unlock(&mvm->mutex);
	iwl_mvm_wait_for_async_handlers(mvm);

	return ret;
}

/*
 * Worker for the .set_key callback (mvm->mutex held by the caller):
 * validate the cipher, set the HW-offload key flags, and program or remove
 * the key in firmware.
 */
static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
				 enum set_key_cmd cmd,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *key)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvmsta = NULL;
	struct iwl_mvm_key_pn *ptk_pn;
	int keyidx = key->keyidx;
	int ret, i;
	u8 key_offset;

	if (sta)
		mvmsta = iwl_mvm_sta_from_mac80211(sta);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (!mvm->trans->trans_cfg->gen2) {
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		} else if (vif->type == NL80211_IFTYPE_STATION) {
			key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
		} else {
			IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n");
			return -EOPNOTSUPP;
		}
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (!iwl_mvm_has_new_tx_api(mvm))
			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		if (vif->type == NL80211_IFTYPE_STATION)
			break;
		if (iwl_mvm_has_new_tx_api(mvm))
			return -EOPNOTSUPP;
		/* support HW crypto on TX */
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		/* key indices 6/7 are the beacon-protection (BIGTK) slots */
		if (keyidx == 6 || keyidx == 7)
			rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
					   key);

		if ((vif->type == NL80211_IFTYPE_ADHOC ||
		     vif->type == NL80211_IFTYPE_AP) && !sta) {
			/*
			 * GTK on AP interface is a TX-only key, return 0;
			 * on IBSS they're per-station and because we're lazy
			 * we don't support them for RX, so do the same.
			 * CMAC/GMAC in AP/IBSS modes must be done in software.
			 */
			if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
				ret = -EOPNOTSUPP;
				break;
			}

			if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
			    key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
			    !iwl_mvm_has_new_tx_api(mvm)) {
				key->hw_key_idx = STA_KEY_IDX_INVALID;
				ret = 0;
				break;
			}

			/*
			 * AP/IBSS not started yet: park the key in a free
			 * early-keys slot; it is programmed on start.
			 */
			if (!mvmvif->ap_ibss_active) {
				for (i = 0;
				     i < ARRAY_SIZE(mvmvif->ap_early_keys);
				     i++) {
					if (!mvmvif->ap_early_keys[i]) {
						mvmvif->ap_early_keys[i] = key;
						break;
					}
				}

				if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
					ret = -ENOSPC;
				else
					ret = 0;

				break;
			}
		}

		/* During FW restart, in order to restore the state as it was,
		 * don't try to reprogram keys we previously failed for.
		 */
		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
			IWL_DEBUG_MAC80211(mvm,
					   "skip invalid idx key programming during restart\n");
			ret = 0;
			break;
		}

		/*
		 * New pairwise CCMP/GCMP key with multi-queue RX: allocate
		 * per-queue PN tracking state and seed it with the current
		 * RX sequence numbers before publishing it via RCU.
		 */
		if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
		    mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
			struct ieee80211_key_seq seq;
			int tid, q;

			WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
			ptk_pn = kzalloc(struct_size(ptk_pn, q,
						     mvm->trans->num_rx_queues),
					 GFP_KERNEL);
			if (!ptk_pn) {
				ret = -ENOMEM;
				break;
			}

			for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
				ieee80211_get_key_rx_seq(key, tid, &seq);
				for (q = 0; q < mvm->trans->num_rx_queues; q++)
					memcpy(ptk_pn->q[q].pn[tid],
					       seq.ccmp.pn,
					       IEEE80211_CCMP_PN_LEN);
			}

			rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
		}

		/* in HW restart reuse the index, otherwise request a new one */
		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
			key_offset = key->hw_key_idx;
		else
			key_offset = STA_KEY_IDX_INVALID;

		if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
			mvmsta->pairwise_cipher = key->cipher;

		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
		if (ret) {
			IWL_WARN(mvm, "set key failed\n");
			key->hw_key_idx = STA_KEY_IDX_INVALID;
			/*
			 * can't add key for RX, but we don't need it
			 * in the device for TX so still return 0,
			 * unless we have new TX API where we cannot
			 * put key material into the TX_CMD
			 */
			if (iwl_mvm_has_new_tx_api(mvm))
				ret = -EOPNOTSUPP;
			else
				ret = 0;
		}

		break;
	case DISABLE_KEY:
		if (keyidx == 6 || keyidx == 7)
			RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6],
					 NULL);

		ret = -ENOENT;
		for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
			if (mvmvif->ap_early_keys[i] == key) {
				mvmvif->ap_early_keys[i] = NULL;
				ret = 0;
			}
		}

		/* found in pending list - don't do anything else */
		if (ret == 0)
			break;

		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
			ret = 0;
			break;
		}

		/* tear down the per-queue PN state allocated on SET_KEY */
		if (mvmsta && iwl_mvm_has_new_rx_api(mvm) &&
		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
			ptk_pn = rcu_dereference_protected(
					mvmsta->ptk_pn[keyidx],
					lockdep_is_held(&mvm->mutex));
			RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
			if (ptk_pn)
				kfree_rcu(ptk_pn, rcu_head);
		}

		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* mac80211 .set_key callback: take the mvm mutex and defer to the worker. */
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
			       enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * mac80211 .update_tkip_key callback: push the new phase-1 key to firmware,
 * unless the key was never programmed (invalid HW index).
 */
static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_key_conf *keyconf,
					struct ieee80211_sta *sta,
					u32 iv32, u16 *phase1key)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
		return;

	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
}

/*
 * Notification-wait handler for the HOT_SPOT_CMD response: extract the
 * time-event UID from the firmware reply and queue the time-event data.
 */
static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
			       struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_hs20_roc_res *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_time_event_data *te_data = data;

	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	IWL_DEBUG_TE(mvm,
		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
		     resp->status, resp->event_unique_id);

	te_data->uid =
		le32_to_cpu(resp->event_unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);

	spin_lock_bh(&mvm->time_event_lock);
	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
	spin_unlock_bh(&mvm->time_event_lock);

	return true;
}

/* Aux-ROC timing constants, in TU (converted from milliseconds). */
#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)

/*
 * Send a HOT_SPOT_CMD to schedule an aux remain-on-channel (HS2.0) time
 * event on @channel for @duration ms; the response is handled in
 * iwl_mvm_rx_aux_roc() via the notification-wait mechanism.
 * Caller must hold mvm->mutex.
 */
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
				    struct ieee80211_channel *channel,
				    struct ieee80211_vif *vif,
				    int duration)
{
	int res;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
	static const u16 time_event_response[] = { HOT_SPOT_CMD };
	struct iwl_notification_wait wait_time_event;
	u32 dtim_interval = vif->bss_conf.dtim_period *
		vif->bss_conf.beacon_int;
	u32 req_dur, delay;
	struct iwl_hs20_roc_req aux_roc_req = {
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
	};
	struct iwl_hs20_roc_req_tail *tail =
		iwl_mvm_chan_info_cmd_tail(mvm, &aux_roc_req.channel_info);
	u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm);

	/* Set the channel info data */
	iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info,
			      channel->hw_value,
			      iwl_mvm_phy_band_from_nl80211(channel->band),
			      PHY_VHT_CHANNEL_MODE20, 0);

	/* Set the time and duration */
	tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));

	delay = AUX_ROC_MIN_DELAY;
	req_dur = MSEC_TO_TU(duration);

	/*
	 * If we are associated we want the delay time to be at least one
	 * dtim interval so that the FW can wait until after the DTIM and
	 * then start the time event, this will potentially allow us to
	 * remain off-channel for the max duration.
	 * Since we want to use almost a whole dtim interval we would also
	 * like the delay to be for 2-3 dtim intervals, in case there are
	 * other time events with higher priority.
	 */
	if (vif->bss_conf.assoc) {
		delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);

		/* We cannot remain off-channel longer than the DTIM interval */
		if (dtim_interval <= req_dur) {
			req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
			if (req_dur <= AUX_ROC_MIN_DURATION)
				req_dur = dtim_interval -
					AUX_ROC_MIN_SAFETY_BUFFER;
		}
	}

	tail->duration = cpu_to_le32(req_dur);
	tail->apply_time_max_delay = cpu_to_le32(delay);

	IWL_DEBUG_TE(mvm,
		     "ROC: Requesting to remain on channel %u for %ums\n",
		     channel->hw_value, req_dur);
	IWL_DEBUG_TE(mvm, "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
		     duration, delay, dtim_interval);

	/* Set the node address */
	memcpy(tail->node_addr, vif->addr, ETH_ALEN);

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/* a hot-spot time event must not already be pending on this vif */
	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}

	te_data->vif = vif;
	te_data->duration = duration;
	te_data->id = HOT_SPOT_CMD;

	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
				   time_event_response,
				   ARRAY_SIZE(time_event_response),
				   iwl_mvm_rx_aux_roc, te_data);

	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len,
				   &aux_roc_req);

	if (res) {
		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
		goto out_clear_te;
	}

	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
	/* should never fail */
	WARN_ON_ONCE(res);

	if (res) {
 out_clear_te:
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);
		spin_unlock_bh(&mvm->time_event_lock);
	}

	return res;
}

/*
 * mac80211 .remain_on_channel callback: for station vifs use the aux-ROC
 * (hotspot) firmware flow; for P2P_DEVICE (re)bind the vif to a suitable
 * PHY context and schedule a P2P ROC time event.
 */
static int iwl_mvm_roc(struct ieee80211_hw *hw,
		       struct ieee80211_vif *vif,
		       struct ieee80211_channel *channel,
		       int duration,
		       enum ieee80211_roc_type type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct cfg80211_chan_def chandef;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	bool band_change_removal;
	int ret, i;

	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
			   duration, type);

	/*
	 * Flush the done work, just in case it's still pending, so that
	 * the work it does can complete and we can accept new frames.
	 */
	flush_work(&mvm->roc_done_wk);

	mutex_lock(&mvm->mutex);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
			/* Use aux roc framework (HS20) */
			if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) {
				u32 lmac_id;

				lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
							      channel->band);

				ret = iwl_mvm_add_aux_sta(mvm, lmac_id);
				if (WARN(ret,
					 "Failed to allocate aux station"))
					goto out_unlock;
			}
			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
						       vif, duration);
			goto out_unlock;
		}
		IWL_ERR(mvm, "hotspot not supported\n");
		ret = -EINVAL;
		goto out_unlock;
	case NL80211_IFTYPE_P2P_DEVICE:
		/* handle below */
		break;
	default:
		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Prefer re-using an existing PHY context already on this channel */
	for (i = 0; i < NUM_PHY_CTX; i++) {
		phy_ctxt = &mvm->phy_ctxts[i];
		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
			continue;

		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
			/*
			 * Unbind the P2P_DEVICE from the current PHY context,
			 * and if the PHY context is not used remove it.
			 */
			ret = iwl_mvm_binding_remove_vif(mvm, vif);
			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

			/* Bind the P2P_DEVICE to the current PHY Context */
			mvmvif->phy_ctxt = phy_ctxt;

			ret = iwl_mvm_binding_add_vif(mvm, vif);
			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
				goto out_unlock;

			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
			goto schedule_time_event;
		}
	}

	/* Need to update the PHY context only if the ROC channel changed */
	if (channel == mvmvif->phy_ctxt->channel)
		goto schedule_time_event;

	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);

	/*
	 * Check if the remain-on-channel is on a different band and that
	 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
	 * so, we'll need to release and then re-configure here, since we
	 * must not remove a PHY context that's part of a binding.
	 */
	band_change_removal =
		fw_has_capa(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
		mvmvif->phy_ctxt->channel->band != chandef.chan->band;

	if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
		/*
		 * Change the PHY context configuration as it is currently
		 * referenced only by the P2P Device MAC (and we can modify it)
		 */
		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
					       &chandef, 1, 1);
		if (ret)
			goto out_unlock;
	} else {
		/*
		 * The PHY context is shared with other MACs (or we're trying to
		 * switch bands), so remove the P2P Device from the binding,
		 * allocate a new PHY context and create a new binding.
		 */
		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!phy_ctxt) {
			ret = -ENOSPC;
			goto out_unlock;
		}

		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
					       1, 1);
		if (ret) {
			IWL_ERR(mvm, "Failed to change PHY context\n");
			goto out_unlock;
		}

		/* Unbind the P2P_DEVICE from the current PHY context */
		ret = iwl_mvm_binding_remove_vif(mvm, vif);
		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);

		/* Bind the P2P_DEVICE to the new allocated PHY context */
		mvmvif->phy_ctxt = phy_ctxt;

		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
			goto out_unlock;

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
	}

schedule_time_event:
	/* Schedule the time events */
	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);

out_unlock:
	mutex_unlock(&mvm->mutex);
	IWL_DEBUG_MAC80211(mvm, "leave\n");
	return ret;
}

/* mac80211 .cancel_remain_on_channel callback. */
static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	IWL_DEBUG_MAC80211(mvm, "enter\n");

	mutex_lock(&mvm->mutex);
	iwl_mvm_stop_roc(mvm, vif);
	mutex_unlock(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "leave\n");

	return 0;
}

/* Iterator state for iwl_mvm_is_ftm_responder_chanctx(). */
struct iwl_mvm_ftm_responder_iter_data {
	bool responder;		/* set when an FTM-responder AP vif is found */
	struct ieee80211_chanctx_conf *ctx;	/* chanctx being queried */
};

/* Interface iterator: flag AP vifs with FTM-responder params on @ctx. */
static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
					       struct ieee80211_vif
					       *vif)
{
	struct iwl_mvm_ftm_responder_iter_data *data = _data;

	if (rcu_access_pointer(vif->chanctx_conf) == data->ctx &&
	    vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params)
		data->responder = true;
}

/*
 * Return true when an active AP vif on @ctx is configured as an FTM
 * responder (in which case the full chandef, not min_def, must be used).
 */
static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
					     struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm_ftm_responder_iter_data data = {
		.responder = false,
		.ctx = ctx,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_ftm_responder_chanctx_iter,
						   &data);
	return data.responder;
}

/*
 * Allocate and program a firmware PHY context for a new channel context;
 * the PHY context id is stored in the chanctx drv_priv.
 * Caller must hold mvm->mutex.
 */
static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
				 struct ieee80211_chanctx_conf *ctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt;
	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");

	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
	if (!phy_ctxt) {
		ret = -ENOSPC;
		goto out;
	}

	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
				       ctx->rx_chains_static,
				       ctx->rx_chains_dynamic);
	if (ret) {
		IWL_ERR(mvm, "Failed to add PHY context\n");
		goto out;
	}

	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
	*phy_ctxt_id = phy_ctxt->id;
out:
	return ret;
}

/* mac80211 .add_chanctx callback: locked wrapper for the worker above. */
static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_add_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);

	return ret;
}

/* Drop the PHY-context reference taken in __iwl_mvm_add_chanctx(). */
static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
				     struct ieee80211_chanctx_conf *ctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
}

/* mac80211 .remove_chanctx callback: locked wrapper. */
static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_remove_chanctx(mvm, ctx);
	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 .change_chanctx callback: re-program the PHY context when the
 * width, RX chains or radar flags change; only width/chain/radar changes
 * are allowed when the context is shared by multiple vifs.
 */
static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *ctx,
				   u32 changed)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
	bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
	struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;

	if (WARN_ONCE((phy_ctxt->ref > 1) &&
		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
				   IEEE80211_CHANCTX_CHANGE_RADAR |
				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
		      phy_ctxt->ref, changed))
		return;

	mutex_lock(&mvm->mutex);

	/* we are only changing the min_width, may be a noop */
	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
		if (phy_ctxt->width == def->width)
			goto out_unlock;

		/* we are just toggling between 20_NOHT and 20 */
		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
		    def->width <= NL80211_CHAN_WIDTH_20)
			goto out_unlock;
	}

	iwl_mvm_bt_coex_vif_change(mvm);
	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
				 ctx->rx_chains_static,
				 ctx->rx_chains_dynamic);

out_unlock:
	mutex_unlock(&mvm->mutex);
}

/*
 * Bind @vif to the PHY context behind @ctx (quota/power updates included).
 * @switching_chanctx is true during a channel-switch flow, which changes
 * the handling for AP and STA vifs.  Caller must hold mvm->mutex.
 */
static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx,
					bool switching_chanctx)
{
	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	mvmvif->phy_ctxt = phy_ctxt;

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* only needed if we're switching chanctx (i.e. during CSA) */
		if (switching_chanctx) {
			mvmvif->ap_ibss_active = true;
			break;
		}
		fallthrough;
	case NL80211_IFTYPE_ADHOC:
		/*
		 * The AP binding flow is handled as part of the start_ap flow
		 * (in bss_info_changed), similarly for IBSS.
		 */
		ret = 0;
		goto out;
	case NL80211_IFTYPE_STATION:
		mvmvif->csa_bcn_pending = false;
		break;
	case NL80211_IFTYPE_MONITOR:
		/* always disable PS when a monitor interface is active */
		mvmvif->ps_disabled = true;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = iwl_mvm_binding_add_vif(mvm, vif);
	if (ret)
		goto out;

	/*
	 * Power state must be updated before quotas,
	 * otherwise fw will complain.
	 */
	iwl_mvm_power_update_mac(mvm);

	/* Setting the quota at this stage is only required for monitor
	 * interfaces. For the other types, the bss_info changed flow
	 * will handle quota settings.
	 */
	if (vif->type == NL80211_IFTYPE_MONITOR) {
		mvmvif->monitor_active = true;
		ret = iwl_mvm_update_quotas(mvm, false, NULL);
		if (ret)
			goto out_remove_binding;

		ret = iwl_mvm_add_snif_sta(mvm, vif);
		if (ret)
			goto out_remove_binding;

	}

	/* Handle binding during CSA */
	if (vif->type == NL80211_IFTYPE_AP) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	}

	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
		mvmvif->csa_bcn_pending = true;

		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
			u32 duration = 3 * vif->bss_conf.beacon_int;

			/* Protect the session to make sure we hear the first
			 * beacon on the new channel.
			 */
			iwl_mvm_protect_session(mvm, vif, duration, duration,
						vif->bss_conf.beacon_int / 2,
						true);
		}

		iwl_mvm_update_quotas(mvm, false, NULL);
	}

	goto out;

out_remove_binding:
	iwl_mvm_binding_remove_vif(mvm, vif);
	iwl_mvm_power_update_mac(mvm);
out:
	if (ret)
		mvmvif->phy_ctxt = NULL;
	return ret;
}

/* mac80211 .assign_vif_chanctx callback: locked, non-switching wrapper. */
static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * Undo __iwl_mvm_assign_vif_chanctx(): tear down the time event, per-type
 * state, quotas and binding.  During a channel switch (switching_chanctx)
 * some state is intentionally kept for the re-assign step.
 * Caller must hold mvm->mutex.
 */
static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx,
					   bool switching_chanctx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_vif *disabled_vif = NULL;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);

	switch (vif->type) {
	case NL80211_IFTYPE_ADHOC:
		goto out;
	case NL80211_IFTYPE_MONITOR:
		mvmvif->monitor_active = false;
		mvmvif->ps_disabled = false;
		iwl_mvm_rm_snif_sta(mvm, vif);
		break;
	case NL80211_IFTYPE_AP:
		/* This part is triggered only during CSA */
		if (!switching_chanctx || !mvmvif->ap_ibss_active)
			goto out;

		mvmvif->csa_countdown = false;

		/* Set CS bit on all the stations */
		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);

		/* Save blocked iface, the timeout is set on the next beacon */
		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);

		mvmvif->ap_ibss_active = false;
		break;
	case NL80211_IFTYPE_STATION:
		if (!switching_chanctx)
			break;

		disabled_vif = vif;

		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
			iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
		break;
	default:
		break;
	}

	iwl_mvm_update_quotas(mvm, false, disabled_vif);
	iwl_mvm_binding_remove_vif(mvm, vif);

out:
	/* with the CHANNEL_SWITCH_CMD capability, keep the PHY context
	 * attached across the switch
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
	    switching_chanctx)
		return;
	mvmvif->phy_ctxt =
	NULL;
	iwl_mvm_power_update_mac(mvm);
}

/* mac80211 op: detach a vif from its channel context (non-switch path). */
static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_chanctx_conf *ctx)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
	mutex_unlock(&mvm->mutex);
}

/*
 * Swap-mode chanctx switch: tear down the old context, build the new one,
 * and on any failure try to restore the old context; if even the rollback
 * fails, restart the NIC as a last resort.
 */
static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
					   struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);

	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
	if (ret) {
		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
		goto out_reassign;
	}

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_remove;
	}

	/* we don't support TDLS during DCM - can be caused by channel switch */
	if (iwl_mvm_phy_ctx_count(mvm) > 1)
		iwl_mvm_teardown_tdls_peers(mvm);

	goto out;

out_remove:
	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);

out_reassign:
	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
		goto out_restart;
	}

	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * Reassign-mode chanctx switch: move the vif onto the new context without
 * removing the old one; roll back to the old context (or restart the NIC)
 * on failure.
 */
static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
					       struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_reassign;
	}

	goto out;

out_reassign:
	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

/* mac80211 op: dispatch a channel-context switch by requested mode. */
static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif_chanctx_switch *vifs,
				      int n_vifs,
				      enum ieee80211_chanctx_switch_mode mode)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* we only support a single-vif right now */
	if (n_vifs > 1)
		return -EOPNOTSUPP;

	switch (mode) {
	case CHANCTX_SWMODE_SWAP_CONTEXTS:
		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
		break;
	case CHANCTX_SWMODE_REASSIGN_VIF:
		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/* mac80211 op: report whether we sent the last IBSS beacon. */
static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	return mvm->ibss_manager;
}

/*
 * mac80211 op: TIM bit changed for a station — regenerate the beacon
 * template for that station's vif.
 */
static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			   bool set)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (!mvm_sta || !mvm_sta->vif) {
		IWL_ERR(mvm, "Station is not associated to a vif\n");
		return -EINVAL;
	}

	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}

#ifdef CONFIG_NL80211_TESTMODE
/* netlink attribute policy for the testmode commands below */
static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

/* Parse and execute one testmode command; caller holds mvm->mutex. */
static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      void *data, int len)
{
	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
	int err;
	u32 noa_duration;

	err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
				   iwl_mvm_tm_policy, NULL);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch
	(nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		/* NoA only makes sense on a beaconing P2P GO interface */
		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
		    !vif->bss_conf.enable_beacon ||
		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
			return -EINVAL;

		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
		/* must be shorter than the beacon interval */
		if (noa_duration >= vif->bss_conf.beacon_int)
			return -EINVAL;

		mvm->noa_duration = noa_duration;
		mvm->noa_vif = vif;

		return iwl_mvm_update_quotas(mvm, true, NULL);
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* must be associated client vif - ignore authorized */
		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
			return -EINVAL;

		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	}

	return -EOPNOTSUPP;
}

/* mac80211 testmode entry point: take the mvm mutex and dispatch. */
static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    void *data, int len)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int err;

	mutex_lock(&mvm->mutex);
	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
	mutex_unlock(&mvm->mutex);

	return err;
}
#endif

static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel_switch *chsw)
{
	/* By implementing this operation, we prevent mac80211 from
	 * starting its own channel switch timer, so that we can call
	 * ieee80211_chswitch_done() ourselves at the right time
	 * (which is when the absence time event starts).
	 */
	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
			   "dummy channel switch op\n");
}

/*
 * Ask the firmware to schedule the client-side channel-switch time event.
 * Caller must hold mvm->mutex (lockdep-asserted below).
 */
static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_chan_switch_te_cmd cmd = {
		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							  mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.tsf = cpu_to_le32(chsw->timestamp),
		.cs_count = chsw->count,
		.cs_mode = chsw->block_tx,
	};

	lockdep_assert_held(&mvm->mutex);

	if (chsw->delay)
		cmd.cs_delayed_bcn_count =
			DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);

	return iwl_mvm_send_cmd_pdu(mvm,
				    WIDE_ID(MAC_CONF_GROUP,
					    CHANNEL_SWITCH_TIME_EVENT_CMD),
				    0, sizeof(cmd), &cmd);
}

/* Legacy (pre CHANNEL_SWITCH_CMD capability) client channel-switch path. */
static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 apply_time;

	/* Schedule the time event to a bit before beacon 1,
	 * to make sure we're in the new channel when the
	 * GO/AP arrives. In case count <= 1 immediately schedule the
	 * TE (this might result with some packet loss or connection
	 * loss).
	 */
	if (chsw->count <= 1)
		apply_time = 0;
	else
		apply_time = chsw->device_timestamp +
			((vif->bss_conf.beacon_int * (chsw->count - 1) -
			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

	if (chsw->block_tx)
		iwl_mvm_csa_client_absent(mvm, vif);

	/* beacon filtering interferes with CSA beacon tracking; disable it */
	if (mvmvif->bf_data.bf_enabled) {
		int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);

		if (ret)
			return ret;
	}

	iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
				    apply_time);

	return 0;
}

#define IWL_MAX_CSA_BLOCK_TX 1500
/*
 * mac80211 op: prepare for a channel switch announced on this vif.
 * AP mode: record the CSA vif (only one CSA at a time). Station mode:
 * either let new firmware time the switch itself, or fall back to the
 * legacy time-event based flow. Power save is disabled for the duration.
 */
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *csa_vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->csa_failed = false;

	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
			   chsw->chandef.center_freq1);

	iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
				       ieee80211_vif_to_wdev(vif),
				       FW_DBG_TRIGGER_CHANNEL_SWITCH);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif =
			rcu_dereference_protected(mvm->csa_vif,
						  lockdep_is_held(&mvm->mutex));
		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
			      "Another CSA is already in progress")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/* we still didn't unblock tx. prevent new CS meanwhile */
		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					      lockdep_is_held(&mvm->mutex))) {
			ret = -EBUSY;
			goto out_unlock;
		}

		rcu_assign_pointer(mvm->csa_vif, vif);

		if (WARN_ONCE(mvmvif->csa_countdown,
			      "Previous CSA countdown didn't complete")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;

		break;
	case NL80211_IFTYPE_STATION:
		/*
		 * In the new flow FW is in charge of timing the switch so there
		 * is no need for all of this
		 */
		if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
					    CHANNEL_SWITCH_ERROR_NOTIF,
					    0))
			break;

		/*
		 * We haven't configured the firmware to be associated yet since
		 * we don't know the dtim period. In this case, the firmware can't
		 * track the beacons.
		 */
		if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) {
			ret = -EBUSY;
			goto out_unlock;
		}

		if (chsw->delay > IWL_MAX_CSA_BLOCK_TX)
			schedule_delayed_work(&mvmvif->csa_work, 0);

		if (chsw->block_tx) {
			/*
			 * In case of undetermined / long time with immediate
			 * quiet monitor status to gracefully disconnect
			 */
			if (!chsw->count ||
			    chsw->count * vif->bss_conf.beacon_int >
			    IWL_MAX_CSA_BLOCK_TX)
				schedule_delayed_work(&mvmvif->csa_work,
						      msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
		}

		if (!fw_has_capa(&mvm->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
			ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
			if (ret)
				goto out_unlock;
		} else {
			iwl_mvm_schedule_client_csa(mvm, vif, chsw);
		}

		mvmvif->csa_count = chsw->count;
		mvmvif->csa_misbehave = false;
		break;
	default:
		break;
	}

	mvmvif->ps_disabled = true;

	ret = iwl_mvm_power_update_ps(mvm);
	if (ret)
		goto out_unlock;

	/* we won't be on this channel any longer */
	iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

/*
 * mac80211 op: a beacon updated the CSA countdown — tell the firmware to
 * modify the scheduled switch, or give up on a misbehaving AP.
 */
static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif,
					     struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_chan_switch_te_cmd cmd = {
		.mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							  mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
		.tsf = cpu_to_le32(chsw->timestamp),
		.cs_count = chsw->count,
		.cs_mode = chsw->block_tx,
	};

	/*
	 * In the new flow FW is in charge of timing the switch so there is no
	 * need for all of this
	 */
	if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
				    CHANNEL_SWITCH_ERROR_NOTIF, 0))
		return;

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
		return;

	IWL_DEBUG_MAC80211(mvm,
			   "Modify CSA on mac %d count = %d (old %d) mode = %d\n",
			   mvmvif->id, chsw->count, mvmvif->csa_count,
			   chsw->block_tx);

	/* count not decreasing while TX is blocked means the AP misbehaves */
	if (chsw->count >= mvmvif->csa_count &&
	    chsw->block_tx) {
		if (mvmvif->csa_misbehave) {
			/* Second time, give up on this AP*/
			iwl_mvm_abort_channel_switch(hw, vif);
			ieee80211_chswitch_done(vif, false);
			mvmvif->csa_misbehave = false;
			return;
		}
		mvmvif->csa_misbehave = true;
	}
	mvmvif->csa_count = chsw->count;

	mutex_lock(&mvm->mutex);
	if (mvmvif->csa_failed)
		goto out_unlock;

	WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
				     WIDE_ID(MAC_CONF_GROUP,
					     CHANNEL_SWITCH_TIME_EVENT_CMD),
				     0, sizeof(cmd), &cmd));
out_unlock:
	mutex_unlock(&mvm->mutex);
}

/*
 * Flush (or wait for) TX when no vif is given: old TX API flushes by queue
 * mask, new TX API iterates all known stations.
 */
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
{
	int i;

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (drop) {
			mutex_lock(&mvm->mutex);
			iwl_mvm_flush_tx_path(mvm,
					      iwl_mvm_flushable_queues(mvm) & queues);
			mutex_unlock(&mvm->mutex);
		} else {
			iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
		}
		return;
	}

	mutex_lock(&mvm->mutex);
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		struct ieee80211_sta *sta;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		if (drop)
			iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF);
		else
			iwl_mvm_wait_sta_queues_empty(mvm,
						      iwl_mvm_sta_from_mac80211(sta));
	}
	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 op: flush pending TX for a station vif (AP station and TDLS
 * peers), or everything when vif is NULL.
 */
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      u32 queues, bool drop)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_sta *sta;
	int i;
	u32 msk = 0;

	if (!vif) {
		iwl_mvm_flush_no_vif(mvm, queues, drop);
		return;
	}

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	/* Make sure we're done with the deferred traffic before flushing */
	flush_work(&mvm->add_stream_wk);

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* flush the AP-station and all TDLS peers */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		if (mvmsta->vif != vif)
			continue;

		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		if (drop) {
			if (iwl_mvm_flush_sta(mvm, mvmsta, false))
				IWL_ERR(mvm, "flush request fail\n");
		} else {
			msk |= mvmsta->tfd_queue_msk;
			if (iwl_mvm_has_new_tx_api(mvm))
				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
		}
	}

	mutex_unlock(&mvm->mutex);

	/* this can take a while, and we may need/want other operations
	 * to succeed while doing this, so do it without the mutex held
	 */
	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}

/*
 * mac80211 op: report global radio-time survey statistics (index 0 only),
 * pulled from accumulated + current firmware radio stats.
 */
static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	memset(survey, 0, sizeof(*survey));

	/* only support global statistics right now */
	if (idx != 0)
		return -ENOENT;

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return -ENOENT;

	mutex_lock(&mvm->mutex);

	if (iwl_mvm_firmware_running(mvm)) {
		ret = iwl_mvm_request_statistics(mvm, false);
		if (ret)
			goto out;
	}

	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_RX |
			 SURVEY_INFO_TIME_TX |
			 SURVEY_INFO_TIME_SCAN;
	/* firmware reports usec; survey wants msec (do_div below) */
	survey->time = mvm->accu_radio_stats.on_time_rf +
		       mvm->radio_stats.on_time_rf;
	do_div(survey->time, USEC_PER_MSEC);

	survey->time_rx = mvm->accu_radio_stats.rx_time +
			  mvm->radio_stats.rx_time;
	do_div(survey->time_rx, USEC_PER_MSEC);

	survey->time_tx = mvm->accu_radio_stats.tx_time +
			  mvm->radio_stats.tx_time;
	do_div(survey->time_tx, USEC_PER_MSEC);

	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
			    mvm->radio_stats.on_time_scan;
	do_div(survey->time_scan, USEC_PER_MSEC);

	ret = 0;
out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

/* Translate a firmware rate_n_flags word into a cfg80211 rate_info. */
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
{
	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;

	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
	case RATE_MCS_CHAN_WIDTH_20:
		rinfo->bw = RATE_INFO_BW_20;
		break;
	case RATE_MCS_CHAN_WIDTH_40:
		rinfo->bw = RATE_INFO_BW_40;
		break;
	case RATE_MCS_CHAN_WIDTH_80:
		rinfo->bw = RATE_INFO_BW_80;
		break;
	case RATE_MCS_CHAN_WIDTH_160:
		rinfo->bw = RATE_INFO_BW_160;
		break;
	}

	/* legacy CCK/OFDM rates: rinfo->legacy is in units of 100 kbit/s */
	if (format == RATE_MCS_CCK_MSK ||
	    format == RATE_MCS_LEGACY_OFDM_MSK) {
		int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);

		/* add the offset needed to get to the legacy ofdm indices */
		if (format == RATE_MCS_LEGACY_OFDM_MSK)
			rate += IWL_FIRST_OFDM_RATE;

		switch (rate) {
		case IWL_RATE_1M_INDEX:
			rinfo->legacy = 10;
			break;
		case IWL_RATE_2M_INDEX:
			rinfo->legacy = 20;
			break;
		case IWL_RATE_5M_INDEX:
			rinfo->legacy = 55;
			break;
		case IWL_RATE_11M_INDEX:
			rinfo->legacy = 110;
			break;
		case IWL_RATE_6M_INDEX:
			rinfo->legacy = 60;
			break;
		case IWL_RATE_9M_INDEX:
			rinfo->legacy = 90;
			break;
		case IWL_RATE_12M_INDEX:
			rinfo->legacy = 120;
			break;
		case IWL_RATE_18M_INDEX:
			rinfo->legacy = 180;
			break;
		case IWL_RATE_24M_INDEX:
			rinfo->legacy = 240;
			break;
		case IWL_RATE_36M_INDEX:
			rinfo->legacy = 360;
			break;
		case IWL_RATE_48M_INDEX:
			rinfo->legacy = 480;
			break;
		case IWL_RATE_54M_INDEX:
			rinfo->legacy = 540;
		}
		return;
	}

	rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
	rinfo->mcs = format == RATE_MCS_HT_MSK ?
		RATE_HT_MCS_INDEX(rate_n_flags) :
		u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);

	if (format == RATE_MCS_HE_MSK) {
		u32 gi_ltf = u32_get_bits(rate_n_flags,
					  RATE_MCS_HE_GI_LTF_MSK);

		rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;

		if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
			rinfo->bw = RATE_INFO_BW_HE_RU;
			rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		}

		/* map the gi_ltf code to an nl80211 HE guard interval */
		switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
		case RATE_MCS_HE_TYPE_SU:
		case RATE_MCS_HE_TYPE_EXT_SU:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			else if (gi_ltf == 2)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else if (gi_ltf == 3)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			break;
		case RATE_MCS_HE_TYPE_MU:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
			else if (gi_ltf == 2)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			break;
		case RATE_MCS_HE_TYPE_TRIG:
			if (gi_ltf == 0 || gi_ltf == 1)
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
			else
				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
			break;
		}

		if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
			rinfo->he_dcm = 1;
		return;
	}

	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (format == RATE_MCS_HT_MSK) {
		rinfo->flags |= RATE_INFO_FLAGS_MCS;
	} else if (format == RATE_MCS_VHT_MSK) {
		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
	}
}

/*
 * mac80211 op: fill per-station statistics (signal average, TX rate,
 * beacon counters) for the AP station of an associated client vif.
 */
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct station_info *sinfo)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (mvmsta->avg_energy) {
		sinfo->signal_avg = -(s8)mvmsta->avg_energy;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
	}

	if (iwl_mvm_has_tlc_offload(mvm)) {
		struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
	iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
	}

	/* if beacon filtering isn't on mac80211 does it anyway */
	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	if (!vif->bss_conf.assoc)
		return;

	mutex_lock(&mvm->mutex);

	/* beacon stats only apply to this vif's AP station */
	if (mvmvif->ap_sta_id != mvmsta->sta_id)
		goto unlock;

	if (iwl_mvm_request_statistics(mvm, false))
		goto unlock;

	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
			   mvmvif->beacon_stats.accu_num_beacons;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
	if (mvmvif->beacon_stats.avg_signal) {
		/* firmware only reports a value after RXing a few beacons */
		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
	}
 unlock:
	mutex_unlock(&mvm->mutex);
}

/* ini-TLV debug flow: map MLME events onto firmware debug time points. */
static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif,
					    const struct ieee80211_mlme_event *mlme)
{
	if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) &&
	    (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) {
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
				       NULL);
		return;
	}

	if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_DEASSOC, NULL);
		return;
	}
}

/*
 * Legacy debug flow: decrement the matching MLME trigger counter and
 * collect firmware debug data when it reaches zero.
 */
static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const struct ieee80211_event *event)
{
#define CHECK_MLME_TRIGGER(_cnt, _fmt...)				\
	do {								\
		if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))		\
			break;						\
		iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);	\
	} while (0)

	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (iwl_trans_dbg_ini_valid(mvm->trans)) {
		iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme);
		return;
	}

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		return;

	trig_mlme = (void *)trig->data;

	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_assoc_denied,
					   "DENIED ASSOC: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_assoc_timeout,
					   "ASSOC TIMEOUT");
	} else if (event->u.mlme.data == AUTH_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(stop_auth_denied,
					   "DENIED AUTH: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(stop_auth_timeout,
					   "AUTH TIMEOUT");
	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
		CHECK_MLME_TRIGGER(stop_rx_deauth,
				   "DEAUTH RX %d", event->u.mlme.reason);
	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
		CHECK_MLME_TRIGGER(stop_tx_deauth,
				   "DEAUTH TX %d", event->u.mlme.reason);
	}
#undef CHECK_MLME_TRIGGER
}

/* Collect a firmware debug trigger when a BAR arrives on a monitored TID. */
static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"BAR received from %pM, tid %d, ssn %d",
				event->u.ba.sta->addr, event->u.ba.tid,
				event->u.ba.ssn);
}

/* mac80211 op: dispatch driver event notifications by type. */
static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       const struct ieee80211_event *event)
{
	struct iwl_mvm
*mvm = IWL_MAC80211_GET_MVM(hw);

	switch (event->type) {
	case MLME_EVENT:
		iwl_mvm_event_mlme_callback(mvm, vif, event);
		break;
	case BAR_RX_EVENT:
		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
		break;
	case BA_FRAME_TIMEOUT:
		iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
						     event->u.ba.tid);
		break;
	default:
		break;
	}
}

/*
 * Send an internal notification to all RX queues; when sync is requested,
 * wait (holding mvm->mutex) until every queue acknowledged or the radio
 * was killed.
 */
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
				     enum iwl_mvm_rxq_notif_type type,
				     bool sync, const void *data, u32 size)
{
	struct {
		struct iwl_rxq_sync_cmd cmd;
		struct iwl_mvm_internal_rxq_notif notif;
	} __packed cmd = {
		.cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1),
		.cmd.count =
			cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) +
				    size),
		.notif.type = type,
		.notif.sync = sync,
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
		.data[1] = data,
		.len[1] = size,
		.flags = sync ? 0 : CMD_ASYNC,
	};
	int ret;

	/* size must be a multiple of DWORD */
	if (WARN_ON(cmd.cmd.count & cpu_to_le32(3)))
		return;

	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

	if (sync) {
		cmd.notif.cookie = mvm->queue_sync_cookie;
		/* one pending bit per RX queue */
		mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1;
	}

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
		goto out;
	}

	if (sync) {
		lockdep_assert_held(&mvm->mutex);
		ret = wait_event_timeout(mvm->rx_sync_waitq,
					 READ_ONCE(mvm->queue_sync_state) == 0 ||
					 iwl_mvm_is_radio_killed(mvm),
					 HZ);
		WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
			  "queue sync: failed to sync, state is 0x%lx\n",
			  mvm->queue_sync_state);
	}

out:
	if (sync) {
		mvm->queue_sync_state = 0;
		mvm->queue_sync_cookie++;
	}
}

/* mac80211 op: synchronously drain all RX queues. */
static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
	mutex_unlock(&mvm->mutex);
}

/*
 * mac80211 op: copy the cached FTM responder statistics for an active
 * non-P2P AP vif with FTM responder enabled.
 */
static int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw,
					       struct ieee80211_vif *vif,
					       struct cfg80211_ftm_responder_stats *stats)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
	    !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder)
		return -EINVAL;

	mutex_lock(&mvm->mutex);
	*stats = mvm->ftm_resp_stats;
	mutex_unlock(&mvm->mutex);

	stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) |
			BIT(NL80211_FTM_STATS_PARTIAL_NUM) |
			BIT(NL80211_FTM_STATS_FAILED_NUM) |
			BIT(NL80211_FTM_STATS_ASAP_NUM) |
			BIT(NL80211_FTM_STATS_NON_ASAP_NUM) |
			BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) |
			BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) |
			BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) |
			BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM);

	return 0;
}

/* mac80211 op: start a peer-measurement (FTM) request. */
static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct cfg80211_pmsr_request *request)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_ftm_start(mvm, vif, request);
	mutex_unlock(&mvm->mutex);

	return ret;
}

/* mac80211 op: abort an in-flight peer-measurement request. */
static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct cfg80211_pmsr_request *request)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	mutex_lock(&mvm->mutex);
	iwl_mvm_ftm_abort(mvm, request);
	mutex_unlock(&mvm->mutex);
}

/* True when the skb's IP payload (TCP/UDP) can be checksummed by HW. */
static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
{
	u8 protocol = ip_hdr(skb)->protocol;

	if (!IS_ENABLED(CONFIG_INET))
		return false;
	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
}

/*
 * mac80211 op: decide whether two frames may share an A-MSDU, based on
 * matching checksum-offload treatment.
 */
static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
				      struct sk_buff *head,
				      struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		return iwl_mvm_tx_csum_bz(mvm, head, true) ==
		       iwl_mvm_tx_csum_bz(mvm, skb, true);

	/* For now don't aggregate IPv6 in AMSDU */
	if (skb->protocol != htons(ETH_P_IP))
		return false;

	if (!iwl_mvm_is_csum_supported(mvm))
		return true;

	return
iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
}

/* mac80211 callback table registered for iwlwifi MVM devices. */
const struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.get_antenna = iwl_mvm_op_get_antenna,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
	.stop = iwl_mvm_mac_stop,
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
	.config_iface_filter = iwl_mvm_config_iface_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.sta_rc_update = iwl_mvm_sta_rc_update,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx,
	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
	.flush = iwl_mvm_mac_flush,
	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
	.start_ap = iwl_mvm_start_ap_ibss,
	.stop_ap = iwl_mvm_stop_ap_ibss,
	.join_ibss = iwl_mvm_start_ap_ibss,
	.leave_ibss = iwl_mvm_stop_ap_ibss,
	.tx_last_beacon = iwl_mvm_tx_last_beacon,
	.set_tim = iwl_mvm_set_tim,
	.channel_switch = iwl_mvm_channel_switch,
	.pre_channel_switch = iwl_mvm_pre_channel_switch,
	.post_channel_switch = iwl_mvm_post_channel_switch,
	.abort_channel_switch = iwl_mvm_abort_channel_switch,
	.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
	.event_callback = iwl_mvm_mac_event_callback,
	.sync_rx_queues = iwl_mvm_sync_rx_queues,
	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
	.get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats,
	.start_pmsr = iwl_mvm_start_pmsr,
	.abort_pmsr = iwl_mvm_abort_pmsr,
	.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
#endif
};
diff --git a/sys/contrib/dev/rtw88/mac80211.c b/sys/contrib/dev/rtw88/mac80211.c
index 94a6fb578281..73c5d9535d72 100644
--- a/sys/contrib/dev/rtw88/mac80211.c
+++ b/sys/contrib/dev/rtw88/mac80211.c
@@ -1,940 +1,940 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "sec.h"
#include "tx.h"
#include "fw.h"
#include "mac.h"
#include "coex.h"
#include "ps.h"
#include "reg.h"
#include "bf.h"
#include "debug.h"
#include "wow.h"
#include "sar.h"

/* mac80211 op: queue a frame for transmission, or drop it while stopped. */
static void rtw_ops_tx(struct ieee80211_hw *hw,
		       struct ieee80211_tx_control *control,
		       struct sk_buff *skb)
{
	struct rtw_dev *rtwdev = hw->priv;

	if
	(!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) {
		ieee80211_free_txskb(hw, skb);
		return;
	}

	rtw_tx(rtwdev, control, skb);
}

/* mac80211 op: enqueue a txq and kick the TX worker. */
static void rtw_ops_wake_tx_queue(struct ieee80211_hw *hw,
				  struct ieee80211_txq *txq)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		return;

	spin_lock_bh(&rtwdev->txq_lock);
	if (list_empty(&rtwtxq->list))
		list_add_tail(&rtwtxq->list, &rtwdev->txqs);
	spin_unlock_bh(&rtwdev->txq_lock);

	queue_work(rtwdev->tx_wq, &rtwdev->tx_work);
}

/* mac80211 op: bring the core up under the device mutex. */
static int rtw_ops_start(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	mutex_lock(&rtwdev->mutex);
	ret = rtw_core_start(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	return ret;
}

/* mac80211 op: bring the core down under the device mutex. */
static void rtw_ops_stop(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtw_core_stop(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}

/*
 * mac80211 op: apply hardware configuration changes (idle, power save,
 * channel). IPS is left before and possibly re-entered after the change.
 */
static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
{
	struct rtw_dev *rtwdev = hw->priv;
	int ret = 0;

	/* let previous ips work finish to ensure we don't leave ips twice */
	cancel_work_sync(&rtwdev->ips_work);

	mutex_lock(&rtwdev->mutex);

	rtw_leave_lps_deep(rtwdev);

	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
	    !(hw->conf.flags & IEEE80211_CONF_IDLE)) {
		ret = rtw_leave_ips(rtwdev);
		if (ret) {
			rtw_err(rtwdev, "failed to leave idle state\n");
			goto out;
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		if (hw->conf.flags & IEEE80211_CONF_PS) {
			rtwdev->ps_enabled = true;
		} else {
			rtwdev->ps_enabled = false;
			rtw_leave_lps(rtwdev);
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
		rtw_set_channel(rtwdev);

	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
	    (hw->conf.flags & IEEE80211_CONF_IDLE))
		rtw_enter_ips(rtwdev);

out:
	mutex_unlock(&rtwdev->mutex);
	return ret;
}

/* Per-port hardware register layout for vif configuration. */
static const struct rtw_vif_port rtw_vif_port[] = {
	[0] = {
		.mac_addr	= {.addr = 0x0610},
		.bssid		= {.addr = 0x0618},
		.net_type	= {.addr = 0x0100, .mask = 0x30000},
		.aid		= {.addr = 0x06a8, .mask = 0x7ff},
		.bcn_ctrl	= {.addr = 0x0550, .mask = 0xff},
	},
	[1] = {
		.mac_addr	= {.addr = 0x0700},
		.bssid		= {.addr = 0x0708},
		.net_type	= {.addr = 0x0100, .mask = 0xc0000},
		.aid		= {.addr = 0x0710, .mask = 0x7ff},
		.bcn_ctrl	= {.addr = 0x0551, .mask = 0xff},
	},
	[2] = {
		.mac_addr	= {.addr = 0x1620},
		.bssid		= {.addr = 0x1628},
		.net_type	= {.addr = 0x1100, .mask = 0x3},
		.aid		= {.addr = 0x1600, .mask = 0x7ff},
		.bcn_ctrl	= {.addr = 0x0578, .mask = 0xff},
	},
	[3] = {
		.mac_addr	= {.addr = 0x1630},
		.bssid		= {.addr = 0x1638},
		.net_type	= {.addr = 0x1100, .mask = 0xc},
		.aid		= {.addr = 0x1604, .mask = 0x7ff},
		.bcn_ctrl	= {.addr = 0x0579, .mask = 0xff},
	},
	[4] = {
		.mac_addr	= {.addr = 0x1640},
		.bssid		= {.addr = 0x1648},
		.net_type	= {.addr = 0x1100, .mask = 0x30},
		.aid		= {.addr = 0x1608, .mask = 0x7ff},
		.bcn_ctrl	= {.addr = 0x057a, .mask = 0xff},
	},
};

/*
 * mac80211 op: initialize driver state for a new vif (always port 0 here)
 * and program the port registers for the interface type.
 */
static int rtw_ops_add_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	enum rtw_net_type net_type;
	u32 config = 0;
	u8 port = 0;
	u8 bcn_ctrl = 0;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
	rtwvif->port = port;
	rtwvif->stats.tx_unicast = 0;
	rtwvif->stats.rx_unicast = 0;
	rtwvif->stats.tx_cnt = 0;
	rtwvif->stats.rx_cnt = 0;
	rtwvif->scan_req = NULL;
	memset(&rtwvif->bfee, 0, sizeof(struct rtw_bfee));
	rtwvif->conf = &rtw_vif_port[port];
	rtw_txq_init(rtwdev, vif->txq);
	INIT_LIST_HEAD(&rtwvif->rsvd_page_list);

	mutex_lock(&rtwdev->mutex);

	rtw_leave_lps_deep(rtwdev);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
		net_type = RTW_NET_AP_MODE;
		bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
		break;
	case NL80211_IFTYPE_ADHOC:
		rtw_add_rsvd_page_bcn(rtwdev, rtwvif);
		net_type = RTW_NET_AD_HOC;
		bcn_ctrl = BIT_EN_BCN_FUNCTION | BIT_DIS_TSF_UDT;
		break;
	case NL80211_IFTYPE_STATION:
	rtw_add_rsvd_page_sta(rtwdev, rtwvif);
		net_type = RTW_NET_NO_LINK;
		bcn_ctrl = BIT_EN_BCN_FUNCTION;
		break;
	default:
		WARN_ON(1);
		mutex_unlock(&rtwdev->mutex);
		return -EINVAL;
	}

	ether_addr_copy(rtwvif->mac_addr, vif->addr);
	config |= PORT_SET_MAC_ADDR;
	rtwvif->net_type = net_type;
	config |= PORT_SET_NET_TYPE;
	rtwvif->bcn_ctrl = bcn_ctrl;
	config |= PORT_SET_BCN_CTRL;
	rtw_vif_port_config(rtwdev, rtwvif, config);

	mutex_unlock(&rtwdev->mutex);

#if defined(__linux__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %pM on port %d\n", vif->addr,
		rtwvif->port);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "start vif %6D on port %d\n", vif->addr,
		":", rtwvif->port);
#endif
	return 0;
}

/* mac80211 op: tear down a vif and clear its port registers. */
static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	u32 config = 0;

#if defined(__linux__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %pM on port %d\n", vif->addr,
		rtwvif->port);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "stop vif %6D on port %d\n", vif->addr,
		":", rtwvif->port);
#endif

	mutex_lock(&rtwdev->mutex);

	rtw_leave_lps_deep(rtwdev);

	rtw_txq_cleanup(rtwdev, vif->txq);
	rtw_remove_rsvd_page(rtwdev, rtwvif);

	eth_zero_addr(rtwvif->mac_addr);
	config |= PORT_SET_MAC_ADDR;
	rtwvif->net_type = RTW_NET_NO_LINK;
	config |= PORT_SET_NET_TYPE;
	rtwvif->bcn_ctrl = 0;
	config |= PORT_SET_BCN_CTRL;
	rtw_vif_port_config(rtwdev, rtwvif, config);

	mutex_unlock(&rtwdev->mutex);
}

/*
 * mac80211 op: change interface type by removing the vif and re-adding it
 * with the new type / p2p setting.
 */
static int rtw_ops_change_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum nl80211_iftype type, bool p2p)
{
	struct rtw_dev *rtwdev = hw->priv;

#if defined(__linux__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
		vif->addr, vif->type, type, vif->p2p, p2p);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_STATE, "change vif %6D (%d)->(%d), p2p (%d)->(%d)\n",
		vif->addr, ":", vif->type, type, vif->p2p, p2p);
#endif

	rtw_ops_remove_interface(hw, vif);

	vif->type = type;
	vif->p2p = p2p;

	return rtw_ops_add_interface(hw, vif);
}

/* mac80211 op: translate mac80211 RX filter flags into RCR register bits. */
static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *new_flags,
				     u64 multicast)
{
	struct rtw_dev *rtwdev = hw->priv;

	*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
		      FIF_BCN_PRBRESP_PROMISC;

	mutex_lock(&rtwdev->mutex);

	rtw_leave_lps_deep(rtwdev);

	if (changed_flags & FIF_ALLMULTI) {
		if (*new_flags & FIF_ALLMULTI)
			rtwdev->hal.rcr |= BIT_AM | BIT_AB;
		else
			rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
	}
	if (changed_flags & FIF_FCSFAIL) {
		if (*new_flags & FIF_FCSFAIL)
			rtwdev->hal.rcr |= BIT_ACRC32;
		else
			rtwdev->hal.rcr &= ~(BIT_ACRC32);
	}
	if (changed_flags & FIF_OTHER_BSS) {
		if (*new_flags & FIF_OTHER_BSS)
			rtwdev->hal.rcr |= BIT_AAP;
		else
			rtwdev->hal.rcr &= ~(BIT_AAP);
	}
	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
			rtwdev->hal.rcr &= ~(BIT_CBSSID_BCN | BIT_CBSSID_DATA);
		else
			rtwdev->hal.rcr |= BIT_CBSSID_BCN;
	}

	rtw_dbg(rtwdev, RTW_DBG_RX,
		"config rx filter, changed=0x%08x, new=0x%08x, rcr=0x%08x\n",
		changed_flags, *new_flags, rtwdev->hal.rcr);

	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);

	mutex_unlock(&rtwdev->mutex);
}

/* Only have one group of EDCA parameters now */
static const u32 ac_to_edca_param[IEEE80211_NUM_ACS] = {
	[IEEE80211_AC_VO] = REG_EDCA_VO_PARAM,
	[IEEE80211_AC_VI] = REG_EDCA_VI_PARAM,
	[IEEE80211_AC_BE] = REG_EDCA_BE_PARAM,
	[IEEE80211_AC_BK] = REG_EDCA_BK_PARAM,
};

/* Convert an AIFSN into an AIFS time using current slot time and SIFS. */
static u8 rtw_aifsn_to_aifs(struct rtw_dev *rtwdev,
			    struct rtw_vif *rtwvif, u8 aifsn)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	u8 slot_time;
	u8 sifs;

	slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
	sifs = rtwdev->hal.current_band_type == RTW_BAND_5G ?
16 : 10; return aifsn * slot_time + sifs; } static void __rtw_conf_tx(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u16 ac) { struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac]; u32 edca_param = ac_to_edca_param[ac]; u8 ecw_max, ecw_min; u8 aifs; /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */ ecw_max = ilog2(params->cw_max + 1); ecw_min = ilog2(params->cw_min + 1); aifs = rtw_aifsn_to_aifs(rtwdev, rtwvif, params->aifs); rtw_write32_mask(rtwdev, edca_param, BIT_MASK_TXOP_LMT, params->txop); rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMAX, ecw_max); rtw_write32_mask(rtwdev, edca_param, BIT_MASK_CWMIN, ecw_min); rtw_write32_mask(rtwdev, edca_param, BIT_MASK_AIFS, aifs); } static void rtw_conf_tx(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { u16 ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) __rtw_conf_tx(rtwdev, rtwvif, ac); } static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, - u32 changed) + u64 changed) { struct rtw_dev *rtwdev = hw->priv; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; u32 config = 0; mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); if (changed & BSS_CHANGED_ASSOC) { rtw_vif_assoc_changed(rtwvif, conf); if (conf->assoc) { rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH); rtw_fw_download_rsvd_page(rtwdev); rtw_send_rsvd_page_h2c(rtwdev); rtw_coex_media_status_notify(rtwdev, conf->assoc); if (rtw_bf_support) rtw_bf_assoc(rtwdev, vif, conf); rtw_store_op_chan(rtwdev); } else { rtw_leave_lps(rtwdev); rtw_bf_disassoc(rtwdev, vif, conf); /* Abort ongoing scan if cancel_scan isn't issued * when disconnected by peer */ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtw_hw_scan_abort(rtwdev, vif); } config |= PORT_SET_NET_TYPE; config |= PORT_SET_AID; } if (changed & BSS_CHANGED_BSSID) { ether_addr_copy(rtwvif->bssid, conf->bssid); config |= PORT_SET_BSSID; } 
if (changed & BSS_CHANGED_BEACON_INT) { if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) coex_stat->wl_beacon_interval = conf->beacon_int; } if (changed & BSS_CHANGED_BEACON) { rtw_set_dtim_period(rtwdev, conf->dtim_period); rtw_fw_download_rsvd_page(rtwdev); } if (changed & BSS_CHANGED_BEACON_ENABLED) { if (conf->enable_beacon) rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT_EN_BCNQ_DL); else rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL, BIT_EN_BCNQ_DL); } if (changed & BSS_CHANGED_CQM) rtw_fw_beacon_filter_config(rtwdev, true, vif); if (changed & BSS_CHANGED_MU_GROUPS) rtw_chip_set_gid_table(rtwdev, vif, conf); if (changed & BSS_CHANGED_ERP_SLOT) rtw_conf_tx(rtwdev, rtwvif); rtw_vif_port_config(rtwdev, rtwvif, config); mutex_unlock(&rtwdev->mutex); } static int rtw_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; mutex_lock(&rtwdev->mutex); chip->ops->phy_calibration(rtwdev); mutex_unlock(&rtwdev->mutex); return 0; } static int rtw_ops_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params) { struct rtw_dev *rtwdev = hw->priv; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; mutex_lock(&rtwdev->mutex); rtw_leave_lps_deep(rtwdev); rtwvif->tx_params[ac] = *params; __rtw_conf_tx(rtwdev, rtwvif, ac); mutex_unlock(&rtwdev->mutex); return 0; } static int rtw_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = hw->priv; int ret = 0; mutex_lock(&rtwdev->mutex); ret = rtw_sta_add(rtwdev, sta, vif); mutex_unlock(&rtwdev->mutex); return ret; } static int rtw_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = hw->priv; rtw_fw_beacon_filter_config(rtwdev, false, vif); mutex_lock(&rtwdev->mutex); rtw_sta_remove(rtwdev, sta, true); mutex_unlock(&rtwdev->mutex); return 0; } 
/* mac80211 set_tim callback: schedule an asynchronous beacon update so
 * the TIM element reflects the station's buffered-frame state.
 */
static int rtw_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			   bool set)
{
	struct rtw_dev *rtwdev = hw->priv;

	ieee80211_queue_work(hw, &rtwdev->update_beacon_work);

	return 0;
}

/* mac80211 set_key callback: install or remove a cipher key in the
 * hardware security CAM.  WEP40/104, TKIP and CCMP are offloaded; the
 * remaining ciphers fall back to software (-EOPNOTSUPP).
 */
static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *key)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_sec_desc *sec = &rtwdev->sec;
	u8 hw_key_type;
	u8 hw_key_idx;
	int ret = 0;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		hw_key_type = RTW_CAM_WEP40;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		hw_key_type = RTW_CAM_WEP104;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		hw_key_type = RTW_CAM_TKIP;
		/* hardware does not generate the Michael MIC; ask mac80211 */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		hw_key_type = RTW_CAM_AES;
		/* management frames are encrypted in software */
		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		/* suppress error messages */
		return -EOPNOTSUPP;
	default:
		return -ENOTSUPP;
	}

	mutex_lock(&rtwdev->mutex);

	rtw_leave_lps_deep(rtwdev);

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		hw_key_idx = rtw_sec_get_free_cam(sec);
	} else {
		/* multiple interfaces? */
		hw_key_idx = key->keyidx;
	}

	/* NOTE(review): '>' permits hw_key_idx == total_cam_num; if valid
	 * CAM indices are 0..total_cam_num-1 this looks off-by-one —
	 * confirm against rtw_sec_get_free_cam()/CAM sizing before changing.
	 */
	if (hw_key_idx > sec->total_cam_num) {
		ret = -ENOSPC;
		goto out;
	}

	switch (cmd) {
	case SET_KEY:
		/* need sw generated IV */
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		key->hw_key_idx = hw_key_idx;
		rtw_sec_write_cam(rtwdev, sec, sta, key,
				  hw_key_type, hw_key_idx);
		break;
	case DISABLE_KEY:
		rtw_hci_flush_all_queues(rtwdev, false);
		rtw_mac_flush_all_queues(rtwdev, false);
		rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
		break;
	}

	/* download new cam settings for PG to backup */
	if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_PG)
		rtw_fw_download_rsvd_page(rtwdev);

out:
	mutex_unlock(&rtwdev->mutex);

	return ret;
}

/* mac80211 ampdu_action callback: track per-TXQ aggregation state.
 * TX sessions start immediately (no addBA handshake wait in driver);
 * RX aggregation needs no driver action.
 */
static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_ampdu_params *params)
{
	struct ieee80211_sta *sta = params->sta;
	u16 tid = params->tid;
	struct ieee80211_txq *txq = sta->txq[tid];
	struct rtw_txq *rtwtxq = (struct rtw_txq *)txq->drv_priv;

	switch (params->action) {
	case IEEE80211_AMPDU_TX_START:
		return IEEE80211_AMPDU_TX_START_IMMEDIATE;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		clear_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		set_bit(RTW_TXQ_AMPDU, &rtwtxq->flags);
		break;
	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	return 0;
}

/* mac80211 can_aggregate_in_amsdu callback: allow A-MSDU TX only on 5G. */
static bool rtw_ops_can_aggregate_in_amsdu(struct ieee80211_hw *hw,
					   struct sk_buff *head,
					   struct sk_buff *skb)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_hal *hal = &rtwdev->hal;

	/* we don't want to enable TX AMSDU on 2.4G */
	if (hal->current_band_type == RTW_BAND_2G)
		return false;

	return true;
}

/* mac80211 sw_scan_start callback: enter software-scan mode. */
static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  const u8 *mac_addr)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;

	mutex_lock(&rtwdev->mutex);
	rtw_core_scan_start(rtwdev, rtwvif, mac_addr, false);
	mutex_unlock(&rtwdev->mutex);
}

/* mac80211 sw_scan_complete callback: leave software-scan mode. */
static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtw_core_scan_complete(rtwdev, vif, false);
	mutex_unlock(&rtwdev->mutex);
}

/* mac80211 mgd_prepare_tx callback: wake the chip and notify coexistence
 * before the managed-mode association exchange is transmitted.
 */
static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_prep_tx_info *info)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtw_leave_lps_deep(rtwdev);
	rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
	rtw_chip_prepare_tx(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}

/* mac80211 set_rts_threshold callback: cache the threshold value. */
static int rtw_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtwdev->rts_threshold = value;
	mutex_unlock(&rtwdev->mutex);

	return 0;
}

/* mac80211 sta_statistics callback: report the last RA TX rate. */
static void rtw_ops_sta_statistics(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct station_info *sinfo)
{
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	sinfo->txrate = si->ra_report.txrate;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}

/* mac80211 flush callback: flush (or drop) the requested HCI/MAC queues. */
static void rtw_ops_flush(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u32 queues, bool drop)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	rtw_leave_lps_deep(rtwdev);

	rtw_hci_flush_queues(rtwdev, queues, drop);
	rtw_mac_flush_queues(rtwdev, queues, drop);
	mutex_unlock(&rtwdev->mutex);
}

/* Context passed to the station iterator when a bitrate mask changes. */
struct rtw_iter_bitrate_mask_data {
	struct rtw_dev *rtwdev;
	struct ieee80211_vif *vif;
	const struct cfg80211_bitrate_mask *mask;
};

/* Station iterator: copy the new bitrate mask into each station that
 * belongs to the target vif and refresh its rate-adaptation info.
 */
static void rtw_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_iter_bitrate_mask_data *br_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	if (si->vif != br_data->vif)
		return;

	/* free previous mask setting */
	kfree(si->mask);
	si->mask = kmemdup(br_data->mask, sizeof(struct cfg80211_bitrate_mask),
			   GFP_ATOMIC);
	if (!si->mask) {
		/* allocation failed: fall back to no configured mask */
		si->use_cfg_mask = false;
		return;
	}

	si->use_cfg_mask = true;
	rtw_update_sta_info(br_data->rtwdev, si, true);
}

/* Apply a new bitrate mask to every station on the given vif. */
static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
				    struct ieee80211_vif *vif,
				    const struct cfg80211_bitrate_mask *mask)
{
	struct rtw_iter_bitrate_mask_data br_data;

	br_data.rtwdev = rtwdev;
	br_data.vif = vif;
	br_data.mask = mask;
	rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
}

/* mac80211 set_bitrate_mask callback. */
static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    const struct cfg80211_bitrate_mask *mask)
{
	struct rtw_dev *rtwdev = hw->priv;

	rtw_ra_mask_info_update(rtwdev, vif, mask);

	return 0;
}

/* mac80211 set_antenna callback: defer to the chip-specific handler,
 * -EOPNOTSUPP if the chip has none.
 */
static int rtw_ops_set_antenna(struct ieee80211_hw *hw,
			       u32 tx_antenna,
			       u32 rx_antenna)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	if (!chip->ops->set_antenna)
		return -EOPNOTSUPP;

	mutex_lock(&rtwdev->mutex);
	ret = chip->ops->set_antenna(rtwdev, tx_antenna, rx_antenna);
	mutex_unlock(&rtwdev->mutex);

	return ret;
}

/* mac80211 get_antenna callback: report the current antenna bitmaps. */
static int rtw_ops_get_antenna(struct ieee80211_hw *hw,
			       u32 *tx_antenna,
			       u32 *rx_antenna)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_hal *hal = &rtwdev->hal;

	*tx_antenna = hal->antenna_tx;
	*rx_antenna = hal->antenna_rx;

	return 0;
}

#ifdef CONFIG_PM
/* mac80211 suspend callback: enter WoWLAN; per mac80211 contract a
 * positive return tells the stack to drop all connections instead.
 */
static int rtw_ops_suspend(struct ieee80211_hw *hw,
			   struct cfg80211_wowlan *wowlan)
{
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	mutex_lock(&rtwdev->mutex);
	ret = rtw_wow_suspend(rtwdev, wowlan);
	if (ret)
		rtw_err(rtwdev, "failed to suspend for wow %d\n", ret);
	mutex_unlock(&rtwdev->mutex);

	return ret ? 1 : 0;
}

/* mac80211 resume callback: leave WoWLAN; positive return requests a
 * full hardware restart from mac80211.
 */
static int rtw_ops_resume(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	mutex_lock(&rtwdev->mutex);
	ret = rtw_wow_resume(rtwdev);
	if (ret)
		rtw_err(rtwdev, "failed to resume for wow %d\n", ret);
	mutex_unlock(&rtwdev->mutex);

	return ret ? 1 : 0;
}

/* mac80211 set_wakeup callback: toggle device wakeup capability. */
static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct rtw_dev *rtwdev = hw->priv;

	device_set_wakeup_enable(rtwdev->dev, enabled);
}
#endif

/* mac80211 reconfig_complete callback: clear the restarting flag once a
 * HW-restart reconfiguration has finished.
 */
static void rtw_reconfig_complete(struct ieee80211_hw *hw,
				  enum ieee80211_reconfig_type reconfig_type)
{
	struct rtw_dev *rtwdev = hw->priv;

	mutex_lock(&rtwdev->mutex);
	if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
		clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
	mutex_unlock(&rtwdev->mutex);
}

/* mac80211 hw_scan callback: start a firmware-offloaded scan.  Returning
 * 1 (when the FW lacks scan offload) makes mac80211 fall back to SW scan.
 */
static int rtw_ops_hw_scan(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   struct ieee80211_scan_request *req)
{
	struct rtw_dev *rtwdev = hw->priv;
	int ret;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
		return 1;

	if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return -EBUSY;

	mutex_lock(&rtwdev->mutex);
	rtw_hw_scan_start(rtwdev, vif, req);
	ret = rtw_hw_scan_offload(rtwdev, vif, true);
	if (ret) {
		rtw_hw_scan_abort(rtwdev, vif);
		rtw_err(rtwdev, "HW scan failed with status: %d\n", ret);
	}
	mutex_unlock(&rtwdev->mutex);

	return ret;
}

/* mac80211 cancel_hw_scan callback: abort a running offloaded scan. */
static void rtw_ops_cancel_hw_scan(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = hw->priv;

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
		return;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	mutex_lock(&rtwdev->mutex);
	rtw_hw_scan_abort(rtwdev, vif);
	mutex_unlock(&rtwdev->mutex);
}

/* mac80211 set_sar_specs callback: apply SAR power limits. */
static int rtw_ops_set_sar_specs(struct ieee80211_hw *hw,
				 const struct cfg80211_sar_specs *sar)
{
	struct rtw_dev *rtwdev = hw->priv;

	rtw_set_sar_specs(rtwdev, sar);

	return 0;
}

/* mac80211 sta_rc_update callback: refresh rate-adaptation info when the
 * station's operating bandwidth changes.
 */
static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta, u32 changed)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;

	if (changed & IEEE80211_RC_BW_CHANGED)
		rtw_update_sta_info(rtwdev, si, true);
}

/* mac80211 ops table registered by the rtw88 core. */
const struct ieee80211_ops rtw_ops = {
	.tx			= rtw_ops_tx,
	.wake_tx_queue		= rtw_ops_wake_tx_queue,
	.start			= rtw_ops_start,
	.stop			= rtw_ops_stop,
	.config			= rtw_ops_config,
	.add_interface		= rtw_ops_add_interface,
	.remove_interface	= rtw_ops_remove_interface,
	.change_interface	= rtw_ops_change_interface,
	.configure_filter	= rtw_ops_configure_filter,
	.bss_info_changed	= rtw_ops_bss_info_changed,
	.start_ap		= rtw_ops_start_ap,
	.conf_tx		= rtw_ops_conf_tx,
	.sta_add		= rtw_ops_sta_add,
	.sta_remove		= rtw_ops_sta_remove,
	.set_tim		= rtw_ops_set_tim,
	.set_key		= rtw_ops_set_key,
	.ampdu_action		= rtw_ops_ampdu_action,
	.can_aggregate_in_amsdu	= rtw_ops_can_aggregate_in_amsdu,
	.sw_scan_start		= rtw_ops_sw_scan_start,
	.sw_scan_complete	= rtw_ops_sw_scan_complete,
	.mgd_prepare_tx		= rtw_ops_mgd_prepare_tx,
	.set_rts_threshold	= rtw_ops_set_rts_threshold,
	.sta_statistics		= rtw_ops_sta_statistics,
	.flush			= rtw_ops_flush,
	.set_bitrate_mask	= rtw_ops_set_bitrate_mask,
	.set_antenna		= rtw_ops_set_antenna,
	.get_antenna		= rtw_ops_get_antenna,
	.reconfig_complete	= rtw_reconfig_complete,
	.hw_scan		= rtw_ops_hw_scan,
	.cancel_hw_scan		= rtw_ops_cancel_hw_scan,
	.sta_rc_update		= rtw_ops_sta_rc_update,
	.set_sar_specs		= rtw_ops_set_sar_specs,
#ifdef CONFIG_PM
	.suspend		= rtw_ops_suspend,
	.resume			= rtw_ops_resume,
	.set_wakeup		= rtw_ops_set_wakeup,
#endif
};
EXPORT_SYMBOL(rtw_ops);