diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h
index 7a8306919194..dcdad44665a3 100644
--- a/sys/compat/linuxkpi/common/include/net/mac80211.h
+++ b/sys/compat/linuxkpi/common/include/net/mac80211.h
@@ -1,2115 +1,2121 @@
/*-
 * Copyright (c) 2020-2021 The FreeBSD Foundation
 * Copyright (c) 2020-2022 Bjoern A. Zeeb
 *
 * This software was developed by Björn Zeeb under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_LINUXKPI_NET_MAC80211_H
#define	_LINUXKPI_NET_MAC80211_H

#include <sys/types.h>

#include <asm/atomic64.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/cfg80211.h>

#define	ARPHRD_IEEE80211_RADIOTAP	__LINE__ /* XXX TODO brcmfmac */

#define	WLAN_OUI_MICROSOFT		(0x0050F2)
#define	WLAN_OUI_TYPE_MICROSOFT_WPA	(1)
#define	WLAN_OUI_TYPE_MICROSOFT_TPC	(8)
#define	WLAN_OUI_TYPE_WFA_P2P		(9)
#define	WLAN_OUI_WFA			(0x506F9A)

/* hw->conf.flags */
enum ieee80211_hw_conf_flags {
	IEEE80211_CONF_IDLE		= BIT(0),
	IEEE80211_CONF_PS		= BIT(1),
	IEEE80211_CONF_MONITOR		= BIT(2),
};

/* (*ops->config()) */
enum ieee80211_hw_conf_changed_flags {
	IEEE80211_CONF_CHANGE_CHANNEL	= BIT(0),
	IEEE80211_CONF_CHANGE_IDLE	= BIT(1),
	IEEE80211_CONF_CHANGE_PS	= BIT(2),
	IEEE80211_CONF_CHANGE_MONITOR	= BIT(3),
};

#define	CFG80211_TESTMODE_CMD(_x)	/* XXX TODO */

#define	FCS_LEN				4

/* ops.configure_filter() */
enum mcast_filter_flags {
	FIF_ALLMULTI			= BIT(0),
	FIF_PROBE_REQ			= BIT(1),
	FIF_BCN_PRBRESP_PROMISC		= BIT(2),
	FIF_FCSFAIL			= BIT(3),
	FIF_OTHER_BSS			= BIT(4),
	FIF_PSPOLL			= BIT(5),
	FIF_CONTROL			= BIT(6),
};

enum ieee80211_bss_changed {
	BSS_CHANGED_ARP_FILTER		= BIT(0),
	BSS_CHANGED_ASSOC		= BIT(1),
	BSS_CHANGED_BANDWIDTH		= BIT(2),
	BSS_CHANGED_BEACON		= BIT(3),
	BSS_CHANGED_BEACON_ENABLED	= BIT(4),
	BSS_CHANGED_BEACON_INFO		= BIT(5),
	BSS_CHANGED_BEACON_INT		= BIT(6),
	BSS_CHANGED_BSSID		= BIT(7),
	BSS_CHANGED_CQM			= BIT(8),
	BSS_CHANGED_ERP_CTS_PROT	= BIT(9),
	BSS_CHANGED_ERP_SLOT		= BIT(10),
	BSS_CHANGED_FTM_RESPONDER	= BIT(11),
	BSS_CHANGED_HT			= BIT(12),
	BSS_CHANGED_IDLE		= BIT(13),
	BSS_CHANGED_MU_GROUPS		= BIT(14),
	BSS_CHANGED_P2P_PS		= BIT(15),
	BSS_CHANGED_PS			= BIT(16),
	BSS_CHANGED_QOS			= BIT(17),
	BSS_CHANGED_TXPOWER		= BIT(18),
	BSS_CHANGED_HE_BSS_COLOR	= BIT(19),
	BSS_CHANGED_AP_PROBE_RESP	= BIT(20),
	BSS_CHANGED_BASIC_RATES		= BIT(21),
	BSS_CHANGED_ERP_PREAMBLE
= BIT(22), BSS_CHANGED_IBSS = BIT(23), BSS_CHANGED_MCAST_RATE = BIT(24), BSS_CHANGED_SSID = BIT(25), }; /* 802.11 Figure 9-256 Suite selector format. [OUI(3), SUITE TYPE(1)] */ #define WLAN_CIPHER_SUITE_OUI(_oui, _x) (((_oui) << 8) | ((_x) & 0xff)) /* 802.11 Table 9-131 Cipher suite selectors. */ /* 802.1x suite B 11 */ #define WLAN_CIPHER_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x) /* Use group 0 */ #define WLAN_CIPHER_SUITE_WEP40 WLAN_CIPHER_SUITE(1) #define WLAN_CIPHER_SUITE_TKIP WLAN_CIPHER_SUITE(2) /* Reserved 3 */ #define WLAN_CIPHER_SUITE_CCMP WLAN_CIPHER_SUITE(4) /* CCMP-128 */ #define WLAN_CIPHER_SUITE_WEP104 WLAN_CIPHER_SUITE(5) #define WLAN_CIPHER_SUITE_AES_CMAC WLAN_CIPHER_SUITE(6) /* BIP-CMAC-128 */ /* Group addressed traffic not allowed 7 */ #define WLAN_CIPHER_SUITE_GCMP WLAN_CIPHER_SUITE(8) #define WLAN_CIPHER_SUITE_GCMP_256 WLAN_CIPHER_SUITE(9) #define WLAN_CIPHER_SUITE_CCMP_256 WLAN_CIPHER_SUITE(10) #define WLAN_CIPHER_SUITE_BIP_GMAC_128 WLAN_CIPHER_SUITE(11) #define WLAN_CIPHER_SUITE_BIP_GMAC_256 WLAN_CIPHER_SUITE(12) #define WLAN_CIPHER_SUITE_BIP_CMAC_256 WLAN_CIPHER_SUITE(13) /* Reserved 14-255 */ /* 802.11 Table 9-133 AKM suite selectors. */ #define WLAN_AKM_SUITE(_x) WLAN_CIPHER_SUITE_OUI(0x000fac, _x) /* Reserved 0 */ #define WLAN_AKM_SUITE_8021X WLAN_AKM_SUITE(1) #define WLAN_AKM_SUITE_PSK WLAN_AKM_SUITE(2) #define WLAN_AKM_SUITE_FT_8021X WLAN_AKM_SUITE(3) #define WLAN_AKM_SUITE_FT_PSK WLAN_AKM_SUITE(4) #define WLAN_AKM_SUITE_8021X_SHA256 WLAN_AKM_SUITE(5) #define WLAN_AKM_SUITE_PSK_SHA256 WLAN_AKM_SUITE(6) /* TDLS 7 */ #define WLAN_AKM_SUITE_SAE WLAN_AKM_SUITE(8) /* FToSAE 9 */ /* AP peer key 10 */ /* 802.1x suite B 11 */ /* 802.1x suite B 384 12 */ /* FTo802.1x 384 13 */ /* Reserved 14-255 */ /* Apparently 11ax defines more. Seen (19,20) mentioned. */ struct ieee80211_sta; struct ieee80211_ampdu_params { /* TODO FIXME */ struct ieee80211_sta *sta; uint8_t tid; uint16_t ssn; int action, amsdu, buf_size, timeout; }; struct ieee80211_bar { /* TODO FIXME */ int control, start_seq_num; uint8_t *ra; uint16_t frame_control; }; struct ieee80211_p2p_noa_desc { uint32_t count; /* uint8_t ? 
*/ uint32_t duration; uint32_t interval; uint32_t start_time; }; struct ieee80211_p2p_noa_attr { uint8_t index; uint8_t oppps_ctwindow; struct ieee80211_p2p_noa_desc desc[4]; }; struct ieee80211_mutable_offsets { /* TODO FIXME */ uint16_t tim_offset; int cntdwn_counter_offs; }; #define WLAN_MEMBERSHIP_LEN (8) #define WLAN_USER_POSITION_LEN (16) struct ieee80211_bss_conf { /* TODO FIXME */ uint8_t bssid[ETH_ALEN]; uint8_t transmitter_bssid[ETH_ALEN]; struct ieee80211_ftm_responder_params *ftmr_params; struct ieee80211_p2p_noa_attr p2p_noa_attr; struct cfg80211_chan_def chandef; __be32 arp_addr_list[1]; /* XXX TODO */ struct ieee80211_rate *beacon_rate; struct { uint8_t membership[WLAN_MEMBERSHIP_LEN]; uint8_t position[WLAN_USER_POSITION_LEN]; } mu_group; struct { int color; } he_bss_color; size_t ssid_len; uint8_t ssid[IEEE80211_NWID_LEN]; uint16_t aid; uint16_t ht_operation_mode; int arp_addr_cnt; uint8_t dtim_period; bool assoc; bool idle; bool qos; bool ps; bool twt_broadcast; bool use_cts_prot; bool use_short_preamble; bool use_short_slot; uint16_t beacon_int; uint32_t sync_device_ts; uint64_t sync_tsf; uint8_t sync_dtim_count; int16_t txpower; int mcast_rate[NUM_NL80211_BANDS]; int ack_enabled, bssid_index, bssid_indicator, cqm_rssi_hyst, cqm_rssi_thold, ema_ap, frame_time_rts_th, ftm_responder; int htc_trig_based_pkt_ext; int multi_sta_back_32bit, nontransmitted; int profile_periodicity; int twt_requester, uora_exists, uora_ocw_range; int assoc_capability, enable_beacon, hidden_ssid, ibss_joined, twt_protected; int fils_discovery, he_obss_pd, he_oper, twt_responder, unsol_bcast_probe_resp_interval; unsigned long basic_rates; bool he_support; }; struct ieee80211_chanctx_conf { /* TODO FIXME */ int rx_chains_dynamic, rx_chains_static; bool radar_enabled; struct cfg80211_chan_def def; struct cfg80211_chan_def min_def; /* Must stay last. */ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_channel_switch { /* TODO FIXME */ int block_tx, count, delay, device_timestamp, timestamp; struct cfg80211_chan_def chandef; }; struct ieee80211_cipher_scheme { uint32_t cipher; uint8_t iftype; /* We do not know the size of this. 
*/ uint8_t hdr_len; uint8_t pn_len; uint8_t pn_off; uint8_t key_idx_off; uint8_t key_idx_mask; uint8_t key_idx_shift; uint8_t mic_len; }; enum ieee80211_event_type { BA_FRAME_TIMEOUT, BAR_RX_EVENT, MLME_EVENT, RSSI_EVENT, }; enum ieee80211_rssi_event_data { RSSI_EVENT_LOW, RSSI_EVENT_HIGH, }; enum ieee80211_mlme_event_data { ASSOC_EVENT, AUTH_EVENT, DEAUTH_RX_EVENT, DEAUTH_TX_EVENT, }; enum ieee80211_mlme_event_status { MLME_DENIED, MLME_TIMEOUT, }; struct ieee80211_mlme_event { enum ieee80211_mlme_event_data data; enum ieee80211_mlme_event_status status; int reason; }; struct ieee80211_event { /* TODO FIXME */ enum ieee80211_event_type type; union { struct { int ssn; struct ieee80211_sta *sta; uint8_t tid; } ba; struct ieee80211_mlme_event mlme; } u; }; struct ieee80211_ftm_responder_params { /* TODO FIXME */ uint8_t *lci; uint8_t *civicloc; int lci_len; int civicloc_len; }; struct ieee80211_he_mu_edca_param_ac_rec { /* TODO FIXME */ int aifsn, ecw_min_max, mu_edca_timer; }; struct ieee80211_conf { int dynamic_ps_timeout; uint32_t listen_interval; enum ieee80211_hw_conf_flags flags; struct cfg80211_chan_def chandef; }; enum ieee80211_hw_flags { IEEE80211_HW_AMPDU_AGGREGATION, IEEE80211_HW_AP_LINK_PS, IEEE80211_HW_BUFF_MMPDU_TXQ, IEEE80211_HW_CHANCTX_STA_CSA, IEEE80211_HW_CONNECTION_MONITOR, IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_HAS_RATE_CONTROL, IEEE80211_HW_MFP_CAPABLE, IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR, IEEE80211_HW_REPORTS_TX_ACK_STATUS, IEEE80211_HW_RX_INCLUDES_FCS, IEEE80211_HW_SIGNAL_DBM, IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, IEEE80211_HW_SPECTRUM_MGMT, IEEE80211_HW_STA_MMPDU_TXQ, IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU, IEEE80211_HW_SUPPORTS_CLONED_SKBS, IEEE80211_HW_SUPPORTS_DYNAMIC_PS, IEEE80211_HW_SUPPORTS_MULTI_BSSID, IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID, IEEE80211_HW_SUPPORTS_PS, IEEE80211_HW_SUPPORTS_REORDERING_BUFFER, IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, IEEE80211_HW_SUPPORT_FAST_XMIT, IEEE80211_HW_TDLS_WIDER_BW, IEEE80211_HW_TIMING_BEACON_ONLY, IEEE80211_HW_TX_AMPDU_SETUP_IN_HW, IEEE80211_HW_TX_AMSDU, IEEE80211_HW_TX_FRAG_LIST, IEEE80211_HW_USES_RSS, IEEE80211_HW_WANT_MONITOR_VIF, IEEE80211_HW_SW_CRYPTO_CONTROL, IEEE80211_HW_SUPPORTS_TX_FRAG, IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, IEEE80211_HW_SUPPORTS_PER_STA_GTK, IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_QUEUE_CONTROL, /* Keep last. */ NUM_IEEE80211_HW_FLAGS }; struct ieee80211_hw { struct wiphy *wiphy; /* TODO FIXME */ int max_rx_aggregation_subframes, max_tx_aggregation_subframes; int extra_tx_headroom, weight_multiplier; int max_rate_tries, max_rates, max_report_rates; struct ieee80211_cipher_scheme *cipher_schemes; int n_cipher_schemes; const char *rate_control_algorithm; struct { uint16_t units_pos; /* radiotap "spec" is .. inconsistent. */ uint16_t accuracy; } radiotap_timestamp; size_t sta_data_size; size_t vif_data_size; size_t chanctx_data_size; size_t txq_data_size; uint16_t radiotap_mcs_details; uint16_t radiotap_vht_details; uint16_t queues; uint16_t offchannel_tx_hw_queue; uint16_t uapsd_max_sp_len; uint16_t uapsd_queues; uint16_t max_tx_fragments; uint16_t max_listen_interval; netdev_features_t netdev_features; unsigned long flags[BITS_TO_LONGS(NUM_IEEE80211_HW_FLAGS)]; struct ieee80211_conf conf; #if 0 /* leave here for documentation purposes. This does NOT work. */ /* Must stay last. 
*/ uint8_t priv[0] __aligned(CACHE_LINE_SIZE); #else void *priv; #endif }; enum ieee802111_key_flag { IEEE80211_KEY_FLAG_GENERATE_IV = BIT(0), IEEE80211_KEY_FLAG_GENERATE_MMIC = BIT(1), IEEE80211_KEY_FLAG_PAIRWISE = BIT(2), IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(3), IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(4), IEEE80211_KEY_FLAG_SW_MGMT_TX = BIT(5), IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(6), }; struct ieee80211_key_conf { atomic64_t tx_pn; uint32_t cipher; uint8_t icv_len; /* __unused nowadays? */ uint8_t iv_len; uint8_t hw_key_idx; /* Set by drv. */ uint8_t keyidx; uint16_t flags; uint8_t keylen; uint8_t key[0]; }; struct ieee80211_key_seq { /* TODO FIXME */ union { struct { uint8_t seq[IEEE80211_MAX_PN_LEN]; uint8_t seq_len; } hw; struct { uint8_t pn[IEEE80211_CCMP_PN_LEN]; } ccmp; struct { uint8_t pn[IEEE80211_CCMP_PN_LEN]; } aes_cmac; struct { uint32_t iv32; uint16_t iv16; } tkip; }; }; enum ieee80211_rx_status_flags { RX_FLAG_ALLOW_SAME_PN = BIT(0), RX_FLAG_AMPDU_DETAILS = BIT(1), RX_FLAG_AMPDU_EOF_BIT = BIT(2), RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(3), RX_FLAG_DECRYPTED = BIT(4), RX_FLAG_DUP_VALIDATED = BIT(5), RX_FLAG_FAILED_FCS_CRC = BIT(6), RX_FLAG_ICV_STRIPPED = BIT(7), RX_FLAG_MACTIME_PLCP_START = BIT(8), RX_FLAG_MACTIME_START = BIT(9), RX_FLAG_MIC_STRIPPED = BIT(10), RX_FLAG_MMIC_ERROR = BIT(11), RX_FLAG_MMIC_STRIPPED = BIT(12), RX_FLAG_NO_PSDU = BIT(13), RX_FLAG_PN_VALIDATED = BIT(14), RX_FLAG_RADIOTAP_HE = BIT(15), RX_FLAG_RADIOTAP_HE_MU = BIT(16), RX_FLAG_RADIOTAP_LSIG = BIT(17), RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(18), RX_FLAG_NO_SIGNAL_VAL = BIT(19), RX_FLAG_IV_STRIPPED = BIT(20), RX_FLAG_AMPDU_IS_LAST = BIT(21), RX_FLAG_AMPDU_LAST_KNOWN = BIT(22), RX_FLAG_AMSDU_MORE = BIT(23), RX_FLAG_MACTIME_END = BIT(24), RX_FLAG_ONLY_MONITOR = BIT(25), RX_FLAG_SKIP_MONITOR = BIT(26), }; struct ieee80211_rx_status { /* TODO FIXME, this is too large. Over-reduce types to u8 where possible. */ uint64_t boottime_ns; uint64_t mactime; uint32_t device_timestamp; enum ieee80211_rx_status_flags flag; uint16_t freq; uint8_t bw; #define RATE_INFO_BW_20 0x01 #define RATE_INFO_BW_40 0x02 #define RATE_INFO_BW_80 0x04 #define RATE_INFO_BW_160 0x08 #define RATE_INFO_BW_HE_RU 0x10 uint8_t encoding; #define RX_ENC_LEGACY 0x00 #define RX_ENC_HE 0x01 #define RX_ENC_HT 0x02 #define RX_ENC_VHT 0x04 uint8_t ampdu_reference; uint8_t band; uint8_t chains; int8_t chain_signal[IEEE80211_MAX_CHAINS]; int8_t signal; uint8_t enc_flags; uint8_t he_dcm; uint8_t he_gi; uint8_t he_ru; uint8_t zero_length_psdu_type; uint8_t nss; uint8_t rate_idx; }; struct ieee80211_scan_ies { /* TODO FIXME */ int common_ie_len; int len[NUM_NL80211_BANDS]; uint8_t *common_ies; uint8_t *ies[NUM_NL80211_BANDS]; }; struct ieee80211_scan_request { struct ieee80211_scan_ies ies; struct cfg80211_scan_request req; }; struct ieee80211_txq { struct ieee80211_sta *sta; struct ieee80211_vif *vif; int ac; uint8_t tid; /* Must stay last. */ uint8_t drv_priv[0] __aligned(CACHE_LINE_SIZE); }; struct ieee80211_sta_rates { /* XXX TODO */ /* XXX some _rcu thing */ struct { int idx; int flags; } rate[1]; /* XXX what is the real number? 
*/
};

struct ieee80211_sta_txpwr {
	/* XXX TODO */
	enum nl80211_tx_power_setting	type;
	short				power;
};

-#define	IEEE80211_NUM_TIDS		16	/* net80211::WME_NUM_TID */
-struct ieee80211_sta {
-	/* TODO FIXME */
-	int	max_amsdu_len, max_amsdu_subframes, max_rc_amsdu_len, max_sp;
-	int	mfp, rx_nss, smps_mode, tdls, tdls_initiator, uapsd_queues, wme;
-	enum ieee80211_sta_rx_bw		bandwidth;
+struct ieee80211_link_sta {
+	uint32_t				supp_rates[NUM_NL80211_BANDS];
	struct ieee80211_sta_ht_cap		ht_cap;
	struct ieee80211_sta_vht_cap		vht_cap;
	struct ieee80211_sta_he_cap		he_cap;
	struct ieee80211_sta_he_6ghz_capa	he_6ghz_capa;
+	uint8_t					rx_nss;
+	enum ieee80211_sta_rx_bw		bandwidth;
+	struct ieee80211_sta_txpwr		txpwr;
+};
+
+#define	IEEE80211_NUM_TIDS		16	/* net80211::WME_NUM_TID */
+struct ieee80211_sta {
+	/* TODO FIXME */
+	int	max_amsdu_len, max_amsdu_subframes, max_rc_amsdu_len, max_sp;
+	int	mfp, smps_mode, tdls, tdls_initiator, uapsd_queues, wme;
	struct ieee80211_txq			*txq[IEEE80211_NUM_TIDS + 1];	/* iwlwifi: 8 and adds +1 to tid_data, net80211::IEEE80211_TID_SIZE */
	struct ieee80211_sta_rates		*rates;	/* some rcu thing? */
-	struct ieee80211_sta_txpwr		txpwr;
	uint32_t				max_tid_amsdu_len[IEEE80211_NUM_TIDS];
-	uint32_t				supp_rates[NUM_NL80211_BANDS];
	uint8_t					addr[ETH_ALEN];
	uint16_t				aid;
+	struct ieee80211_link_sta		deflink;
+
	/* Must stay last. */
	uint8_t					drv_priv[0] __aligned(CACHE_LINE_SIZE);
};

struct ieee80211_tdls_ch_sw_params {
	/* TODO FIXME */
	int	action_code, ch_sw_tm_ie, status, switch_time, switch_timeout, timestamp;
	struct ieee80211_sta		*sta;
	struct cfg80211_chan_def	*chandef;
	struct sk_buff			*tmpl_skb;
};

struct ieee80211_tx_control {
	/* TODO FIXME */
	struct ieee80211_sta		*sta;
};

struct ieee80211_tx_queue_params {
	/* These types are based on iwlwifi FW structs. */
	uint16_t	cw_min;
	uint16_t	cw_max;
	uint16_t	txop;
	uint8_t		aifs;

	/* TODO FIXME */
	int	acm, mu_edca, uapsd;
	struct ieee80211_he_mu_edca_param_ac_rec	mu_edca_param_rec;
};

struct ieee80211_tx_rate {
	uint8_t		idx;
	uint16_t	count:5,
			flags:11;
};

enum ieee80211_vif_driver_flags {
	IEEE80211_VIF_BEACON_FILTER	= BIT(0),
	IEEE80211_VIF_SUPPORTS_CQM_RSSI	= BIT(1),
	IEEE80211_VIF_SUPPORTS_UAPSD	= BIT(2),
};

struct ieee80211_vif {
	/* TODO FIXME */
	enum nl80211_iftype		type;
	int	csa_active, mu_mimo_owner;
	int	cab_queue;
	int	color_change_active, offload_flags;
	enum ieee80211_vif_driver_flags	driver_flags;
	bool				p2p;
	bool				probe_req_reg;
	uint8_t				addr[ETH_ALEN];
	struct ieee80211_chanctx_conf	*chanctx_conf;
	struct ieee80211_txq		*txq;
	struct ieee80211_bss_conf	bss_conf;
	uint8_t				hw_queue[IEEE80211_NUM_ACS];

	/* Must stay last. */
	uint8_t				drv_priv[0] __aligned(CACHE_LINE_SIZE);
};

struct ieee80211_vif_chanctx_switch {
	struct ieee80211_chanctx_conf	*old_ctx, *new_ctx;
	struct ieee80211_vif		*vif;
};

struct ieee80211_prep_tx_info {
	u16	duration;
	bool	success;
};

/* XXX-BZ too big, over-reduce size to u8, and array sizes to minimum to fit in skb->cb. */
/* Also warning: some sizes change by pointer size! This is 64bit only.
*/ struct ieee80211_tx_info { enum ieee80211_tx_info_flags flags; /* TODO FIXME */ u8 band; u8 hw_queue; bool tx_time_est; union { struct { struct ieee80211_tx_rate rates[4]; bool use_rts; struct ieee80211_vif *vif; struct ieee80211_key_conf *hw_key; enum ieee80211_tx_control_flags flags; } control; struct { struct ieee80211_tx_rate rates[4]; uint32_t ack_signal; uint8_t ampdu_ack_len; uint8_t ampdu_len; uint8_t antenna; uint16_t tx_time; bool is_valid_ack_signal; void *status_driver_data[16 / sizeof(void *)]; /* XXX TODO */ } status; #define IEEE80211_TX_INFO_DRIVER_DATA_SIZE (5 * sizeof(void *)) /* XXX TODO 5? */ void *driver_data[IEEE80211_TX_INFO_DRIVER_DATA_SIZE / sizeof(void *)]; }; }; /* net80211 conflict */ struct linuxkpi_ieee80211_tim_ie { uint8_t dtim_count; uint8_t dtim_period; uint8_t bitmap_ctrl; uint8_t *virtual_map; }; #define ieee80211_tim_ie linuxkpi_ieee80211_tim_ie struct survey_info { /* net80211::struct ieee80211_channel_survey */ /* TODO FIXME */ uint32_t filled; #define SURVEY_INFO_TIME 0x0001 #define SURVEY_INFO_TIME_RX 0x0002 #define SURVEY_INFO_TIME_SCAN 0x0004 #define SURVEY_INFO_TIME_TX 0x0008 #define SURVEY_INFO_TIME_BSS_RX 0x0010 #define SURVEY_INFO_TIME_BUSY 0x0020 #define SURVEY_INFO_IN_USE 0x0040 #define SURVEY_INFO_NOISE_DBM 0x0080 uint32_t noise; uint64_t time; uint64_t time_bss_rx; uint64_t time_busy; uint64_t time_rx; uint64_t time_scan; uint64_t time_tx; struct ieee80211_channel *channel; }; enum ieee80211_iface_iter { IEEE80211_IFACE_ITER_NORMAL = BIT(0), IEEE80211_IFACE_ITER_RESUME_ALL = BIT(1), IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2), /* seems to be an iter flag */ /* Internal flags only. */ /* ieee80211_iterate_active_interfaces*(). */ IEEE80211_IFACE_ITER__ATOMIC = BIT(6), IEEE80211_IFACE_ITER__ACTIVE = BIT(7), }; enum set_key_cmd { SET_KEY, DISABLE_KEY, }; enum rx_enc_flags { RX_ENC_FLAG_SHORTPRE = BIT(0), RX_ENC_FLAG_SHORT_GI = BIT(1), RX_ENC_FLAG_HT_GF = BIT(2), RX_ENC_FLAG_LDPC = BIT(3), RX_ENC_FLAG_BF = BIT(4), #define RX_ENC_FLAG_STBC_SHIFT 6 }; enum sta_notify_cmd { STA_NOTIFY_AWAKE, STA_NOTIFY_SLEEP, }; struct ieee80211_ops { /* TODO FIXME */ int (*start)(struct ieee80211_hw *); void (*stop)(struct ieee80211_hw *); int (*config)(struct ieee80211_hw *, u32); void (*reconfig_complete)(struct ieee80211_hw *, enum ieee80211_reconfig_type); int (*add_interface)(struct ieee80211_hw *, struct ieee80211_vif *); void (*remove_interface)(struct ieee80211_hw *, struct ieee80211_vif *); int (*change_interface)(struct ieee80211_hw *, struct ieee80211_vif *, enum nl80211_iftype, bool); void (*sw_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, const u8 *); void (*sw_scan_complete)(struct ieee80211_hw *, struct ieee80211_vif *); int (*sched_scan_start)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_sched_scan_request *, struct ieee80211_scan_ies *); int (*sched_scan_stop)(struct ieee80211_hw *, struct ieee80211_vif *); int (*hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_scan_request *); void (*cancel_hw_scan)(struct ieee80211_hw *, struct ieee80211_vif *); int (*conf_tx)(struct ieee80211_hw *, struct ieee80211_vif *, u16, const struct ieee80211_tx_queue_params *); void (*tx)(struct ieee80211_hw *, struct ieee80211_tx_control *, struct sk_buff *); int (*tx_last_beacon)(struct ieee80211_hw *); void (*wake_tx_queue)(struct ieee80211_hw *, struct ieee80211_txq *); void (*mgd_prepare_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void 
(*mgd_complete_tx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_prep_tx_info *); void (*mgd_protect_tdls_discover)(struct ieee80211_hw *, struct ieee80211_vif *); void (*flush)(struct ieee80211_hw *, struct ieee80211_vif *, u32, bool); int (*set_frag_threshold)(struct ieee80211_hw *, u32); void (*sync_rx_queues)(struct ieee80211_hw *); void (*allow_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); void (*release_buffered_frames)(struct ieee80211_hw *, struct ieee80211_sta *, u16, int, enum ieee80211_frame_release_type, bool); int (*sta_add)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_set_txpwr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_statistics)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct station_info *); void (*sta_pre_rcu_remove)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); int (*sta_state)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, enum ieee80211_sta_state, enum ieee80211_sta_state); void (*sta_notify)(struct ieee80211_hw *, struct ieee80211_vif *, enum sta_notify_cmd, struct ieee80211_sta *); void (*sta_rc_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u32); void (*sta_rate_tbl_update)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*sta_set_4addr)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, bool); u64 (*prepare_multicast)(struct ieee80211_hw *, struct netdev_hw_addr_list *); int (*ampdu_action)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_ampdu_params *); bool (*can_aggregate_in_amsdu)(struct ieee80211_hw *, struct sk_buff *, struct sk_buff *); int (*pre_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*post_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *); void (*channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); void (*abort_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *); void (*channel_switch_rx_beacon)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_channel_switch *); int (*tdls_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8, struct cfg80211_chan_def *, struct sk_buff *, u32); void (*tdls_cancel_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *); void (*tdls_recv_channel_switch)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_tdls_ch_sw_params *); int (*add_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*remove_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *); void (*change_chanctx)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, u32); int (*assign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); void (*unassign_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_chanctx_conf *); int (*switch_vif_chanctx)(struct ieee80211_hw *, struct ieee80211_vif_chanctx_switch *, int, enum ieee80211_chanctx_switch_mode); int (*get_antenna)(struct ieee80211_hw *, u32 *, u32 *); int (*set_antenna)(struct ieee80211_hw *, u32, u32); int (*remain_on_channel)(struct ieee80211_hw *, struct 
ieee80211_vif *, struct ieee80211_channel *, int, enum ieee80211_roc_type); int (*cancel_remain_on_channel)(struct ieee80211_hw *, struct ieee80211_vif *); void (*configure_filter)(struct ieee80211_hw *, unsigned int, unsigned int *, u64); void (*config_iface_filter)(struct ieee80211_hw *, struct ieee80211_vif *, unsigned int, unsigned int); void (*bss_info_changed)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_bss_conf *, u32); int (*set_rts_threshold)(struct ieee80211_hw *, u32); void (*event_callback)(struct ieee80211_hw *, struct ieee80211_vif *, const struct ieee80211_event *); int (*get_survey)(struct ieee80211_hw *, int, struct survey_info *); int (*get_ftm_responder_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_ftm_responder_stats *); void (*offset_tsf)(struct ieee80211_hw *, struct ieee80211_vif *, s64); int (*set_bitrate_mask)(struct ieee80211_hw *, struct ieee80211_vif *, const struct cfg80211_bitrate_mask *); void (*set_coverage_class)(struct ieee80211_hw *, s16); int (*set_tim)(struct ieee80211_hw *, struct ieee80211_sta *, bool); int (*set_key)(struct ieee80211_hw *, enum set_key_cmd, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *); void (*set_default_unicast_key)(struct ieee80211_hw *, struct ieee80211_vif *, int); void (*update_tkip_key)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_key_conf *, struct ieee80211_sta *, u32, u16 *); int (*start_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); void (*abort_pmsr)(struct ieee80211_hw *, struct ieee80211_vif *, struct cfg80211_pmsr_request *); int (*start_ap)(struct ieee80211_hw *, struct ieee80211_vif *); void (*stop_ap)(struct ieee80211_hw *, struct ieee80211_vif *); int (*join_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); void (*leave_ibss)(struct ieee80211_hw *, struct ieee80211_vif *); int (*set_sar_specs)(struct ieee80211_hw *, const struct cfg80211_sar_specs *); int (*set_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct cfg80211_tid_config *); int (*reset_tid_config)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, u8); int (*get_et_sset_count)(struct ieee80211_hw *, struct ieee80211_vif *, int); void (*get_et_stats)(struct ieee80211_hw *, struct ieee80211_vif *, struct ethtool_stats *, u64 *); void (*get_et_strings)(struct ieee80211_hw *, struct ieee80211_vif *, u32, u8 *); void (*update_vif_offload)(struct ieee80211_hw *, struct ieee80211_vif *); }; /* -------------------------------------------------------------------------- */ /* linux_80211.c */ extern const struct cfg80211_ops linuxkpi_mac80211cfgops; struct ieee80211_hw *linuxkpi_ieee80211_alloc_hw(size_t, const struct ieee80211_ops *); void linuxkpi_ieee80211_iffree(struct ieee80211_hw *); void linuxkpi_set_ieee80211_dev(struct ieee80211_hw *, char *); int linuxkpi_ieee80211_ifattach(struct ieee80211_hw *); void linuxkpi_ieee80211_ifdetach(struct ieee80211_hw *); struct ieee80211_hw * linuxkpi_wiphy_to_ieee80211_hw(struct wiphy *); void linuxkpi_ieee80211_iterate_interfaces( struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *); void linuxkpi_ieee80211_iterate_keys(struct ieee80211_hw *, struct ieee80211_vif *, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *); void linuxkpi_ieee80211_iterate_chan_contexts(struct 
ieee80211_hw *, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *), void *); void linuxkpi_ieee80211_iterate_stations_atomic(struct ieee80211_hw *, void (*iterfunc)(void *, struct ieee80211_sta *), void *); void linuxkpi_ieee80211_scan_completed(struct ieee80211_hw *, struct cfg80211_scan_info *); void linuxkpi_ieee80211_rx(struct ieee80211_hw *, struct sk_buff *, struct ieee80211_sta *, struct napi_struct *); uint8_t linuxkpi_ieee80211_get_tid(struct ieee80211_hdr *); struct ieee80211_sta *linuxkpi_ieee80211_find_sta(struct ieee80211_vif *, const u8 *); struct ieee80211_sta *linuxkpi_ieee80211_find_sta_by_ifaddr( struct ieee80211_hw *, const uint8_t *, const uint8_t *); struct sk_buff *linuxkpi_ieee80211_tx_dequeue(struct ieee80211_hw *, struct ieee80211_txq *); bool linuxkpi_ieee80211_is_ie_id_in_ie_buf(const u8, const u8 *, size_t); bool linuxkpi_ieee80211_ie_advance(size_t *, const u8 *, size_t); void linuxkpi_ieee80211_free_txskb(struct ieee80211_hw *, struct sk_buff *, int); void linuxkpi_ieee80211_queue_delayed_work(struct ieee80211_hw *, struct delayed_work *, int); void linuxkpi_ieee80211_queue_work(struct ieee80211_hw *, struct work_struct *); struct sk_buff *linuxkpi_ieee80211_pspoll_get(struct ieee80211_hw *, struct ieee80211_vif *); struct sk_buff *linuxkpi_ieee80211_nullfunc_get(struct ieee80211_hw *, struct ieee80211_vif *, bool); void linuxkpi_ieee80211_txq_get_depth(struct ieee80211_txq *, unsigned long *, unsigned long *); struct wireless_dev *linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *); void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *); void linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *); struct sk_buff *linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *, uint8_t *, uint8_t *, size_t, size_t); void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *, struct sk_buff *); /* -------------------------------------------------------------------------- */ static __inline void _ieee80211_hw_set(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag) { set_bit(flag, hw->flags); } static __inline bool __ieee80211_hw_check(struct ieee80211_hw *hw, enum ieee80211_hw_flags flag) { return (test_bit(flag, hw->flags)); } /* They pass in shortened flag names; how confusingly inconsistent. */ #define ieee80211_hw_set(_hw, _flag) \ _ieee80211_hw_set(_hw, IEEE80211_HW_ ## _flag) #define ieee80211_hw_check(_hw, _flag) \ __ieee80211_hw_check(_hw, IEEE80211_HW_ ## _flag) /* XXX-BZ add CTASSERTS that size of struct is <= sizeof skb->cb. */ CTASSERT(sizeof(struct ieee80211_tx_info) <= sizeof(((struct sk_buff *)0)->cb)); #define IEEE80211_SKB_CB(_skb) \ ((struct ieee80211_tx_info *)((_skb)->cb)) CTASSERT(sizeof(struct ieee80211_rx_status) <= sizeof(((struct sk_buff *)0)->cb)); #define IEEE80211_SKB_RXCB(_skb) \ ((struct ieee80211_rx_status *)((_skb)->cb)) static __inline void ieee80211_free_hw(struct ieee80211_hw *hw) { linuxkpi_ieee80211_iffree(hw); if (hw->wiphy != NULL) wiphy_free(hw->wiphy); /* Note that *hw is not valid any longer after this. 
*/ IMPROVE(); } static __inline struct ieee80211_hw * ieee80211_alloc_hw(size_t priv_len, const struct ieee80211_ops *ops) { return (linuxkpi_ieee80211_alloc_hw(priv_len, ops)); } static __inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev) { set_wiphy_dev(hw->wiphy, dev); linuxkpi_set_ieee80211_dev(hw, dev_name(dev)); IMPROVE(); } static __inline int ieee80211_register_hw(struct ieee80211_hw *hw) { int error; error = wiphy_register(hw->wiphy); if (error != 0) return (error); /* * At this point the driver has set all the options, flags, bands, * ciphers, hw address(es), ... basically mac80211/cfg80211 hw/wiphy * setup is done. * We need to replicate a lot of information from here into net80211. */ error = linuxkpi_ieee80211_ifattach(hw); IMPROVE(); return (error); } static __inline void ieee80211_unregister_hw(struct ieee80211_hw *hw) { wiphy_unregister(hw->wiphy); linuxkpi_ieee80211_ifdetach(hw); IMPROVE(); } static __inline struct ieee80211_hw * wiphy_to_ieee80211_hw(struct wiphy *wiphy) { return (linuxkpi_wiphy_to_ieee80211_hw(wiphy)); } /* -------------------------------------------------------------------------- */ static __inline bool ieee80211_is_action(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ACTION | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_probe_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_RESP | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_auth(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_AUTH | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_assoc_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_assoc_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_ASSOC_RESP | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_reassoc_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_reassoc_resp(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_REASSOC_RESP | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_disassoc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_DISASSOC | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_data_present(__le16 fc) { __le16 v; /* If it is a data frame and NODATA is not present. 
*/ fc &= htole16(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_NODATA); v = htole16(IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_deauth(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_DEAUTH | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_beacon(__le16 fc) { __le16 v; /* * For as much as I get it this comes in LE and unlike FreeBSD * where we get the entire frame header and u8[], here we get the * 9.2.4.1 Frame Control field only. Mask and compare. */ fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_BEACON | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_is_probe_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_PROBE_REQ | IEEE80211_FC0_TYPE_MGT); return (fc == v); } static __inline bool ieee80211_has_protected(__le16 fc) { return (fc & htole16(IEEE80211_FC1_PROTECTED << 8)); } static __inline bool ieee80211_is_back_req(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_BAR | IEEE80211_FC0_TYPE_CTL); return (fc == v); } static __inline bool ieee80211_is_bufferable_mmpdu(__le16 fc) { /* 11.2.2 Bufferable MMPDUs, 80211-2020. */ /* XXX we do not care about IBSS yet. */ if (!ieee80211_is_mgmt(fc)) return (false); if (ieee80211_is_action(fc)) /* XXX FTM? */ return (true); if (ieee80211_is_disassoc(fc)) return (true); if (ieee80211_is_deauth(fc)) return (true); return (false); } static __inline bool ieee80211_is_nullfunc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_NODATA | IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_qos_nullfunc(__le16 fc) { __le16 v; fc &= htole16(IEEE80211_FC0_SUBTYPE_MASK | IEEE80211_FC0_TYPE_MASK); v = htole16(IEEE80211_FC0_SUBTYPE_QOS_NULL | IEEE80211_FC0_TYPE_DATA); return (fc == v); } static __inline bool ieee80211_is_any_nullfunc(__le16 fc) { return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)); } static __inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) { TODO(); return (false); } static __inline bool ieee80211_has_a4(__le16 fc) { __le16 v; fc &= htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8); v = htole16((IEEE80211_FC1_DIR_TODS | IEEE80211_FC1_DIR_FROMDS) << 8); return (fc == v); } static __inline bool ieee80211_has_order(__le16 fc) { return (fc & htole16(IEEE80211_FC1_ORDER << 8)); } static __inline bool ieee80211_has_retry(__le16 fc) { return (fc & htole16(IEEE80211_FC1_RETRY << 8)); } static __inline bool ieee80211_has_fromds(__le16 fc) { return (fc & htole16(IEEE80211_FC1_DIR_FROMDS << 8)); } static __inline bool ieee80211_has_tods(__le16 fc) { return (fc & htole16(IEEE80211_FC1_DIR_TODS << 8)); } static __inline uint8_t * ieee80211_get_SA(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return (hdr->addr4); if (ieee80211_has_fromds(hdr->frame_control)) return (hdr->addr3); return (hdr->addr2); } static __inline uint8_t * ieee80211_get_DA(struct ieee80211_hdr *hdr) { if (ieee80211_has_tods(hdr->frame_control)) return (hdr->addr3); return (hdr->addr1); } static __inline bool ieee80211_has_morefrags(__le16 fc) { fc &= htole16(IEEE80211_FC1_MORE_FRAG << 8); return (fc != 0); } static __inline u8 * 
ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) { if (ieee80211_has_a4(hdr->frame_control)) return (u8 *)hdr + 30; else return (u8 *)hdr + 24; } /* -------------------------------------------------------------------------- */ /* Receive functions (air/driver to mac80211/net80211). */ static __inline void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct sk_buff *skb, struct napi_struct *napi) { linuxkpi_ieee80211_rx(hw, skb, sta, napi); } static __inline void ieee80211_rx_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_rx(hw, skb, NULL, NULL); } static __inline void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_rx(hw, skb, NULL, NULL); } /* -------------------------------------------------------------------------- */ static __inline uint8_t ieee80211_get_tid(struct ieee80211_hdr *hdr) { return (linuxkpi_ieee80211_get_tid(hdr)); } static __inline struct sk_buff * ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, uint16_t *tim_offset, uint16_t *tim_len) { if (tim_offset != NULL) *tim_offset = 0; if (tim_len != NULL) *tim_len = 0; TODO(); return (NULL); } static __inline void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { flags |= IEEE80211_IFACE_ITER__ATOMIC; flags |= IEEE80211_IFACE_ITER__ACTIVE; linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void(*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { flags |= IEEE80211_IFACE_ITER__ACTIVE; linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iterate_interfaces(struct ieee80211_hw *hw, enum ieee80211_iface_iter flags, void (*iterfunc)(void *, uint8_t *, struct ieee80211_vif *), void *arg) { linuxkpi_ieee80211_iterate_interfaces(hw, flags, iterfunc, arg); } static __inline void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg) { linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg); } static __inline void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_vif *, struct ieee80211_sta *, struct ieee80211_key_conf *, void *), void *arg) { IMPROVE(); /* "rcu" */ linuxkpi_ieee80211_iterate_keys(hw, vif, iterfunc, arg); } static __inline void ieee80211_iter_chan_contexts_atomic(struct ieee80211_hw *hw, void(*iterfunc)(struct ieee80211_hw *, struct ieee80211_chanctx_conf *, void *), void *arg) { linuxkpi_ieee80211_iterate_chan_contexts(hw, iterfunc, arg); } static __inline void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, void (*iterfunc)(void *, struct ieee80211_sta *), void *arg) { linuxkpi_ieee80211_iterate_stations_atomic(hw, iterfunc, arg); } static __inline struct wireless_dev * ieee80211_vif_to_wdev(struct ieee80211_vif *vif) { return (linuxkpi_ieee80211_vif_to_wdev(vif)); } static __inline struct sk_buff * ieee80211_beacon_get_template(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_mutable_offsets *offs) { TODO(); return (NULL); } static __inline void ieee80211_beacon_loss(struct ieee80211_vif *vif) { linuxkpi_ieee80211_beacon_loss(vif); } 
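/*
 * Editorial sketch, not part of the original patch: how a driver typically
 * consumes the interface-iteration wrappers above.  The callback signature
 * matches the iterfunc type taken by linuxkpi_ieee80211_iterate_interfaces();
 * the "example_" structure, callback, and function names are hypothetical.
 */
#if 0
struct example_iter_data {
	int	active_vifs;
};

static void
example_count_vif(void *arg, uint8_t *mac __unused,
    struct ieee80211_vif *vif __unused)
{
	struct example_iter_data *data = arg;

	/* E.g., count the active interfaces visited by the iterator. */
	data->active_vifs++;
}

static int
example_count_active_vifs(struct ieee80211_hw *hw)
{
	struct example_iter_data data = { .active_vifs = 0 };

	/* Runs the callback once per active interface, atomically. */
	ieee80211_iterate_active_interfaces_atomic(hw,
	    IEEE80211_IFACE_ITER_NORMAL, example_count_vif, &data);
	return (data.active_vifs);
}
#endif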
static __inline void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool t) { TODO(); } static __inline bool ieee80211_csa_is_complete(struct ieee80211_vif *vif) { TODO(); return (false); } static __inline void ieee80211_csa_set_counter(struct ieee80211_vif *vif, uint8_t counter) { TODO(); } static __inline int ieee80211_csa_update_counter(struct ieee80211_vif *vif) { TODO(); return (-1); } static __inline void ieee80211_csa_finish(struct ieee80211_vif *vif) { TODO(); } static __inline enum nl80211_iftype ieee80211_vif_type_p2p(struct ieee80211_vif *vif) { /* If we are not p2p enabled, just return the type. */ if (!vif->p2p) return (vif->type); /* If we are p2p, depending on side, return type. */ switch (vif->type) { case NL80211_IFTYPE_AP: return (NL80211_IFTYPE_P2P_GO); case NL80211_IFTYPE_STATION: return (NL80211_IFTYPE_P2P_CLIENT); default: fallthrough; } return (vif->type); } static __inline unsigned long ieee80211_tu_to_usec(unsigned long tu) { return (tu * IEEE80211_DUR_TU); } static __inline int ieee80211_action_contains_tpc(struct sk_buff *skb) { TODO(); return (0); } static __inline void ieee80211_connection_loss(struct ieee80211_vif *vif) { linuxkpi_ieee80211_connection_loss(vif); } static __inline struct ieee80211_sta * ieee80211_find_sta(struct ieee80211_vif *vif, const u8 *peer) { return (linuxkpi_ieee80211_find_sta(vif, peer)); } static __inline struct ieee80211_sta * ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, const uint8_t *addr, const uint8_t *ourvifaddr) { return (linuxkpi_ieee80211_find_sta_by_ifaddr(hw, addr, ourvifaddr)); } static __inline void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb_frag, u8 *key) { TODO(); } static __inline void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, const u8 *addr, uint32_t iv32, u16 *p1k) { TODO(); } static __inline size_t ieee80211_ie_split(const u8 *ies, size_t ies_len, const u8 *ie_ids, size_t ie_ids_len, size_t start) { size_t x; x = start; /* XXX FIXME, we need to deal with "Element ID Extension" */ while (x < ies_len) { /* Is this IE[s] one of the ie_ids? */ if (!linuxkpi_ieee80211_is_ie_id_in_ie_buf(ies[x], ie_ids, ie_ids_len)) break; if (!linuxkpi_ieee80211_ie_advance(&x, ies, ies_len)) break; } return (x); } static __inline void ieee80211_request_smps(struct ieee80211_vif *vif, enum ieee80211_smps_mode smps) { TODO(); } static __inline void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, uint8_t *addr, enum nl80211_tdls_operation oper, enum ieee80211_reason_code code, gfp_t gfp) { TODO(); } static __inline void ieee80211_stop_queues(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_wake_queues(struct ieee80211_hw *hw) { TODO(); } static __inline void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool state) { TODO(); } static __inline void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); /* * This is called on transmit failure. * Use a not-so-random random high status error so we can distinguish * it from normal low values flying around in net80211 ("ETX"). */ linuxkpi_ieee80211_free_txskb(hw, skb, 0x455458); } static __inline void ieee80211_restart_hw(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_ready_on_channel(struct ieee80211_hw *hw) { TODO(); /* XXX-BZ We need to see that. 
*/ } static __inline void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, enum nl80211_cqm_rssi_threshold_event crte, int sig, gfp_t gfp) { TODO(); } static __inline void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *sta, uint8_t tid, uint32_t ssn, uint64_t bitmap, uint16_t received_mpdu) { TODO(); } static __inline bool ieee80211_sn_less(uint16_t sn1, uint16_t sn2) { TODO(); return (false); } static __inline uint16_t ieee80211_sn_inc(uint16_t sn) { TODO(); return (sn + 1); } static __inline uint16_t ieee80211_sn_add(uint16_t sn, uint16_t a) { TODO(); return (sn + a); } static __inline void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, uint32_t x, uint8_t *addr) { TODO(); } static __inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *r, uint32_t f1, uint32_t f2) { TODO(); } static __inline void ieee80211_reserve_tid(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline void ieee80211_unreserve_tid(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline void ieee80211_rx_ba_timer_expired(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *sta, uint8_t tid) { TODO(); } static __inline uint16_t ieee80211_sn_sub(uint16_t sa, uint16_t sb) { return ((sa - sb) & (IEEE80211_SEQ_SEQ_MASK >> IEEE80211_SEQ_SEQ_SHIFT)); } static __inline void ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool disable) { TODO(); } static __inline void ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool sleeping) { TODO(); } static __inline void ieee80211_sta_pspoll(struct ieee80211_sta *sta) { TODO(); } static __inline void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, int ntids) { TODO(); } static __inline void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_tkip_add_iv(u8 *crypto_hdr, struct ieee80211_key_conf *keyconf, uint64_t pn) { TODO(); } static __inline struct sk_buff * ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { return (linuxkpi_ieee80211_tx_dequeue(hw, txq)); } static __inline void ieee80211_update_mu_groups(struct ieee80211_vif *vif, uint8_t *ms, uint8_t *up) { TODO(); } static __inline void ieee80211_sta_set_buffered(struct ieee80211_sta *sta, uint8_t tid, bool t) { TODO(); } static __inline void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) { linuxkpi_ieee80211_tx_status(hw, skb); } static __inline void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, uint8_t tid, struct ieee80211_key_seq *seq) { TODO(); } static __inline void ieee80211_sched_scan_results(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_sta_eosp(struct ieee80211_sta *sta) { TODO(); } static __inline void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info) { linuxkpi_ieee80211_scan_completed(hw, info); } static __inline struct sk_buff * ieee80211_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { TODO(); return (NULL); } static __inline struct sk_buff * ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { /* Only STA 
needs this. Otherwise return NULL and panic bad drivers. */ if (vif->type != NL80211_IFTYPE_STATION) return (NULL); return (linuxkpi_ieee80211_pspoll_get(hw, vif)); } static __inline struct sk_buff * ieee80211_proberesp_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { TODO(); return (NULL); } static __inline struct sk_buff * ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif, bool qos) { /* Only STA needs this. Otherwise return NULL and panic bad drivers. */ if (vif->type != NL80211_IFTYPE_STATION) return (NULL); return (linuxkpi_ieee80211_nullfunc_get(hw, vif, qos)); } static __inline struct sk_buff * ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr, uint8_t *ssid, size_t ssid_len, size_t tailroom) { return (linuxkpi_ieee80211_probereq_get(hw, addr, ssid, ssid_len, tailroom)); } static __inline void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, struct delayed_work *w, int delay) { linuxkpi_ieee80211_queue_delayed_work(hw, w, delay); } static __inline void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *w) { linuxkpi_ieee80211_queue_work(hw, w); } static __inline void ieee80211_stop_queue(struct ieee80211_hw *hw, uint16_t q) { TODO(); } static __inline void ieee80211_wake_queue(struct ieee80211_hw *hw, uint16_t q) { TODO(); } static __inline void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); ieee80211_tx_status(hw, skb); } static __inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw, struct sk_buff *skb) { IMPROVE(); ieee80211_tx_status(hw, skb); } static __inline int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, uint8_t tid, int x) { TODO(); return (ENXIO); } static __inline void ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) { int i; /* * Apparently clearing flags and some other fields is not right. * Given the function is called "status" we work on that part of * the union. */ for (i = 0; i < nitems(info->status.rates); i++) info->status.rates[i].count = 0; /* * Unclear if ack_signal should be included or not but we clear the * "valid" bool so this field is no longer valid. 
*/ memset(&info->status.ack_signal, 0, sizeof(*info) - offsetof(struct ieee80211_tx_info, status.ack_signal)); } static __inline void ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt) { if (frame_cnt == NULL && byte_cnt == NULL) return; linuxkpi_ieee80211_txq_get_depth(txq, frame_cnt, byte_cnt); } static __inline int rate_lowest_index(struct ieee80211_supported_band *band, struct ieee80211_sta *sta) { IMPROVE(); return (0); } static __inline void SET_IEEE80211_PERM_ADDR (struct ieee80211_hw *hw, uint8_t *addr) { ether_addr_copy(hw->wiphy->perm_addr, addr); } static __inline uint8_t * ieee80211_bss_get_ie(struct cfg80211_bss *bss, uint32_t x) { TODO(); return (NULL); } static __inline void ieee80211_report_low_ack(struct ieee80211_sta *sta, int x) { TODO(); } static __inline void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif, uint8_t *addr, uint8_t tid) { TODO(); } static __inline struct sk_buff * ieee80211_tx_dequeue_ni(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); return (NULL); } static __inline void ieee80211_tx_rate_update(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_tx_info *info) { TODO(); } static __inline bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); return (false); } static __inline struct ieee80211_txq * ieee80211_next_txq(struct ieee80211_hw *hw, uint32_t ac) { TODO(); return (NULL); } static __inline void ieee80211_radar_detected(struct ieee80211_hw *hw) { TODO(); } static __inline void ieee80211_sta_register_airtime(struct ieee80211_sta *sta, uint8_t tid, uint32_t duration, int x) { TODO(); } static __inline void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, bool _t) { TODO(); } static __inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, uint32_t ac) { TODO(); } static __inline void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, uint32_t ac) { TODO(); } static __inline void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { TODO(); } static __inline void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter) { TODO(); } static __inline int ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif) { TODO(); return (-1); } static __inline int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *vht_cap, uint32_t chanwidth, int x, bool t, int nss) { TODO(); return (-1); } static __inline bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif) { TODO(); return (true); } static __inline void ieee80211_disconnect(struct ieee80211_vif *vif, bool _x) { TODO(); } static __inline void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool _x) { TODO(); } static __inline const struct ieee80211_sta_he_cap * ieee80211_get_he_iftype_cap(const struct ieee80211_supported_band *band, enum nl80211_iftype type) { TODO(); return (NULL); } static __inline void ieee80211_key_mic_failure(struct ieee80211_key_conf *key) { TODO(); } static __inline void ieee80211_key_replay(struct ieee80211_key_conf *key) { TODO(); } #endif /* _LINUXKPI_NET_MAC80211_H */ diff --git a/sys/contrib/dev/iwlwifi/mvm/d3.c b/sys/contrib/dev/iwlwifi/mvm/d3.c index a995bba0ba81..bcc4ed20fe5b 100644 --- a/sys/contrib/dev/iwlwifi/mvm/d3.c +++ b/sys/contrib/dev/iwlwifi/mvm/d3.c @@ -1,2797 +1,2797 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 
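/*
 * Editorial note, not part of either file: ieee80211_tx_info_clear_status()
 * in the header above clears only the tail of the status union, from
 * ack_signal to the end of the structure, by combining offsetof() with
 * sizeof().  A minimal standalone illustration of that idiom follows; the
 * "example_status" structure and field names are hypothetical.
 */
#if 0
#include <stddef.h>
#include <string.h>

struct example_status {
	int	rates;		/* preserved */
	int	ack_signal;	/* cleared from here onwards */
	int	ampdu_len;
	int	antenna;
};

static void
example_clear_tail(struct example_status *st)
{
	/* Zero everything from ack_signal to the end of *st. */
	memset(&st->ack_signal, 0,
	    sizeof(*st) - offsetof(struct example_status, ack_signal));
}
#endif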
/* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include "iwl-modparams.h" #include "fw-api.h" #include "mvm.h" #include "fw/img.h" void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mutex_lock(&mvm->mutex); mvmvif->rekey_data.kek_len = data->kek_len; mvmvif->rekey_data.kck_len = data->kck_len; memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len); memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len); mvmvif->rekey_data.akm = data->akm & 0xFF; mvmvif->rekey_data.replay_ctr = cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr)); mvmvif->rekey_data.valid = true; mutex_unlock(&mvm->mutex); } #if IS_ENABLED(CONFIG_IPV6) void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct inet6_ifaddr *ifa; int idx = 0; memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { mvmvif->target_ipv6_addrs[idx] = ifa->addr; if (ifa->flags & IFA_F_TENTATIVE) __set_bit(idx, mvmvif->tentative_addrs); idx++; if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) break; } read_unlock_bh(&idev->lock); mvmvif->num_target_ipv6_addrs = idx; } #endif void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->tx_key_idx = idx; } static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) { int i; for (i = 0; i < IWL_P1K_SIZE; i++) out[i] = cpu_to_le16(p1k[i]); } static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, struct iwl_mvm_key_pn *ptk_pn, struct ieee80211_key_seq *seq, int tid, int queues) { const u8 *ret = seq->ccmp.pn; int i; /* get the PN from mac80211, used on the default queue */ ieee80211_get_key_rx_seq(key, tid, seq); /* and use the internal data for the other queues */ for (i = 1; i < queues; i++) { const u8 *tmp = ptk_pn->q[i].pn[tid]; if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) ret = tmp; } return ret; } struct wowlan_key_reprogram_data { bool error; int wep_key_idx; }; static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct wowlan_key_reprogram_data *data = _data; int ret; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ struct { struct iwl_mvm_wep_key_cmd wep_key_cmd; struct iwl_mvm_wep_key wep_key; } __packed wkc = { .wep_key_cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .wep_key_cmd.num_keys = 1, /* firmware sets STA_KEY_FLG_WEP_13BYTES */ .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, .wep_key.key_index = key->keyidx, .wep_key.key_size = key->keylen, }; /* * This will fail -- the key functions don't set support * pairwise WEP keys. However, that's better than silently * failing WoWLAN. Or maybe not? 
*/ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) break; memcpy(&wkc.wep_key.key[3], key->key, key->keylen); if (key->keyidx == mvmvif->tx_key_idx) { /* TX key must be at offset 0 */ wkc.wep_key.key_offset = 0; } else { /* others start at 1 */ data->wep_key_idx++; wkc.wep_key.key_offset = data->wep_key_idx; } mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); data->error = ret != 0; mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; mutex_unlock(&mvm->mutex); /* don't upload key again */ return; } default: data->error = true; return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: return; case WLAN_CIPHER_SUITE_AES_CMAC: /* * Ignore CMAC keys -- the WoWLAN firmware doesn't support them * but we also shouldn't abort suspend due to that. It does have * support for the IGTK key renewal, but doesn't really use the * IGTK for anything. This means we could spuriously wake up or * be deauthenticated, but that was considered acceptable. */ return; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; } mutex_lock(&mvm->mutex); /* * The D3 firmware hardcodes the key offset 0 as the key it * uses to transmit packets to the AP, i.e. the PTK. */ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { mvm->ptk_ivlen = key->iv_len; mvm->ptk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); } else { /* * firmware only supports TSC/RSC for a single key, * so if there are multiple keep overwriting them * with new ones -- this relies on mac80211 doing * list_add_tail(). */ mvm->gtk_ivlen = key->iv_len; mvm->gtk_icvlen = key->icv_len; ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); } mutex_unlock(&mvm->mutex); data->error = ret != 0; } struct wowlan_key_rsc_tsc_data { struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; bool have_rsc_tsc; }; static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct wowlan_key_rsc_tsc_data *data = _data; struct aes_sc *aes_sc; struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; struct ieee80211_key_seq seq; int i; switch (key->cipher) { default: break; case WLAN_CIPHER_SUITE_TKIP: if (sta) { u64 pn64; tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; pn64 = atomic64_read(&key->tx_pn); tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); } else { tkip_sc = data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211 use TID 0 (as they need to to avoid replay attacks) * for checking the IV in the frames. 
*/ for (i = 0; i < IWL_NUM_RSC; i++) { ieee80211_get_key_rx_seq(key, i, &seq); tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); } data->have_rsc_tsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (sta) { struct aes_sc *aes_tx_sc; u64 pn64; aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; pn64 = atomic64_read(&key->tx_pn); aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; } /* * For non-QoS this relies on the fact that both the uCode and * mac80211/our RX code use TID 0 for checking the PN. */ if (sta && iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_key_pn *ptk_pn; const u8 *pn; mvmsta = iwl_mvm_sta_from_mac80211(sta); rcu_read_lock(); ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); if (WARN_ON(!ptk_pn)) { rcu_read_unlock(); break; } for (i = 0; i < IWL_MAX_TID_COUNT; i++) { pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, mvm->trans->num_rx_queues); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } rcu_read_unlock(); } else { for (i = 0; i < IWL_NUM_RSC; i++) { u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); aes_sc[i].pn = cpu_to_le64((u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | ((u64)pn[2] << 24) | ((u64)pn[1] << 32) | ((u64)pn[0] << 40)); } } data->have_rsc_tsc = true; break; } } struct wowlan_key_rsc_v5_data { struct iwl_wowlan_rsc_tsc_params_cmd *rsc; bool have_rsc; int gtks; int gtk_ids[4]; }; static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct wowlan_key_rsc_v5_data *data = _data; struct ieee80211_key_seq seq; __le64 *rsc; int i; /* only for ciphers that can be PTK/GTK */ switch (key->cipher) { default: return; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: break; } if (sta) { rsc = data->rsc->ucast_rsc; } else { if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids))) return; data->gtk_ids[data->gtks] = key->keyidx; rsc = data->rsc->mcast_rsc[data->gtks % 2]; if (WARN_ON(key->keyidx >= ARRAY_SIZE(data->rsc->mcast_key_id_map))) return; data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2; if (data->gtks >= 2) { int prev = data->gtks - 2; int prev_idx = data->gtk_ids[prev]; data->rsc->mcast_key_id_map[prev_idx] = IWL_MCAST_KEY_MAP_INVALID; } data->gtks++; } switch (key->cipher) { default: WARN_ON(1); break; case WLAN_CIPHER_SUITE_TKIP: /* * For non-QoS this relies on the fact that both the uCode and * mac80211 use TID 0 (as they need to to avoid replay attacks) * for checking the IV in the frames. */ for (i = 0; i < IWL_MAX_TID_COUNT; i++) { ieee80211_get_key_rx_seq(key, i, &seq); rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) | seq.tkip.iv16); } data->have_rsc = true; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: /* * For non-QoS this relies on the fact that both the uCode and * mac80211/our RX code use TID 0 for checking the PN. 
		 */
		if (sta) {
			struct iwl_mvm_sta *mvmsta;
			struct iwl_mvm_key_pn *ptk_pn;
			const u8 *pn;

			mvmsta = iwl_mvm_sta_from_mac80211(sta);
			rcu_read_lock();
			ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
			if (WARN_ON(!ptk_pn)) {
				rcu_read_unlock();
				break;
			}

			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
						mvm->trans->num_rx_queues);
				rsc[i] = cpu_to_le64((u64)pn[5] |
						     ((u64)pn[4] << 8) |
						     ((u64)pn[3] << 16) |
						     ((u64)pn[2] << 24) |
						     ((u64)pn[1] << 32) |
						     ((u64)pn[0] << 40));
			}

			rcu_read_unlock();
		} else {
			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
				u8 *pn = seq.ccmp.pn;

				ieee80211_get_key_rx_seq(key, i, &seq);
				rsc[i] = cpu_to_le64((u64)pn[5] |
						     ((u64)pn[4] << 8) |
						     ((u64)pn[3] << 16) |
						     ((u64)pn[2] << 24) |
						     ((u64)pn[1] << 32) |
						     ((u64)pn[0] << 40));
			}
		}
		data->have_rsc = true;
		break;
	}
}

static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
					IWL_FW_CMD_VER_UNKNOWN);
	int ret;

	if (ver == 5) {
		struct wowlan_key_rsc_v5_data data = {};
		int i;

		data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
		if (!data.rsc)
			return -ENOMEM;

		memset(data.rsc, 0xff, sizeof(*data.rsc));

		for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
			data.rsc->mcast_key_id_map[i] =
				IWL_MCAST_KEY_MAP_INVALID;
		data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id);

		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_get_rsc_v5_data,
				    &data);

		if (data.have_rsc)
			ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
						   CMD_ASYNC,
						   sizeof(*data.rsc),
						   data.rsc);
		else
			ret = 0;
		kfree(data.rsc);
	} else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
		struct wowlan_key_rsc_tsc_data data = {};
		int size;

		data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL);
		if (!data.rsc_tsc)
			return -ENOMEM;

		if (ver == 4) {
			size = sizeof(*data.rsc_tsc);
			data.rsc_tsc->sta_id =
				cpu_to_le32(mvmvif->ap_sta_id);
		} else {
			/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
			size = sizeof(data.rsc_tsc->params);
		}

		ieee80211_iter_keys(mvm->hw, vif,
				    iwl_mvm_wowlan_get_rsc_tsc_data,
				    &data);

		if (data.have_rsc_tsc)
			ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
						   CMD_ASYNC, size,
						   data.rsc_tsc);
		else
			ret = 0;
		kfree(data.rsc_tsc);
	} else {
		ret = 0;
		WARN_ON_ONCE(1);
	}

	return ret;
}

struct wowlan_key_tkip_data {
	struct iwl_wowlan_tkip_params_cmd tkip;
	bool have_tkip_keys;
};

static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_sta *sta,
					 struct ieee80211_key_conf *key,
					 void *_data)
{
	struct wowlan_key_tkip_data *data = _data;
	struct iwl_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWL_P1K_SIZE];
	int i;

	switch (key->cipher) {
	default:
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			rx_p1ks = data->tkip.rx_uni;

			pn64 = atomic64_read(&key->tx_pn);

			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
						  p1k);
			iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k);

			memcpy(data->tkip.mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			rx_mic_key = data->tkip.mic_keys.rx_unicast;
		} else {
			rx_p1ks = data->tkip.rx_multi;
			rx_mic_key = data->tkip.mic_keys.rx_mcast;
		}

		for (i = 0; i < IWL_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
					  cur_rx_iv32, p1k);
		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
cur_rx_iv32 + 1, p1k); iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k); memcpy(rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); data->have_tkip_keys = true; break; } } struct wowlan_key_gtk_type_iter { struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd; }; static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *_data) { struct wowlan_key_gtk_type_iter *data = _data; switch (key->cipher) { default: return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); return; case WLAN_CIPHER_SUITE_CCMP: if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); break; } } static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan) { struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; if (!wowlan->n_patterns) return 0; cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns); for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); memcpy(&pattern_cmd->patterns[i].mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].mask_size = mask_len; pattern_cmd->patterns[i].pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_wowlan *wowlan) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_wowlan_patterns_cmd *pattern_cmd; struct iwl_host_cmd cmd = { .id = WOWLAN_PATTERNS, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int i, err; int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (!wowlan->n_patterns) return 0; cmd.len[0] = sizeof(*pattern_cmd) + wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2); pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL); if (!pattern_cmd) return -ENOMEM; pattern_cmd->n_patterns = wowlan->n_patterns; if (ver >= 3) pattern_cmd->sta_id = mvmvif->ap_sta_id; for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); pattern_cmd->patterns[i].pattern_type = WOWLAN_PATTERN_TYPE_BITMASK; memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, wowlan->patterns[i].mask, mask_len); memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, wowlan->patterns[i].pattern, wowlan->patterns[i].pattern_len); pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; pattern_cmd->patterns[i].u.bitmask.pattern_size = wowlan->patterns[i].pattern_len; } cmd.data[0] = pattern_cmd; err = iwl_mvm_send_cmd(mvm, &cmd); kfree(pattern_cmd); return err; } static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *ap_sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct 
	ieee80211_chanctx_conf *ctx;
	u8 chains_static, chains_dynamic;
	struct cfg80211_chan_def chandef;
	int ret, i;
	struct iwl_binding_cmd_v1 binding_cmd = {};
	struct iwl_time_quota_cmd quota_cmd = {};
	struct iwl_time_quota_data *quota;
	u32 status;

	if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
		return -EINVAL;

	/* add back the PHY */
	if (WARN_ON(!mvmvif->phy_ctxt))
		return -EINVAL;

	rcu_read_lock();
	ctx = rcu_dereference(vif->chanctx_conf);
	if (WARN_ON(!ctx)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	chandef = ctx->def;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
				   chains_static, chains_dynamic);
	if (ret)
		return ret;

	/* add back the MAC */
	mvmvif->uploaded = false;

	if (WARN_ON(!vif->bss_conf.assoc))
		return -EINVAL;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		return ret;

	/* add back binding - XXX refactor? */
	binding_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	binding_cmd.phy =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							      mvmvif->color));
	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);

	status = 0;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
					  IWL_BINDING_CMD_SIZE_V1,
					  &binding_cmd, &status);
	if (ret) {
		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
		return ret;
	}

	if (status) {
		IWL_ERR(mvm, "Binding command failed: %u\n", status);
		return -EIO;
	}

	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
	if (ret)
		return ret;
	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);

	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	if (ret)
		return ret;

	/* and some quota */
	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
	quota->id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
						mvmvif->phy_ctxt->color));
	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);

	for (i = 1; i < MAX_BINDINGS; i++) {
		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);

	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");

	return 0;
}

static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
	};
	struct iwl_host_cmd cmd = {
		.id = NON_QOS_TX_COUNTER_CMD,
		.flags = CMD_WANT_SKB,
	};
	int err;
	u32 size;

	cmd.data[0] = &query_cmd;
	cmd.len[0] = sizeof(query_cmd);

	err = iwl_mvm_send_cmd(mvm, &cmd);
	if (err)
		return err;

	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
	if (size < sizeof(__le16)) {
		err = -EINVAL;
	} else {
		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
		/* firmware returns next, not last-used seqno */
		err = (u16)(err - 0x10);
	}

	iwl_free_resp(&cmd);
	return err;
}
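/*
 * Editor's note (illustration, not part of the driver): the "- 0x10"
 * above and the "+ 0x10" used on resume work because an 802.11
 * sequence-control value keeps the fragment number in bits 0-3 and the
 * sequence number in bits 4-15, so one sequence-number step is 0x10.
 * A minimal sketch in plain C, with hypothetical helper names:
 */
#if 0	/* standalone example, not compiled into the driver */
#include <assert.h>
#include <stdint.h>

static uint16_t seq_ctrl_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl >> 4) & 0xfff;		/* 12-bit sequence number */
}

static uint16_t seq_ctrl_prev(uint16_t seq_ctrl)
{
	return (uint16_t)(seq_ctrl - 0x10);	/* previous SN, same fragment */
}

static void seq_ctrl_selftest(void)
{
	uint16_t next = 0x0120;			/* SN 18, fragment 0 */

	assert(seq_ctrl_sn(seq_ctrl_prev(next)) == 17);
}
#endif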
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_nonqos_seq_query_cmd query_cmd = {
		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
		.mac_id_n_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.value = cpu_to_le16(mvmvif->seqno),
	};

	/* return if called during restart, not resume from D3 */
	if (!mvmvif->seqno_valid)
		return;

	mvmvif->seqno_valid = false;

	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
				 sizeof(query_cmd), &query_cmd))
		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}

static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);

	iwl_mvm_stop_device(mvm);

	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);

	/* the fw is reset, so all the keys are cleared */
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;

	return iwl_mvm_load_d3_fw(mvm);
}

static int iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
				     struct cfg80211_wowlan *wowlan,
				     struct iwl_wowlan_config_cmd *wowlan_config_cmd,
				     struct ieee80211_vif *vif,
				     struct iwl_mvm_vif *mvmvif,
				     struct ieee80211_sta *ap_sta)
{
	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);

	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */

	wowlan_config_cmd->is_11n_connection =
-					ap_sta->ht_cap.ht_supported;
+					ap_sta->deflink.ht_cap.ht_supported;
	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
		/* Query the last used seqno and set it */
		int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);

		if (ret < 0)
			return ret;

		wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
	}

	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);

	if (wowlan->disconnect)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);

	if (wowlan->tcp) {
		/*
		 * Set the "link change" (really "link lost") flag as well
		 * since that implies losing the TCP connection.
		 */
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
	}

	if (wowlan->any) {
		wowlan_config_cmd->wakeup_filter |=
			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
				    IWL_WOWLAN_WAKEUP_LINK_CHANGE |
				    IWL_WOWLAN_WAKEUP_RX_FRAME |
				    IWL_WOWLAN_WAKEUP_BCN_FILTERING);
	}

	return 0;
}
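/*
 * Editor's note (illustration, not part of the driver): the body of
 * iwl_mvm_get_wowlan_config() above is a straight mapping from cfg80211
 * trigger booleans to a 32-bit firmware wakeup-filter mask. The same
 * pattern, table-driven, in plain C (flag values are hypothetical; the
 * real IWL_WOWLAN_WAKEUP_* encoding lives in the fw-api headers):
 */
#if 0	/* standalone example, not compiled into the driver */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct wake_trigger {
	bool enabled;		/* e.g. wowlan->magic_pkt */
	uint32_t filter_bits;	/* firmware bits to set for this trigger */
};

static uint32_t build_wakeup_filter(const struct wake_trigger *t, size_t n)
{
	uint32_t filter = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (t[i].enabled)
			filter |= t[i].filter_bits;
	return filter;	/* the caller would cpu_to_le32() this for the fw */
}
#endif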
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	struct wowlan_key_reprogram_data key_data = {};
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;
	u8 cmd_ver;
	size_t cmd_size;

	if (!unified) {
		/*
		 * if we have to configure keys, call ieee80211_iter_keys(),
		 * as we need non-atomic context in order to take the
		 * required locks.
		 */
		/*
		 * Note that currently we don't use CMD_ASYNC in the iterator.
		 * In case of key_data.configure_keys, all the configured
		 * commands are SYNC, and iwl_mvm_wowlan_program_keys() will
		 * take care of locking/unlocking mvm->mutex.
		 */
		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
				    &key_data);

		if (key_data.error)
			return -EIO;
	}

	ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
	if (ret)
		return ret;

	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
						IWL_FW_CMD_VER_UNKNOWN);
		struct wowlan_key_tkip_data tkip_data = {};
		int size;

		if (ver == 2) {
			size = sizeof(tkip_data.tkip);
			tkip_data.tkip.sta_id =
				cpu_to_le32(mvmvif->ap_sta_id);
		} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
			size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data,
				    &tkip_data);

		if (tkip_data.have_tkip_keys) {
			/* send relevant data according to CMD version */
			ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TKIP_PARAM,
						   CMD_ASYNC, size,
						   &tkip_data.tkip);
			if (ret)
				return ret;
		}
	}

	/* configure rekey data only if offloaded rekey is supported (d3) */
	if (mvmvif->rekey_data.valid) {
		struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
		struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd =
			&kek_kck_cmd;
		struct wowlan_key_gtk_type_iter gtk_type_data = {
			.kek_kck_cmd = _kek_kck_cmd,
		};

		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WOWLAN_KEK_KCK_MATERIAL,
						IWL_FW_CMD_VER_UNKNOWN);
		if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
			    cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
			return -EINVAL;

		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter,
				    &gtk_type_data);

		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
		       mvmvif->rekey_data.kck_len);
		kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
		       mvmvif->rekey_data.kek_len);
		kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
		kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
		kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);

		if (cmd_ver == 4) {
			cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
		} else {
			if (cmd_ver == 3)
				cmd_size =
					sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
			else
				cmd_size =
					sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
			/* skip the sta_id at the beginning */
			_kek_kck_cmd = (void *)
				((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
		}

		IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
				 mvmvif->rekey_data.akm);

		ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_KEK_KCK_MATERIAL, CMD_ASYNC, cmd_size, _kek_kck_cmd); if (ret) return ret; } return 0; } static int iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct iwl_wowlan_config_cmd *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->offload_tid = wowlan_config_cmd->offloading_tid; if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); if (ret) return ret; } /* * This needs to be unlocked due to lock ordering * constraints. Since we're in the suspend path * that isn't really a problem though. */ mutex_unlock(&mvm->mutex); ret = iwl_mvm_wowlan_config_key_params(mvm, vif); mutex_lock(&mvm->mutex); if (ret) return ret; ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, sizeof(*wowlan_config_cmd), wowlan_config_cmd); if (ret) return ret; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) ret = iwl_mvm_send_patterns(mvm, vif, wowlan); else ret = iwl_mvm_send_patterns_v1(mvm, wowlan); if (ret) return ret; return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); } static int iwl_mvm_netdetect_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, struct cfg80211_sched_scan_request *nd_config, struct ieee80211_vif *vif) { int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; } else { /* In theory, we wouldn't have to stop a running sched * scan in order to start another one (for * net-detect). But in practice this doesn't seem to * work properly, so stop any running sched_scan now. */ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); if (ret) return ret; } ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT); if (ret) return ret; if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) return -EBUSY; /* save the sched scan matchsets... */ if (nd_config->n_match_sets) { mvm->nd_match_sets = kmemdup(nd_config->match_sets, sizeof(*nd_config->match_sets) * nd_config->n_match_sets, GFP_KERNEL); if (mvm->nd_match_sets) mvm->n_nd_match_sets = nd_config->n_match_sets; } /* ...and the sched scan channels for later reporting */ mvm->nd_channels = kmemdup(nd_config->channels, sizeof(*nd_config->channels) * nd_config->n_channels, GFP_KERNEL); if (mvm->nd_channels) mvm->n_nd_channels = nd_config->n_channels; return 0; } static void iwl_mvm_free_nd(struct iwl_mvm *mvm) { kfree(mvm->nd_match_sets); mvm->nd_match_sets = NULL; mvm->n_nd_match_sets = 0; kfree(mvm->nd_channels); mvm->nd_channels = NULL; mvm->n_nd_channels = 0; } static int __iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan, bool test) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *vif = NULL; struct iwl_mvm_vif *mvmvif = NULL; struct ieee80211_sta *ap_sta = NULL; struct iwl_d3_manager_config d3_cfg_cmd_data = { /* * Program the minimum sleep time to 10 seconds, as many * platforms have issues processing a wakeup signal while * still being in the process of suspending. 
*/ .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), }; struct iwl_host_cmd d3_cfg_cmd = { .id = D3_CONFIG_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, .data[0] = &d3_cfg_cmd_data, .len[0] = sizeof(d3_cfg_cmd_data), }; int ret; int len __maybe_unused; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* * mac80211 shouldn't get here, but for D3 test * it doesn't warrant a warning */ WARN_ON(!test); return -EINVAL; } mutex_lock(&mvm->mutex); set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); synchronize_net(); vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) { ret = 1; goto out_noreset; } mvmvif = iwl_mvm_vif_from_mac80211(vif); if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; goto out_noreset; } ret = iwl_mvm_netdetect_config( mvm, wowlan, wowlan->nd_config, vif); if (ret) goto out; mvm->net_detect = true; } else { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; ap_sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) { ret = -EINVAL; goto out_noreset; } ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out_noreset; ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) goto out; mvm->net_detect = false; } ret = iwl_mvm_power_update_device(mvm); if (ret) goto out; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out; #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvm->d3_wake_sysassert) d3_cfg_cmd_data.wakeup_flags |= cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); #endif /* * Prior to 9000 device family the driver needs to stop the dbg * recording before entering D3. In later devices the FW stops the * recording automatically. 
*/ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) goto out; #ifdef CONFIG_IWLWIFI_DEBUGFS len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); if (len >= sizeof(u32)) { mvm->d3_test_pme_ptr = le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); } #endif iwl_free_resp(&d3_cfg_cmd); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_free_nd(mvm); if (!unified_image) { if (mvm->fw_restart > 0) { mvm->fw_restart--; ieee80211_restart_hw(mvm->hw); } } clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); } out_noreset: mutex_unlock(&mvm->mutex); return ret; } int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); return __iwl_mvm_suspend(hw, wowlan, false); } /* converted data from the different status responses */ struct iwl_wowlan_status_data { u64 replay_ctr; u32 num_of_gtk_rekeys; u32 received_beacons; u32 wakeup_reasons; u32 wake_packet_length; u32 wake_packet_bufsize; u16 pattern_number; u16 non_qos_seq_ctr; u16 qos_seq_ctr[8]; u8 tid_tear_down; struct { /* including RX MIC key for TKIP */ u8 key[WOWLAN_KEY_MAX_SIZE]; u8 len; u8 flags; } gtk; struct { /* * We store both the TKIP and AES representations * coming from the firmware because we decode the * data from there before we iterate the keys and * know which one we need. */ struct { struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; } tkip, aes; /* * We use -1 for when we have valid data but don't know * the key ID from firmware, and thus it needs to be * installed with the last key (depending on rekeying). 
*/ s8 key_id; bool valid; } gtk_seq[2]; struct { /* Same as above */ struct { struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; u64 tx_pn; } tkip, aes; } ptk; struct { u64 ipn; u8 key[WOWLAN_KEY_MAX_SIZE]; u8 len; u8 flags; } igtk; u8 wake_packet[]; }; static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_wowlan_status_data *status) { struct sk_buff *pkt = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; u32 reasons = status->wakeup_reasons; if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { wakeup_report = NULL; goto report; } pm_wakeup_event(mvm->dev, 0); if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) wakeup.magic_pkt = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) wakeup.pattern_idx = status->pattern_number; if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) wakeup.disconnect = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) wakeup.gtk_rekey_failure = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) wakeup.eap_identity_req = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) wakeup.four_way_handshake = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) wakeup.tcp_connlost = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) wakeup.tcp_nomoretokens = true; if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) wakeup.tcp_match = true; if (status->wake_packet_bufsize) { int pktsize = status->wake_packet_bufsize; int pktlen = status->wake_packet_length; const u8 *pktdata = status->wake_packet; const struct ieee80211_hdr *hdr = (const void *)pktdata; int truncated = pktlen - pktsize; /* this would be a firmware bug */ if (WARN_ON_ONCE(truncated < 0)) truncated = 0; if (ieee80211_is_data(hdr->frame_control)) { int hdrlen = ieee80211_hdrlen(hdr->frame_control); int ivlen = 0, icvlen = 4; /* also FCS */ pkt = alloc_skb(pktsize, GFP_KERNEL); if (!pkt) goto report; skb_put_data(pkt, pktdata, hdrlen); pktdata += hdrlen; pktsize -= hdrlen; if (ieee80211_has_protected(hdr->frame_control)) { /* * This is unlocked and using gtk_i(c)vlen, * but since everything is under RTNL still * that's not really a problem - changing * it would be difficult. 
				 */
				if (is_multicast_ether_addr(hdr->addr1)) {
					ivlen = mvm->gtk_ivlen;
					icvlen += mvm->gtk_icvlen;
				} else {
					ivlen = mvm->ptk_ivlen;
					icvlen += mvm->ptk_icvlen;
				}
			}

			/* if truncated, FCS/ICV is (partially) gone */
			if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
			} else {
				icvlen -= truncated;
				truncated = 0;
			}

			pktsize -= ivlen + icvlen;
			pktdata += ivlen;

			skb_put_data(pkt, pktdata, pktsize);

			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
				goto report;
			wakeup.packet = pkt->data;
			wakeup.packet_present_len = pkt->len;
			wakeup.packet_len = pkt->len - truncated;
			wakeup.packet_80211 = false;
		} else {
			int fcslen = 4;

			if (truncated >= 4) {
				truncated -= 4;
				fcslen = 0;
			} else {
				fcslen -= truncated;
				truncated = 0;
			}
			pktsize -= fcslen;
			wakeup.packet = status->wake_packet;
			wakeup.packet_present_len = pktsize;
			wakeup.packet_len = pktlen - truncated;
			wakeup.packet_80211 = true;
		}
	}

 report:
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
	kfree_skb(pkt);
}

static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
{
	u64 pn = le64_to_cpu(le_pn);

	seq->ccmp.pn[0] = pn >> 40;
	seq->ccmp.pn[1] = pn >> 32;
	seq->ccmp.pn[2] = pn >> 24;
	seq->ccmp.pn[3] = pn >> 16;
	seq->ccmp.pn[4] = pn >> 8;
	seq->ccmp.pn[5] = pn;
}

static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	iwl_mvm_le64_to_aes_seq(sc->pn, seq);
}

static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
{
	u64 pn = le64_to_cpu(le_pn);

	seq->tkip.iv16 = (u16)pn;
	seq->tkip.iv32 = (u32)(pn >> 16);
}

static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}

static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
					struct ieee80211_key_seq *seq)
{
	int tid;

	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
		ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
}

static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
				       struct iwl_wowlan_status_data *status,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_key_pn *ptk_pn;
	int tid;

	iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq);

	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

	rcu_read_lock();
	ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
	if (WARN_ON(!ptk_pn)) {
		rcu_read_unlock();
		return;
	}

	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		int i;

		for (i = 1; i < mvm->trans->num_rx_queues; i++)
			memcpy(ptk_pn->q[i].pn[tid],
			       status->ptk.aes.seq[tid].ccmp.pn,
			       IEEE80211_CCMP_PN_LEN);
	}
	rcu_read_unlock();
}

static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
					 union iwl_all_tsc_rsc *sc)
{
	int i;

	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT);
	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);

	/* GTK RX counters */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i],
				       &status->gtk_seq[0].tkip.seq[i]);
		iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i],
				      &status->gtk_seq[0].aes.seq[i]);
	}

	status->gtk_seq[0].valid = true;
	status->gtk_seq[0].key_id = -1;

	/* PTK TX counter */
	status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) |
				 ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16);
	status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn);

	/* PTK RX counters */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i],
				       &status->ptk.tkip.seq[i]);
		iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i],
				      &status->ptk.aes.seq[i]);
	}
}
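/*
 * Editor's note (illustration, not part of the driver): the conversion
 * helpers above all revolve around one packing: a 48-bit CCMP/GCMP
 * packet number stored big-endian in pn[0..5] (pn[0] is the most
 * significant byte) widened to a host 64-bit value, and back. A minimal
 * round-trip sketch in plain C:
 */
#if 0	/* standalone example, not compiled into the driver */
#include <assert.h>
#include <stdint.h>

static uint64_t ccmp_pn_to_u64(const uint8_t pn[6])
{
	return ((uint64_t)pn[0] << 40) | ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[2] << 24) | ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[4] << 8) | (uint64_t)pn[5];
}

static void u64_to_ccmp_pn(uint64_t v, uint8_t pn[6])
{
	int i;

	for (i = 5; i >= 0; i--) {	/* pn[5] (LSB) is written first */
		pn[i] = (uint8_t)(v & 0xff);
		v >>= 8;
	}
}

static void ccmp_pn_selftest(void)
{
	uint8_t pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t out[6];

	u64_to_ccmp_pn(ccmp_pn_to_u64(pn), out);
	assert(out[0] == 0x01 && out[5] == 0x06);
}
#endif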
static void
iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status,
					struct iwl_wowlan_all_rsc_tsc_v5 *sc,
					unsigned int idx, unsigned int key_id)
{
	int tid;

	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid],
					 &status->gtk_seq[idx].tkip.seq[tid]);
		iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid],
					&status->gtk_seq[idx].aes.seq[tid]);
	}

	status->gtk_seq[idx].valid = true;
	status->gtk_seq[idx].key_id = key_id;
}

static void
iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status,
				struct iwl_wowlan_all_rsc_tsc_v5 *sc)
{
	int i, tid;

	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT);
	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
	BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq));

	/* GTK RX counters */
	for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) {
		u8 entry = sc->mcast_key_id_map[i];

		if (entry < ARRAY_SIZE(sc->mcast_rsc))
			iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc,
								entry, i);
	}

	/* PTK TX counters not needed, assigned in device */

	/* PTK RX counters */
	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
		iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid],
					 &status->ptk.tkip.seq[tid]);
		iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid],
					&status->ptk.aes.seq[tid]);
	}
}

static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key,
				       struct iwl_wowlan_status_data *status,
				       int idx)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq);
		break;
	default:
		WARN_ON(1);
	}
}

static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
				   struct iwl_wowlan_status_data *status,
				   bool installed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) {
		if (!status->gtk_seq[i].valid)
			continue;

		/* Handle the case where we know the key ID */
		if (status->gtk_seq[i].key_id == key->keyidx) {
			s8 new_key_id = -1;

			if (status->num_of_gtk_rekeys)
				new_key_id = status->gtk.flags &
					     IWL_WOWLAN_GTK_IDX_MASK;

			/* Don't install a new key's value to an old key */
			if (new_key_id != key->keyidx)
				iwl_mvm_set_key_rx_seq_idx(key, status, i);
			continue;
		}

		/* handle the case where we didn't, last key only */
		if (status->gtk_seq[i].key_id == -1 &&
		    (!status->num_of_gtk_rekeys || installed))
			iwl_mvm_set_key_rx_seq_idx(key, status, i);
	}
}
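/*
 * Editor's note (illustration, not part of the driver):
 * iwl_mvm_d3_update_keys() below is deliberately run twice over the
 * same key list - a "find" pass that only remembers the last GTK, then
 * an "update" pass that applies counters and removals. A minimal sketch
 * of that two-pass shape over a plain array, with hypothetical types:
 */
#if 0	/* standalone example, not compiled into the driver */
#include <stddef.h>

struct demo_key {
	int id;
	int is_gtk;
};

struct demo_iter {
	int find_phase;
	const struct demo_key *last_gtk;
};

static void demo_visit(struct demo_iter *it, const struct demo_key *key)
{
	if (it->find_phase) {
		if (key->is_gtk)
			it->last_gtk = key;	/* iteration order picks the last one */
		return;
	}
	/* update pass: only it->last_gtk would get its RX counters set */
}

static void demo_two_pass(struct demo_iter *it, const struct demo_key *keys,
			  size_t n)
{
	size_t i;

	it->find_phase = 1;
	for (i = 0; i < n; i++)
		demo_visit(it, &keys[i]);
	it->find_phase = 0;
	for (i = 0; i < n; i++)
		demo_visit(it, &keys[i]);
}
#endif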
struct iwl_mvm_d3_gtk_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_wowlan_status_data *status;
	void *last_gtk;
	u32 cipher;
	bool find_phase, unhandled_cipher;
	int num_keys;
};

static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *_data)
{
	struct iwl_mvm_d3_gtk_iter_data *data = _data;
	struct iwl_wowlan_status_data *status = data->status;

	if (data->unhandled_cipher)
		return;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		/* ignore WEP completely, nothing to do */
		return;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
	case WLAN_CIPHER_SUITE_TKIP:
		/* we support these */
		break;
	default:
		/* everything else (even CMAC for MFP) - disconnect from AP */
		data->unhandled_cipher = true;
		return;
	}

	data->num_keys++;

	/*
	 * pairwise key - update sequence counters only;
	 * note that this assumes no TDLS sessions are active
	 */
	if (sta) {
		if (data->find_phase)
			return;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn);
			iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key);
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn);
			iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq);
			break;
		}

		/* that's it for this key */
		return;
	}

	if (data->find_phase) {
		data->last_gtk = key;
		data->cipher = key->cipher;
		return;
	}

	if (data->status->num_of_gtk_rekeys)
		ieee80211_remove_key(key);

	if (data->last_gtk == key)
		iwl_mvm_set_key_rx_seq(key, data->status, false);
}

static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  struct iwl_wowlan_status_data *status)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
		.mvm = mvm,
		.status = status,
	};
	u32 disconnection_reasons =
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;

	if (!status || !vif->bss_conf.bssid)
		return false;

	if (status->wakeup_reasons & disconnection_reasons)
		return false;

	/* find last GTK that we used initially, if any */
	gtkdata.find_phase = true;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);
	/* not trying to keep connections with MFP/unhandled ciphers */
	if (gtkdata.unhandled_cipher)
		return false;
	if (!gtkdata.num_keys)
		goto out;
	if (!gtkdata.last_gtk)
		return false;

	/*
	 * invalidate all other GTKs that might still exist and update
	 * the one that we used
	 */
	gtkdata.find_phase = false;
	ieee80211_iter_keys(mvm->hw, vif,
			    iwl_mvm_d3_update_keys, &gtkdata);

	IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
			 status->num_of_gtk_rekeys);
	if (status->num_of_gtk_rekeys) {
		struct ieee80211_key_conf *key;
		struct {
			struct ieee80211_key_conf conf;
			u8 key[32];
		} conf = {
			.conf.cipher = gtkdata.cipher,
			.conf.keyidx =
				status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK,
		};
		__be64 replay_ctr;

		IWL_DEBUG_WOWLAN(mvm,
				 "Received from FW GTK cipher %d, key index %d\n",
				 conf.conf.cipher, conf.conf.keyidx);

		BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
		BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key));

		memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key));

		switch (gtkdata.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_GCMP:
			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
			break;
		case WLAN_CIPHER_SUITE_GCMP_256:
			conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
			break;
		case WLAN_CIPHER_SUITE_TKIP:
			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
			break;
		}

		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
		if (IS_ERR(key))
			return false;
		iwl_mvm_set_key_rx_seq(key, status, true);

		replay_ctr = cpu_to_be64(status->replay_ctr);

		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
					   (void *)&replay_ctr, GFP_KERNEL);
	}

out:
	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
				    WOWLAN_GET_STATUSES, 0) < 10) {
		mvmvif->seqno_valid = true;
		/* +0x10 because the set API expects next-to-use, not last-used */
		mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
	}

	return true;
}

/* Occasionally, templates would be nice. This is one of those times ...
*/ #define iwl_mvm_parse_wowlan_status_common(_ver) \ static struct iwl_wowlan_status_data * \ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ struct iwl_wowlan_status_ ##_ver *data,\ int len) \ { \ struct iwl_wowlan_status_data *status; \ int data_size, i; \ \ if (len < sizeof(*data)) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ return ERR_PTR(-EIO); \ } \ \ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ if (len != sizeof(*data) + data_size) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ return ERR_PTR(-EIO); \ } \ \ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ if (!status) \ return ERR_PTR(-ENOMEM); \ \ /* copy all the common fields */ \ status->replay_ctr = le64_to_cpu(data->replay_ctr); \ status->pattern_number = le16_to_cpu(data->pattern_number); \ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \ for (i = 0; i < 8; i++) \ status->qos_seq_ctr[i] = \ le16_to_cpu(data->qos_seq_ctr[i]); \ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \ status->num_of_gtk_rekeys = \ le32_to_cpu(data->num_of_gtk_rekeys); \ status->received_beacons = le32_to_cpu(data->received_beacons); \ status->wake_packet_length = \ le32_to_cpu(data->wake_packet_length); \ status->wake_packet_bufsize = \ le32_to_cpu(data->wake_packet_bufsize); \ memcpy(status->wake_packet, data->wake_packet, \ status->wake_packet_bufsize); \ \ return status; \ } iwl_mvm_parse_wowlan_status_common(v6) iwl_mvm_parse_wowlan_status_common(v7) iwl_mvm_parse_wowlan_status_common(v9) iwl_mvm_parse_wowlan_status_common(v12) static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, struct iwl_wowlan_gtk_status_v2 *data) { BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + sizeof(data->tkip_mic_key) > sizeof(status->gtk.key)); status->gtk.len = data->key_len; status->gtk.flags = data->key_flags; memcpy(status->gtk.key, data->key, sizeof(data->key)); /* if it's as long as the TKIP encryption key, copy MIC key */ if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, data->tkip_mic_key, sizeof(data->tkip_mic_key)); } static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, struct iwl_wowlan_gtk_status_v3 *data) { /* The parts we need are identical in v2 and v3 */ #define CHECK(_f) do { \ BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \ offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \ BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\ offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \ } while (0) CHECK(key); CHECK(key_len); CHECK(key_flags); CHECK(tkip_mic_key); #undef CHECK iwl_mvm_convert_gtk_v2(status, (void *)data); } static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, struct iwl_wowlan_igtk_status *data) { const u8 *ipn = data->ipn; BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); status->igtk.len = data->key_len; status->igtk.flags = data->key_flags; memcpy(status->igtk.key, data->key, sizeof(data->key)); status->igtk.ipn = ((u64)ipn[5] << 0) | ((u64)ipn[4] << 8) | ((u64)ipn[3] << 16) | ((u64)ipn[2] << 24) | ((u64)ipn[1] << 32) | ((u64)ipn[0] << 40); } static struct iwl_wowlan_status_data * iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) { struct iwl_wowlan_status_data *status; struct iwl_wowlan_get_status_cmd get_status_cmd = { .sta_id = cpu_to_le32(sta_id), }; struct iwl_host_cmd cmd = { .id = 
WOWLAN_GET_STATUSES, .flags = CMD_WANT_SKB, .data = { &get_status_cmd, }, .len = { sizeof(get_status_cmd), }, }; int ret, len; u8 notif_ver; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) cmd.len[0] = 0; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); return ERR_PTR(ret); } len = iwl_rx_packet_payload_len(cmd.resp_pkt); /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, WOWLAN_GET_STATUSES, 0); if (!notif_ver) notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, WOWLAN_GET_STATUSES, 7); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); if (IS_ERR(status)) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > sizeof(status->gtk.key)); BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + sizeof(v6->gtk.tkip_mic_key) > sizeof(status->gtk.key)); /* copy GTK info to the right place */ memcpy(status->gtk.key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key)); memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key)); iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc); /* hardcode the key length to 16 since v6 only supports 16 */ status->gtk.len = 16; /* * The key index only uses 2 bits (values 0 to 3) and * we always set bit 7 which means this is the * currently used key. */ status->gtk.flags = v6->gtk.key_index | BIT(7); } else if (notif_ver == 7) { struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); if (IS_ERR(status)) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]); iwl_mvm_convert_igtk(status, &v7->igtk[0]); } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; /* these three command versions have same layout and size, the * difference is only in a few not used (reserved) fields. */ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); if (IS_ERR(status)) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); iwl_mvm_convert_gtk_v2(status, &v9->gtk[0]); iwl_mvm_convert_igtk(status, &v9->igtk[0]); status->tid_tear_down = v9->tid_tear_down; } else if (notif_ver == 12) { struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); if (IS_ERR(status)) goto out_free_resp; iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]); iwl_mvm_convert_igtk(status, &v12->igtk[0]); status->tid_tear_down = v12->tid_tear_down; } else { IWL_ERR(mvm, "Firmware advertises unknown WoWLAN status response %d!\n", notif_ver); status = ERR_PTR(-EIO); } out_free_resp: iwl_free_resp(&cmd); return status; } static struct iwl_wowlan_status_data * iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) { u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD, IWL_FW_CMD_VER_UNKNOWN); __le32 station_id = cpu_to_le32(sta_id); u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? 
sizeof(station_id) : 0; if (!mvm->net_detect) { /* only for tracing for now */ int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, cmd_size, &station_id); if (ret) IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); } return iwl_mvm_send_wowlan_get_status(mvm, sta_id); } /* releases the MVM mutex */ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_wowlan_status_data *status; int i; bool keep; struct iwl_mvm_sta *mvm_ap_sta; status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); if (IS_ERR(status)) goto out_unlock; IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", status->wakeup_reasons); /* still at hard-coded place 0 for D3 image */ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); if (!mvm_ap_sta) goto out_free; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status->qos_seq_ctr[i]; /* firmware stores last-used value, we store next value */ seq += 0x10; mvm_ap_sta->tid_data[i].seq_number = seq; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { i = mvm->offload_tid; iwl_trans_set_q_ptrs(mvm->trans, mvm_ap_sta->tid_data[i].txq_id, mvm_ap_sta->tid_data[i].seq_number >> 4); } /* now we have all the data we need, unlock to avoid mac80211 issues */ mutex_unlock(&mvm->mutex); iwl_mvm_report_wakeup_reasons(mvm, vif, status); keep = iwl_mvm_setup_connection_keep(mvm, vif, status); kfree(status); return keep; out_free: kfree(status); out_unlock: mutex_unlock(&mvm->mutex); return false; } #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ IWL_SCAN_MAX_PROFILES) struct iwl_mvm_nd_query_results { u32 matched_profiles; u8 matches[ND_QUERY_BUF_LEN]; }; static int iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *results) { struct iwl_scan_offload_profiles_query *query; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, .flags = CMD_WANT_SKB, }; int ret, len; size_t query_len, matches_len; int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); return ret; } if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { query_len = sizeof(struct iwl_scan_offload_profiles_query); matches_len = sizeof(struct iwl_scan_offload_profile_match) * max_profiles; } else { query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * max_profiles; } len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len < query_len) { IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); ret = -EIO; goto out_free_resp; } query = (void *)cmd.resp_pkt->data; results->matched_profiles = le32_to_cpu(query->matched_profiles); memcpy(results->matches, query->matches, matches_len); #ifdef CONFIG_IWLWIFI_DEBUGFS mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); #endif out_free_resp: iwl_free_resp(&cmd); return ret; } static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *query, int idx) { int n_chans = 0, i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = (struct iwl_scan_offload_profile_match *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } else { struct iwl_scan_offload_profile_match_v1 
*matches = (struct iwl_scan_offload_profile_match_v1 *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } return n_chans; } static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, struct iwl_mvm_nd_query_results *query, struct cfg80211_wowlan_nd_match *match, int idx) { int i; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = (struct iwl_scan_offload_profile_match *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } else { struct iwl_scan_offload_profile_match_v1 *matches = (struct iwl_scan_offload_profile_match_v1 *)query->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) match->channels[match->n_channels++] = mvm->nd_channels[i]->center_freq; } } static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct cfg80211_wowlan_nd_info *net_detect = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; struct iwl_wowlan_status_data *status; struct iwl_mvm_nd_query_results query; unsigned long matched_profiles; u32 reasons = 0; int i, n_matches, ret; status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); if (!IS_ERR(status)) { reasons = status->wakeup_reasons; kfree(status); } if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) goto out; ret = iwl_mvm_netdetect_query_results(mvm, &query); if (ret || !query.matched_profiles) { wakeup_report = NULL; goto out; } matched_profiles = query.matched_profiles; if (mvm->n_nd_match_sets) { n_matches = hweight_long(matched_profiles); } else { IWL_ERR(mvm, "no net detect match information available\n"); n_matches = 0; } net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL); if (!net_detect || !n_matches) goto out_report_nd; for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct cfg80211_wowlan_nd_match *match; int idx, n_channels = 0; n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); if (!match) goto out_report_nd; net_detect->matches[net_detect->n_matches++] = match; /* We inverted the order of the SSIDs in the scan * request, so invert the index here. */ idx = mvm->n_nd_match_sets - i - 1; match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len); if (mvm->n_nd_channels < n_channels) continue; iwl_mvm_query_set_freqs(mvm, &query, match, i); } out_report_nd: wakeup.net_detect = net_detect; out: iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); if (net_detect) { for (i = 0; i < net_detect->n_matches; i++) kfree(net_detect->matches[i]); kfree(net_detect); } } static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_resume_disconnect(vif); } static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) { struct error_table_start { /* cf. 
struct iwl_error_event_table */ u32 valid; __le32 err_id; } err_info; if (!base) return false; iwl_trans_read_mem_bytes(trans, base, &err_info, sizeof(err_info)); if (err_info.valid && err_id) *err_id = le32_to_cpu(err_info.err_id); return !!err_info.valid; } static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { u32 err_id; /* check for lmac1 error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.lmac_error_event_table[0], &err_id)) { if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { struct cfg80211_wowlan_wakeup wakeup = { .rfkill_release = true, }; ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); } return true; } /* check if we have lmac2 set and check for error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.lmac_error_event_table[1], NULL)) return true; /* check for umac error */ if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.umac_error_event_table, NULL)) return true; return false; } static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; int ret = 1; enum iwl_d3_status d3_status; bool keep = false; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); mutex_lock(&mvm->mutex); mvm->last_reset_or_resume_time_jiffies = jiffies; /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) goto err; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); if (iwl_mvm_check_rt_status(mvm, vif)) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); ret = 1; goto err; } ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; if (d3_status != IWL_D3_STATUS_ALIVE) { IWL_INFO(mvm, "Device was reset during suspend\n"); goto err; } if (d0i3_first) { struct iwl_host_cmd cmd = { .id = D0I3_END_CMD, .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, }; int len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret < 0) { IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", ret); goto err; } switch (mvm->cmd_ver.d0i3_resp) { case 0: break; case 1: len = iwl_rx_packet_payload_len(cmd.resp_pkt); if (len != sizeof(u32)) { IWL_ERR(mvm, "Error with D0I3_END_CMD response size (%d)\n", len); goto err; } if (IWL_D0I3_RESET_REQUIRE & le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) { iwl_write32(mvm->trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); iwl_free_resp(&cmd); } break; default: WARN_ON(1); } } /* after the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; /* * Query the current location and source from the D3 firmware so we * can play it back when we re-initialize the D0 firmware */ iwl_mvm_update_changed_regdom(mvm); /* Re-configure PPAG settings */ iwl_mvm_ppag_send_cmd(mvm); if (!unified_image) /* Re-configure default SAR profile */ iwl_mvm_sar_select_profile(mvm, 1, 1); if (mvm->net_detect) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that * fails, continue and try to get the wake-up reasons, * but trigger a HW restart by keeping a failure code * in ret.
*/ if (unified_image) ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, false); iwl_mvm_query_netdetect_reasons(mvm, vif); /* has unlocked the mutex, so skip that */ goto out; } else { keep = iwl_mvm_query_wakeup_reasons(mvm, vif); #ifdef CONFIG_IWLWIFI_DEBUGFS if (keep) mvm->keep_vif = vif; #endif /* has unlocked the mutex, so skip that */ goto out_iterate; } err: iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); out_iterate: if (!test) ieee80211_iterate_active_interfaces_mtx(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); /* no need to reset the device in unified images, if successful */ if (unified_image && !ret) { /* nothing else to do if we already sent D0I3_END_CMD */ if (d0i3_first) return 0; ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); if (!ret) return 0; } /* * Reconfigure the device in one of the following cases: * 1. We are not using a unified image * 2. We are using a unified image but had an error while exiting D3 */ set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* regardless of what happened, we're now out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; return 1; } int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; ret = __iwl_mvm_resume(mvm, false); iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); return ret; } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); device_set_wakeup_enable(mvm->trans->dev, enabled); } #ifdef CONFIG_IWLWIFI_DEBUGFS static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; int err; if (mvm->d3_test_active) return -EBUSY; file->private_data = inode->i_private; iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); /* start pseudo D3 */ rtnl_lock(); wiphy_lock(mvm->hw->wiphy); err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); if (err > 0) err = -EINVAL; if (err) return err; mvm->d3_test_active = true; mvm->keep_vif = NULL; return 0; } static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; u32 pme_asserted; while (true) { /* read pme_ptr if available */ if (mvm->d3_test_pme_ptr) { pme_asserted = iwl_trans_read_mem32(mvm->trans, mvm->d3_test_pme_ptr); if (pme_asserted) break; } if (msleep_interruptible(100)) break; } return 0; } static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { /* skip the one we keep connection on */ if (_data == vif) return; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_connection_loss(vif); } static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->d3_test_active = false; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); rtnl_lock(); wiphy_lock(mvm->hw->wiphy); __iwl_mvm_resume(mvm, true); wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); iwl_mvm_resume_tcm(mvm); iwl_fw_runtime_resume(&mvm->fwrt); iwl_abort_notification_waits(&mvm->notif_wait); if (!unified_image) { int remaining_time = 10; ieee80211_restart_hw(mvm->hw); /* wait for restart and disconnect all interfaces */ while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && remaining_time > 0) { 
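/* poll once a second; the restart-complete flow clears IN_HW_RESTART once mac80211 has finished reconfiguration */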
remaining_time--; msleep(1000); } if (remaining_time == 0) IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); } ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); return 0; } const struct file_operations iwl_dbgfs_d3_test_ops = { .llseek = no_llseek, .open = iwl_mvm_d3_test_open, .read = iwl_mvm_d3_test_read, .release = iwl_mvm_d3_test_release, }; #endif diff --git a/sys/contrib/dev/iwlwifi/mvm/mac80211.c b/sys/contrib/dev/iwlwifi/mvm/mac80211.c index a7798713788e..8c1e499fa0f1 100644 --- a/sys/contrib/dev/iwlwifi/mvm/mac80211.c +++ b/sys/contrib/dev/iwlwifi/mvm/mac80211.c @@ -1,5491 +1,5491 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #endif #include #include #include #if defined(__FreeBSD__) #include #endif #include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" #include "sta.h" #include "time-event.h" #include "iwl-eeprom-parse.h" #include "iwl-phy-db.h" #ifdef CONFIG_NL80211_TESTMODE #include "testmode.h" #endif #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), }, { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), }, { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE), }, }; static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { { .num_different_channels = 2, .max_interfaces = 3, .limits = iwl_mvm_limits, .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, }; static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { .max_peers = IWL_MVM_TOF_MAX_APS, .report_ap_tsf = 1, .randomize_mac_addr = 1, .ftm = { .supported = 1, .asap = 1, .non_asap = 1, .request_lci = 1, .request_civicloc = 1, .trigger_based = 1, .non_trigger_based = 1, .max_bursts_exponent = -1, /* all supported */ .max_ftms_per_burst = 0, /* no limits */ .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), .preambles = BIT(NL80211_PREAMBLE_LEGACY) | BIT(NL80211_PREAMBLE_HT) | BIT(NL80211_PREAMBLE_VHT) | BIT(NL80211_PREAMBLE_HE), }, }; static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) { int i; memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); for (i = 0; i < NUM_PHY_CTX; i++) { mvm->phy_ctxts[i].id = i; mvm->phy_ctxts[i].ref = 0; } } struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, bool *changed) { struct ieee80211_regdomain *regd = NULL; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcc_update_resp *resp; u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); lockdep_assert_held(&mvm->mutex); resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); resp = NULL; goto out; } if 
(changed) { u32 status = le32_to_cpu(resp->status); *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || status == MCC_RESP_ILLEGAL); } resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, MCC_UPDATE_CMD, 0); IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); goto out; } IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); mvm->lar_regdom_set = true; mvm->mcc_src = src_id; iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); out: kfree(resp); return regd; } void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) { bool changed; struct ieee80211_regdomain *regd; if (!iwl_mvm_is_lar_supported(mvm)) return; regd = iwl_mvm_get_current_regdomain(mvm, &changed); if (!IS_ERR_OR_NULL(regd)) { /* only update the regulatory core if changed */ if (changed) regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); } } struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, bool *changed) { return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", iwl_mvm_is_wifi_mcc_supported(mvm) ? MCC_SOURCE_GET_CURRENT : MCC_SOURCE_OLD_FW, changed); } int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) { enum iwl_mcc_source used_src; struct ieee80211_regdomain *regd; int ret; bool changed; const struct ieee80211_regdomain *r = wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); if (!r) return -ENOENT; /* save the last source in case we overwrite it below */ used_src = mvm->mcc_src; if (iwl_mvm_is_wifi_mcc_supported(mvm)) { /* Notify the firmware we support wifi location updates */ regd = iwl_mvm_get_current_regdomain(mvm, NULL); if (!IS_ERR_OR_NULL(regd)) kfree(regd); } /* Now set our last stored MCC and source */ regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, &changed); if (IS_ERR_OR_NULL(regd)) return -EIO; /* update cfg80211 if the regdomain was changed */ if (changed) ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); else ret = 0; kfree(regd); return ret; } static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = he_if_types_ext_capa_sta, .extended_capabilities_mask = he_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), }, }; static int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); return 0; } int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; static const u32 mvm_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; #ifdef CONFIG_PM_SLEEP bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); #endif /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); 
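/* each ieee80211_hw_set() here advertises one IEEE80211_HW_* capability bit that mac80211 consults when driving the device */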
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, TIMING_BEACON_ONLY); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); ieee80211_hw_set(hw, STA_MMPDU_TXQ); /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up: the command read from the FIFO * gets out of sync and isn't a TX command, so that we have an * assert EDC. * * It's not clear where the bug is, but since we didn't support * A-MSDU until moving to the mac80211 iTXQs, just leave it disabled * for older devices. We also don't see this issue on any newer * devices. */ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, HAS_RATE_CONTROL); } if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { /* * we absolutely need this for the new TX API since that comes * with many more queues than the current code can deal with * for station powersave */ return -EINVAL; } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; hw->queues = IEEE80211_NUM_ACS; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; hw->radiotap_timestamp.units_pos = IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; /* this is the case for CCK frames, it's better (only 8) for OFDM */ hw->radiotap_timestamp.accuracy = 22; if (!iwl_mvm_has_tlc_offload(mvm)) hw->rate_control_algorithm = RS_NAME; hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; if (iwl_mvm_has_new_rx_api(mvm)) { mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_GCMP_256; hw->wiphy->n_cipher_suites++; } if (iwlwifi_mod_params.swcrypto) IWL_ERR(mvm, "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); if (!iwlwifi_mod_params.bt_coex_active) IWL_ERR(mvm, "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); ieee80211_hw_set(hw, MFP_CAPABLE); mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; hw->wiphy->n_cipher_suites++; if (iwl_mvm_has_new_rx_api(mvm)) {
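/* the BIP-GMAC management-frame ciphers are only offered when the new RX API is available */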
mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_128; hw->wiphy->n_cipher_suites++; mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_BIP_GMAC_256; hw->wiphy->n_cipher_suites++; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); /* The new Tx API does not allow passing the key or keyid of an MPDU to * the hw, preventing us from controlling which key(id) to use per MPDU. * Until that's fixed we can't use Extended Key ID for the newer cards. */ if (!iwl_mvm_has_new_tx_api(mvm)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; if (iwl_mvm_is_lar_supported(mvm)) hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; else hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | REGULATORY_DISABLE_BEACON_HINTS; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mvm_iface_combinations); hw->wiphy->max_remain_on_channel_duration = 10000; hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; /* Extract MAC address */ memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); hw->wiphy->addresses = mvm->addresses; hw->wiphy->n_addresses = 1; /* Extract additional MAC addresses if available */ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; for (i = 1; i < num_mac; i++) { memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, ETH_ALEN); mvm->addresses[i].addr[5]++; hw->wiphy->n_addresses++; } iwl_mvm_reset_phy_ctxts(mvm); hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { hw->wiphy->bands[NL80211_BAND_5GHZ] = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_6GHZ] = &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; hw->wiphy->hw_version = mvm->trans->hw_id; if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->max_sched_scan_reqs = 1; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); /* we create the 802.11 header and zero length SSID IE. */ hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; hw->wiphy->max_sched_scan_plan_interval = U16_MAX; /* * the firmware uses u8 for num of iterations, but 0xff is saved for * infinite loop, so the maximum number of iterations is actually 254. 
*/ hw->wiphy->max_sched_scan_plan_iterations = 254; hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, IWL_FW_CMD_VER_UNKNOWN) == 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); } if (iwl_mvm_is_oce_supported(mvm)) { u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); /* Old firmware also supports probe deferral and suppression */ if (scan_ver < 15) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } if (mvm->nvm_data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) { hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(he_iftypes_ext_capa); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | WIPHY_WOWLAN_EAP_IDENTITY_REQ | WIPHY_WOWLAN_RFKILL_RELEASE | WIPHY_WOWLAN_NET_DETECT; mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | WIPHY_WOWLAN_GTK_REKEY_FAILURE | WIPHY_WOWLAN_4WAY_HANDSHAKE; mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; mvm->wowlan.max_nd_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); hw->wiphy->wowlan = &mvm->wowlan; } #endif ret = iwl_mvm_leds_init(mvm); if (ret) return ret; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; ieee80211_hw_set(hw, TDLS_WIDER_BW); } if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); if 
(fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); iwl_mvm_vendor_cmds_register(mvm); hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); ret = ieee80211_register_hw(mvm->hw); if (ret) { iwl_mvm_leds_exit(mvm); } return ret; } static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { if (likely(sta)) { if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) return; } else { if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) return; } ieee80211_free_txskb(mvm->hw, skb); } static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; /* * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs * so we treat the others as broadcast */ if (ieee80211_is_mgmt(hdr->frame_control)) sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { /* mac80211 holds rcu read lock */ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); if (IS_ERR_OR_NULL(sta)) goto drop; } } iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); } void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); struct sk_buff *skb = NULL; /* * No need for threads to be pending here, they can leave the first * taker all the work. * * mvmtxq->tx_request logic: * * If 0, no one is currently TXing, set to 1 to indicate current thread * will now start TX and other threads should quit. * * If 1, another thread is currently TXing, set to 2 to indicate to * that thread that there was another request. Since that request may * have raced with the check whether the queue is empty, the TXing * thread should check the queue's status one more time before leaving. * This check is done in order to not leave any TX hanging in the queue * until the next TX invocation (which may not even happen). * * If 2, another thread is currently TXing, and it will already double * check the queue, so do nothing. 
*/ if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) return; rcu_read_lock(); do { while (likely(!mvmtxq->stopped && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); if (!skb) { if (txq->sta) IWL_DEBUG_TX(mvm, "TXQ of sta %pM tid %d is now empty\n", txq->sta->addr, txq->tid); break; } iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); } static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); /* * Please note that racing is handled very carefully here: * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is * deleted afterwards. * This means that if: * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): * queue is allocated and we can TX. * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): * a race, should defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): * need to allocate the queue and defer the frame. * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): * queue is already scheduled for allocation, no need to allocate, * should defer the frame. */ /* If the queue is allocated, TX and return. */ if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { /* * Check that the list is empty to avoid a race where txq_id is * already updated, but the queue allocation work wasn't * finished */ if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) return; iwl_mvm_mac_itxq_xmit(hw, txq); return; } /* The list is being deleted only after the queue is fully allocated. */ if (!list_empty(&mvmtxq->list)) return; list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)
\ do { \ if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ break; \ iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, enum ieee80211_ampdu_mlme_action action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; switch (action) { case IEEE80211_AMPDU_TX_OPERATIONAL: { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, "TX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, tid_data->ssn); break; } case IEEE80211_AMPDU_TX_STOP_CONT: CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, "TX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; case IEEE80211_AMPDU_RX_START: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, "RX AGG START: MAC %pM tid %d ssn %d\n", sta->addr, tid, rx_ba_ssn); break; case IEEE80211_AMPDU_RX_STOP: CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, "RX AGG STOP: MAC %pM tid %d\n", sta->addr, tid); break; default: break; } } static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); if (!(mvm->nvm_data->sku_cap_11n_enable)) return -EACCES; mutex_lock(&mvm->mutex); switch (action) { case IEEE80211_AMPDU_RX_START: if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == iwl_mvm_sta_from_mac80211(sta)->sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; mdata->opened_rx_ba_sessions = true; mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, timeout); break; case IEEE80211_AMPDU_RX_STOP: ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size, amsdu); break; default: WARN_ON_ONCE(1); ret = -EINVAL; break; } if (!ret) { u16 rx_ba_ssn = 0; if (action == IEEE80211_AMPDU_RX_START) rx_ba_ssn = *ssn; iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, rx_ba_ssn, action); } mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->uploaded = false; mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 
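/* discard any time event still tracked for this interface */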
spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); mvm->cur_aid = 0; mvm->scan_status = 0; mvm->ps_disabled = false; mvm->rfkill_safe_init_done = false; /* just in case one was running */ iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_ftm_restart(mvm); /* * clean up all interfaces, even inactive ones, as some might have * gone down during the HW restart */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ieee80211_wake_queues(mvm->hw); mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); } int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_mei_get_ownership(mvm); if (ret) return ret; if (mvm->mei_nvm_data) { /* We got the NIC, we can now free the MEI NVM data */ kfree(mvm->mei_nvm_data); mvm->mei_nvm_data = NULL; /* * We can't free the nvm_data we allocated based on the SAP * data because we registered to cfg80211 with the channels * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data * just in order to be able to free it later. * NULLify nvm_data so that we will read the NVM from the * firmware this time. */ mvm->temp_nvm_data = mvm->nvm_data; mvm->nvm_data = NULL; } if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART * so later code will - from now on - see that we're doing it. */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } ret = iwl_mvm_up(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, NULL); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, NULL); mvm->last_reset_or_resume_time_jiffies = jiffies; if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below * would do.
*/ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); } return ret; } static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; int retry, max_retry = 0; mutex_lock(&mvm->mutex); /* we are starting the mac outside of an error flow, and restart is enabled */ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && iwlwifi_mod_params.fw_restart) { max_retry = IWL_MAX_INIT_RETRY; /* * This will prevent mac80211 recovery flows from triggering during * init failures */ set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); } for (retry = 0; retry <= max_retry; retry++) { ret = __iwl_mvm_mac_start(mvm); if (!ret) break; IWL_ERR(mvm, "mac start retry %d\n", retry); } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); mutex_unlock(&mvm->mutex); iwl_mvm_mei_set_sw_rfkill_state(mvm); return ret; } static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; mutex_lock(&mvm->mutex); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", ret); iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); /* * If we have TDLS peers, remove them. We don't know the last seqno/PN * of packets the FW sent out, so we must reconnect. */ iwl_mvm_teardown_tdls_peers(mvm); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (reconfig_type) { case IEEE80211_RECONFIG_TYPE_RESTART: iwl_mvm_restart_complete(mvm); break; case IEEE80211_RECONFIG_TYPE_SUSPEND: break; } } void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); iwl_mvm_ftm_initiator_smooth_stop(mvm); /* firmware counters are obviously reset now, but we shouldn't * keep partial counts, so also clear the fw_reset_accu counters. */ memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); /* async_handlers_wk is now blocked */ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ /* * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when stopping the * hw (as restart_complete() won't be called in this case) and mac80211 * won't execute the restart. * But make sure to clean up interfaces that have gone down before/during * HW restart was requested. */ if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { if (WARN_ONCE(mvm->scan_uid_status[i], "UMAC scan UID %d status was not cleaned\n", i)) mvm->scan_uid_status[i] = 0; } } } static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); /* * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed.
This is important here because one of those * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that, it could be * retriggered after we flush it but before we've cleared the bit. */ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); /* * The work item could be running or queued if the * ROC time event stops just as we get here. */ flush_work(&mvm->roc_done_wk); iwl_mvm_mei_set_sw_rfkill_state(mvm); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); /* * The worker might have been waiting for the mutex; let it run and * discover that its list is now empty. */ cancel_work_sync(&mvm->async_handlers_wk); } static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) { u16 i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < NUM_PHY_CTX; i++) if (!mvm->phy_ctxts[i].ref) return &mvm->phy_ctxts[i]; IWL_ERR(mvm, "No available PHY context\n"); return NULL; } static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { u32 cmd_id = REDUCE_TX_POWER_CMD; int len; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), .common.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); if (cmd_ver == 7) len = sizeof(cmd.v7); else if (cmd_ver == 6) len = sizeof(cmd.v6); else if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_REDUCE_TX_POWER)) len = sizeof(cmd.v5); else if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) len = sizeof(cmd.v4); else len = sizeof(cmd.v3); /* all structs have the same common part, add it */ len += sizeof(cmd.common); return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; goto out_unlock; } iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) goto out_unlock; iwl_mvm_stop_session_protection(mvm, vif); } } mvmvif->ps_disabled = false; ret = iwl_mvm_power_update_ps(mvm); out_unlock: if (mvmvif->csa_failed) ret = -EIO; mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), }; /* * In the new flow, since FW is in charge of the timing, * if the driver has canceled the channel switch, it will receive the * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it */ if
(iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); mutex_lock(&mvm->mutex); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_remove_csa_period(mvm, vif); else WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); iwl_mvm_post_channel_switch(hw, vif); } static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) { struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); /* Trigger disconnect (should clear the CSA state) */ ieee80211_chswitch_done(vif, false); } static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mvmvif->mvm = mvm; RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); /* * Not much to do here. The stack will not allow interface * types or combinations that we didn't advertise, so we * don't really have to check the types. */ mutex_lock(&mvm->mutex); /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) mvmvif->beacon_stats.accu_num_beacons += mvmvif->beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) goto out_unlock; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count++; /* * The AP binding flow can be done only after the beacon * template is configured (which happens only in the mac80211 * start_ap() flow), and adding the broadcast station can happen * only after the binding. * In addition, since modifying the MAC before adding a bcast * station is not allowed by the FW, delay the adding of MAC context to * the point where we can also add the bcast station. * In short: there's not much we can do at this point, other than * allocating resources :) */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); goto out_release; } /* * Only queue for this station is the mcast queue, * which shouldn't be in TFD mask anyway */ ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 0, vif->type, IWL_STA_MULTICAST); if (ret) goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; } mvmvif->features |= hw->netdev_features; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_release; ret = iwl_mvm_power_update_mac(mvm); if (ret) goto out_remove_mac; /* beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) goto out_remove_mac; if (!mvm->bf_allowed_vif && vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; } /* * P2P_DEVICE interface does not have a channel context assigned to it, * so a dedicated PHY context is allocated to it and the corresponding * MAC context is bound to it at this stage. 
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!mvmvif->phy_ctxt) { ret = -ENOSPC; goto out_free_bf; } iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_unref_phy; ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; /* Save a pointer to p2p device vif, so it can later be used to * update the p2p device MAC when a GO is started/stopped */ mvm->p2p_device_vif = vif; } iwl_mvm_tcm_add_vif(mvm, vif); INIT_DELAYED_WORK(&mvmvif->csa_work, iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && !mvm->csme_vif && mvm->mei_registered) { iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); mvm->csme_vif = vif; } goto out_unlock; out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: mvmvif->phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. * We assume here that all the packets sent to the OFFCHANNEL * queue are sent in a ROC session. */ flush_work(&mvm->roc_done_wk); } } static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_probe_resp_data *probe_data; iwl_mvm_prepare_mac_removal(mvm, vif); if (!(vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); mutex_lock(&mvm->mutex); if (vif == mvm->csme_vif) { iwl_mei_set_netdev(NULL); mvm->csme_vif = NULL; } probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); if (probe_data) kfree_rcu(probe_data, rcu_head); if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI); } if (vif->bss_conf.ftm_responder) memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); iwl_mvm_vif_dbgfs_clean(mvm, vif); /* * For AP/GO interfaces, the teardown of the resources allocated to the * interface is handled as part of the stop_ap flow.
*/ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { #ifdef CONFIG_NL80211_TESTMODE if (vif == mvm->noa_vif) { mvm->noa_vif = NULL; mvm->noa_duration = 0; } #endif iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; } if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; out_release: mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; } struct iwl_mvm_mc_iter_data { struct iwl_mvm *mvm; int port_id; }; static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mc_iter_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; struct iwl_host_cmd hcmd = { .id = MCAST_FILTER_CMD, .flags = CMD_ASYNC, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; int ret, len; /* if we don't have free ports, mcast frames will be dropped */ if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) return; if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) return; cmd->port_id = data->port_id++; memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); hcmd.len[0] = len; hcmd.data[0] = cmd; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); } static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) { struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); /* * Send a (synchronous) echo command so that we wait for the * multiple asynchronous MCAST_FILTER_CMD commands sent by * the interface iterator. Otherwise, we might get here over * and over again (by userspace just sending a lot of these) * and the CPU can send them faster than the firmware can * process them. * Note that the CPU is still faster - but with this we'll * actually send fewer commands overall because the CPU will * not schedule the work in mac80211 as frequently if it's * still running when rescheduled (possibly multiple times).
*/ ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); if (ret) IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; struct netdev_hw_addr *addr; int addr_count; bool pass_all; int len; addr_count = netdev_hw_addr_list_count(mc_list); pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || IWL_MVM_FW_MCAST_FILTER_PASS_ALL; if (pass_all) addr_count = 0; len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); cmd = kzalloc(len, GFP_ATOMIC); if (!cmd) return 0; if (pass_all) { cmd->pass_all = 1; #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } netdev_hw_addr_list_for_each(addr, mc_list) { #if defined(__linux__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", cmd->count, addr->addr); #elif defined(__FreeBSD__) IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n", cmd->count, addr->addr, ":"); #endif memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], addr->addr, ETH_ALEN); cmd->count++; } #if defined(__linux__) return (u64)(unsigned long)cmd; #elif defined(__FreeBSD__) return (u64)(uintptr_t)cmd; #endif } static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); #if defined(__linux__) struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; #elif defined(__FreeBSD__) struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast; #endif mutex_lock(&mvm->mutex); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = cmd; if (!cmd) goto out; if (changed_flags & FIF_ALLMULTI) cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); if (cmd->pass_all) cmd->count = 0; iwl_mvm_recalc_multicast(mvm); out: mutex_unlock(&mvm->mutex); *total_flags = 0; } static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int filter_flags, unsigned int changed_flags) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* We support only filter for probe requests */ if (!(changed_flags & FIF_PROBE_REQ)) return; /* Supported only for p2p client interfaces */ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->p2p) return; mutex_lock(&mvm->mutex); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); } static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mu_group_mgmt_cmd cmd = {}; memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, WLAN_MEMBERSHIP_LEN); memcpy(cmd.user_position, vif->bss_conf.mu_group.position, WLAN_USER_POSITION_LEN); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, UPDATE_MU_GROUPS_CMD), 0, sizeof(cmd), &cmd); } static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { if (vif->mu_mimo_owner) { struct iwl_mu_group_mgmt_notif *notif = _data; /* * MU-MIMO Group Id action frame is little endian. We treat * the data received from firmware as if it came from the * action frame, so no conversion is needed. 
*/ ieee80211_update_mu_groups(vif, (u8 *)¬if->membership_status, (u8 *)¬if->user_position); } } void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mu_mimo_iface_iterator, notif); } static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) { u8 byte_num = ppe_pos_bit / 8; u8 bit_num = ppe_pos_bit % 8; u8 residue_bits; u8 res; if (bit_num <= 5) return (ppe[byte_num] >> bit_num) & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); /* * If bit_num > 5, we have to combine bits with next byte. * Calculate how many bits we need to take from current byte (called * here "residue_bits"), and add them to bits from next byte. */ residue_bits = 8 - bit_num; res = (ppe[byte_num + 1] & (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << residue_bits; res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); return res; } static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) { int i; /* * FW currently supports only nss == MAX_HE_SUPP_NSS * * If nss > MAX: we can ignore values we don't support * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } for (i = 0; i < nss; i++) { u8 ru_index_tmp = ru_index_bitmap << 1; u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { ru_index_tmp >>= 1; if (!(ru_index_tmp & 1)) continue; high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } } static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_he_pkt_ext_v2 *pkt_ext) { - u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; u8 ru_index_bitmap = u8_get_bits(*ppe, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); /* Starting after PPE header */ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); } static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nominal_padding, u32 *flags) { int low_th = -1; int high_th = -1; int i; switch (nominal_padding) { case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } /* Set the PPE thresholds accordingly */ if (low_th >= 0 && high_th >= 0) { for (i = 0; i < MAX_HE_SUPP_NSS; i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); bw++) { pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } } *flags |= STA_CTXT_HE_PACKET_EXT; } } static 
void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.he_bss_color.color, .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); int size; struct ieee80211_sta *sta; u32 flags; int i; const struct ieee80211_sta_he_cap *own_he_cap = NULL; struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_supported_band *sband; void *cmd; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) ver = 1; switch (ver) { case 1: /* same layout as v2 except some data at the end */ cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v1); break; case 2: cmd = &sta_ctxt_cmd_v2; size = sizeof(struct iwl_he_sta_context_cmd_v2); break; case 3: cmd = &sta_ctxt_cmd; size = sizeof(struct iwl_he_sta_context_cmd_v3); break; default: IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); return; } rcu_read_lock(); chanctx_conf = rcu_dereference(vif->chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); return; } sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; own_he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); WARN(1, "Can't find STA to configure HE\n"); return; } - if (!sta->he_cap.has_he) { + if (!sta->deflink.he_cap.has_he) { rcu_read_unlock(); return; } flags = 0; /* Block 26-tone RU OFDMA transmissions */ if (mvmvif->he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ - if (sta->he_cap.he_cap_elem.mac_cap_info[0] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); - if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & + if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || - (sta->he_cap.he_cap_elem.mac_cap_info[2] & + (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { u8 link_adap = - ((sta->he_cap.he_cap_elem.mac_cap_info[2] & + ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + - (sta->he_cap.he_cap_elem.mac_cap_info[1] & + (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); if (link_adap == 2) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); else if (link_adap == 3) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[3] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); /* * 
Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 802.11ax/D3.0. */ memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. */ - if (sta->he_cap.he_cap_elem.phy_cap_info[6] & + if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, &sta_ctxt_cmd.pkt_ext); flags |= STA_CTXT_HE_PACKET_EXT; /* PPE Thresholds don't exist - set the API PPE values * according to the Common Nominal Packet Padding field. */ } else { u8 nominal_padding = - u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, nominal_padding, &flags); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; rcu_read_unlock(); /* Mark MU EDCA as enabled, unless none detected on some AC */ flags |= STA_CTXT_HE_MU_EDCA_CW; for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = &mvmvif->queue_params[i].mu_edca_param_rec; u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); if (!mvmvif->queue_params[i].mu_edca) { flags &= ~STA_CTXT_HE_MU_EDCA_CW; break; } sta_ctxt_cmd.trig_based_txf[ac].cwmin = cpu_to_le16(mu_edca->ecw_min_max & 0xf); sta_ctxt_cmd.trig_based_txf[ac].cwmax = cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); sta_ctxt_cmd.trig_based_txf[ac].aifsn = cpu_to_le16(mu_edca->aifsn); sta_ctxt_cmd.trig_based_txf[ac].mu_time = cpu_to_le16(mu_edca->mu_edca_timer); } if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; sta_ctxt_cmd.rand_alloc_ecwmin = vif->bss_conf.uora_ocw_range & 0x7; sta_ctxt_cmd.rand_alloc_ecwmax = (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN)) flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; if (vif->bss_conf.nontransmitted) { flags |= STA_CTXT_HE_REF_BSSID_VALID; ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, vif->bss_conf.transmitter_bssid); sta_ctxt_cmd.max_bssid_indicator = vif->bss_conf.bssid_indicator; sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; sta_ctxt_cmd.profile_periodicity = vif->bss_conf.profile_periodicity; } sta_ctxt_cmd.flags = cpu_to_le32(flags); if (ver < 3) { /* fields before pkt_ext */ BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, offsetof(typeof(sta_ctxt_cmd), pkt_ext)); /* pkt_ext */ for (i = 0; i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); i++) { u8 bw; for (bw = 0; bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); bw++) { BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); } } /* fields after pkt_ext */ BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - 
offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != sizeof(sta_ctxt_cmd_v2) - offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); memcpy((u8 *)&sta_ctxt_cmd_v2 + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), (u8 *)&sta_ctxt_cmd + offsetofend(typeof(sta_ctxt_cmd), pkt_ext), sizeof(sta_ctxt_cmd) - offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); sta_ctxt_cmd_v2.reserved3 = 0; } if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration_override) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (duration_override > duration) duration = duration_override; /* Try really hard to protect the session and hear a beacon * The new session protection command allows us to protect the * session for a much longer time since the firmware will internally * create two events: a 300TU one with a very high priority that * won't be fragmented which should be enough for 99% of the cases, * and another one (which we configure here to be 900TU long) which * will have a slightly lower priority, but more importantly, can be * fragmented so that it'll allow other activities to run. */ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, 900, min_duration, false); else iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; /* * Re-calculate the tsf id, as the leader-follower relations depend * on the beacon interval, which was not known when the station * interface was added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && bss_conf->assoc && vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC * branch for disassociation below. */ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); mvmvif->associated = bss_conf->assoc; if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); memset(&mvmvif->beacon_stats, 0, sizeof(mvmvif->beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); if (ret) { IWL_ERR(mvm, "failed to update quotas\n"); return; } if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { /* * If we're restarting then the firmware will * obviously have lost synchronisation with * the AP. 
It will attempt to synchronise by * itself, but we can make it more reliable by * scheduling a session protection time event. * * The firmware needs to receive a beacon to * catch up with synchronisation, use 110% of * the beacon interval. * * Set a large maximum delay to allow for more * than a single interface. * * For new firmware versions, rely on the * firmware. This is relevant for DCM scenarios * only anyway. */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, 5 * dur, false); } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && !vif->bss_conf.dtim_period) { /* * If we're not restarting and still haven't * heard a beacon (dtim period unknown) then * make sure we still have enough minimum time * remaining in the time event, since the auth * might actually have taken quite a while * (especially for SAE) and so the remaining * time could be small without us having heard * a beacon yet. */ iwl_mvm_protect_assoc(mvm, vif, 0); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ ret = iwl_mvm_sf_update(mvm, vif, false); WARN_ONCE(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status), "Failed to update SF upon disassociation\n"); /* * If we get an assert during the connection (after the * station has been added, but before the vif is set * to associated), mac80211 will re-add the station and * then configure the vif. Since the vif is not * associated, we would remove the station here and * this would fail the recovery. */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) IWL_ERR(mvm, "failed to update quotas\n"); /* this will take the cleared BSSID from bss_conf */ ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) IWL_ERR(mvm, "failed to update MAC %pM (clear after unassoc)\n", vif->addr); } /* * The firmware tracks the MU-MIMO group on its own. * However, on HW restart we should restore this data. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { ret = iwl_mvm_update_mu_groups(mvm, vif); if (ret) IWL_ERR(mvm, "failed to update VHT MU_MIMO groups\n"); } iwl_mvm_recalc_multicast(mvm); /* reset rssi values */ mvmvif->bf_data.ave_beacon_signal = 0; iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, IEEE80211_SMPS_AUTOMATIC); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } if (changes & BSS_CHANGED_BEACON_INFO) { /* * We received a beacon from the associated AP so * remove the session protection. */ iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | /* * Send power command on every beacon change, * because we may have not enabled beacon abort yet. 
*/ BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_CQM) { IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); /* reset cqm events tracking */ mvmvif->bf_data.last_cqm_event = 0; if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); if (ret) IWL_ERR(mvm, "failed to update CQM thresholds\n"); } } if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_apply_fw_smps_request(vif); } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret, i; mutex_lock(&mvm->mutex); /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); if (ret) goto out_unlock; /* * Re-calculate the tsf id, as the leader-follower relations depend on * the beacon interval, which was not known when the AP interface * was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); mvmvif->ap_assoc_sta_count = 0; /* Add the mac context */ ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_unlock; /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_remove; /* * This is not very nice, but the simplest: * For older FWs adding the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW so make the order of removal depend on * the TLV */ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) goto out_unbind; /* * Send the bcast station. At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) { iwl_mvm_rm_mcast_sta(mvm, vif); goto out_unbind; } } else { /* * Send the bcast station. 
At this stage the TBTT and DTIM time * events are added and applied to the scheduler */ ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; ret = iwl_mvm_add_mcast_sta(mvm, vif); if (ret) { iwl_mvm_send_rm_bcast_sta(mvm, vif); goto out_unbind; } } /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; /* send all the early keys to the device now */ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; if (!key) continue; mvmvif->ap_early_keys[i] = NULL; ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); if (ret) goto out_quota_failed; } if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, true, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } /* power update needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_quota_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_bt_coex_vif_change(mvm); /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); iwl_mvm_ftm_restart_responder(mvm, vif); goto out_unlock; out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_rm_mcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: iwl_mvm_mac_ctxt_remove(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_prepare_mac_removal(mvm, vif); mutex_lock(&mvm->mutex); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); mvm->csa_tx_block_bcn_timeout = 0; } mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { iwl_mvm_vif_set_low_latency(mvmvif, false, LOW_LATENCY_VIF_TYPE); iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); } iwl_mvm_bt_coex_vif_change(mvm); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_ftm_responder_clear(mvm, vif); /* * This is not very nice, but the simplest: * For older FWs removing the mcast sta before the bcast station may * cause assert 0x2b00. * This is fixed in later FW (which will stop beaconing when removing * bcast station). 
* So make the order of removal depend on the TLV */ if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_send_rm_bcast_sta(mvm, vif); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_rm_mcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); mutex_unlock(&mvm->mutex); } static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* Changes will be applied when the AP/IBSS is started */ if (!mvmvif->ap_ibss_active) return; if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_FTM_RESPONDER) { int ret = iwl_mvm_ftm_start_responder(mvm, vif); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", ret); } } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) iwl_mvm_update_mu_groups(mvm, vif); break; default: /* shouldn't happen */ WARN_ON_ONCE(1); } if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; mutex_lock(&mvm->mutex); ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can * happen, for instance, if we stopped the scan ourselves, * called ieee80211_scan_completed() and the userspace called * cancel scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. 
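* Note that the check below runs under mvm->mutex, so it should not
* race with the driver's own scan-stop paths, only with the mac80211
* work scheduling described above.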
*/ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from mac80211 */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, false); } static void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, tids, more_data, true); } static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); unsigned long txqs = 0, tids = 0; int tid; /* * If we have TVQM then we get too high queue numbers - luckily * we really shouldn't get here with that because such hardware * should have firmware supporting buffer station offload. */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; __set_bit(tid_data->txq_id, &txqs); if (iwl_mvm_tid_queued(mvm, tid_data) == 0) continue; __set_bit(tid, &tids); } switch (cmd) { case STA_NOTIFY_SLEEP: for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx * queues to this station will not be transmitted. The fw will * send a Tx response with TX_STATUS_FAIL_DEST_PS. */ break; case STA_NOTIFY_AWAKE: if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); iwl_mvm_sta_modify_ps_wake(mvm, sta); break; default: break; } spin_unlock_bh(&mvmsta->lock); } static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { __iwl_mvm_mac_sta_notify(hw, cmd, sta); } void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta->vif || mvmsta->vif->type != NL80211_IFTYPE_AP) { rcu_read_unlock(); return; } if (mvmsta->sleeping != sleeping) { mvmsta->sleeping = sleeping; __iwl_mvm_mac_sta_notify(mvm->hw, sleeping ? 
STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, sta); ieee80211_sta_ps_transition(sta, sleeping); } if (sleeping) { switch (notif->type) { case IWL_MVM_PM_EVENT_AWAKE: case IWL_MVM_PM_EVENT_ASLEEP: break; case IWL_MVM_PM_EVENT_UAPSD: ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); break; case IWL_MVM_PM_EVENT_PS_POLL: ieee80211_sta_pspoll(sta); break; default: break; } } rcu_read_unlock(); } static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected * station pointer. The rest of the code will thus no longer * be able to find the station this way, and we don't rely * on further RCU synchronisation after the sta_state() * callback deleted the station. */ mutex_lock(&mvm->mutex); if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-ENOENT)); mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { int i; if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_tcm_mac *mdata; mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); mdata->opened_rx_ba_sessions = false; } if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } if (!vif->p2p && (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } } vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } static void iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *peer_addr, enum nl80211_tdls_operation action) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tdls *tdls_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_TDLS); if (!trig) return; tdls_trig = (void *)trig->data; if (!(tdls_trig->action_bitmap & BIT(action))) return; if (tdls_trig->peer_mode && memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "TDLS event occurred, peer %pM, action %d", peer_addr, action); } struct iwl_mvm_he_obss_narrow_bw_ru_data { bool tolerated; }; static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, struct cfg80211_bss *bss, void *_data) { struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; const struct cfg80211_bss_ies *ies; const struct element *elem; rcu_read_lock(); ies = rcu_dereference(bss->ies); elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, ies->len); if (!elem || elem->datalen < 10 || !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { data->tolerated = false; } rcu_read_unlock(); } static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { .tolerated = true, }; if (!(vif->bss_conf.chandef.chan->flags & 
IEEE80211_CHAN_RADAR)) { mvmvif->he_ru_2mhz_block = false; return; } cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); /* * If there is at least one AP on the radar channel that cannot * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU, * block such transmissions. */ mvmvif->he_ru_2mhz_block = !iter_data.tolerated; } static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct ieee80211_supported_band *sband; const struct ieee80211_sta_he_cap *he_cap; if (vif->type != NL80211_IFTYPE_STATION) return; if (!mvm->cca_40mhz_workaround) return; /* decrement and check that we reached zero */ mvm->cca_40mhz_workaround--; if (mvm->cca_40mhz_workaround) return; sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; he_cap = ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); if (he_cap) { /* we know that ours is writable */ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; he->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } } static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) { #if IS_ENABLED(CONFIG_IWLMEI) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mei_conn_info conn_info = { .ssid_len = vif->bss_conf.ssid_len, .channel = vif->bss_conf.chandef.chan->hw_value, }; if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) return; if (!mvm->mei_registered) return; switch (mvm_sta->pairwise_cipher) { case WLAN_CIPHER_SUITE_CCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; break; case WLAN_CIPHER_SUITE_GCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; break; case WLAN_CIPHER_SUITE_GCMP_256: conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; break; case 0: /* open profile */ break; default: /* cipher not supported, don't send anything to iwlmei */ return; } switch (mvmvif->rekey_data.akm) { case WLAN_AKM_SUITE_SAE & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; break; case WLAN_AKM_SUITE_PSK & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; break; case WLAN_AKM_SUITE_8021X & 0xff: conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; break; case 0: /* open profile */ conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; break; default: /* auth method / AKM not supported */ /* TODO: All the FT versions of these? */ return; } memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); /* TODO: add support for collocated AP data */ iwl_mei_host_associated(&conn_info, NULL); #endif } static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); /* this would be a mac80211 bug ... but don't crash */ if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; /* * If we are in a STA removal flow and in DQA mode: * * This is after the sync_rcu part, so the queues have already been * flushed. No more TXs on their way in mac80211's path, and no more in * the queues. 
* Also, we won't be getting any new TX frames for this station. * What we might have are deferred TX frames that need to be taken care * of. * * Drop any still-queued deferred frames before removing the STA, and * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { flush_work(&mvm->add_stream_wk); /* * No need to make sure deferred TX indication is off since the * worker will already remove it if it was on */ /* * Additionally, reset the 40 MHz capability if we disconnected * from the AP now. */ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); } mutex_lock(&mvm->mutex); /* track whether or not the station is associated */ mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { /* * Firmware bug - it'll crash if the beacon interval is less * than 16. We can't avoid connecting at all, so refuse the * station state change, this will cause mac80211 to abandon * attempts to connect to this AP, and eventually wpa_s will * blocklist the AP... */ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.beacon_int < 16) { IWL_ERR(mvm, "AP %pM beacon interval is %d, refusing due to firmware bug!\n", sta->addr, vif->bss_conf.beacon_int); ret = -EINVAL; goto out_unlock; } if (vif->type == NL80211_IFTYPE_STATION) - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; if (sta->tdls && (vif->p2p || iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || iwl_mvm_phy_ctx_count(mvm) > 1)) { IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); ret = -EBUSY; goto out_unlock; } ret = iwl_mvm_add_sta(mvm, vif, sta); if (sta->tdls && ret == 0) { iwl_mvm_recalc_tdls_state(mvm, vif, true); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* * EBS may be disabled due to previous failures reported by FW. * Reset EBS status here assuming the environment has changed. 
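* (For example, the machine may have moved since the last scan, in
* which case an earlier EBS failure says nothing about the current
* environment.)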
*/ mvm->last_ebs_successful = true; iwl_mvm_check_uapsd(mvm, vif, sta->addr); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } else if (vif->type == NL80211_IFTYPE_STATION) { - vif->bss_conf.he_support = sta->he_cap.has_he; + vif->bss_conf.he_support = sta->deflink.he_cap.has_he; mvmvif->he_ru_2mhz_block = false; - if (sta->he_cap.has_he) + if (sta->deflink.he_cap.has_he) iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, false); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { ret = 0; /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); if (sta->tdls) { iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_ENABLE_LINK); } else { /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); mvmvif->authorized = 1; /* * Now that the station is authorized, i.e., keys were already * installed, need to indicate to the FW that * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { /* once we move into assoc state, need to update rate scale to * disable using wide bandwidth */ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, false); if (!sta->tdls) { /* Multicast data frames are no longer allowed */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); /* * Set this after the above iwl_mvm_mac_ctxt_changed() * to avoid sending high prio again for a little time. 
*/ mvmvif->authorized = 0; /* disable beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); WARN_ON(ret && !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)); } ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = iwl_mvm_rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } if (unlikely(ret && test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status))) ret = 0; } else { ret = -EIO; } out_unlock: mutex_unlock(&mvm->mutex); if (sta->tdls && ret == 0) { if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); } return ret; } static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mvm->rts_threshold = value; return 0; } static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED)) iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) iwl_mvm_sf_update(mvm, vif, false); } static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); mvmvif->queue_params[ac] = *params; /* * No need to update right away, we'll get BSS_CHANGED_QOS * The exception is P2P_DEVICE interface which needs immediate update. 
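* (A P2P_DEVICE has no BSS, so presumably no BSS_CHANGED_QOS event is
* ever generated for it; pushing the MAC context here is the only
* chance to apply the new parameters.)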
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } return 0; } static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_protect_assoc(mvm, vif, info->duration); mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); /* for successful cases (auth/assoc), don't cancel session protection */ if (info->success) return; mutex_lock(&mvm->mutex); iwl_mvm_stop_session_protection(mvm, vif); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); if (!vif->bss_conf.idle) { ret = -EBUSY; goto out; } ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); /* Due to a race condition, it's possible that mac80211 asks * us to stop a sched_scan when it's already stopped. This * can happen, for instance, if we stopped the scan ourselves, * called ieee80211_sched_scan_stopped() and the userspace called * stop sched scan before ieee80211_sched_scan_stopped_work() * could run. To handle this, simply return if the scan is * not running. 
*/ if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; } static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = NULL; struct iwl_mvm_key_pn *ptk_pn; int keyidx = key->keyidx; int ret, i; u8 key_offset; if (sta) mvmsta = iwl_mvm_sta_from_mac80211(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; } else if (vif->type == NL80211_IFTYPE_STATION) { key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; } else { IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); return -EOPNOTSUPP; } break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (!iwl_mvm_has_new_tx_api(mvm)) key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (vif->type == NL80211_IFTYPE_STATION) break; if (iwl_mvm_has_new_tx_api(mvm)) return -EOPNOTSUPP; /* support HW crypto on TX */ return 0; default: return -EOPNOTSUPP; } switch (cmd) { case SET_KEY: if (keyidx == 6 || keyidx == 7) rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], key); if ((vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_AP) && !sta) { /* * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. * CMAC/GMAC in AP/IBSS modes must be done in software. */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = -EOPNOTSUPP; break; } if (key->cipher != WLAN_CIPHER_SUITE_GCMP && key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && !iwl_mvm_has_new_tx_api(mvm)) { key->hw_key_idx = STA_KEY_IDX_INVALID; ret = 0; break; } if (!mvmvif->ap_ibss_active) { for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (!mvmvif->ap_early_keys[i]) { mvmvif->ap_early_keys[i] = key; break; } } if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) ret = -ENOSPC; else ret = 0; break; } } /* During FW restart, in order to restore the state as it was, * don't try to reprogram keys we previously failed for. 
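* Such keys were left with hw_key_idx == STA_KEY_IDX_INVALID when the
* earlier attempt failed, which is exactly what the check below tests.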
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && key->hw_key_idx == STA_KEY_IDX_INVALID) { IWL_DEBUG_MAC80211(mvm, "skip invalid idx key programming during restart\n"); ret = 0; break; } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { struct ieee80211_key_seq seq; int tid, q; WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), GFP_KERNEL); if (!ptk_pn) { ret = -ENOMEM; break; } for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { ieee80211_get_key_rx_seq(key, tid, &seq); for (q = 0; q < mvm->trans->num_rx_queues; q++) memcpy(ptk_pn->q[q].pn[tid], seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); } rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); } /* in HW restart reuse the index, otherwise request a new one */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) key_offset = key->hw_key_idx; else key_offset = STA_KEY_IDX_INVALID; if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) mvmsta->pairwise_cipher = key->cipher; IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { IWL_WARN(mvm, "set key failed\n"); key->hw_key_idx = STA_KEY_IDX_INVALID; /* * can't add key for RX, but we don't need it * in the device for TX so still return 0, * unless we have new TX API where we cannot * put key material into the TX_CMD */ if (iwl_mvm_has_new_tx_api(mvm)) ret = -EOPNOTSUPP; else ret = 0; } break; case DISABLE_KEY: if (keyidx == 6 || keyidx == 7) RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], NULL); ret = -ENOENT; for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { if (mvmvif->ap_early_keys[i] == key) { mvmvif->ap_early_keys[i] = NULL; ret = 0; } } /* found in pending list - don't do anything else */ if (ret == 0) break; if (key->hw_key_idx == STA_KEY_IDX_INVALID) { ret = 0; break; } if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); if (ptk_pn) kfree_rcu(ptk_pn, rcu_head); } IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; default: ret = -EINVAL; } return ret; } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) return; iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); } static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_hs20_roc_res *resp; int 
resp_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm_time_event_data *te_data = data; if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) return true; if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); return true; } resp = (void *)pkt->data; IWL_DEBUG_TE(mvm, "Aux ROC: Received response from ucode: status=%d uid=%d\n", resp->status, resp->event_unique_id); te_data->uid = le32_to_cpu(resp->event_unique_id); IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", te_data->uid); spin_lock_bh(&mvm->time_event_lock); list_add_tail(&te_data->list, &mvm->aux_roc_te_list); spin_unlock_bh(&mvm->time_event_lock); return true; } #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, int duration) { int res; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; u32 dtim_interval = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; u32 req_dur, delay; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), }; struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, &aux_roc_req.channel_info); u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, iwl_mvm_phy_band_from_nl80211(channel->band), PHY_VHT_CHANNEL_MODE20, 0); /* Set the time and duration */ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and * then start the time event, this will potentially allow us to * remain off-channel for the max duration. * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. 
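* Illustrative numbers (not from the code): with beacon_int = 100 TU
* and dtim_period = 2, dtim_interval = 200 TU; 3 * 200 = 600 TU already
* exceeds AUX_ROC_MAX_DELAY (MSEC_TO_TU(600), roughly 586 TU), so the
* delay below would be capped at the maximum.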
*/ if (vif->bss_conf.assoc) { delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= req_dur) { req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; if (req_dur <= AUX_ROC_MIN_DURATION) req_dur = dtim_interval - AUX_ROC_MIN_SAFETY_BUFFER; } } tail->duration = cpu_to_le32(req_dur); tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums\n", channel->hw_value, req_dur); IWL_DEBUG_TE(mvm, "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", duration, delay, dtim_interval); /* Set the node address */ memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { spin_unlock_bh(&mvm->time_event_lock); return -EIO; } te_data->vif = vif; te_data->duration = duration; te_data->id = HOT_SPOT_CMD; spin_unlock_bh(&mvm->time_event_lock); /* * Use a notification wait, which really just processes the * command response and doesn't wait for anything, in order * to be able to process the response and get the UID inside * the RX path. Using CMD_WANT_SKB doesn't work because it * stores the buffer and then wakes up this thread, by which * time another notification (that the time event started) * might already be processed unsuccessfully. */ iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, time_event_response, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); iwl_remove_notification(&mvm->notif_wait, &wait_time_event); goto out_clear_te; } /* No need to wait for anything, so just pass 1 (0 isn't valid) */ res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); /* should never fail */ WARN_ON_ONCE(res); if (res) { out_clear_te: spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); } return res; } static int iwl_mvm_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; bool band_change_removal; int ret, i; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); /* * Flush the done work, just in case it's still pending, so that * the work it does can complete and we can accept new frames. 
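* (flush_work() only waits for a pending run of roc_done_wk to finish;
* it is a no-op when nothing is queued.)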
*/ flush_work(&mvm->roc_done_wk); mutex_lock(&mvm->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { u32 lmac_id; lmac_id = iwl_mvm_get_lmac_id(mvm->fw, channel->band); ret = iwl_mvm_add_aux_sta(mvm, lmac_id); if (WARN(ret, "Failed to allocate aux station")) goto out_unlock; } ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); goto out_unlock; } IWL_ERR(mvm, "hotspot not supported\n"); ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); ret = -EINVAL; goto out_unlock; } for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) continue; if (phy_ctxt->ref && channel == phy_ctxt->channel) { /* * Unbind the P2P_DEVICE from the current PHY context, * and if the PHY context is not used, remove it. */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the current PHY context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); goto schedule_time_event; } } /* Need to update the PHY context only if the ROC channel changed */ if (channel == mvmvif->phy_ctxt->channel) goto schedule_time_event; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); /* * Check if the remain-on-channel is on a different band and that * requires context removal, see iwl_mvm_phy_ctxt_changed(). If * so, we'll need to release and then re-configure here, since we * must not remove a PHY context that's part of a binding. */ band_change_removal = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && mvmvif->phy_ctxt->channel->band != chandef.chan->band; if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { /* * Change the PHY context configuration as it is currently * referenced only by the P2P Device MAC (and we can modify it) */ ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, &chandef, 1, 1); if (ret) goto out_unlock; } else { /* * The PHY context is shared with other MACs (or we're trying to * switch bands), so remove the P2P Device from the binding, * allocate a new PHY context and create a new binding. 
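* Note the ordering below: the new PHY context is configured before the
* P2P_DEVICE is re-bound, so the device is never bound to an
* unconfigured context.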
*/ phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out_unlock; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 1, 1); if (ret) { IWL_ERR(mvm, "Failed to change PHY context\n"); goto out_unlock; } /* Unbind the P2P_DEVICE from the current PHY context */ ret = iwl_mvm_binding_remove_vif(mvm, vif); if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); /* Bind the P2P_DEVICE to the newly allocated PHY context */ mvmvif->phy_ctxt = phy_ctxt; ret = iwl_mvm_binding_add_vif(mvm, vif); if (WARN(ret, "Failed binding P2P_DEVICE\n")) goto out_unlock; iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); } schedule_time_event: /* Schedule the time events */ ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); out_unlock: mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return ret; } static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); IWL_DEBUG_MAC80211(mvm, "enter\n"); mutex_lock(&mvm->mutex); iwl_mvm_stop_roc(mvm, vif); mutex_unlock(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "leave\n"); return 0; } struct iwl_mvm_ftm_responder_iter_data { bool responder; struct ieee80211_chanctx_conf *ctx; }; static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_ftm_responder_iter_data *data = _data; if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) data->responder = true; } static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm_ftm_responder_iter_data data = { .responder = false, .ctx = ctx, }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_ftm_responder_chanctx_iter, &data); return data.responder; } static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); if (!phy_ctxt) { ret = -ENOSPC; goto out; } ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { IWL_ERR(mvm, "Failed to add PHY context\n"); goto out; } iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); *phy_ctxt_id = phy_ctxt->id; out: return ret; } static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_add_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; lockdep_assert_held(&mvm->mutex); iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); } static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_remove_chanctx(mvm, ctx); mutex_unlock(&mvm->mutex); } static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | IEEE80211_CHANCTX_CHANGE_RX_CHAINS | IEEE80211_CHANCTX_CHANGE_RADAR | IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), "Cannot change PHY. Ref=%d, changed=0x%X\n", phy_ctxt->ref, changed)) return; mutex_lock(&mvm->mutex); /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { if (phy_ctxt->width == def->width) goto out_unlock; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) goto out_unlock; } iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); out_unlock: mutex_unlock(&mvm->mutex); } static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); mvmvif->phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: /* only needed if we're switching chanctx (i.e. during CSA) */ if (switching_chanctx) { mvmvif->ap_ibss_active = true; break; } fallthrough; case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow * (in bss_info_changed), similarly for IBSS. */ ret = 0; goto out; case NL80211_IFTYPE_STATION: mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ mvmvif->ps_disabled = true; break; default: ret = -EINVAL; goto out; } ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out; /* * Power state must be updated before quotas, * otherwise fw will complain. 
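 * (This is why iwl_mvm_power_update_mac() is called first below,
 * before any iwl_mvm_update_quotas() call.)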
*/ iwl_mvm_power_update_mac(mvm); /* Setting the quota at this stage is only required for monitor * interfaces. For the other types, the bss_info changed flow * will handle quota settings. */ if (vif->type == NL80211_IFTYPE_MONITOR) { mvmvif->monitor_active = true; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) goto out_remove_binding; ret = iwl_mvm_add_snif_sta(mvm, vif); if (ret) goto out_remove_binding; } /* Handle binding during CSA */ if (vif->type == NL80211_IFTYPE_AP) { iwl_mvm_update_quotas(mvm, false, NULL); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { mvmvif->csa_bcn_pending = true; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { u32 duration = 3 * vif->bss_conf.beacon_int; /* Protect the session to make sure we hear the first * beacon on the new channel. */ iwl_mvm_protect_session(mvm, vif, duration, duration, vif->bss_conf.beacon_int / 2, true); } iwl_mvm_update_quotas(mvm, false, NULL); } goto out; out_remove_binding: iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); out: if (ret) mvmvif->phy_ctxt = NULL; return ret; } static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); return ret; } static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx, bool switching_chanctx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; lockdep_assert_held(&mvm->mutex); iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); switch (vif->type) { case NL80211_IFTYPE_ADHOC: goto out; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ if (!switching_chanctx || !mvmvif->ap_ibss_active) goto out; mvmvif->csa_countdown = false; /* Set CS bit on all the stations */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); /* Save blocked iface, the timeout is set on the next beacon */ rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); mvmvif->ap_ibss_active = false; break; case NL80211_IFTYPE_STATION: if (!switching_chanctx) break; disabled_vif = vif; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; } iwl_mvm_update_quotas(mvm, false, disabled_vif); iwl_mvm_binding_remove_vif(mvm, vif); out: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && switching_chanctx) return; mvmvif->phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); if (ret) { IWL_ERR(mvm, 
"failed to add new_ctx during channel switch\n"); goto out_reassign; } ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_remove; } /* we don't support TDLS during DCM - can be caused by channel switch */ if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); goto out; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); out_reassign: if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); goto out_restart; } if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, struct ieee80211_vif_chanctx_switch *vifs) { int ret; mutex_lock(&mvm->mutex); __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); goto out_reassign; } goto out; out_reassign: if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } goto out; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); out: mutex_unlock(&mvm->mutex); return ret; } static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; /* we only support a single-vif right now */ if (n_vifs > 1) return -EOPNOTSUPP; switch (mode) { case CHANCTX_SWMODE_SWAP_CONTEXTS: ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); break; case CHANCTX_SWMODE_REASSIGN_VIF: ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return mvm->ibss_manager; } static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (!mvm_sta || !mvm_sta->vif) { IWL_ERR(mvm, "Station is not associated to a vif\n"); return -EINVAL; } return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); } #ifdef CONFIG_NL80211_TESTMODE static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, }; static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void *data, int len) { struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; int err; u32 noa_duration; err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, NULL); if (err) return err; if (!tb[IWL_MVM_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { case IWL_MVM_TM_CMD_SET_NOA: if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || !vif->bss_conf.enable_beacon || !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) return -EINVAL; noa_duration = 
nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); if (noa_duration >= vif->bss_conf.beacon_int) return -EINVAL; mvm->noa_duration = noa_duration; mvm->noa_vif = vif; return iwl_mvm_update_quotas(mvm, true, NULL); case IWL_MVM_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) return -EINVAL; if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) return iwl_mvm_enable_beacon_filter(mvm, vif, 0); return iwl_mvm_disable_beacon_filter(mvm, vif, 0); } return -EOPNOTSUPP; } static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int err; mutex_lock(&mvm->mutex); err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); mutex_unlock(&mvm->mutex); return err; } #endif static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { /* By implementing this operation, we prevent mac80211 from * starting its own channel switch timer, so that we can call * ieee80211_chswitch_done() ourselves at the right time * (which is when the absence time event starts). */ IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), "dummy channel switch op\n"); } static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; lockdep_assert_held(&mvm->mutex); if (chsw->delay) cmd.cs_delayed_bcn_count = DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); return iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd); } static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 apply_time; /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the * GO/AP arrives. If count <= 1, immediately schedule the * TE (this might result in some packet loss or connection * loss).
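 * Illustrative example (assumed values, not from the code): with
 * beacon_int = 100 TU and count = 5, the TE is scheduled
 * (100 * 4 - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024 usec after
 * device_timestamp (1 TU = 1024 usec), i.e.
 * IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT TU before the expected switch.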
*/ if (chsw->count <= 1) apply_time = 0; else apply_time = chsw->device_timestamp + ((vif->bss_conf.beacon_int * (chsw->count - 1) - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); if (mvmvif->bf_data.bf_enabled) { int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) return ret; } iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, apply_time); return 0; } #define IWL_MAX_CSA_BLOCK_TX 1500 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; mutex_lock(&mvm->mutex); mvmvif->csa_failed = false; IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); if (WARN_ONCE(csa_vif && csa_vif->csa_active, "Another CSA is already in progress")) { ret = -EBUSY; goto out_unlock; } /* we still didn't unblock tx. prevent new CS meanwhile */ if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex))) { ret = -EBUSY; goto out_unlock; } rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, "Previous CSA countdown didn't complete")) { ret = -EBUSY; goto out_unlock; } mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; break; case NL80211_IFTYPE_STATION: /* * In the new flow FW is in charge of timing the switch so there * is no need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) break; /* * We haven't configured the firmware to be associated yet since * we don't know the dtim period. In this case, the firmware can't * track the beacons. 
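 * Hence the -EBUSY below when we are not yet associated or the
 * DTIM period is still unknown.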
*/ if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { ret = -EBUSY; goto out_unlock; } if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, 0); if (chsw->block_tx) { /* * If the TX-block time is undetermined or too long, schedule * the CSA work so that we gracefully disconnect once the * maximum allowed quiet period has passed. */ if (!chsw->count || chsw->count * vif->bss_conf.beacon_int > IWL_MAX_CSA_BLOCK_TX) schedule_delayed_work(&mvmvif->csa_work, msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); } if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); if (ret) goto out_unlock; } else { iwl_mvm_schedule_client_csa(mvm, vif, chsw); } mvmvif->csa_count = chsw->count; mvmvif->csa_misbehave = false; break; default: break; } mvmvif->ps_disabled = true; ret = iwl_mvm_power_update_ps(mvm); if (ret) goto out_unlock; /* we won't be on this channel any longer */ iwl_mvm_teardown_tdls_peers(mvm); out_unlock: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_chan_switch_te_cmd cmd = { .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), .tsf = cpu_to_le32(chsw->timestamp), .cs_count = chsw->count, .cs_mode = chsw->block_tx, }; /* * In the new flow FW is in charge of timing the switch so there is no * need for all of this */ if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 0)) return; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { if (mvmvif->csa_misbehave) { /* Second time, give up on this AP */ iwl_mvm_abort_channel_switch(hw, vif); ieee80211_chswitch_done(vif, false); mvmvif->csa_misbehave = false; return; } mvmvif->csa_misbehave = true; } mvmvif->csa_count = chsw->count; mutex_lock(&mvm->mutex); if (mvmvif->csa_failed) goto out_unlock; WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); out_unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) { int i; if (!iwl_mvm_has_new_tx_api(mvm)) { if (drop) { mutex_lock(&mvm->mutex); iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues); mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } mutex_lock(&mvm->mutex); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { struct ieee80211_sta *sta; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; if (drop) iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); else iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } mutex_unlock(&mvm->mutex); } static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; int i; u32 msk = 0; if (!vif) { iwl_mvm_flush_no_vif(mvm, queues, drop); return; } if (vif->type != NL80211_IFTYPE_STATION) return; /* Make sure we're done with the
deferred traffic before flushing */ flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); /* flush the AP-station and all TDLS peers */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->vif != vif) continue; /* make sure only TDLS peers or the AP are flushed */ WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false)) IWL_ERR(mvm, "flush request fail\n"); } else { msk |= mvmsta->tfd_queue_msk; if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); } } mutex_unlock(&mvm->mutex); /* this can take a while, and we may need/want other operations * to succeed while doing this, so do it without the mutex held */ if (!drop && !iwl_mvm_has_new_tx_api(mvm)) iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; memset(survey, 0, sizeof(*survey)); /* only support global statistics right now */ if (idx != 0) return -ENOENT; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; mutex_lock(&mvm->mutex); if (iwl_mvm_firmware_running(mvm)) { ret = iwl_mvm_request_statistics(mvm, false); if (ret) goto out; } survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_SCAN; survey->time = mvm->accu_radio_stats.on_time_rf + mvm->radio_stats.on_time_rf; do_div(survey->time, USEC_PER_MSEC); survey->time_rx = mvm->accu_radio_stats.rx_time + mvm->radio_stats.rx_time; do_div(survey->time_rx, USEC_PER_MSEC); survey->time_tx = mvm->accu_radio_stats.tx_time + mvm->radio_stats.tx_time; do_div(survey->time_tx, USEC_PER_MSEC); survey->time_scan = mvm->accu_radio_stats.on_time_scan + mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); ret = 0; out: mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: rinfo->bw = RATE_INFO_BW_20; break; case RATE_MCS_CHAN_WIDTH_40: rinfo->bw = RATE_INFO_BW_40; break; case RATE_MCS_CHAN_WIDTH_80: rinfo->bw = RATE_INFO_BW_80; break; case RATE_MCS_CHAN_WIDTH_160: rinfo->bw = RATE_INFO_BW_160; break; } if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); /* add the offset needed to get to the legacy ofdm indices */ if (format == RATE_MCS_LEGACY_OFDM_MSK) rate += IWL_FIRST_OFDM_RATE; switch (rate) { case IWL_RATE_1M_INDEX: rinfo->legacy = 10; break; case IWL_RATE_2M_INDEX: rinfo->legacy = 20; break; case IWL_RATE_5M_INDEX: rinfo->legacy = 55; break; case IWL_RATE_11M_INDEX: rinfo->legacy = 110; break; case IWL_RATE_6M_INDEX: rinfo->legacy = 60; break; case IWL_RATE_9M_INDEX: rinfo->legacy = 90; break; case IWL_RATE_12M_INDEX: rinfo->legacy = 120; break; case IWL_RATE_18M_INDEX: rinfo->legacy = 180; break; case IWL_RATE_24M_INDEX: rinfo->legacy = 240; break; case IWL_RATE_36M_INDEX: rinfo->legacy = 360; break; case IWL_RATE_48M_INDEX: rinfo->legacy = 480; break; case IWL_RATE_54M_INDEX: rinfo->legacy = 540; } return; } rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; 
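/*
 * From here on the rate is HT/VHT/HE: extract the MCS index (HT
 * uses a combined stream+MCS index, VHT/HE a per-NSS one), map the
 * HE GI/LTF bits and the 106-tone RU flag, and set the SGI/HT/VHT
 * flags for the non-HE formats.
 */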
rinfo->mcs = format == RATE_MCS_HT_MSK ? RATE_HT_MCS_INDEX(rate_n_flags) : u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); if (format == RATE_MCS_HE_MSK) { u32 gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; if (rate_n_flags & RATE_MCS_HE_106T_MSK) { rinfo->bw = RATE_INFO_BW_HE_RU; rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; } switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { case RATE_MCS_HE_TYPE_SU: case RATE_MCS_HE_TYPE_EXT_SU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else if (gi_ltf == 3) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; break; case RATE_MCS_HE_TYPE_MU: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; else if (gi_ltf == 2) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; case RATE_MCS_HE_TYPE_TRIG: if (gi_ltf == 0 || gi_ltf == 1) rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; else rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; break; } if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; return; } if (rate_n_flags & RATE_MCS_SGI_MSK) rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; if (format == RATE_MCS_HT_MSK) { rinfo->flags |= RATE_INFO_FLAGS_MCS; } else if (format == RATE_MCS_VHT_MSK) { rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; } } static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->avg_energy) { sinfo->signal_avg = -(s8)mvmsta->avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (iwl_mvm_has_tlc_offload(mvm)) { struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; if (!vif->bss_conf.assoc) return; mutex_lock(&mvm->mutex); if (mvmvif->ap_sta_id != mvmsta->sta_id) goto unlock; if (iwl_mvm_request_statistics(mvm, false)) goto unlock; sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_mlme_event *mlme) { if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); return; } if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_DEASSOC, NULL); return; } } static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_cnt, _fmt...) 
\ do { \ if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ break; \ iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; if (iwl_trans_dbg_ini_valid(mvm->trans)) { iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MLME); if (!trig) return; trig_mlme = (void *)trig->data; if (event->u.mlme.data == ASSOC_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_assoc_denied, "DENIED ASSOC: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_assoc_timeout, "ASSOC TIMEOUT"); } else if (event->u.mlme.data == AUTH_EVENT) { if (event->u.mlme.status == MLME_DENIED) CHECK_MLME_TRIGGER(stop_auth_denied, "DENIED AUTH: reason %d", event->u.mlme.reason); else if (event->u.mlme.status == MLME_TIMEOUT) CHECK_MLME_TRIGGER(stop_auth_timeout, "AUTH TIMEOUT"); } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { CHECK_MLME_TRIGGER(stop_rx_deauth, "DEAUTH RX %d", event->u.mlme.reason); } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { CHECK_MLME_TRIGGER(stop_tx_deauth, "DEAUTH TX %d", event->u.mlme.reason); } #undef CHECK_MLME_TRIGGER } static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR received from %pM, tid %d, ssn %d", event->u.ba.sta->addr, event->u.ba.tid, event->u.ba.ssn); } static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_event *event) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); switch (event->type) { case MLME_EVENT: iwl_mvm_event_mlme_callback(mvm, vif, event); break; case BAR_RX_EVENT: iwl_mvm_event_bar_rx_callback(mvm, vif, event); break; case BA_FRAME_TIMEOUT: iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, event->u.ba.tid); break; default: break; } } void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, enum iwl_mvm_rxq_notif_type type, bool sync, const void *data, u32 size) { struct { struct iwl_rxq_sync_cmd cmd; struct iwl_mvm_internal_rxq_notif notif; } __packed cmd = { .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), .cmd.count = cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + size), .notif.type = type, .notif.sync = sync, }; struct iwl_host_cmd hcmd = { .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, .flags = sync ? 
0 : CMD_ASYNC, }; int ret; /* size must be a multiple of DWORD */ if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) return; if (!iwl_mvm_has_new_rx_api(mvm)) return; if (sync) { cmd.notif.cookie = mvm->queue_sync_cookie; mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; } if (sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, READ_ONCE(mvm->queue_sync_state) == 0 || iwl_mvm_is_radio_killed(mvm), HZ); WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), "queue sync: failed to sync, state is 0x%lx\n", mvm->queue_sync_state); } out: if (sync) { mvm->queue_sync_state = 0; mvm->queue_sync_cookie++; } } static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *stats) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->p2p || vif->type != NL80211_IFTYPE_AP || !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) return -EINVAL; mutex_lock(&mvm->mutex); *stats = mvm->ftm_resp_stats; mutex_unlock(&mvm->mutex); stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | BIT(NL80211_FTM_STATS_PARTIAL_NUM) | BIT(NL80211_FTM_STATS_FAILED_NUM) | BIT(NL80211_FTM_STATS_ASAP_NUM) | BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); return 0; } static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); ret = iwl_mvm_ftm_start(mvm, vif, request); mutex_unlock(&mvm->mutex); return ret; } static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); iwl_mvm_ftm_abort(mvm, request); mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) { u8 protocol = ip_hdr(skb)->protocol; if (!IS_ENABLED(CONFIG_INET)) return false; return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; } static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, struct sk_buff *head, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) return iwl_mvm_tx_csum_bz(mvm, head, true) == iwl_mvm_tx_csum_bz(mvm, skb, true); /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; if (!iwl_mvm_is_csum_supported(mvm)) return true; return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); } const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .get_antenna = iwl_mvm_op_get_antenna, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, .stop = iwl_mvm_mac_stop, .add_interface = iwl_mvm_mac_add_interface, .remove_interface = iwl_mvm_mac_remove_interface, .config = iwl_mvm_mac_config, 
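/*
 * The entries below wire mac80211's filtering, scan, station,
 * channel-context, CSA and PM callbacks to the MVM implementations
 * above (the suspend/resume handlers live in d3.c).
 */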
.prepare_multicast = iwl_mvm_prepare_multicast, .configure_filter = iwl_mvm_configure_filter, .config_iface_filter = iwl_mvm_config_iface_filter, .bss_info_changed = iwl_mvm_bss_info_changed, .hw_scan = iwl_mvm_mac_hw_scan, .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, .sta_state = iwl_mvm_mac_sta_state, .sta_notify = iwl_mvm_mac_sta_notify, .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, .sta_rc_update = iwl_mvm_sta_rc_update, .conf_tx = iwl_mvm_mac_conf_tx, .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, .sched_scan_start = iwl_mvm_mac_sched_scan_start, .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, .set_key = iwl_mvm_mac_set_key, .update_tkip_key = iwl_mvm_mac_update_tkip_key, .remain_on_channel = iwl_mvm_roc, .cancel_remain_on_channel = iwl_mvm_cancel_roc, .add_chanctx = iwl_mvm_add_chanctx, .remove_chanctx = iwl_mvm_remove_chanctx, .change_chanctx = iwl_mvm_change_chanctx, .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, .start_ap = iwl_mvm_start_ap_ibss, .stop_ap = iwl_mvm_stop_ap_ibss, .join_ibss = iwl_mvm_start_ap_ibss, .leave_ibss = iwl_mvm_stop_ap_ibss, .tx_last_beacon = iwl_mvm_tx_last_beacon, .set_tim = iwl_mvm_set_tim, .channel_switch = iwl_mvm_channel_switch, .pre_channel_switch = iwl_mvm_pre_channel_switch, .post_channel_switch = iwl_mvm_post_channel_switch, .abort_channel_switch = iwl_mvm_abort_channel_switch, .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, .tdls_channel_switch = iwl_mvm_tdls_channel_switch, .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, .event_callback = iwl_mvm_mac_event_callback, .sync_rx_queues = iwl_mvm_sync_rx_queues, CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) #ifdef CONFIG_PM_SLEEP /* look at d3.c */ .suspend = iwl_mvm_suspend, .resume = iwl_mvm_resume, .set_wakeup = iwl_mvm_set_wakeup, .set_rekey_data = iwl_mvm_set_rekey_data, #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = iwl_mvm_ipv6_addr_change, #endif .set_default_unicast_key = iwl_mvm_set_default_unicast_key, #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, .start_pmsr = iwl_mvm_start_pmsr, .abort_pmsr = iwl_mvm_abort_pmsr, .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS .sta_add_debugfs = iwl_mvm_sta_add_debugfs, #endif }; diff --git a/sys/contrib/dev/iwlwifi/mvm/rs-fw.c b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c index 9830d2663689..d8c3d7ff4f44 100644 --- a/sys/contrib/dev/iwlwifi/mvm/rs-fw.c +++ b/sys/contrib/dev/iwlwifi/mvm/rs-fw.c @@ -1,533 +1,533 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2022 Intel Corporation */ #include "rs.h" #include "fw-api.h" #include "sta.h" #include "iwl-op-mode.h" #include "mvm.h" static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) { - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: return IWL_TLC_MNG_CH_WIDTH_80MHZ; 
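/* narrower widths map below; unknown widths fall back to 20 MHz */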
case IEEE80211_STA_RX_BW_40: return IWL_TLC_MNG_CH_WIDTH_40MHZ; case IEEE80211_STA_RX_BW_20: default: return IWL_TLC_MNG_CH_WIDTH_20MHZ; } } static u8 rs_fw_set_active_chains(u8 chains) { u8 fw_chains = 0; if (chains & ANT_A) fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK; if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; return fw_chains; } static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; u8 supp = 0; if (he_cap->has_he) return 0; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); return supp; } static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; bool vht_ena = vht_cap->vht_supported; u16 flags = 0; /* get STBC flags */ if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) { if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; } if (mvm->cfg->ht_params->ldpc && ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; /* consider LDPC support in case of HE */ if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; if (sband->iftype_data && sband->iftype_data->he_cap.has_he && !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK && sband->iftype_data && sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; return flags; } static int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & (0x3 << (2 * (nss - 1))); rx_mcs >>= (2 * (nss - 1)); switch (rx_mcs) { case IEEE80211_VHT_MCS_SUPPORT_0_7: return IWL_TLC_MNG_HT_RATE_MCS7; case IEEE80211_VHT_MCS_SUPPORT_0_8: return IWL_TLC_MNG_HT_RATE_MCS8; case IEEE80211_VHT_MCS_SUPPORT_0_9: return IWL_TLC_MNG_HT_RATE_MCS9; default: WARN_ON_ONCE(1); break; } return 0; } static void rs_fw_vht_set_enabled_rates(const struct 
ieee80211_sta *sta, const struct ieee80211_sta_vht_cap *vht_cap, struct iwl_tlc_config_cmd_v4 *cmd) { u16 supp; int i, highest_mcs; - u8 max_nss = sta->rx_nss; + u8 max_nss = sta->deflink.rx_nss; struct ieee80211_vht_cap ieee_vht_cap = { .vht_cap_info = cpu_to_le32(vht_cap->cap), .supp_mcs = vht_cap->vht_mcs, }; /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) max_nss = 1; for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) { int nss = i + 1; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss); if (!highest_mcs) continue; supp = BIT(highest_mcs + 1) - 1; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); /* * Check if VHT extended NSS indicates that the bandwidth/NSS * configuration is supported - only for MCS 0 since we already * decoded the MCS bits anyway ourselves. */ - if (sta->bandwidth == IEEE80211_STA_RX_BW_160 && + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160 && ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80]; } } static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) { switch (mcs) { case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1; case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1; case IEEE80211_HE_MCS_NOT_SUPPORTED: return 0; } WARN(1, "invalid HE MCS %d\n", mcs); return 0; } static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd_v4 *cmd) { - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); u16 tx_mcs_80 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80); u16 tx_mcs_160 = le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); int i; - u8 nss = sta->rx_nss; + u8 nss = sta->deflink.rx_nss; /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) nss = 1; for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3; u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3; /* If one side doesn't support - mark both as not supporting */ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED || _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) { _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct 
ieee80211_supported_band *sband, struct iwl_tlc_config_cmd_v4 *cmd) { int i; u16 supp = 0; unsigned long tmp; /* must be unsigned long for for_each_set_bit */ - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; /* non HT rates */ - tmp = sta->supp_rates[sband->band]; + tmp = sta->deflink.supp_rates[sband->band]; for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; /* HT/VHT rates */ if (he_cap->has_he) { cmd->mode = IWL_TLC_MNG_MODE_HE; rs_fw_he_set_enabled_rates(sta, sband, cmd); } else if (vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = 0; else cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct iwl_lq_sta_rs_fw *lq_sta; u32 flags; rcu_read_lock(); notif = (void *)pkt->data; sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (IS_ERR_OR_NULL(sta)) { /* can happen in remove station flow where mvm removed internally * the station before removing from FW */ IWL_DEBUG_RATE(mvm, "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n", notif->sta_id); goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", notif->sta_id); goto out; } flags = le32_to_cpu(notif->flags); lq_sta = &mvmsta->lq_sta.rs_fw; if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, 0) < 3) { rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate), le32_to_cpu(notif->rate)); IWL_DEBUG_RATE(mvm, "Got rate in old format. Rate: %s. 
Converting.\n", pretty_rate); lq_sta->last_rate_n_flags = iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); } else { lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); } rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); } if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) { u16 size = le32_to_cpu(notif->amsdu_size); int i; if (sta->max_amsdu_len < size) { /* * In debug builds sta->max_amsdu_len may have been lowered * below size via debugfs, so also check orig_amsdu_len, * which holds the original value from before debugfs * changed it */ WARN_ON(mvmsta->orig_amsdu_len < size); goto out; } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { if (mvmsta->amsdu_enabled & BIT(i)) sta->max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i); else /* * Not so elegant, but this will effectively * prevent AMSDU on this TID */ sta->max_tid_amsdu_len[i] = 1; } IWL_DEBUG_RATE(mvm, "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", le32_to_cpu(notif->amsdu_size), size, mvmsta->amsdu_enabled); } out: rcu_read_unlock(); } u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - switch (le16_get_bits(sta->he_6ghz_capa.capa, + switch (le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: return IEEE80211_MAX_MPDU_LEN_VHT_7991; default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } } else if (vht_cap->vht_supported) { switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: return IEEE80211_MAX_MPDU_LEN_VHT_7991; default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } } else if (ht_cap->ht_supported) { if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU) /* * aggregation is offloaded, so we need to assume that * aggregation is enabled and the max MPDU in an A-MPDU * is 4095 (spec 802.11-2016 9.3.2.1) */ return IEEE80211_MAX_MPDU_LEN_HT_BA; else return IEEE80211_MAX_MPDU_LEN_HT_3839; } /* in legacy mode no A-MSDU is enabled, so return zero */ return 0; } void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band, bool update) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); struct iwl_tlc_config_cmd_v4 cfg_cmd = { .sta_id = mvmsta->sta_id, .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ?
cpu_to_le16(max_amsdu_len) : 0, }; int ret; int cmd_ver; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif rs_fw_set_supp_rates(sta, sband, &cfg_cmd); /* * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ sta->max_amsdu_len = max_amsdu_len; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD), 0); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n", cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n", cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n", cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n", cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n", cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]); if (cmd_ver == 4) { ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd), &cfg_cmd); } else if (cmd_ver < 4) { struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = { .sta_id = cfg_cmd.sta_id, .max_ch_width = cfg_cmd.max_ch_width, .mode = cfg_cmd.mode, .chains = cfg_cmd.chains, .amsdu = !!cfg_cmd.max_mpdu_len, .flags = cfg_cmd.flags, .non_ht_rates = cfg_cmd.non_ht_rates, .ht_rates[0][0] = cfg_cmd.ht_rates[0][0], .ht_rates[0][1] = cfg_cmd.ht_rates[0][1], .ht_rates[1][0] = cfg_cmd.ht_rates[1][0], .ht_rates[1][1] = cfg_cmd.ht_rates[1][1], .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp, .max_mpdu_len = cfg_cmd.max_mpdu_len, }; u16 cmd_size = sizeof(cfg_cmd_v3); /* In old versions of the API the struct is 4 bytes smaller */ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD), 0) < 3) cmd_size -= 4; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, &cfg_cmd_v3); } else { ret = -EINVAL; } if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable) { /* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */ IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n"); return 0; } void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) { struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); lq_sta->pers.drv = mvm; lq_sta->pers.sta_id = mvmsta->sta_id; lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; lq_sta->last_rate_n_flags = 0; #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->pers.dbg_fixed_rate = 0; #endif } diff --git a/sys/contrib/dev/iwlwifi/mvm/sf.c b/sys/contrib/dev/iwlwifi/mvm/sf.c index 655da8856c75..693752d8f65b 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sf.c +++ b/sys/contrib/dev/iwlwifi/mvm/sf.c @@ -1,279 +1,279 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2013-2014, 2018-2019 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #include "mvm.h" /* For counting bound interfaces */ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; u8 sta_vif_ap_sta_id; enum 
iwl_sf_state sta_vif_state; u32 num_active_macs; }; /* * Count bound interfaces which are not p2p, besides data->ignore_vif. * data->station_vif will point to one bound vif of type station, if exists. */ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_active_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif == data->ignore_vif || !mvmvif->phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE) return; data->num_active_macs++; if (vif->type == NL80211_IFTYPE_STATION) { data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; if (vif->bss_conf.assoc) data->sta_vif_state = SF_FULL_ON; else data->sta_vif_state = SF_INIT_OFF; } } /* * Aging and idle timeouts for the different possible scenarios * in default configuration */ static const __le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_BA_AGING_TIMER_DEF), cpu_to_le32(SF_BA_IDLE_TIMER_DEF) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) }, }; /* * Aging and idle timeouts for the different possible scenarios * in single BSS MAC configuration. */ static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { { cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER), cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER), cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER) }, { cpu_to_le32(SF_MCAST_AGING_TIMER), cpu_to_le32(SF_MCAST_IDLE_TIMER) }, { cpu_to_le32(SF_BA_AGING_TIMER), cpu_to_le32(SF_BA_IDLE_TIMER) }, { cpu_to_le32(SF_TX_RE_AGING_TIMER), cpu_to_le32(SF_TX_RE_IDLE_TIMER) }, }; static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, struct iwl_sf_cfg_cmd *sf_cmd, struct ieee80211_sta *sta) { int i, j, watermark; sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); /* * If we are in association flow - check antenna configuration * capabilities of the AP station, and choose the watermark accordingly. */ if (sta) { - if (sta->ht_cap.ht_supported || - sta->vht_cap.vht_supported || - sta->he_cap.has_he) { - switch (sta->rx_nss) { + if (sta->deflink.ht_cap.ht_supported || + sta->deflink.vht_cap.vht_supported || + sta->deflink.he_cap.has_he) { + switch (sta->deflink.rx_nss) { case 1: watermark = SF_W_MARK_SISO; break; case 2: watermark = SF_W_MARK_MIMO2; break; default: watermark = SF_W_MARK_MIMO3; break; } } else { watermark = SF_W_MARK_LEGACY; } /* default watermark value for unassociated mode. 
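 * (SF_W_MARK_MIMO2 is used below until an AP station is known.)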
*/ } else { watermark = SF_W_MARK_MIMO2; } sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark); for (i = 0; i < SF_NUM_SCENARIO; i++) { for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) { sf_cmd->long_delay_timeouts[i][j] = cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); } } if (sta) { BUILD_BUG_ON(sizeof(sf_full_timeout) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, sizeof(sf_full_timeout)); } else { BUILD_BUG_ON(sizeof(sf_full_timeout_def) != sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def)); } } static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; if (mvm->cfg->disable_dummy_notification) sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); /* * If an associated AP sta changed its antenna configuration, the state * will remain FULL_ON but SF parameters need to be reconsidered. */ if (new_state != SF_FULL_ON && mvm->sf_state == new_state) return 0; switch (new_state) { case SF_UNINIT: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: if (sta_id == IWL_MVM_INVALID_STA) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; } rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); rcu_read_unlock(); return -EINVAL; } iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); rcu_read_unlock(); break; case SF_INIT_OFF: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; default: WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n", new_state); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC, sizeof(sf_cmd), &sf_cmd); if (!ret) mvm->sf_state = new_state; return ret; } /* * Update Smart fifo: * Count bound interfaces that are not to be removed, ignoring p2p devices, * and set new state accordingly. */ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; /* * Ignore the call if we are in HW Restart flow, or if the handled * vif is a p2p device. 
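 * (The bound-interface iterator above also skips P2P device vifs,
 * so they never influence the Smart Fifo state.)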
*/ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE)) return 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bound_iface_iterator, &data); /* If changed_vif exists and is not to be removed, add to the count */ if (changed_vif && !remove_vif) data.num_active_macs++; switch (data.num_active_macs) { case 0: /* If there are no active macs - change state to SF_INIT_OFF */ new_state = SF_INIT_OFF; break; case 1: if (remove_vif) { /* The one active mac left is of type station * and we filled the relevant data during iteration */ new_state = data.sta_vif_state; sta_id = data.sta_vif_ap_sta_id; } else { if (WARN_ON(!changed_vif)) return -EINVAL; if (changed_vif->type != NL80211_IFTYPE_STATION) { new_state = SF_UNINIT; } else if (changed_vif->bss_conf.assoc && changed_vif->bss_conf.dtim_period) { mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); sta_id = mvmvif->ap_sta_id; new_state = SF_FULL_ON; } else { new_state = SF_INIT_OFF; } } break; default: /* If there are multiple active macs - change to SF_UNINIT */ new_state = SF_UNINIT; } return iwl_mvm_sf_config(mvm, sta_id, new_state); } diff --git a/sys/contrib/dev/iwlwifi/mvm/sta.c b/sys/contrib/dev/iwlwifi/mvm/sta.c index d843d3fff346..cf3d0922cf83 100644 --- a/sys/contrib/dev/iwlwifi/mvm/sta.c +++ b/sys/contrib/dev/iwlwifi/mvm/sta.c @@ -1,4176 +1,4175 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #if defined(__FreeBSD__) #include #endif #include "mvm.h" #include "sta.h" #include "rs.h" /* * New version of ADD_STA_sta command added new fields at the end of the * structure, so sending the size of the relevant API's structure is enough to * support both API versions. */ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) { if (iwl_mvm_has_new_rx_api(mvm) || fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return sizeof(struct iwl_mvm_add_sta_cmd); else return sizeof(struct iwl_mvm_add_sta_cmd_v7); } static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { int sta_id; u32 reserved_ids = 0; BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32); WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); lockdep_assert_held(&mvm->mutex); /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ if (iftype != NL80211_IFTYPE_STATION) reserved_ids = BIT(0); /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) { if (BIT(sta_id) & reserved_ids) continue; if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex))) return sta_id; } return IWL_MVM_INVALID_STA; } /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { .sta_id = mvm_sta->sta_id, .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .add_modify = update ? 
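/* add_modify: 1 selects "modify existing station", 0 selects "add new station" */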
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | STA_FLG_MIMO_EN_MSK | STA_FLG_RTS_MIMO_PROT), .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), }; int ret; u32 status; u32 agg_size = 0, mpdu_dens = 0; if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) add_sta_cmd.station_type = mvm_sta->sta_type; if (!update || (flags & STA_MODIFY_QUEUES)) { memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); if (!iwl_mvm_has_new_tx_api(mvm)) { add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); if (flags & STA_MODIFY_QUEUES) add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; } else { WARN_ON(flags & STA_MODIFY_QUEUES); } } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); fallthrough; case IEEE80211_STA_RX_BW_80: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); fallthrough; case IEEE80211_STA_RX_BW_40: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); fallthrough; case IEEE80211_STA_RX_BW_20: - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } - switch (sta->rx_nss) { + switch (sta->deflink.rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case 2: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); break; case 3 ... 8: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); break; } switch (sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); break; case IEEE80211_SMPS_STATIC: /* override NSS */ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; case IEEE80211_SMPS_DYNAMIC: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); break; case IEEE80211_SMPS_OFF: /* nothing */ break; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = sta->ht_cap.ampdu_density; + mpdu_dens = sta->deflink.ht_cap.ampdu_density; } if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa, + mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); - agg_size = le16_get_bits(sta->he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - } else - if (sta->vht_cap.vht_supported) { - agg_size = sta->vht_cap.cap & + agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (sta->deflink.vht_cap.vht_supported) { + agg_size = sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; agg_size >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - } else if (sta->ht_cap.ht_supported) { - agg_size = sta->ht_cap.ampdu_factor; + } else if (sta->deflink.ht_cap.ht_supported) { + agg_size = sta->deflink.ht_cap.ampdu_factor; } /* D6.0 10.12.2 A-MPDU length limit rules * A STA indicates the maximum length of the A-MPDU preEOF padding * that it can receive in an HE PPDU in the Maximum A-MPDU Length * Exponent field in its HT Capabilities, VHT Capabilities, * and HE 6 GHz Band Capabilities elements (if present) and the * Maximum AMPDU Length 
Exponent Extension field in its HE * Capabilities element */ - if (sta->he_cap.has_he) - agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3], + if (sta->deflink.he_cap.has_he) + agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3], IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); /* Limit to max A-MPDU supported by FW */ if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT)) agg_size = (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) add_sta_cmd.uapsd_acs |= BIT(AC_BK); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) add_sta_cmd.uapsd_acs |= BIT(AC_BE); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) add_sta_cmd.uapsd_acs |= BIT(AC_VI); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) add_sta_cmd.uapsd_acs |= BIT(AC_VO); add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; default: ret = -EIO; IWL_ERR(mvm, "ADD_STA failed\n"); break; } return ret; } static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) { struct iwl_mvm_baid_data *data = from_timer(data, t, session_timer); struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr; struct iwl_mvm_baid_data *ba_data; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; unsigned long timeout; rcu_read_lock(); ba_data = rcu_dereference(*rcu_ptr); if (WARN_ON(!ba_data)) goto unlock; if (!ba_data->timeout) goto unlock; timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); if (time_is_after_jiffies(timeout)) { mod_timer(&ba_data->session_timer, timeout); goto unlock; } /* Timer expired */ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); /* * sta should be valid unless the following happens: * The firmware asserts, which triggers a reconfig flow, but * the reconfig fails before we set the pointer to sta into * the fw_id_to_mac_id pointer table. Mac80211 can't stop * A-MPDU and hence the timer continues to run. Then, the * timer expires and sta is NULL.
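* Bail out below in that case, since there is no station to notify.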
*/ if (!sta) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); ieee80211_rx_ba_timer_expired(mvm_sta->vif, sta->addr, ba_data->tid); unlock: rcu_read_unlock(); } /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, bool remove_queue) { struct iwl_mvm_add_sta_cmd cmd = {}; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u32 status; u8 sta_id; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return -EINVAL; } mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_disable_agg |= disable_agg_tids; cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_QUEUES; if (disable_agg_tids) cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; if (remove_queue) cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); rcu_read_unlock(); /* Notify FW of queue removal from the STA queues */ status = ADD_STA_SUCCESS; return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u16 *queueptr, u8 tid) { int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) { if (mvm->sta_remove_requires_queue_remove) { u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); struct iwl_scd_queue_cfg_cmd remove_cmd = { .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), .u.remove.queue = cpu_to_le32(queue), }; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(remove_cmd), &remove_cmd); } else { ret = 0; } iwl_trans_txq_free(mvm->trans, queue); *queueptr = IWL_MVM_INVALID_QUEUE; return ret; } if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) return 0; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); cmd.action = mvm->queue_info[queue].tid_bitmap ? 
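/* keep the queue enabled as long as other TIDs still map to it */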
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, "Disabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) return 0; cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ WARN(mvm->queue_info[queue].tid_bitmap, "TXQ #%d info out-of-sync - tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", queue, ret); return ret; } static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return -EINVAL; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (mvmsta->tid_data[tid].state == IWL_AGG_ON) agg_tids |= BIT(tid); } spin_unlock_bh(&mvmsta->lock); return agg_tids; } /* * Remove a queue from a station's resources. * Note that this only marks as free. It DOESN'T delete a BA agreement, and * doesn't disable the queue */ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; unsigned long tid_bitmap; unsigned long disable_agg_tids = 0; u8 sta_id; int tid; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return 0; } mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ spin_unlock_bh(&mvmsta->lock); rcu_read_unlock(); /* * The TX path may have been using this TXQ_ID from the tid_data, * so make sure it's no longer running so that we can safely reuse * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE * above, but nothing guarantees we've stopped using them. 
Thus, * without this, we could get to iwl_mvm_disable_txq() and remove * the queue while still sending frames to it. */ synchronize_net(); return disable_agg_tids; } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; u16 queue_tmp = queue; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; same_sta = sta_id == new_sta_id; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (WARN_ON(!mvmsta)) return -EINVAL; disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); /* Disable the queue */ if (disable_agg_tids) iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", queue, ret); return ret; } /* If TXQ is allocated to another STA, update removal in FW */ if (!same_sta) iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true); return 0; } static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, unsigned long tfd_queue_mask, u8 ac) { int queue = 0; u8 ac_to_queue[IEEE80211_NUM_ACS]; int i; /* * This protects us against grabbing a queue that's being reconfigured * by the inactivity checker. */ lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); /* See what ACs the existing queues for this STA have */ for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { /* Only DATA queues can be shared */ if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) continue; ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; } /* * The queue to share is chosen only from DATA queues as follows (in * descending priority): * 1. An AC_BE queue * 2. Same AC queue * 3. Highest AC queue that is lower than new AC * 4. 
Any existing AC (there always is at least 1 DATA queue) */ /* Priority 1: An AC_BE queue */ if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BE]; /* Priority 2: Same AC queue */ else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[ac]; /* Priority 3a: If new AC is VO and VI exists - use VI */ else if (ac == IEEE80211_AC_VO && ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 3b: No BE so only AC less than the new one is BK */ else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_BK]; /* Priority 4a: No BE nor BK - use VI if exists */ else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VI]; /* Priority 4b: No BE, BK nor VI - use VO if exists */ else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) queue = ac_to_queue[IEEE80211_AC_VO]; /* Make sure queue found (or not) is legal */ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) && !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) { IWL_ERR(mvm, "No DATA queues available to share\n"); return -ENOSPC; } return queue; } /* Re-configure the SCD for a queue that has already been configured */ static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = frame_limit, .sta_id = sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = fifo, .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), .tid = tid, }; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; if (WARN(mvm->queue_info[queue].tid_bitmap == 0, "Trying to reconfig unallocated queue %d\n", queue)) return -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", queue, fifo, ret); return ret; } /* * If a given queue has a higher AC than the TID stream that is being compared * to, the queue needs to be redirected to the lower AC. This function does that * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. */ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, int ac, int ssn, unsigned int wdg_timeout, bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; /* * If the AC is lower than current one - FIFO needs to be redirected to * the lowest one of the streams in the queue. Check if this is needed * here. * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with * value 3 and VO with value 0, so to check if ac X is lower than ac Y * we need to check if the numerical value of X is LARGER than of Y. 
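* (In enum ieee80211_ac_numbers, IEEE80211_AC_VO is 0 and IEEE80211_AC_BK is 3.)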
*/ if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue); return 0; } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ txq->stopped = true; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", queue); ret = -EIO; goto out; } /* Before redirecting the queue we need to de-activate it */ iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, ret); /* Make sure the SCD wrptr is correctly set before reconfiguring */ iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); /* Update the TID "owner" of the queue */ mvm->queue_info[queue].txq_tid = tid; /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ /* Redirect to lower AC */ iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); /* Update AC marking of the queue */ mvm->queue_info[queue].mac80211_ac = ac; /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: /* Continue using the queue */ txq->stopped = false; return ret; } static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) { int i; lockdep_assert_held(&mvm->mutex); if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues, "max queue %d >= num_of_queues (%d)", maxq, mvm->trans->trans_cfg->base_params->num_of_queues)) maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1; /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -ENOSPC; /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].tid_bitmap == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; return -ENOSPC; } static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { int queue, size; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } else { struct ieee80211_sta *sta; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* this queue isn't used for traffic (cab_queue) */ if (IS_ERR_OR_NULL(sta)) { size = IWL_MGMT_QUEUE_SIZE; - } else if (sta->he_cap.has_he) { + } else if (sta->deflink.he_cap.has_he) { /* support for 256 ba size */ size = IWL_DEFAULT_QUEUE_SIZE_HE; } else { size = IWL_DEFAULT_QUEUE_SIZE; } rcu_read_unlock(); } /* take the min with bc tbl entries allowed */ size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); /* size needs to be power of 2 values for calculating read/write pointers */ size = rounddown_pow_of_two(size); do { queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), tid, size, timeout); if (queue < 0) IWL_DEBUG_TX_QUEUES(mvm, "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", size, sta_id, tid, queue); size /= 2; } 
while (queue < 0 && size >= 16); if (queue < 0) return queue; IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); return queue; } static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; lockdep_assert_held(&mvm->mutex); IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid); queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); if (queue < 0) return queue; mvmtxq->txq_id = queue; mvm->tvqm_info[queue].txq_tid = tid; mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; spin_unlock_bh(&mvmsta->lock); return 0; } static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u8 sta_id, u8 tid) { bool enable_queue = true; /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid); return false; } /* Update mappings and refcounts */ if (mvm->queue_info[queue].tid_bitmap) enable_queue = false; mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; if (enable_queue) { if (tid != IWL_MAX_TID_COUNT) mvm->queue_info[queue].mac80211_ac = tid_to_mac80211_ac[tid]; else mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; mvm->queue_info[queue].txq_tid = tid; } if (sta) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); mvmtxq->txq_id = queue; } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d tids=0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_ENABLE_QUEUE, .window = cfg->frame_limit, .sta_id = cfg->sta_id, .ssn = cpu_to_le16(ssn), .tx_fifo = cfg->fifo, .aggregate = cfg->aggregate, .tid = cfg->tid, }; bool inc_ssn; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Send the enabling command if we need to */ if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); if (inc_ssn) le16_add_cpu(&cmd.ssn, 1); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); return inc_ssn; } static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_UPDATE_QUEUE_TID, }; int tid; unsigned long tid_bitmap; int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; tid_bitmap = mvm->queue_info[queue].tid_bitmap; if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) return; /* Find any TID for queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); cmd.tid = tid; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); if (ret) { IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n", queue, 
ret); return; } mvm->queue_info[queue].txq_tid = tid; IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid); } static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid = -1; unsigned long tid_bitmap; unsigned int wdg_timeout; int ssn; int ret = true; /* queue sharing is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; lockdep_assert_held(&mvm->mutex); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* Find TID for queue, and make sure it is the only one on the queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); if (tid_bitmap != BIT(tid)) { IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n", queue, tid_bitmap); return; } IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue, tid); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) return; mvmsta = iwl_mvm_sta_from_mac80211(sta); wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); ret = iwl_mvm_redirect_queue(mvm, queue, tid, tid_to_mac80211_ac[tid], ssn, wdg_timeout, true, iwl_mvm_txq_from_tid(sta, tid)); if (ret) { IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; } /* If aggs should be turned back on - do it */ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) { struct iwl_mvm_add_sta_cmd cmd = {0}; mvmsta->tid_disable_agg &= ~BIT(tid); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (!ret) { IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d is now aggregated again\n", queue); /* Mark queue internally as aggregating again */ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false); } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; } /* * Remove inactive TIDs of a given queue. * If all queue TIDs are inactive - mark the queue as inactive * If only some of the queue TIDs are inactive - unmap them from the queue * * Returns %true if all TIDs were removed and the queue could be reused. */ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int queue, unsigned long tid_bitmap, unsigned long *unshare_queues, unsigned long *changetid_queues) { int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { /* If some TFDs are still queued - don't mark TID as inactive */ if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) tid_bitmap &= ~BIT(tid); /* Don't mark as inactive any TID that has an active BA */ if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) tid_bitmap &= ~BIT(tid); } /* If all TIDs in the queue are inactive - return it can be reused */ if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); return true; } /* * If we are here, this is a shared queue and not all TIDs timed-out. * Remove the ones that did.
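* Queues whose owner TID is removed are flagged in *changetid_queues so the firmware can be told about the new owner afterwards.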
*/ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { u16 q_tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); q_tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* * We need to take into account a situation in which a TXQ was * allocated to TID x, and then turned shared by adding TIDs y * and z. If TID x becomes inactive and is removed from the TXQ, * ownership must be given to one of the remaining TIDs. * This is mainly because if TID x continues - a new queue can't * be allocated for it as long as it is an owner of another TXQ. * * Mark this queue in the right bitmap, we'll send the command * to the firmware later. */ if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) set_bit(queue, changetid_queues); IWL_DEBUG_TX_QUEUES(mvm, "Removing inactive TID %d from shared Q:%d\n", tid, queue); } IWL_DEBUG_TX_QUEUES(mvm, "TXQ #%d left with tid bitmap 0x%x\n", queue, mvm->queue_info[queue].tid_bitmap); /* * There may be different TIDs with the same mac queues, so make * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", queue); set_bit(queue, unshare_queues); } return false; } /* * Check for inactivity - this includes checking if any queue * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. * * Returns the queue number, or -ENOSPC. */ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) { unsigned long now = jiffies; unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return -ENOSPC; rcu_read_lock(); /* we skip the CMD queue below by starting at 1 */ BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; u8 sta_id; int tid; unsigned long inactive_tid_bitmap = 0; unsigned long queue_tid_bitmap; queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; if (!queue_tid_bitmap) continue; /* If TXQ isn't in active use anyway - nothing to do here... */ if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) continue; /* Check to see if there are inactive TIDs on this queue */ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_after(mvm->queue_info[i].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) continue; inactive_tid_bitmap |= BIT(tid); } /* If all TIDs are active - finish check on this queue */ if (!inactive_tid_bitmap) continue; /* * If we are here - the queue hadn't been served recently and is * in use */ sta_id = mvm->queue_info[i].ra_sta_id; sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * If the STA doesn't exist anymore, it isn't an error. It could * be that it was removed since getting the queues, and in this * case it should've inactivated its queues anyway. 
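* Just skip it and keep checking the remaining queues.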
*/ if (IS_ERR_OR_NULL(sta)) continue; mvmsta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvmsta->lock); ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues, &changetid_queues); if (ret && free_queue < 0) { queue_owner = sta; free_queue = i; } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } /* Reconfigure queues requiring reconfiguration */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) iwl_mvm_unshare_queue(mvm, i); for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); rcu_read_unlock(); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) return ret; } return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); int queue = -1; u16 queue_tmp; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; bool shared_queue = false, inc_ssn; int ssn; unsigned long tfd_queue_mask; int ret; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); if (tid == IWL_MAX_TID_COUNT) { queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", queue); /* If no such queue is found, we'll use a DATA queue instead */ } if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && (mvm->queue_info[mvmsta->reserved_queue].status == IWL_MVM_QUEUE_RESERVED)) { queue = mvmsta->reserved_queue; mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); } if (queue < 0) queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try harder - perhaps kill an inactive queue */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); } /* No free queue - we'll have to share */ if (queue <= 0) { queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); if (queue > 0) { shared_queue = true; mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; } } /* * Mark TXQ as ready, even though it hasn't been fully configured yet, * to make sure no one else takes it. * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; /* This shouldn't happen - out of queues */ if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", tid, cfg.sta_id); return queue; } /* * Actual en/disablement of aggregations is through the ADD_STA HCMD, * but for configuring the SCD to send A-MPDUs we need to mark the queue * as aggregatable.
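* (cfg.aggregate below is true for all DATA queues and for the BSS client queue.)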
* Mark all DATA queues as allowing to be aggregated at some point */ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, mvmsta->sta_id, tid); if (shared_queue) { /* Disable any open aggs on this queue */ disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); if (disable_agg_tids) { IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", queue); iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); } } inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared * Note this has to be done after queue enablement because enablement * can also set this value, and there is no indication there to shared * queues */ if (shared_queue) iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); spin_lock_bh(&mvmsta->lock); /* * This looks racy, but it is not. We have only one packet for * this ra/tid in our Tx path since we stop the Qdisc when we * need to allocate a new TFD queue. */ if (inc_ssn) { mvmsta->tid_data[tid].seq_number += 0x10; ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; } mvmsta->tid_data[tid].txq_id = queue; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; if (mvmsta->reserved_queue == queue) mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); if (!shared_queue) { ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); if (ret) goto out_err; /* If we need to re-enable aggregations... */ if (queue_state == IWL_AGG_ON) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) goto out_err; } } else { /* Redirect queue, if needed */ ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, wdg_timeout, false, iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } return 0; out_err: queue_tmp = queue; iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); return ret; } void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; struct ieee80211_txq *txq; u8 tid; mvmtxq = list_first_entry(&mvm->add_stream_txqs, struct iwl_mvm_txq, list); txq = container_of((void *)mvmtxq, struct ieee80211_txq, drv_priv); tid = txq->tid; if (tid == IEEE80211_NUM_TIDS) tid = IWL_MAX_TID_COUNT; /* * We can't really do much here, but if this fails we can't * transmit anyway - so just don't transmit the frame etc. * and let them back up ... we've tried our best to allocate * a queue in the function itself. 
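* Either way the TXQ is removed from the pending list below.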
*/ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { list_del_init(&mvmtxq->list); continue; } list_del_init(&mvmtxq->list); local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } mutex_unlock(&mvm->mutex); } static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_iftype vif_type) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; /* queue reserving is disabled on new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return 0; /* run the general cleanup/unsharing of queues */ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try again - this time kick out a queue if needed */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", queue, mvmsta->sta_id); return 0; } /* * In DQA mode, after a HW restart the queues should be allocated as before, in * order to avoid race conditions when there are shared queues. This function * does the re-mapping and queue allocation. * * Note that re-enabling aggregations isn't done in this function. */ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->sta_id, .frame_limit = IWL_FRAME_LIMIT, }; /* Make sure reserved queue is still marked as such (if allocated) */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) mvm->queue_info[mvm_sta->reserved_queue].status = IWL_MVM_QUEUE_RESERVED; for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ac = tid_to_mac80211_ac[i]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i); txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, i, wdg); /* * on failures, just set it to IWL_MVM_INVALID_QUEUE * to try again later, we have no other good way of * failing here */ if (txq_id < 0) txq_id = IWL_MVM_INVALID_QUEUE; tid_data->txq_id = txq_id; /* * Since we don't set the seq number after reset, and HW * sets it now, FW reset will cause the seq num to start * at 0 again, so driver will need to update it * internally as well, so it keeps in sync with real val */ tid_data->seq_number = 0; } else { u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i, txq_id); iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } } static int 
iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, const u8 *addr, u16 mac_id, u16 color) { struct iwl_mvm_add_sta_cmd cmd; int ret; u32 status = ADD_STA_SUCCESS; lockdep_assert_held(&mvm->mutex); memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && sta->type == IWL_STA_AUX_ACTIVITY) cmd.mac_id_n_color = cpu_to_le32(mac_id); else cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)); if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) cmd.station_type = sta->type; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(0xffff); if (addr) memcpy(cmd.addr, addr, ETH_ALEN); ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; default: ret = -EIO; IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", status); break; } return ret; } int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; bool sta_update = false; unsigned int sta_flags = 0; lockdep_assert_held(&mvm->mutex); if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif)); else sta_id = mvm_sta->sta_id; if (sta_id == IWL_MVM_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); /* if this is a HW restart re-alloc existing queues */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_int_sta tmp_sta = { .sta_id = sta_id, .type = mvm_sta->sta_type, }; /* * First add an empty station since allocating * a queue requires a valid station */ ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, mvmvif->id, mvmvif->color); if (ret) goto err; iwl_mvm_realloc_queues_after_restart(mvm, sta); sta_update = true; sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; } mvm_sta->sta_id = sta_id; mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); mvm_sta->vif = vif; if (!mvm->trans->trans_cfg->gen2) mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; else mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; INIT_LIST_HEAD(&mvmtxq->list); atomic_set(&mvmtxq->tx_request, 0); } mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { int q; dup_data = kcalloc(mvm->trans->num_rx_queues, sizeof(*dup_data), GFP_KERNEL); if (!dup_data) return -ENOMEM; /* * Initialize all the last_seq values to 0xffff which can never * compare equal to the frame's seq_ctrl in the check in * iwl_mvm_is_dup() since the lower 4 bits are the fragment * number and fragmented packets don't reach that function. * * This thus allows receiving a packet with seqno 0 and the * retry bit set as the very first packet on a new TID. */ for (q = 0; q < mvm->trans->num_rx_queues; q++) memset(dup_data[q].last_seq, 0xff, sizeof(dup_data[q].last_seq)); mvm_sta->dup_data = dup_data; } if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) goto err; } /* * if rs is registered with mac80211, then "add station" will be handled * via the corresponding ops, otherwise need to notify rate scaling here */ if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); else spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); mvmvif->ap_sta_id = sta_id; } else { WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); } } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); return 0; err: return ret; } int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool drain) { struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); cmd.sta_id = mvmsta->sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id); break; default: ret = -EIO; #if defined(__linux__) IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", mvmsta->sta_id); #elif defined(__FreeBSD__) IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n", mvmsta->sta_id, status); #endif break; } return ret; } /* * Remove a station from the FW table. Before sending the command to remove * the station validate that the station is indeed known to the driver (sanity * only). 
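* Internal stations are stored as error-valued pointers, so only a NULL entry is treated as unknown below.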
*/ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) { struct ieee80211_sta *sta; struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { .sta_id = sta_id, }; int ret; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* Note: internal stations are marked as error values */ if (!sta) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, sizeof(rm_sta_cmd), &rm_sta_cmd); if (ret) { IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); return ret; } return 0; } static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; spin_unlock_bh(&mvm_sta->lock); if (txq_id == IWL_MVM_INVALID_QUEUE) continue; ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) return ret; } return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); u8 sta_id = mvm_sta->sta_id; int ret; lockdep_assert_held(&mvm->mutex); #if defined(__linux__) if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); #elif defined(__FreeBSD__) if (iwl_mvm_has_new_rx_api(mvm)) { kfree(mvm_sta->dup_data); mvm_sta->dup_data = NULL; } #endif ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; /* flush its queues here since we are freeing mvm_sta */ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); if (ret) return ret; if (iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); } else { u32 q_mask = mvm_sta->tfd_queue_msk; ret = iwl_trans_wait_tx_queues_empty(mvm->trans, q_mask); } if (ret) return ret; ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); iwl_mvm_disable_sta_queues(mvm, vif, sta); /* If there is a TXQ still marked as reserved - free it */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { u8 reserved_txq = mvm_sta->reserved_queue; enum iwl_mvm_queue_status *status; /* * If no traffic has gone through the reserved TXQ - it * is still marked as IWL_MVM_QUEUE_RESERVED, and * should be manually marked as free again */ status = &mvm->queue_info[reserved_txq].status; if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", sta_id, reserved_txq, *status)) return -EINVAL; *status = IWL_MVM_QUEUE_FREE; } if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == sta_id) { /* if associated - we can't remove the AP STA now */ if (vif->bss_conf.assoc) return ret; /* unassoc - go ahead - remove the AP STA now */ mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; } /* * This shouldn't happen - the TDLS channel switch should be canceled * before the STA is removed. 
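* Recover anyway: clear the peer sta_id and cancel the pending channel-switch work.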
*/ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } /* * Make sure that the tx response code sees the station as -EBUSY and * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { int ret = iwl_mvm_rm_sta_common(mvm, sta_id); lockdep_assert_held(&mvm->mutex); RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); return ret; } int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, enum iwl_sta_type type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; } sta->tfd_queue_msk = qmask; sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); sta->sta_id = IWL_MVM_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, u8 sta_id, u8 fifo) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; struct iwl_trans_txq_scd_cfg cfg = { .fifo = fifo, .sta_id = sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) { unsigned int wdg_timeout = mvm->trans->trans_cfg->base_params->wd_timeout; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); } static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, int maccolor, u8 *addr, struct iwl_mvm_int_sta *sta, u16 *queue, int fifo) { int ret; /* Map queue to fifo - needs to happen before adding station */ if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT); return ret; } /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { int txq; txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); if (txq < 0) { iwl_mvm_rm_sta_common(mvm, sta->sta_id); return txq; } *queue = txq; } return 0; } int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) { int ret; lockdep_assert_held(&mvm->mutex); /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY); if (ret) return ret; /* * In CDB NICs we need to specify which lmac to use for aux activity * using the mac_id argument place to send lmac_id to the function */ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, &mvm->aux_sta, &mvm->aux_queue, 
IWL_MVM_TX_FIFO_MCAST); if (ret) { iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } return 0; } int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, NULL, &mvm->snif_sta, &mvm->snif_queue, IWL_MVM_TX_FIFO_BE); } int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) { int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); return ret; } void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) { iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); } /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; int queue; int ret; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, .sta_id = mvmvif->bcast_sta.sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; lockdep_assert_held(&mvm->mutex); if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { queue = mvm->probe_queue; } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { queue = mvm->p2p_dev_queue; } else { WARN(1, "Missing required TXQ for adding bcast STA\n"); return -EINVAL; } bsta->tfd_queue_msk |= BIT(queue); iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, mvmvif->id, mvmvif->color); if (ret) return ret; /* * For 22000 firmware and on we cannot add queue to a station unknown * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); if (queue < 0) { iwl_mvm_rm_sta_common(mvm, bsta->sta_id); return queue; } if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) mvm->probe_queue = queue; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) mvm->p2p_dev_queue = queue; } return 0; } static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, 
&mvmvif->bcast_sta, true); switch (vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: queueptr = &mvm->probe_queue; break; case NL80211_IFTYPE_P2P_DEVICE: queueptr = &mvm->p2p_dev_queue; break; default: WARN(1, "Can't free bcast queue on vif type %d\n", vif->type); return; } queue = *queueptr; iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); if (iwl_mvm_has_new_tx_api(mvm)) return; WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); } /* Send the FW a request to remove the station from it's internal data * structures, but DO NOT remove the entry from the local data structures. */ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); } /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. */ int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_dealloc_bcast_sta(mvm, vif); return ret; } /* * Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. * * @mvm: the mvm component * @vif: the interface to which the multicast station is added */ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { .fifo = vif->type == NL80211_IFTYPE_AP ? 
IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, .sta_id = msta->sta_id, .tid = 0, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, }; unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); int ret; lockdep_assert_held(&mvm->mutex); if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; /* * In IBSS, ieee80211_check_queues() sets the cab_queue to be * invalid, so make sure we use the queue we want. * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. */ if (vif->type == NL80211_IFTYPE_ADHOC) mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue * mask, now it is needed as any other queue. */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); if (ret) goto err; /* * Enable cab queue after the ADD_STA command is sent. * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG * command with unknown station id, and for FW that doesn't support * station API since the cab queue is not included in the * tfd_queue_mask. */ if (iwl_mvm_has_new_tx_api(mvm)) { int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); if (queue < 0) { ret = queue; goto err; } mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, timeout); return 0; err: iwl_mvm_dealloc_int_sta(mvm, msta); return ret; } static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, struct ieee80211_key_conf *keyconf, bool mcast) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); __le16 key_flags; int ret, size; u32 status; /* This is a valid situation for GTK removal */ if (sta_id == IWL_MVM_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK); key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); /* * The fields assigned here are in the same location at the start * of the command, so we can do this union trick. */ u.cmd.common.key_flags = key_flags; u.cmd.common.key_offset = keyconf->hw_key_idx; u.cmd.common.sta_id = sta_id; size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); break; } return ret; } /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. 
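* The multicast (cab) queue is flushed and disabled first, so that no * frames are left queued to a station the firmware is about to forget.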
*/ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { struct iwl_mvm_delba_data notif = { .baid = baid, }; iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, &notif, sizeof(notif)); } static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data) { int i; iwl_mvm_sync_rxq_del_ba(mvm, data->baid); for (i = 0; i < mvm->trans->num_rx_queues; i++) { int j; struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; spin_lock_bh(&reorder_buf->lock); if (likely(!reorder_buf->num_stored)) { spin_unlock_bh(&reorder_buf->lock); continue; } /* * This shouldn't happen in regular DELBA since the internal * delBA notification should trigger a release of all frames in * the reorder buffer. */ WARN_ON(1); for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_purge(&entries[j].e.frames); /* * Prevent timer re-arm. This prevents a very far-fetched case * where we timed out on the notification. There may be prior * RX frames pending in the RX queue before the notification * that might get processed between now and the actual deletion * and we would re-arm the timer although we are deleting the * reorder buffer. */ reorder_buf->removed = true; spin_unlock_bh(&reorder_buf->lock); del_timer_sync(&reorder_buf->reorder_timer); } } static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, u16 ssn, u16 buf_size) { int i; for (i = 0; i < mvm->trans->num_rx_queues; i++) { struct iwl_mvm_reorder_buffer *reorder_buf = &data->reorder_buf[i]; struct iwl_mvm_reorder_buf_entry *entries = &data->entries[i * data->entries_per_queue]; int j; reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; reorder_buf->buf_size = buf_size; /* rx reorder timer */ timer_setup(&reorder_buf->reorder_timer, iwl_mvm_reorder_timer_expired, 0); spin_lock_init(&reorder_buf->lock); reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; for (j = 0; j < reorder_buf->buf_size; j++) __skb_queue_head_init(&entries[j].e.frames); } } static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size) { struct iwl_mvm_add_sta_cmd cmd = { .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .sta_id = mvm_sta->sta_id, .add_modify = STA_MODE_MODIFY, }; u32 status; int ret; if (start) { cmd.add_immediate_ba_tid = tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); cmd.rx_ba_window = cpu_to_le16(buf_size); cmd.modify_mask = STA_MODIFY_ADD_BA_TID; } else { cmd.remove_immediate_ba_tid = tid; cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID; } status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ?
"start" : "stopp"); if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && !(status & IWL_ADD_STA_BAID_VALID_MASK))) return -EINVAL; return u32_get_bits(status, IWL_ADD_STA_BAID_MASK); case ADD_STA_IMMEDIATE_BA_FAILURE: IWL_WARN(mvm, "RX BA Session refused by fw\n"); return -ENOSPC; default: IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", start ? "start" : "stopp", status); return -EIO; } } static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { struct iwl_rx_baid_cfg_cmd cmd = { .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); if (start) { cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); cmd.alloc.tid = tid; cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); cmd.remove.tid = cpu_to_le32(tid); } ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), &cmd, &baid); if (ret) return ret; if (!start) { /* ignore firmware baid on remove */ baid = 0; } IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) return -EINVAL; return baid; } static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, tid, ssn, buf_size, baid); return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, tid, ssn, buf_size); } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_baid_data *baid_data = NULL; int ret, baid; u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID : IWL_MAX_BAID_OLD; lockdep_assert_held(&mvm->mutex); if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); return -ENOSPC; } if (iwl_mvm_has_new_rx_api(mvm) && start) { u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ /* * The division below will be OK if either the cache line size * can be divided by the entry size (ALIGN will round up) or if * if the entry size can be divided by the cache line size, in * which case the ALIGN() will do nothing. */ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); #endif /* * Upward align the reorder buffer size to fill an entire cache * line for each queue, to avoid sharing cache lines between * different queues. */ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); /* * Allocate here so if allocation fails we can bail out early * before starting the BA session in the firmware */ baid_data = kzalloc(sizeof(*baid_data) + mvm->trans->num_rx_queues * reorder_buf_size, GFP_KERNEL); if (!baid_data) return -ENOMEM; /* * This division is why we need the above BUILD_BUG_ON(), * if that doesn't hold then this will not be right. 
*/ baid_data->entries_per_queue = reorder_buf_size / sizeof(baid_data->entries[0]); } if (iwl_mvm_has_new_rx_api(mvm) && !start) { baid = mvm_sta->tid_to_baid[tid]; } else { /* we don't really need it in this case */ baid = -1; } /* Don't send command to remove (start=0) BAID during restart */ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, baid); if (baid < 0) { ret = baid; goto out_free; } if (start) { mvm->rx_ba_sessions++; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; baid_data->rcu_ptr = &mvm->baid_map[baid]; timer_setup(&baid_data->session_timer, iwl_mvm_rx_agg_session_expired, 0); baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_id = mvm_sta->sta_id; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's * supposed to happen) and we will free the session data while * RX is being processed in parallel */ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", mvm_sta->sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { baid = mvm_sta->tid_to_baid[tid]; if (mvm->rx_ba_sessions > 0) /* check that restart flow didn't zero the counter */ mvm->rx_ba_sessions--; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) return -EINVAL; baid_data = rcu_access_pointer(mvm->baid_map[baid]); if (WARN_ON(!baid_data)) return -EINVAL; /* synchronize all rx queues so we can safely delete */ iwl_mvm_free_reorder(mvm, baid_data); del_timer_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); /* * After we've deleted it, do another queue sync * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently * running it won't find a new session in the old * BAID. It can find the NULL pointer for the BAID, * but we must not have it find a different session. */ iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); } return 0; out_free: kfree(baid_data); return ret; } int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u8 queue, bool start) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; int ret; u32 status; lockdep_assert_held(&mvm->mutex); if (start) { mvm_sta->tfd_queue_msk |= BIT(queue); mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ mvm_sta->tid_disable_agg |= BIT(tid); } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); cmd.sta_id = mvm_sta->sta_id; cmd.add_modify = STA_MODE_MODIFY; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.modify_mask = STA_MODIFY_QUEUES; cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: ret = -EIO; IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", start ? 
"start" : "stopp", status); break; } return ret; } const u8 tid_to_mac80211_ac[] = { IEEE80211_AC_BE, IEEE80211_AC_BK, IEEE80211_AC_BK, IEEE80211_AC_BE, IEEE80211_AC_VI, IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ }; static const u8 tid_to_ucode_ac[] = { AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO, }; int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && mvmsta->tid_data[tid].state != IWL_AGG_OFF) { IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", mvmsta->tid_data[tid].state); return -ENXIO; } lockdep_assert_held(&mvm->mutex); if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && iwl_mvm_has_new_tx_api(mvm)) { u8 ac = tid_to_mac80211_ac[tid]; ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); if (ret) return ret; } spin_lock_bh(&mvmsta->lock); /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed * 2. The TXQ hasn't yet been enabled, so find a free one and mark * it as reserved */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } txq_id = ret; /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { ret = -ENXIO; IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", tid, IWL_MAX_HW_QUEUES - 1); goto out; } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto out; } IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id); tid_data = &mvmsta->tid_data[tid]; tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); tid_data->txq_id = txq_id; *ssn = tid_data->ssn; IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", mvmsta->sta_id, tid, txq_id, tid_data->ssn, tid_data->next_reclaimed); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn == tid_data->next_reclaimed) { tid_data->state = IWL_AGG_STARTING; ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; } else { tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; } out: spin_unlock_bh(&mvmsta->lock); return ret; } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); int queue, ret; bool alloc_queue = true; enum iwl_mvm_queue_status queue_status; u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = buf_size, .aggregate = true, }; /* * When FW supports TLC_OFFLOAD, it also implements Tx aggregation * manager, so this function should never be called in this case. */ if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); spin_lock_bh(&mvmsta->lock); ssn = tid_data->ssn; queue = tid_data->txq_id; tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); if (iwl_mvm_has_new_tx_api(mvm)) { /* * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() * would have failed, so if we are here there is no need to * allocate a queue. * However, if aggregation size is different than the default * size, the scheduler should be reconfigured. * We cannot do this with the new TX API, so return unsupported * for now, until it will be offloaded to firmware.. * Note that if SCD default value changes - this condition * should be updated as well. */ if (buf_size < IWL_FRAME_LIMIT) return -ENOTSUPP; ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; goto out; } cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; queue_status = mvm->queue_info[queue].status; /* Maybe there is no need to even alloc a queue... */ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) alloc_queue = false; /* * Only reconfig the SCD for the queue if the window size has * changed from current (become smaller) */ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { /* * If reconfiguring an existing queue, it first must be * drained */ ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue before reconfig\n"); return ret; } ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id, tid, buf_size, ssn); if (ret) { IWL_ERR(mvm, "Error reconfiguring TXQ #%d\n", queue); return ret; } } if (alloc_queue) iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ if (queue_status != IWL_MVM_QUEUE_SHARED) { ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; } /* No need to mark as reserved */ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; out: /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, * our ucode doesn't allow for that and has a global limit * for each station. Therefore, use the minimum of all the * aggregation sessions and our default value. 
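* E.g. if one TID negotiated a 256-frame window and another only 64, * the station-wide limit reported to the firmware becomes 64.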
*/ mvmsta->max_agg_bufsize = min(mvmsta->max_agg_bufsize, buf_size); mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, struct iwl_mvm_tid_data *tid_data) { u16 txq_id = tid_data->txq_id; lockdep_assert_held(&mvm->mutex); if (iwl_mvm_has_new_tx_api(mvm)) return; /* * The TXQ is marked as reserved only if no traffic came through yet * This means no traffic has been sent on this TID (agg'd or not), so * we no longer have use for the queue. Since it hasn't even been * allocated through iwl_mvm_enable_txq, so we can just mark it back as * free. */ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE; } } int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; int err; /* * If mac80211 is cleaning its state, then say that we finished since * our state has been cleared anyway. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); return 0; } spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); mvmsta->agg_tids &= ~BIT(tid); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); switch (tid_data->state) { case IWL_AGG_ON: tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); IWL_DEBUG_TX_QUEUES(mvm, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: /* * The agg session has been stopped before it was set up. This * can happen when the AddBA timer times out for example. */ /* No barriers since we are under mutex */ lockdep_assert_held(&mvm->mutex); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); tid_data->state = IWL_AGG_OFF; err = 0; break; default: IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n", mvmsta->sta_id, tid, tid_data->state); IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id); err = -EINVAL; } spin_unlock_bh(&mvmsta->lock); return err; } int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; u16 txq_id; enum iwl_mvm_agg_state old_state; /* * First set the agg state to OFF to avoid calling * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
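* The state is flipped under the station lock so the Tx response path * never observes a half-torn-down aggregation session.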
*/ spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", mvmsta->sta_id, tid, txq_id, tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; mvmsta->agg_tids &= ~BIT(tid); spin_unlock_bh(&mvmsta->lock); iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); if (old_state >= IWL_AGG_ON) { iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_has_new_tx_api(mvm)) { if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, BIT(tid))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_txq_empty(mvm->trans, txq_id); } else { if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); } iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); } return 0; } static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); /* Pick the unused key offset with the highest 'deleted' * counter. Every time a key is deleted, all the counters * are incremented and the one that was just deleted is * reset to zero. Thus, the highest counter is the one * that was deleted longest ago. Pick that one. */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (test_bit(i, mvm->fw_key_table)) continue; if (mvm->fw_key_deleted[i] > max) { max = mvm->fw_key_deleted[i]; max_offs = i; } } if (max_offs < 0) return STA_KEY_IDX_INVALID; return max_offs; } static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return iwl_mvm_sta_from_mac80211(sta); /* * The device expects GTKs for station interfaces to be * installed as GTKs for the AP station. If we have no * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* * It is possible that the 'sta' parameter is NULL, * for example when a GTK is removed - the sta_id will then * be the AP ID, and no station was passed by mac80211. */ if (IS_ERR_OR_NULL(sta)) return NULL; return iwl_mvm_sta_from_mac80211(sta); } return NULL; } static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) { int i; for (i = len - 1; i >= 0; i--) { if (pn1[i] > pn2[i]) return 1; if (pn1[i] < pn2[i]) return -1; } return 0; } static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; struct iwl_mvm_add_sta_key_cmd cmd; } u = {}; __le16 key_flags; int ret; u32 status; u16 keyidx; u64 pn = 0; int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); if (sta_id == IWL_MVM_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & STA_KEY_FLG_KEYID_MSK; key_flags = cpu_to_le16(keyidx); key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); if (api_ver >= 2) { memcpy((void *)&u.cmd.tx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], IWL_MIC_KEY_SIZE); memcpy((void *)&u.cmd.rx_mic_key, &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], IWL_MIC_KEY_SIZE); pn = atomic64_read(&key->tx_pn); } else { u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; for (i = 0; i < 5; i++) u.cmd_v1.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]); } memcpy(u.cmd.common.key, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_CCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); fallthrough; case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); fallthrough; case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); if (api_ver >= 2) pn = atomic64_read(&key->tx_pn); break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(u.cmd.common.key, key->key, key->keylen); } if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); if (mfp) key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; u.cmd.common.sta_id = sta_id; if (key->cipher == WLAN_CIPHER_SUITE_TKIP) i = 0; else i = -1; for (; i < IEEE80211_NUM_TIDS; i++) { struct ieee80211_key_seq seq = {}; u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; int rx_pn_len = 8; /* there's a hole at 2/3 in FW format depending on version */ int hole = api_ver >= 3 ? 
0 : 2; ieee80211_get_key_rx_seq(key, i, &seq); if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { rx_pn[0] = seq.tkip.iv16; rx_pn[1] = seq.tkip.iv16 >> 8; rx_pn[2 + hole] = seq.tkip.iv32; rx_pn[3 + hole] = seq.tkip.iv32 >> 8; rx_pn[4 + hole] = seq.tkip.iv32 >> 16; rx_pn[5 + hole] = seq.tkip.iv32 >> 24; } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { rx_pn = seq.hw.seq; rx_pn_len = seq.hw.seq_len; } else { rx_pn[0] = seq.ccmp.pn[0]; rx_pn[1] = seq.ccmp.pn[1]; rx_pn[2 + hole] = seq.ccmp.pn[2]; rx_pn[3 + hole] = seq.ccmp.pn[3]; rx_pn[4 + hole] = seq.ccmp.pn[4]; rx_pn[5 + hole] = seq.ccmp.pn[5]; } if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, rx_pn_len) > 0) memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, rx_pn_len); } if (api_ver >= 2) { u.cmd.transmit_seq_cnt = cpu_to_le64(pn); size = sizeof(u.cmd); } else { size = sizeof(u.cmd_v1); } status = ADD_STA_SUCCESS; if (cmd_flags & CMD_ASYNC) ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, &u.cmd); else ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, &status); switch (status) { case ADD_STA_SUCCESS: IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); break; default: ret = -EIO; IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); break; } return ret; } static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, struct ieee80211_key_conf *keyconf, u8 sta_id, bool remove_key) { struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; /* verify the key details match the required command's expectations */ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || (keyconf->keyidx != 4 && keyconf->keyidx != 5 && keyconf->keyidx != 6 && keyconf->keyidx != 7) || (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) return -EINVAL; if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) return -EINVAL; igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); igtk_cmd.sta_id = cpu_to_le32(sta_id); if (remove_key) { /* This is a valid situation for IGTK */ if (sta_id == IWL_MVM_INVALID_STA) return 0; igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); } else { struct ieee80211_key_seq seq; const u8 *pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); break; default: return -EINVAL; } memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | ((u64) pn[4] << 8) | ((u64) pn[3] << 16) | ((u64) pn[2] << 24) | ((u64) pn[1] << 32) | ((u64) pn[0] << 40)); } IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n", remove_key ? "removing" : "installing", keyconf->keyidx >= 6 ? 
"B" : "", keyconf->keyidx, igtk_cmd.sta_id); if (!iwl_mvm_has_new_rx_api(mvm)) { struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { .ctrl_flags = igtk_cmd.ctrl_flags, .key_id = igtk_cmd.key_id, .sta_id = igtk_cmd.sta_id, .receive_seq_cnt = igtk_cmd.receive_seq_cnt }; memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, ARRAY_SIZE(igtk_cmd_v1.igtk)); return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd_v1), &igtk_cmd_v1); } return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, sizeof(igtk_cmd), &igtk_cmd); } static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (sta) return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { u8 sta_id = mvmvif->ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); return sta->addr; } return NULL; } static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset, bool mcast) { const u8 *addr; struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); sta_id = mvm_sta->sta_id; mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } else { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; } if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, seq.tkip.iv32, p1k, 0, key_offset, mfp); } return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 0, NULL, 0, key_offset, mfp); } int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf, u8 key_offset) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); if (vif->type != NL80211_IFTYPE_AP || keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { /* Get the station id from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (!mvm_sta) { IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } sta_id = mvm_sta->sta_id; /* * It is possible that the 'sta' parameter is NULL, and thus * there is a need to retrieve the sta from the local station * table. */ if (!sta) { sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(sta)) { IWL_ERR(mvm, "Invalid station id\n"); return -EINVAL; } } if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) return -EINVAL; } else { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); sta_id = mvmvif->mcast_sta.sta_id; } if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false); goto end; } /* If the key_offset is not pre-assigned, we need to find a * new offset to use. 
In normal cases, the offset is not * pre-assigned, but during HW_RESTART we want to reuse the * same indices, so we pass them when this function is called. * * In D3 entry, we need to hardcoded the indices (because the * firmware hardcodes the PTK offset to 0). In this case, we * need to make sure we don't overwrite the hw_key_idx in the * keyconf structure, because otherwise we cannot configure * the original ones back when resuming. */ if (key_offset == STA_KEY_IDX_INVALID) { key_offset = iwl_mvm_set_fw_key_idx(mvm); if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; keyconf->hw_key_idx = key_offset; } ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); if (ret) goto end; /* * For WEP, the same key is used for multicast and unicast. Upload it * again, using the same key offset, and now pointing the other one * to the same key slot (offset). * If this fails, remove the original as well. */ if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) && sta) { ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, !mcast); if (ret) { __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); goto end; } } __set_bit(key_offset, mvm->fw_key_table); end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta ? sta->addr : zero_addr, ret); return ret; } int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf) { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; u8 sta_id = IWL_MVM_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (mvm_sta) sta_id = mvm_sta->sta_id; else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id); if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { IWL_ERR(mvm, "offset %d not used in fw key table.\n", keyconf->hw_key_idx); return -ENOENT; } /* track which key was deleted last */ for (i = 0; i < STA_KEY_MAX_NUM; i++) { if (mvm->fw_key_deleted[i] < U8_MAX) mvm->fw_key_deleted[i]++; } mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; if (sta && !mvm_sta) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; } ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); if (ret) return ret; /* delete WEP key twice to get rid of (now useless) offset */ if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); return ret; } void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); bool mfp = sta ? 
sta->mfp : false; rcu_read_lock(); mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (WARN_ON_ONCE(!mvm_sta)) goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, mfp); unlock: rcu_read_unlock(); } void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum ieee80211_frame_release_type reason, u16 cnt, u16 tids, bool more_data, bool single_sta_queue) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, .sleep_tx_count = cpu_to_le16(cnt), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int tid, ret; unsigned long _tids = tids; /* convert TIDs to ACs - we don't support TSPEC so that's OK * Note that this field is reserved and unused by firmware not * supporting GO uAPSD, so it's safe to always do this. */ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); /* If we're releasing frames from aggregation or dqa queues then check * if all the queues that we're releasing frames from, combined, have: * - more frames than the service period, in which case more_data * needs to be set * - fewer than 'cnt' frames, in which case we need to adjust the * firmware command (but do that unconditionally) */ if (single_sta_queue) { int remaining = cnt; int sleep_tx_count; spin_lock_bh(&mvmsta->lock); for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { struct iwl_mvm_tid_data *tid_data; u16 n_queued; tid_data = &mvmsta->tid_data[tid]; n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { more_data = true; remaining = 0; break; } remaining -= n_queued; } sleep_tx_count = cnt - remaining; if (reason == IEEE80211_FRAME_RELEASE_UAPSD) mvmsta->sleep_tx_count = sleep_tx_count; spin_unlock_bh(&mvmsta->lock); cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); if (WARN_ON(cnt - remaining == 0)) { ieee80211_sta_eosp(sta); return; } } /* Note: this is ignored by firmware not supporting GO uAPSD */ if (more_data) cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { mvmsta->next_status_eosp = true; cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; } else { cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; } /* block the Tx queues until the FW updated the sleep Tx count */ iwl_trans_block_txq_ptrs(mvm->trans, true); ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; struct ieee80211_sta *sta; u32 sta_id = le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) return; rcu_read_lock(); sta = 
rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool disable) { struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = mvmsta->sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool disable) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); spin_lock_bh(&mvm_sta->lock); if (mvm_sta->disable_tx == disable) { spin_unlock_bh(&mvm_sta->lock); return; } mvm_sta->disable_tx = disable; /* * If sta PS state is handled by mac80211, tell it to start/stop * queuing tx for this station. */ if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS)) ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); spin_unlock_bh(&mvm_sta->lock); } static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_int_sta *sta, bool disable) { u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, .sta_id = sta->sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; int i; rcu_read_lock(); /* Block/unblock all the stations of the given mvmvif */ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); if (IS_ERR_OR_NULL(sta)) continue; mvm_sta = iwl_mvm_sta_from_mac80211(sta); if (mvm_sta->mac_id_n_color != FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) continue; iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); } rcu_read_unlock(); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) return; /* Need to block/unblock also multicast station */ if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->mcast_sta, disable); /* * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->bcast_sta, disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); if (mvmsta) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); rcu_read_unlock(); } u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) { u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
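* E.g. on gen2, a sequence number of 0x105 is masked to 0x05; with * next_reclaimed at 0x02 the subtraction reports 3 frames still queued.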
*/ if (mvm->trans->trans_cfg->gen2) sn &= 0xff; return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len) { int ret; u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_key_conf *keyconf; ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, NL80211_IFTYPE_UNSPECIFIED, IWL_STA_LINK); if (ret) return ret; ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, addr, sta, &queue, IWL_MVM_TX_FIFO_BE); if (ret) goto out; keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL); if (!keyconf) { ret = -ENOBUFS; goto out; } keyconf->cipher = cipher; memcpy(keyconf->key, key, key_len); keyconf->keylen = key_len; ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, 0, NULL, 0, 0, true); kfree(keyconf); return 0; out: iwl_mvm_dealloc_int_sta(mvm, sta); return ret; } void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 mac_id) { struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = { .mac_id = cpu_to_le32(mac_id), }; int ret; ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD), CMD_ASYNC, sizeof(cancel_channel_switch_cmd), &cancel_channel_switch_cmd); if (ret) IWL_ERR(mvm, "Failed to cancel the channel switch\n"); } diff --git a/sys/contrib/dev/iwlwifi/mvm/tx.c b/sys/contrib/dev/iwlwifi/mvm/tx.c index 7763037b93ed..8125bb76f59e 100644 --- a/sys/contrib/dev/iwlwifi/mvm/tx.c +++ b/sys/contrib/dev/iwlwifi/mvm/tx.c @@ -1,2225 +1,2225 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <linux/tcp.h> #include <net/ip.h> #include <net/ipv6.h> #include "iwl-trans.h" #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, u16 tid, u16 ssn) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_ba *ba_trig; trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); if (!trig) return; ba_trig = (void *)trig->data; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "BAR sent to %pM, tid %d, ssn %d", addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; /* Do not compute checksum if already computed */ if (skb->ip_summed != CHECKSUM_PARTIAL) goto out; /* We do not expect to be requested to csum stuff we do not support */ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || (skb->protocol != htons(ETH_P_IP) && skb->protocol != htons(ETH_P_IPV6)), "No support for requested checksum\n")) { skb_checksum_help(skb); goto out; } if (skb->protocol == htons(ETH_P_IP)) { protocol = ip_hdr(skb)->protocol; } else { #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_network_header(skb); unsigned int off = sizeof(*ipv6h); protocol = ipv6h->nexthdr; while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { struct ipv6_opt_hdr *hp; /* only supported extension headers */ if (protocol !=
NEXTHDR_ROUTING && protocol != NEXTHDR_HOP && protocol != NEXTHDR_DEST) { skb_checksum_help(skb); goto out; } hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); protocol = hp->nexthdr; off += ipv6_optlen(hp); } /* if we get here - protocol now should be TCP/UDP */ #endif } if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) { WARN_ON_ONCE(1); skb_checksum_help(skb); goto out; } /* enable L4 csum */ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN); /* * Set offset to IP header (snap). * We don't support tunneling so no need to take care of inner header. * Size is in words. */ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ if (skb->protocol == htons(ETH_P_IP) && amsdu) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } /* reset UDP/TCP header csum */ if (protocol == IPPROTO_TCP) tcp_hdr(skb)->check = 0; else udp_hdr(skb)->check = 0; /* * mac header len should include IV, size is in words unless * the IV is added by the firmware like in WEP. * In new Tx API, the IV is always added by the firmware. */ if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) mh_len += info->control.hw_key->iv_len; mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; out: #endif if (amsdu) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); else if (ieee80211_hdrlen(hdr->frame_control) % 4) /* padding is inserted later in transport */ offload_assist |= BIT(TX_CMD_OFFLD_PAD); return offload_assist; } u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); unsigned int csum_start = skb_checksum_start_offset(skb); offload_assist |= u32_encode_bits(hdrlen / 2, IWL_TX_CMD_OFFLD_BZ_MH_LEN); if (amsdu) offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; else if (hdrlen % 4) /* padding is inserted later in transport */ offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; if (skb->ip_summed != CHECKSUM_PARTIAL) return offload_assist; offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; /* * mac80211 will always calculate checksum in software for * non-fast-xmit, and so we can only do offloaded checksum * for fast-xmit frames. In this case, we always have the * RFC 1042 header present. skb_checksum_start_offset() * returns the offset from the beginning, but the hardware * needs it from after the header & SNAP header. 
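* The 8 bytes subtracted below are that LLC/SNAP header: 3 bytes of LLC * (AA AA 03), a 3-byte OUI and the 2-byte EtherType.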
*/ csum_start -= hdrlen + 8; offload_assist |= u32_encode_bits(csum_start, IWL_TX_CMD_OFFLD_BZ_START_OFFS); offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); return offload_assist; } static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, bool amsdu) { if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); } /* * Sets most of the Tx cmd's fields */ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id) { struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; bool amsdu = false; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || (ieee80211_is_probe_resp(fc) && !is_multicast_ether_addr(hdr->addr1))) tx_flags |= TX_CMD_FLG_ACK; else tx_flags &= ~TX_CMD_FLG_ACK; if (ieee80211_is_probe_resp(fc)) tx_flags |= TX_CMD_FLG_TSF; if (ieee80211_has_morefrags(fc)) tx_flags |= TX_CMD_FLG_MORE_FRAG; if (ieee80211_is_data_qos(fc)) { u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, ssn); } else { if (ieee80211_is_data(fc)) tx_cmd->tid_tspec = IWL_TID_NON_QOS; else tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) tx_flags |= TX_CMD_FLG_SEQ_CTL; else tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } /* Default to 0 (BE) when tid_spec is set to IWL_MAX_TID_COUNT */ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; else ac = tid_to_mac80211_ac[0]; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); else if (ieee80211_is_action(fc)) tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && !is_multicast_ether_addr(hdr->addr1)) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ tx_cmd->len = cpu_to_le16((u16)skb->len); tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; tx_cmd->offload_assist = cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); } static u32 
iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { if (info->band == NL80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; if (sta && ieee80211_is_data(fc)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; } return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { int rate_idx = -1; u8 rate_plcp; u32 rate_flags = 0; bool is_cck; /* info->control is only relevant for non HW rate control */ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, le16_to_cpu(fc), sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; } /* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) rate_idx = rate_lowest_index( &mvm->nvm_data->bands[info->band], sta); /* * For non 2 GHZ band, remap mac80211 rate * indices into driver indices */ if (info->band != NL80211_BAND_2GHZ) rate_idx += IWL_FIRST_OFDM_RATE; /* For 2.4 GHZ band, check that there is no need to remap */ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); /* Set CCK or OFDM flag */ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { if (!is_cck) rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; else rate_flags |= RATE_MCS_CCK_MSK; } else if (is_cck) { rate_flags |= RATE_MCS_CCK_MSK_V1; } return (u32)rate_plcp | rate_flags; } static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { return iwl_mvm_get_tx_rate(mvm, info, sta, fc) | iwl_mvm_get_tx_ant(mvm, info, sta, fc); } /* * Sets the fields in the Tx cmd that are rate related */ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) { /* Set retry limit on RTS packets */ tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) { tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; tx_cmd->rts_retry_limit = min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); } else if (ieee80211_is_back_req(fc)) { tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; } else { tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; } /* * for data packets, rate info comes from the table inside the fw. 
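 */

/*
 * Illustrative sketch, not driver code: the band remap done in
 * iwl_mvm_get_tx_rate() above.  The constant is an assumption for the
 * example (the real value comes from the driver's rate table, where
 * the four CCK rates sit in front of the OFDM rates): on 5 GHz
 * mac80211 index 0 already means the first OFDM rate, so the index is
 * shifted past the CCK entries.
 */
#define SKETCH_FIRST_OFDM_RATE	4	/* assumed: after 1/2/5.5/11M CCK */

static inline int
sketch_mac80211_to_driver_rate_idx(int rate_idx, bool band_2ghz)
{
	if (!band_2ghz)
		rate_idx += SKETCH_FIRST_OFDM_RATE;
	return rate_idx;	/* e.g. 5 GHz idx 0 -> driver idx 4 (6M) */
}

/*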
This * table is controlled by LINK_QUALITY commands */ if (ieee80211_is_data(fc) && sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } } else if (ieee80211_is_back_req(fc)) { tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } /* Set the rate in the TX cmd */ tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, u8 *crypto_hdr) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u64 pn; pn = atomic64_inc_return(&keyconf->tx_pn); crypto_hdr[0] = pn; crypto_hdr[2] = 0; crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); crypto_hdr[1] = pn >> 8; crypto_hdr[4] = pn >> 16; crypto_hdr[5] = pn >> 24; crypto_hdr[6] = pn >> 32; crypto_hdr[7] = pn >> 40; } /* * Sets the fields in the Tx cmd that are crypto related */ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct iwl_tx_cmd *tx_cmd, struct sk_buff *skb_frag, int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; u8 *crypto_hdr = skb_frag->data + hdrlen; enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; pn = atomic64_inc_return(&keyconf->tx_pn); ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; fallthrough; case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & TX_CMD_SEC_WEP_KEY_IDX_MSK); memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: type = TX_CMD_SEC_GCMP; fallthrough; case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having old packets with a PN * based on the old key but the message encrypted with a new * one. * Need to handle this. */ tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; tx_cmd->key[0] = keyconf->hw_key_idx; iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; default: tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; } } /* * Allocates the Tx cmd and sets the driver data pointers in the skb */ static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); if (unlikely(!dev_cmd)) return NULL; dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? iwl_mvm_sta_from_mac80211(sta) : NULL; bool amsdu = false; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; /* * For data packets, rate info comes from the fw. Only * set rate/antenna during connection establishment or in case * no station is given.
*/ if (!sta || !ieee80211_is_data(hdr->frame_control) || mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, hdr->frame_control); } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, info, amsdu); cmd->offload_assist = cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le16(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); cmd->offload_assist = cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); /* Copy MAC header from skb into command buffer */ memcpy(cmd->hdr, hdr, hdrlen); cmd->flags = cpu_to_le32(flags); cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } goto out; } tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); out: return dev_cmd; } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); memset(&skb_info->status, 0, sizeof(skb_info->status)); memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); skb_info->driver_data[1] = cmd; } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_hdr *hdr) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: /* * Non-bufferable frames use the broadcast station, thus they * use the probe queue. * Also take care of the case where we send a deauth to a * station that we don't have, or similarly an association * response (with non-success status) for a station we can't * accept. * Also, disassociate frames might happen, particularly with * reason 7 ("Class 3 frame received from nonassociated STA").
*/ if (ieee80211_is_mgmt(fc) && (!ieee80211_is_bufferable_mmpdu(fc) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && is_multicast_ether_addr(hdr->addr1)) return mvmvif->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); return mvm->probe_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; default: WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); return -1; } } static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; struct iwl_probe_resp_data *resp_data; const u8 *ie; u8 *pos; u8 match[] = { (WLAN_OUI_WFA >> 16) & 0xff, (WLAN_OUI_WFA >> 8) & 0xff, WLAN_OUI_WFA & 0xff, WLAN_OUI_TYPE_WFA_P2P, }; rcu_read_lock(); resp_data = rcu_dereference(mvmvif->probe_resp_data); if (!resp_data) goto out; if (!resp_data->notif.noa_active) goto out; ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, mgmt->u.probe_resp.variable, skb->len - base_len, match, 4, 2); if (!ie) { IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); goto out; } if (skb_tailroom(skb) < resp_data->noa_len) { if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { IWL_ERR(mvm, "Failed to reallocate probe resp\n"); goto out; } } pos = skb_put(skb, resp_data->noa_len); *pos++ = WLAN_EID_VENDOR_SPECIFIC; /* Set length of IE body (not including ID and length itself) */ *pos++ = resp_data->noa_len - 2; *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; *pos++ = WLAN_OUI_WFA & 0xff; *pos++ = WLAN_OUI_TYPE_WFA_P2P; memcpy(pos, &resp_data->notif.noa_attr, resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); out: rcu_read_unlock(); } int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) return -1; if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { if (!ieee80211_is_data(hdr->frame_control)) sta_id = mvmvif->bcast_sta.sta_id; else sta_id = mvmvif->mcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & * STATION. * P2P uses the offchannel queue. 
* STATION (HS2.0) uses the auxiliary context of the FW, * and hence needs to be sent on the aux queue. */ sta_id = mvm->aux_sta.sta_id; queue = mvm->aux_queue; } } if (queue < 0) { IWL_ERR(mvm, "No queue was found. Dropping TX\n"); return -1; } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); return -1; } return 0; } unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; u8 ac = tid_to_mac80211_ac[tid]; unsigned int txf; int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); /* For HE redirect to trigger based fifos */ - if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); /* * Don't send an AMSDU that will be longer than the TXF. * Add a security margin of 256 for the TX command + headers. * We also want to have the start of the next packet inside the * fifo to be able to send bursts. */ return min_t(unsigned int, mvmsta->max_amsdu_len, mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); } #ifdef CONFIG_INET static int iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, netdev_features_t netdev_flags, struct sk_buff_head *mpdus_skb) { struct sk_buff *tmp, *next; struct ieee80211_hdr *hdr = (void *)skb->data; char cb[sizeof(skb->cb)]; u16 i = 0; unsigned int tcp_payload_len; unsigned int mss = skb_shinfo(skb)->gso_size; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); bool qos = ieee80211_is_data_qos(hdr->frame_control); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; skb_shinfo(skb)->gso_size = num_subframes * mss; memcpy(cb, skb->cb, sizeof(cb)); next = skb_gso_segment(skb, netdev_flags); skb_shinfo(skb)->gso_size = mss; skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) consume_skb(skb); skb_list_walk_safe(next, tmp, next) { memcpy(tmp->cb, cb, sizeof(tmp->cb)); /* * Compute the length of all the data added for the A-MSDU. * This will be used to compute the length to write in the TX * command. We have: SNAP + IP + TCP for n -1 subframes and * ETH header for n subframes. */ tcp_payload_len = skb_tail_pointer(tmp) - skb_transport_header(tmp) - tcp_hdrlen(tmp) + tmp->data_len; if (ipv4) ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); if (tcp_payload_len > mss) { skb_shinfo(tmp)->gso_size = mss; skb_shinfo(tmp)->gso_type = ipv4 ? 
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; } else { if (qos) { u8 *qc; if (ipv4) ip_send_check(ip_hdr(tmp)); qc = ieee80211_get_qos_ctl((void *)tmp->data); *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } skb_shinfo(tmp)->gso_size = 0; } skb_mark_not_on_list(tmp); __skb_queue_tail(mpdus_skb, tmp); i++; } return 0; } static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; u16 snap_ip_tcp, pad; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; u8 tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || !mvmsta->amsdu_enabled) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Do not build AMSDU for IPv6 with extension headers. * Ask the stack to segment and checksum the generated MPDUs for us. */ if (skb->protocol == htons(ETH_P_IPV6) && ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != IPPROTO_TCP) { netdev_flags &= ~NETIF_F_CSUM_MASK; return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; /* * No need to lock amsdu_in_ampdu_allowed since it can't be modified * during a BA session. */ if ((info->flags & IEEE80211_TX_CTL_AMPDU && !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || !(mvmsta->amsdu_enabled & BIT(tid))) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* * Take the min of ieee80211 station and mvm station */ max_amsdu_len = min_t(unsigned int, sta->max_amsdu_len, iwl_mvm_max_amsdu_size(mvm, sta, tid)); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not * supported. This is a spec requirement (IEEE 802.11-2015 * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && - !sta->vht_cap.vht_supported) + !sta->deflink.vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; pad = (4 - subf_len) & 0x3; /* * If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad. */ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); if (sta->max_amsdu_subframes && num_subframes > sta->max_amsdu_subframes) num_subframes = sta->max_amsdu_subframes; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; /* * Make sure we have enough TBs for the A-MSDU: * 2 for each subframe * 1 more for each fragment * 1 more for the potential data in the header */ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > mvm->trans->max_skb_frags) num_subframes = 1; if (num_subframes > 1) *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { __skb_queue_tail(mpdus_skb, skb); return 0; } /* * Trick the segmentation function to make it * create SKBs that can fit into one A-MSDU.
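 */

/*
 * Illustrative sketch, not driver code: the subframe math above with
 * assumed numbers.  Take mss = 1460 and snap_ip_tcp = 8 + 20 + 20 = 48,
 * so subf_len = 14 + 48 + 1460 = 1522 and pad = (4 - 1522) & 0x3 = 2.
 * With max_amsdu_len = 7935 (the HT maximum) this yields
 * (7935 + 2) / (1522 + 2) = 5 subframes; the "trick" is then to set
 * gso_size to 5 * mss so that skb_gso_segment() cuts the TSO skb into
 * chunks that each fill exactly one A-MSDU.
 */
static inline unsigned int
sketch_amsdu_subframes(unsigned int max_amsdu_len, unsigned int subf_len,
		       unsigned int pad)
{
	/* N subframes occupy N * subf_len + (N - 1) * pad bytes */
	return (max_amsdu_len + pad) / (subf_len + pad);
}

/*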
*/ return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skb); } #else /* CONFIG_INET */ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { /* Impossible to get TSO without CONFIG_INET */ WARN_ON(1); return -1; } #endif /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; unsigned long now = jiffies; int tid; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) return true; } return false; } static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return; mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) schedule_delayed_work(&mvm->tcm.work, 0); mdata->tx.airtime += airtime; } static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; struct iwl_mvm_tcm_mac *mdata; if (mac >= NUM_MAC_INDEX_DRIVER) return -EINVAL; mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; return 0; } /* * Builds the Tx cmd for a single MPDU and hands it to the transport. * * This function must be called with BHs disabled. */ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; u16 txq_id; bool is_ampdu = false; int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; - if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he) + if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) goto drop; /* * we handle that entirely ourselves -- for uAPSD the firmware * will always send a notification, and for PS-Poll responses * we'll notify mac80211 when getting frame status */ info->flags &= ~IEEE80211_TX_STATUS_EOSP; spin_lock(&mvmsta->lock); /* nullfunc frames should go to the MGMT queue regardless of QoS, * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default * assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) goto drop_unlock_sta; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; if (WARN_ONCE(is_ampdu && mvmsta->tid_data[tid].state != IWL_AGG_ON, "Invalid internal agg state %d for TID %d", mvmsta->tid_data[tid].state, tid)) goto drop_unlock_sta; seq_number = mvmsta->tid_data[tid].seq_number; seq_number &= IEEE80211_SCTL_SEQ; if
(!iwl_mvm_has_new_tx_api(mvm)) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seq_number); /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { tid = IWL_TID_NON_QOS; } txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; /* * If we have timed-out TIDs, schedule the worker that will * reconfigure the queues and update them. * * Note that no lock is taken here, in order not to serialize * the TX flow. This isn't dangerous because scheduling * mvm->add_stream_wk can't ruin the state, and if we DON'T * schedule it due to some race condition then on the next TX we get * here we will. */ if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED && iwl_mvm_txq_should_update(mvm, txq_id))) schedule_work(&mvm->add_stream_wk); } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); if (ieee80211_is_data(fc)) iwl_mvm_mei_tx_copy_to_csme(mvm, skb, info->control.hw_key ? info->control.hw_key->iv_len : 0); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc)) mvmsta->tid_data[tid].seq_number = seq_number + 0x10; spin_unlock(&mvmsta->lock); if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ?
0 : tid)) goto drop; return 0; drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); return -1; } int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; if (WARN_ON_ONCE(!mvmsta)) return -1; if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; if (payload_len <= skb_shinfo(skb)->gso_size) return iwl_mvm_tx_mpdu(mvm, skb, &info, sta); __skb_queue_head_init(&mpdus_skbs); ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs); if (ret) return ret; if (WARN_ON(skb_queue_empty(&mpdus_skbs))) return ret; while (!skb_queue_empty(&mpdus_skbs)) { skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { __skb_queue_purge(&mpdus_skbs); return ret; } } return 0; } static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; struct ieee80211_vif *vif = mvmsta->vif; u16 normalized_ssn; lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell * mac80211 so it knows we no longer have frames buffered for * the station on this TID (for the TIM bitmap calculation.) */ ieee80211_sta_set_buffered(sta, tid, false); } /* * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need * to align the wrap around of ssn so we compare relevant values. 
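 */

/*
 * Illustrative sketch, not driver code: the wrap alignment above with
 * made-up values.  A 12-bit ssn of 0x512 and a next_reclaimed of 0x112
 * name the same slot once only 8 bits survive (both become 0x12).  In
 * the code below only the ssn side is masked, because next_reclaimed
 * already wraps at 8 bits on gen2; here both are masked so the
 * comparison is self-contained.
 */
static inline bool
sketch_ssn_matches(u16 ssn, u16 next_reclaimed, bool gen2)
{
	if (gen2) {
		ssn &= 0xff;
		next_reclaimed &= 0xff;
	}
	return ssn == next_reclaimed;
}

/*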
*/ normalized_ssn = tid_data->ssn; if (mvm->trans->trans_cfg->gen2) normalized_ssn &= 0xff; if (normalized_ssn != tid_data->next_reclaimed) return; switch (tid_data->state) { case IWL_EMPTYING_HW_QUEUE_ADDBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue addBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_STARTING; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IWL_EMPTYING_HW_QUEUE_DELBA: IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; default: break; } } #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status) { #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: return "SUCCESS"; TX_STATUS_POSTPONE(DELAY); TX_STATUS_POSTPONE(FEW_BYTES); TX_STATUS_POSTPONE(BT_PRIO); TX_STATUS_POSTPONE(QUIET_PERIOD); TX_STATUS_POSTPONE(CALC_TTAK); TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); TX_STATUS_FAIL(SHORT_LIMIT); TX_STATUS_FAIL(LONG_LIMIT); TX_STATUS_FAIL(UNDERRUN); TX_STATUS_FAIL(DRAIN_FLOW); TX_STATUS_FAIL(RFKILL_FLUSH); TX_STATUS_FAIL(LIFE_EXPIRE); TX_STATUS_FAIL(DEST_PS); TX_STATUS_FAIL(HOST_ABORTED); TX_STATUS_FAIL(BT_RETRY); TX_STATUS_FAIL(STA_INVALID); TX_STATUS_FAIL(FRAG_DROPPED); TX_STATUS_FAIL(TID_DISABLE); TX_STATUS_FAIL(FIFO_FLUSHED); TX_STATUS_FAIL(SMALL_CF_POLL); TX_STATUS_FAIL(FW_DROP); TX_STATUS_FAIL(STA_COLOR_MISMATCH); } return "UNKNOWN"; #undef TX_STATUS_FAIL #undef TX_STATUS_POSTPONE } #endif /* CONFIG_IWLWIFI_DEBUG */ static int iwl_mvm_get_hwrate_chan_width(u32 chan_width) { switch (chan_width) { case RATE_MCS_CHAN_WIDTH_20: return 0; case RATE_MCS_CHAN_WIDTH_40: return IEEE80211_TX_RC_40_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_80: return IEEE80211_TX_RC_80_MHZ_WIDTH; case RATE_MCS_CHAN_WIDTH_160: return IEEE80211_TX_RC_160_MHZ_WIDTH; default: return 0; } } void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; u32 rate = format == RATE_MCS_HT_MSK ? 
RATE_HT_MCS_INDEX(rate_n_flags) : rate_n_flags & RATE_MCS_CODE_MSK; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK); if (rate_n_flags & RATE_MCS_SGI_MSK) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (format == RATE_MCS_HT_MSK) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate; } else if (format == RATE_MCS_VHT_MSK) { ieee80211_rate_set_vht(r, rate, ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else if (format == RATE_MCS_HE_MSK) { /* mac80211 cannot do this without ieee80211_tx_status_ext() * but it only matters for radiotap */ r->idx = 0; } else { r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, band); } } void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, enum nl80211_band band, struct ieee80211_tx_rate *r) { if (rate_n_flags & RATE_HT_MCS_GF_MSK) r->flags |= IEEE80211_TX_RC_GREEN_FIELD; r->flags |= iwl_mvm_get_hwrate_chan_width(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1); if (rate_n_flags & RATE_MCS_SGI_MSK_V1) r->flags |= IEEE80211_TX_RC_SHORT_GI; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { r->flags |= IEEE80211_TX_RC_MCS; r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> RATE_VHT_MCS_NSS_POS) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else { r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, band); } } /* * translate ucode response to mac80211 tx status control values */ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw, u32 rate_n_flags, struct ieee80211_tx_info *info) { struct ieee80211_tx_rate *r = &info->status.rates[0]; if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP, TX_CMD, 0) <= 6) rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); info->status.antenna = ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS); iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, u32 status, __le16 frame_control) { struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_tx_status *status_trig; int i; if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) { enum iwl_fw_ini_time_point tp = IWL_FW_INI_TIME_POINT_TX_FAILED; if (ieee80211_is_action(frame_control)) tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED; iwl_dbg_tlv_time_point(&mvm->fwrt, tp, NULL); return; } trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_TX_STATUS); if (!trig) return; status_trig = (void *)trig->data; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { /* don't collect on status 0 */ if (!status_trig->statuses[i].status) break; if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "Tx status %d was received", status & TX_STATUS_MSK); break; } } /* * iwl_mvm_get_scd_ssn - returns the SSN of the SCD * @tx_resp: the Tx response from the fw (agg or non-agg) * * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since * it can't know that everything will go well until the end of the AMPDU, it * can't know in advance the number of MPDUs that will be sent in the current * batch. This is why it writes the agg Tx response while it fetches the MPDUs. * Hence, it can't know in advance what the SSN of the SCD will be at the end * of the batch. This is why the SSN of the SCD is written at the end of the * whole struct at a variable offset. 
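 */

/*
 * Illustrative layout sketch, not driver code (the struct here is
 * invented for the example): the firmware appends one 4-byte status
 * word per transmitted frame and only then the SCD SSN, so the SSN is
 * read by skipping frame_count entries past the start of the status
 * array.
 */
struct sketch_agg_status {
	__le16 status;
	__le16 sequence;
};

static inline u32
sketch_read_scd_ssn(const struct sketch_agg_status *agg_status,
		    u8 frame_count)
{
	/* the 12-bit SSN immediately follows the last status entry */
	return le32_to_cpup((const __le32 *)(agg_status + frame_count)) &
	       0xfff;
}

/*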
This function knows how to cope with the * variable offset and returns the SSN of the SCD. */ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm, struct iwl_mvm_tx_resp *tx_resp) { return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) + tx_resp->frame_count) & 0xfff; } static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct ieee80211_sta *sta; u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); /* struct iwl_mvm_tx_resp_v3 is almost the same */ struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); struct agg_tx_status *agg_status = iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); struct sk_buff_head skbs; u8 skb_freed = 0; u8 lq_color; u16 next_reclaimed, seq_ctl; bool is_ndp = false; __skb_queue_head_init(&skbs); if (iwl_mvm_has_new_tx_api(mvm)) txq_id = le16_to_cpu(tx_resp->tx_queue); seq_ctl = le16_to_cpu(tx_resp->seq_ctl); /* we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; bool flushed = false; skb_freed++; iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* inform mac80211 about what happened with the frame */ switch (status & TX_STATUS_MSK) { case TX_STATUS_SUCCESS: case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_FIFO_FLUSHED: case TX_STATUS_FAIL_DRAIN_FLOW: flushed = true; break; case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status */ IWL_ERR_LIMIT(mvm, "FW reported TX filtered, status=0x%x, FC=0x%x\n", status, le16_to_cpu(hdr->frame_control)); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: break; } if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && ieee80211_is_mgmt(hdr->frame_control)) iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); /* * If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged * before * */ if (skb_freed > 1) info->flags |= IEEE80211_TX_STAT_ACK; iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(mvm->fw, le32_to_cpu(tx_resp->initial_rate), info); /* Don't assign the converted initial_rate, because driver * TLC uses this and doesn't support the new FW rate */ info->status.status_driver_data[1] = (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */ if (ieee80211_is_back_req(hdr->frame_control)) seq_ctl = 0; else if (status != TX_STATUS_SUCCESS) seq_ctl = le16_to_cpu(hdr->seq_ctrl); if (unlikely(!seq_ctl)) { /* * If it is an NDP, we can't update next_reclaim since * its sequence control is 0. 
Note that for that same * reason, NDPs are never sent to A-MPDU'able queues * so that we can never have more than one freed frame * for a single Tx response (see WARN_ON below). */ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) is_ndp = true; } /* * TODO: this is not accurate if we are freeing more than one * packet. */ info->status.tx_time = le16_to_cpu(tx_resp->wireless_media_time); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); info->status.status_driver_data[0] = RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); ieee80211_tx_status(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use * the ssn since: ssn = wifi seq_num % 256. * The seq_ctl is the sequence control of the packet to which * this Tx response relates. But if there is a hole in the * bitmap of the BA we received, this Tx response may allow us to * reclaim the hole and all the subsequent packets that were * already acked. In that case, seq_ctl != ssn, and the next * packet to be reclaimed will be ssn and not seq_ctl. In that * case, several packets will be reclaimed even if * frame_count = 1. * * The ssn is the index (% 256) of the latest packet that has * been treated (acked / dropped) + 1. */ next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", txq_id, iwl_mvm_get_tx_fail_reason(status), status); IWL_DEBUG_TX_REPLY(mvm, "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n", le32_to_cpu(tx_resp->initial_rate), tx_resp->failure_frame, SEQ_TO_INDEX(sequence), ssn, next_reclaimed, seq_ctl); rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* * sta can't be NULL otherwise it'd mean that the sta has been freed in * the firmware while we still have packets for it in the Tx queues. */ if (WARN_ON_ONCE(!sta)) goto out; if (!IS_ERR(sta)) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); if (sta->wme && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", next_reclaimed); } else { IWL_DEBUG_TX_REPLY(mvm, "NDP - don't update next_reclaimed\n"); } iwl_mvm_check_ratid_empty(mvm, sta, tid); if (mvmsta->sleep_tx_count) { mvmsta->sleep_tx_count--; if (mvmsta->sleep_tx_count && !iwl_mvm_tid_queued(mvm, tid_data)) { /* * The number of frames in the queue * dropped to 0 even if we sent fewer * frames than we thought we had on the * Tx queue. * This means we had holes in the BA * window that we just filled, ask * mac80211 to send EOSP since the * firmware won't know how to do that. * Send NDP and the firmware will send * EOSP notification that will trigger * a call to ieee80211_sta_eosp().
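 */

/*
 * Illustrative sketch, not driver code (names are invented): the
 * decision described above.  If frames are still owed in the U-APSD
 * service period but the TID queue has already drained, the holes in
 * the BA window were filled early and the firmware must be asked to
 * emit the EOSP itself via a nullfunc frame.
 */
static inline bool
sketch_need_eosp_ndp(unsigned int sleep_tx_count_left, bool tid_queue_empty)
{
	return sleep_tx_count_left > 0 && tid_queue_empty;
}

/*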
*/ send_eosp_ndp = true; } } spin_unlock_bh(&mvmsta->lock); if (send_eosp_ndp) { iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, IEEE80211_FRAME_RELEASE_UAPSD, 1, tid, false, false); mvmsta->sleep_tx_count = 0; ieee80211_send_eosp_nullfunc(sta, tid); } } if (mvmsta->next_status_eosp) { mvmsta->next_status_eosp = false; ieee80211_sta_eosp(sta); } } out: rcu_read_unlock(); } #ifdef CONFIG_IWLWIFI_DEBUG #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x static const char *iwl_get_agg_tx_status(u16 status) { switch (status & AGG_TX_STATE_STATUS_MSK) { AGG_TX_STATE_(TRANSMITTED); AGG_TX_STATE_(UNDERRUN); AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); AGG_TX_STATE_(TEST_BAD_CRC32); AGG_TX_STATE_(RESPONSE); AGG_TX_STATE_(DUMP_TX); AGG_TX_STATE_(DELAY_TX); } return "UNKNOWN"; } static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; struct agg_tx_status *frame_status = iwl_mvm_get_agg_status(mvm, tx_resp); int i; bool tirgger_timepoint = false; for (i = 0; i < tx_resp->frame_count; i++) { u16 fstatus = le16_to_cpu(frame_status[i].status); /* In case one frame wasn't transmitted trigger time point */ tirgger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) != AGG_TX_STATE_TRANSMITTED); IWL_DEBUG_TX_REPLY(mvm, "status %s (0x%04x), try-count (%d) seq (0x%x)\n", iwl_get_agg_tx_status(fstatus), fstatus & AGG_TX_STATE_STATUS_MSK, (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS, le16_to_cpu(frame_status[i].sequence)); } if (tirgger_timepoint) iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_TX_FAILED, NULL); } #else static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) {} #endif /* CONFIG_IWLWIFI_DEBUG */ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid); int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid); u16 sequence = le16_to_cpu(pkt->hdr.sequence); struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); struct ieee80211_sta *sta; if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(!sta || !sta->wme)) { rcu_read_unlock(); return; } if (!WARN_ON_ONCE(!mvmsta)) { mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); } void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; if (tx_resp->frame_count == 1) iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); } static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, int txq, int index, struct ieee80211_tx_info *tx_info, u32 rate, bool is_flush) { struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data = NULL; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta = 
NULL; struct sk_buff *skb; int freed; if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || tid > IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(!sta)) { rcu_read_unlock(); return; } __skb_queue_head_init(&reclaimed_skbs); /* * Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). */ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); memset(&info->status, 0, sizeof(info->status)); /* Packet was transmitted successfully, failures come as single * frames because before failing a frame the firmware transmits * it without aggregation at least once. */ if (!is_flush) info->flags |= IEEE80211_TX_STAT_ACK; } /* * It's possible to get a BA response after invalidating the rcu (rcu is * invalidated in order to prevent new Tx from being sent, but there may * be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. */ if (IS_ERR(sta)) goto out; mvmsta = iwl_mvm_sta_from_mac80211(sta); tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id != txq) { IWL_ERR(mvm, "invalid reclaim request: Q %d, tid %d\n", tid_data->txq_id, tid); rcu_read_unlock(); return; } spin_lock_bh(&mvmsta->lock); tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); freed = 0; /* pack lq color from tid_data along the reduced txp */ tx_info->status.status_driver_data[0] = RS_DRV_DATA_PACK(tid_data->lq_color, tx_info->status.status_driver_data[0]); tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; skb_queue_walk(&reclaimed_skbs, skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (!is_flush) { if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT); } /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ if (freed == 1) { info->flags |= IEEE80211_TX_STAT_AMPDU; memcpy(&info->status, &tx_info->status, sizeof(tx_info->status)); iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); } } spin_unlock_bh(&mvmsta->lock); /* We got a BA notif with 0 acked or scd_ssn didn't progress which is * possible (i.e. first MPDU in the aggregation wasn't acked) * Still it's important to update RS about sent vs. acked. */ if (!is_flush && skb_queue_empty(&reclaimed_skbs)) { struct ieee80211_chanctx_conf *chanctx_conf = NULL; if (mvmsta->vif) chanctx_conf = rcu_dereference(mvmsta->vif->chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) goto out; tx_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); if (!iwl_mvm_has_tlc_offload(mvm)) { IWL_DEBUG_TX_REPLY(mvm, "No reclaim. 
Update rs directly\n"); iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); } } out: rcu_read_unlock(); while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } } void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); int sta_id, tid, txq, index; struct ieee80211_tx_info ba_info = {}; struct iwl_mvm_ba_notif *ba_notif; struct iwl_mvm_tid_data *tid_data; struct iwl_mvm_sta *mvmsta; ba_info.flags = IEEE80211_TX_STAT_AMPDU; if (iwl_mvm_has_new_tx_api(mvm)) { struct iwl_mvm_compressed_ba_notif *ba_res = (void *)pkt->data; u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); u16 tfd_cnt; int i; if (unlikely(sizeof(*ba_res) > pkt_len)) return; sta_id = ba_res->sta_id; ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); ba_info.status.tx_time = (u16)le32_to_cpu(ba_res->wireless_time); ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_res->reduced_txp; tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len) return; rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); /* * It's possible to get a BA response after invalidating the rcu * (rcu is invalidated in order to prevent new Tx from being * sent, but there may be some frames already in-flight). * In this case we just want to reclaim, and could skip all the * sta-dependent stuff since it's in the middle of being removed * anyways. */ /* Free per TID */ for (i = 0; i < tfd_cnt; i++) { struct iwl_mvm_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; tid = ba_tfd->tid; if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; if (mvmsta) mvmsta->tid_data[i].lq_color = lq_color; iwl_mvm_tx_reclaim(mvm, sta_id, tid, (int)(le16_to_cpu(ba_tfd->q_num)), le16_to_cpu(ba_tfd->tfd_index), &ba_info, le32_to_cpu(ba_res->tx_rate), false); } if (mvmsta) iwl_mvm_tx_airtime(mvm, mvmsta, le32_to_cpu(ba_res->wireless_time)); rcu_read_unlock(); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n", sta_id, le32_to_cpu(ba_res->flags), le16_to_cpu(ba_res->txed), le16_to_cpu(ba_res->done)); return; } ba_notif = (void *)pkt->data; sta_id = ba_notif->sta_id; tid = ba_notif->tid; /* "flow" corresponds to Tx queue */ txq = le16_to_cpu(ba_notif->scd_flow); /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ index = le16_to_cpu(ba_notif->scd_ssn); rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); if (WARN_ON_ONCE(!mvmsta)) { rcu_read_unlock(); return; } tid_data = &mvmsta->tid_data[tid]; ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; ba_info.status.ampdu_len = ba_notif->txed; ba_info.status.tx_time = tid_data->tx_time; ba_info.status.status_driver_data[0] = (void *)(uintptr_t)ba_notif->reduced_txp; rcu_read_unlock(); iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info, tid_data->rate_n_flags, false); IWL_DEBUG_TX_REPLY(mvm, "BA_NOTIFICATION Received from %pM, sta_id = %d\n", ba_notif->sta_addr, ba_notif->sta_id); IWL_DEBUG_TX_REPLY(mvm, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n", ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), le64_to_cpu(ba_notif->bitmap), txq, index, ba_notif->txed, ba_notif->txed_2_done); IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", 
ba_notif->reduced_txp); } /* * Note that there are transports that buffer frames before they reach * the firmware. This means that after flush_tx_path is called, the * queue might not be empty. The race-free way to handle this is to: * 1) set the station as draining * 2) flush the Tx path * 3) wait for the transport queues to be empty */ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk) { int ret; struct iwl_tx_path_flush_cmd_v1 flush_cmd = { .queues_ctl = cpu_to_le32(tfd_msk), .flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH), }; WARN_ON(iwl_mvm_has_new_tx_api(mvm)); ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0, sizeof(flush_cmd), &flush_cmd); if (ret) IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids) { int ret; struct iwl_tx_path_flush_cmd_rsp *rsp; struct iwl_tx_path_flush_cmd flush_cmd = { .sta_id = cpu_to_le32(sta_id), .tid_mask = cpu_to_le16(tids), }; struct iwl_host_cmd cmd = { .id = TXPATH_FLUSH, .len = { sizeof(flush_cmd), }, .data = { &flush_cmd, }, }; WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) cmd.flags |= CMD_WANT_SKB; IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n", sta_id, tids); ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret); return ret; } if (cmd.flags & CMD_WANT_SKB) { int i; int num_flushed_queues; if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) { ret = -EIO; goto free_rsp; } rsp = (void *)cmd.resp_pkt->data; if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id, "sta_id %d != rsp_sta_id %d", sta_id, le16_to_cpu(rsp->sta_id))) { ret = -EIO; goto free_rsp; } num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP, "num_flushed_queues %d", num_flushed_queues)) { ret = -EIO; goto free_rsp; } for (i = 0; i < num_flushed_queues; i++) { struct ieee80211_tx_info tx_info = {}; struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; int tid = le16_to_cpu(queue_info->tid); int read_before = le16_to_cpu(queue_info->read_before_flush); int read_after = le16_to_cpu(queue_info->read_after_flush); int queue_num = le16_to_cpu(queue_info->queue_num); if (tid == IWL_MGMT_TID) tid = IWL_MAX_TID_COUNT; IWL_DEBUG_TX_QUEUES(mvm, "tid %d queue_id %d read-before %d read-after %d\n", tid, queue_num, read_before, read_after); iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after, &tx_info, 0, true); } free_rsp: iwl_free_resp(&cmd); } return ret; } int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) { struct iwl_mvm_int_sta *int_sta = sta; struct iwl_mvm_sta *mvm_sta = sta; BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != offsetof(struct iwl_mvm_sta, sta_id)); if (iwl_mvm_has_new_tx_api(mvm)) return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff); if (internal) return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk); return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk); } diff --git a/sys/contrib/dev/rtw88/bf.c b/sys/contrib/dev/rtw88/bf.c index a9044788b455..d207fa5c122f 100644 --- a/sys/contrib/dev/rtw88/bf.c +++ b/sys/contrib/dev/rtw88/bf.c @@ -1,414 +1,414 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation. 
*/ #include "main.h" #include "reg.h" #include "bf.h" #include "debug.h" void rtw_bf_disassoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_bfee *bfee = &rtwvif->bfee; struct rtw_bf_info *bfinfo = &rtwdev->bf_info; if (bfee->role == RTW_BFEE_NONE) return; if (bfee->role == RTW_BFEE_MU) bfinfo->bfer_mu_cnt--; else if (bfee->role == RTW_BFEE_SU) bfinfo->bfer_su_cnt--; rtw_chip_config_bfee(rtwdev, rtwvif, bfee, false); bfee->role = RTW_BFEE_NONE; } void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { struct ieee80211_hw *hw = rtwdev->hw; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_bfee *bfee = &rtwvif->bfee; struct rtw_bf_info *bfinfo = &rtwdev->bf_info; struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_sta *sta; struct ieee80211_sta_vht_cap *vht_cap; struct ieee80211_sta_vht_cap *ic_vht_cap; const u8 *bssid = bss_conf->bssid; u32 sound_dim; u8 i; if (!(chip->band & RTW_BAND_5G)) return; rcu_read_lock(); sta = ieee80211_find_sta(vif, bssid); if (!sta) { #if defined(__linux__) rtw_warn(rtwdev, "failed to find station entry for bss %pM\n", bssid); #elif defined(__FreeBSD__) rtw_warn(rtwdev, "failed to find station entry for bss %6D\n", bssid, ":"); #endif goto out_unlock; } ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap; - vht_cap = &sta->vht_cap; + vht_cap = &sta->deflink.vht_cap; if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && (vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) { rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n"); goto out_unlock; } ether_addr_copy(bfee->mac_addr, bssid); bfee->role = RTW_BFEE_MU; bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7); bfee->aid = bss_conf->aid; bfinfo->bfer_mu_cnt++; rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true); } else if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) && (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) { rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n"); goto out_unlock; } sound_dim = vht_cap->cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; ether_addr_copy(bfee->mac_addr, bssid); bfee->role = RTW_BFEE_SU; bfee->sound_dim = (u8)sound_dim; bfee->g_id = 0; bfee->p_aid = (bssid[5] << 1) | (bssid[4] >> 7); bfinfo->bfer_su_cnt++; for (i = 0; i < chip->bfer_su_max_num; i++) { if (!test_bit(i, bfinfo->bfer_su_reg_maping)) { set_bit(i, bfinfo->bfer_su_reg_maping); bfee->su_reg_index = i; break; } } rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true); } out_unlock: rcu_read_unlock(); } void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev, struct mu_bfer_init_para *param) { u16 mu_bf_ctl = 0; u8 *addr = param->bfer_address; int i; for (i = 0; i < ETH_ALEN; i++) rtw_write8(rtwdev, REG_ASSOCIATED_BFMER0_INFO + i, addr[i]); rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 6, param->paid); rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, param->csi_para); mu_bf_ctl = rtw_read16(rtwdev, REG_WMAC_MU_BF_CTL) & 0xC000; mu_bf_ctl |= param->my_aid | (param->csi_length_sel << 12); rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, mu_bf_ctl); } void rtw_bf_cfg_sounding(struct rtw_dev *rtwdev, struct rtw_vif *vif, enum rtw_trx_desc_rate rate) { u32 psf_ctl = 0; u8 csi_rsc = 0x1; psf_ctl = rtw_read32(rtwdev, REG_BBPSF_CTRL) | 
BIT_WMAC_USE_NDPARATE | (csi_rsc << 13); rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, RTW_SND_CTRL_SOUNDING); rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, 0x26); rtw_write8_clr(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF_REPORT_POLL); rtw_write8_clr(rtwdev, REG_RXFLTMAP4, BIT_RXFLTMAP4_BF_REPORT_POLL); if (vif->net_type == RTW_NET_AP_MODE) rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl | BIT(12)); else rtw_write32(rtwdev, REG_BBPSF_CTRL, psf_ctl & ~BIT(12)); } void rtw_bf_cfg_mu_bfee(struct rtw_dev *rtwdev, struct cfg_mumimo_para *param) { u8 mu_tbl_sel; u8 mu_valid; mu_valid = rtw_read8(rtwdev, REG_MU_TX_CTL) & ~BIT_MASK_R_MU_TABLE_VALID; rtw_write8(rtwdev, REG_MU_TX_CTL, (mu_valid | BIT(0) | BIT(1)) & ~(BIT(7))); mu_tbl_sel = rtw_read8(rtwdev, REG_MU_TX_CTL + 1) & 0xF8; rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel); rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[0]); rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[0]); rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4, param->given_user_pos[1]); rtw_write8(rtwdev, REG_MU_TX_CTL + 1, mu_tbl_sel | 1); rtw_write32(rtwdev, REG_MU_STA_GID_VLD, param->given_gid_tab[1]); rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO, param->given_user_pos[2]); rtw_write32(rtwdev, REG_MU_STA_USER_POS_INFO + 4, param->given_user_pos[3]); } void rtw_bf_del_bfer_entry_mu(struct rtw_dev *rtwdev) { rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0); rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0); rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0); rtw_write8(rtwdev, REG_MU_TX_CTL, 0); } void rtw_bf_del_sounding(struct rtw_dev *rtwdev) { rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, 0); } void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, struct rtw_bfee *bfee) { u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; u8 nr_index = bfee->sound_dim; u8 grouping = 0, codebookinfo = 1, coefficientsize = 3; u32 addr_bfer_info, addr_csi_rpt, csi_param; u8 i; rtw_dbg(rtwdev, RTW_DBG_BF, "config as an su bfee\n"); switch (bfee->su_reg_index) { case 1: addr_bfer_info = REG_ASSOCIATED_BFMER1_INFO; addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20 + 2; break; case 0: default: addr_bfer_info = REG_ASSOCIATED_BFMER0_INFO; addr_csi_rpt = REG_TX_CSI_RPT_PARAM_BW20; break; } /* Sounding protocol control */ rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, RTW_SND_CTRL_SOUNDING); /* MAC address/Partial AID of Beamformer */ for (i = 0; i < ETH_ALEN; i++) rtw_write8(rtwdev, addr_bfer_info + i, bfee->mac_addr[i]); csi_param = (u16)((coefficientsize << 10) | (codebookinfo << 8) | (grouping << 6) | (nr_index << 3) | nc_index); rtw_write16(rtwdev, addr_csi_rpt, csi_param); /* ndp rx standby timer */ rtw_write8(rtwdev, REG_SND_PTCL_CTRL + 3, RTW_NDP_RX_STANDBY_TIME); } EXPORT_SYMBOL(rtw_bf_enable_bfee_su); /* nc index: 1 2T2R 0 1T1R * nr index: 1 use Nsts 0 use reg setting * codebookinfo: 1 802.11ac 3 802.11n */ void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif, struct rtw_bfee *bfee) { struct rtw_bf_info *bf_info = &rtwdev->bf_info; struct mu_bfer_init_para param; u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; u8 nr_index = 1; u8 grouping = 0, codebookinfo = 1, coefficientsize = 0; u32 csi_param; rtw_dbg(rtwdev, RTW_DBG_BF, "config as an mu bfee\n"); csi_param = (u16)((coefficientsize << 10) | (codebookinfo << 8) | (grouping << 6) | (nr_index << 3) | nc_index); rtw_dbg(rtwdev, RTW_DBG_BF, "nc=%d nr=%d group=%d codebookinfo=%d coefficientsize=%d\n", nc_index, nr_index, grouping, 
codebookinfo, coefficientsize); param.paid = bfee->p_aid; param.csi_para = csi_param; param.my_aid = bfee->aid & 0xfff; param.csi_length_sel = HAL_CSI_SEG_4K; ether_addr_copy(param.bfer_address, bfee->mac_addr); rtw_bf_init_bfer_entry_mu(rtwdev, &param); bf_info->cur_csi_rpt_rate = DESC_RATE6M; rtw_bf_cfg_sounding(rtwdev, vif, DESC_RATE6M); /* accept action_no_ack */ rtw_write16_set(rtwdev, REG_RXFLTMAP0, BIT_RXFLTMAP0_ACTIONNOACK); /* accept NDPA and BF report poll */ rtw_write16_set(rtwdev, REG_RXFLTMAP1, BIT_RXFLTMAP1_BF); } EXPORT_SYMBOL(rtw_bf_enable_bfee_mu); void rtw_bf_remove_bfee_su(struct rtw_dev *rtwdev, struct rtw_bfee *bfee) { struct rtw_bf_info *bfinfo = &rtwdev->bf_info; rtw_dbg(rtwdev, RTW_DBG_BF, "remove as a su bfee\n"); rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, RTW_SND_CTRL_REMOVE); switch (bfee->su_reg_index) { case 0: rtw_write32(rtwdev, REG_ASSOCIATED_BFMER0_INFO, 0); rtw_write16(rtwdev, REG_ASSOCIATED_BFMER0_INFO + 4, 0); rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20, 0); break; case 1: rtw_write32(rtwdev, REG_ASSOCIATED_BFMER1_INFO, 0); rtw_write16(rtwdev, REG_ASSOCIATED_BFMER1_INFO + 4, 0); rtw_write16(rtwdev, REG_TX_CSI_RPT_PARAM_BW20 + 2, 0); break; } clear_bit(bfee->su_reg_index, bfinfo->bfer_su_reg_maping); bfee->su_reg_index = 0xFF; } EXPORT_SYMBOL(rtw_bf_remove_bfee_su); void rtw_bf_remove_bfee_mu(struct rtw_dev *rtwdev, struct rtw_bfee *bfee) { struct rtw_bf_info *bfinfo = &rtwdev->bf_info; rtw_write8_mask(rtwdev, REG_SND_PTCL_CTRL, BIT_MASK_BEAMFORM, RTW_SND_CTRL_REMOVE); rtw_bf_del_bfer_entry_mu(rtwdev); if (bfinfo->bfer_su_cnt == 0 && bfinfo->bfer_mu_cnt == 0) rtw_bf_del_sounding(rtwdev); } EXPORT_SYMBOL(rtw_bf_remove_bfee_mu); void rtw_bf_set_gid_table(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_bfee *bfee = &rtwvif->bfee; struct cfg_mumimo_para param; if (bfee->role != RTW_BFEE_MU) { rtw_dbg(rtwdev, RTW_DBG_BF, "this vif is not mu bfee\n"); return; } param.grouping_bitmap = 0; param.mu_tx_en = 0; memset(param.sounding_sts, 0, 6); memcpy(param.given_gid_tab, conf->mu_group.membership, 8); memcpy(param.given_user_pos, conf->mu_group.position, 16); rtw_dbg(rtwdev, RTW_DBG_BF, "STA0: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n", param.given_gid_tab[0], param.given_user_pos[0], param.given_user_pos[1]); rtw_dbg(rtwdev, RTW_DBG_BF, "STA1: gid_valid=0x%x, user_position_l=0x%x, user_position_h=0x%x\n", param.given_gid_tab[1], param.given_user_pos[2], param.given_user_pos[3]); rtw_bf_cfg_mu_bfee(rtwdev, &param); } EXPORT_SYMBOL(rtw_bf_set_gid_table); void rtw_bf_phy_init(struct rtw_dev *rtwdev) { u8 tmp8; u32 tmp32; u8 retry_limit = 0xA; u8 ndpa_rate = 0x10; u8 ack_policy = 3; tmp32 = rtw_read32(rtwdev, REG_MU_TX_CTL); /* Enable P1 aggr new packet according to P0 transfer time */ tmp32 |= BIT_MU_P1_WAIT_STATE_EN; /* MU Retry Limit */ tmp32 &= ~BIT_MASK_R_MU_RL; tmp32 |= (retry_limit << BIT_SHIFT_R_MU_RL) & BIT_MASK_R_MU_RL; /* Disable Tx MU-MIMO until sounding done */ tmp32 &= ~BIT_EN_MU_MIMO; /* Clear validity of MU STAs */ tmp32 &= ~BIT_MASK_R_MU_TABLE_VALID; rtw_write32(rtwdev, REG_MU_TX_CTL, tmp32); /* MU-MIMO Option as default value */ tmp8 = ack_policy << BIT_SHIFT_WMAC_TXMU_ACKPOLICY; tmp8 |= BIT_WMAC_TXMU_ACKPOLICY_EN; rtw_write8(rtwdev, REG_WMAC_MU_BF_OPTION, tmp8); /* MU-MIMO Control as default value */ rtw_write16(rtwdev, REG_WMAC_MU_BF_CTL, 0); /* Set MU NDPA rate & BW source */ rtw_write32_set(rtwdev,
REG_TXBF_CTRL, BIT_USE_NDPA_PARAMETER); /* Set NDPA Rate */ rtw_write8(rtwdev, REG_NDPA_OPT_CTRL, ndpa_rate); rtw_write32_mask(rtwdev, REG_BBPSF_CTRL, BIT_MASK_CSI_RATE, DESC_RATE6M); } EXPORT_SYMBOL(rtw_bf_phy_init); void rtw_bf_cfg_csi_rate(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate, u8 fixrate_en, u8 *new_rate) { u32 csi_cfg; u16 cur_rrsr; csi_cfg = rtw_read32(rtwdev, REG_BBPSF_CTRL) & ~BIT_MASK_CSI_RATE; cur_rrsr = rtw_read16(rtwdev, REG_RRSR); if (rssi >= 40) { if (cur_rate != DESC_RATE54M) { cur_rrsr |= BIT(DESC_RATE54M); csi_cfg |= (DESC_RATE54M & BIT_MASK_CSI_RATE_VAL) << BIT_SHIFT_CSI_RATE; rtw_write16(rtwdev, REG_RRSR, cur_rrsr); rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg); } *new_rate = DESC_RATE54M; } else { if (cur_rate != DESC_RATE24M) { cur_rrsr &= ~BIT(DESC_RATE54M); csi_cfg |= (DESC_RATE24M & BIT_MASK_CSI_RATE_VAL) << BIT_SHIFT_CSI_RATE; rtw_write16(rtwdev, REG_RRSR, cur_rrsr); rtw_write32(rtwdev, REG_BBPSF_CTRL, csi_cfg); } *new_rate = DESC_RATE24M; } } EXPORT_SYMBOL(rtw_bf_cfg_csi_rate); diff --git a/sys/contrib/dev/rtw88/main.c b/sys/contrib/dev/rtw88/main.c index 317e3ec310c8..2f397ace02b3 100644 --- a/sys/contrib/dev/rtw88/main.c +++ b/sys/contrib/dev/rtw88/main.c @@ -1,2122 +1,2122 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX rtw88_ #endif #include <linux/module.h> #include "main.h" #include "regd.h" #include "fw.h" #include "ps.h" #include "sec.h" #include "mac.h" #include "coex.h" #include "phy.h" #include "reg.h" #include "efuse.h" #include "tx.h" #include "debug.h" #include "bf.h" #include "sar.h" bool rtw_disable_lps_deep_mode; EXPORT_SYMBOL(rtw_disable_lps_deep_mode); bool rtw_bf_support = true; unsigned int rtw_debug_mask; EXPORT_SYMBOL(rtw_debug_mask); /* EDCCA is enabled during normal behavior. For debugging purposes in * a noisy environment, it can be disabled via the edcca debugfs entry. * Because all rtw88 devices will probably be affected if the environment * is noisy, rtw_edcca_enabled is declared by the driver instead of per * device. So, turning it off takes effect for all rtw88 devices until * there is a compelling reason to maintain rtw_edcca_enabled per device.
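 * In other words the knob is module-global: clearing rtw_edcca_enabled
 * disables EDCCA for every rtw88 device in the system at once.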
*/ bool rtw_edcca_enabled = true; module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); module_param_named(support_bf, rtw_bf_support, bool, 0644); module_param_named(debug_mask, rtw_debug_mask, uint, 0644); MODULE_PARM_DESC(disable_lps_deep, "Set Y to disable Deep PS"); MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support"); MODULE_PARM_DESC(debug_mask, "Debugging mask"); static struct ieee80211_channel rtw_channeltable_2g[] = { {.center_freq = 2412, .hw_value = 1,}, {.center_freq = 2417, .hw_value = 2,}, {.center_freq = 2422, .hw_value = 3,}, {.center_freq = 2427, .hw_value = 4,}, {.center_freq = 2432, .hw_value = 5,}, {.center_freq = 2437, .hw_value = 6,}, {.center_freq = 2442, .hw_value = 7,}, {.center_freq = 2447, .hw_value = 8,}, {.center_freq = 2452, .hw_value = 9,}, {.center_freq = 2457, .hw_value = 10,}, {.center_freq = 2462, .hw_value = 11,}, {.center_freq = 2467, .hw_value = 12,}, {.center_freq = 2472, .hw_value = 13,}, {.center_freq = 2484, .hw_value = 14,}, }; static struct ieee80211_channel rtw_channeltable_5g[] = { {.center_freq = 5180, .hw_value = 36,}, {.center_freq = 5200, .hw_value = 40,}, {.center_freq = 5220, .hw_value = 44,}, {.center_freq = 5240, .hw_value = 48,}, {.center_freq = 5260, .hw_value = 52,}, {.center_freq = 5280, .hw_value = 56,}, {.center_freq = 5300, .hw_value = 60,}, {.center_freq = 5320, .hw_value = 64,}, {.center_freq = 5500, .hw_value = 100,}, {.center_freq = 5520, .hw_value = 104,}, {.center_freq = 5540, .hw_value = 108,}, {.center_freq = 5560, .hw_value = 112,}, {.center_freq = 5580, .hw_value = 116,}, {.center_freq = 5600, .hw_value = 120,}, {.center_freq = 5620, .hw_value = 124,}, {.center_freq = 5640, .hw_value = 128,}, {.center_freq = 5660, .hw_value = 132,}, {.center_freq = 5680, .hw_value = 136,}, {.center_freq = 5700, .hw_value = 140,}, {.center_freq = 5720, .hw_value = 144,}, {.center_freq = 5745, .hw_value = 149,}, {.center_freq = 5765, .hw_value = 153,}, {.center_freq = 5785, .hw_value = 157,}, {.center_freq = 5805, .hw_value = 161,}, {.center_freq = 5825, .hw_value = 165, .flags = IEEE80211_CHAN_NO_HT40MINUS}, }; static struct ieee80211_rate rtw_ratetable[] = { {.bitrate = 10, .hw_value = 0x00,}, {.bitrate = 20, .hw_value = 0x01,}, {.bitrate = 55, .hw_value = 0x02,}, {.bitrate = 110, .hw_value = 0x03,}, {.bitrate = 60, .hw_value = 0x04,}, {.bitrate = 90, .hw_value = 0x05,}, {.bitrate = 120, .hw_value = 0x06,}, {.bitrate = 180, .hw_value = 0x07,}, {.bitrate = 240, .hw_value = 0x08,}, {.bitrate = 360, .hw_value = 0x09,}, {.bitrate = 480, .hw_value = 0x0a,}, {.bitrate = 540, .hw_value = 0x0b,}, }; u16 rtw_desc_to_bitrate(u8 desc_rate) { struct ieee80211_rate rate; if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n")) return 0; rate = rtw_ratetable[desc_rate]; return rate.bitrate; } static struct ieee80211_supported_band rtw_band_2ghz = { .band = NL80211_BAND_2GHZ, .channels = rtw_channeltable_2g, .n_channels = ARRAY_SIZE(rtw_channeltable_2g), .bitrates = rtw_ratetable, .n_bitrates = ARRAY_SIZE(rtw_ratetable), .ht_cap = {0}, .vht_cap = {0}, }; static struct ieee80211_supported_band rtw_band_5ghz = { .band = NL80211_BAND_5GHZ, .channels = rtw_channeltable_5g, .n_channels = ARRAY_SIZE(rtw_channeltable_5g), /* 5G has no CCK rates */ .bitrates = rtw_ratetable + 4, .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4, .ht_cap = {0}, .vht_cap = {0}, }; struct rtw_watch_dog_iter_data { struct rtw_dev *rtwdev; struct rtw_vif *rtwvif; }; static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct 
rtw_vif *rtwvif) { struct rtw_bf_info *bf_info = &rtwdev->bf_info; u8 fix_rate_enable = 0; u8 new_csi_rate_idx; if (rtwvif->bfee.role != RTW_BFEE_SU && rtwvif->bfee.role != RTW_BFEE_MU) return; rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi, bf_info->cur_csi_rpt_rate, fix_rate_enable, &new_csi_rate_idx); if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate) bf_info->cur_csi_rpt_rate = new_csi_rate_idx; } static void rtw_vif_watch_dog_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_watch_dog_iter_data *iter_data = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; if (vif->type == NL80211_IFTYPE_STATION) if (vif->bss_conf.assoc) iter_data->rtwvif = rtwvif; rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif); rtwvif->stats.tx_unicast = 0; rtwvif->stats.rx_unicast = 0; rtwvif->stats.tx_cnt = 0; rtwvif->stats.rx_cnt = 0; } /* process TX/RX statistics periodically for hardware, * the information helps hardware to enhance performance */ static void rtw_watch_dog_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, watch_dog_work.work); struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_watch_dog_iter_data data = {}; bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); bool ps_active; mutex_lock(&rtwdev->mutex); if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) goto unlock; ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100) set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); else clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) rtw_coex_wl_status_change_notify(rtwdev, 0); if (stats->tx_cnt > RTW_LPS_THRESHOLD || stats->rx_cnt > RTW_LPS_THRESHOLD) ps_active = true; else ps_active = false; ewma_tp_add(&stats->tx_ewma_tp, (u32)(stats->tx_unicast >> RTW_TP_SHIFT)); ewma_tp_add(&stats->rx_ewma_tp, (u32)(stats->rx_unicast >> RTW_TP_SHIFT)); stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); /* reset tx/rx statistics */ stats->tx_unicast = 0; stats->rx_unicast = 0; stats->tx_cnt = 0; stats->rx_cnt = 0; if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) goto unlock; /* make sure BB/RF is working for dynamic mech */ rtw_leave_lps(rtwdev); rtw_phy_dynamic_mechanism(rtwdev); data.rtwdev = rtwdev; /* use atomic version to avoid taking local->iflist_mtx mutex */ rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data); /* fw supports entering lps with only one station associated; if more * than one station is associated, we can not enter lps because fw * does not handle the overlapped beacon intervals * * mac80211 should iterate vifs and determine whether the driver can * enter ps by passing IEEE80211_CONF_PS to us; all we need to do is * get that vif and check whether the device has more traffic than the * threshold.
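 *
 * Concretely, the check below enters LPS only when PS is enabled, the
 * iterator found an associated station-mode vif, traffic stayed under
 * RTW_LPS_THRESHOLD in both directions (!ps_active), and no beacon loss
 * has been reported.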
*/ if (rtwdev->ps_enabled && data.rtwvif && !ps_active && !rtwdev->beacon_loss) rtw_enter_lps(rtwdev, data.rtwvif->port); rtwdev->watch_dog_cnt++; unlock: mutex_unlock(&rtwdev->mutex); } static void rtw_c2h_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work); struct sk_buff *skb, *tmp; skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { skb_unlink(skb, &rtwdev->c2h_queue); rtw_fw_c2h_cmd_handle(rtwdev, skb); dev_kfree_skb_any(skb); } } static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) { unsigned long mac_id; mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); if (mac_id < RTW_MAX_MAC_ID_NUM) set_bit(mac_id, rtwdev->mac_id_map); return mac_id; } int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; si->mac_id = rtw_acquire_macid(rtwdev); if (si->mac_id >= RTW_MAX_MAC_ID_NUM) return -ENOSPC; si->sta = sta; si->vif = vif; si->init_ra_lv = 1; ewma_rssi_init(&si->avg_rssi); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); rtw_update_sta_info(rtwdev, si); rtw_fw_media_status_report(rtwdev, si->mac_id, true); rtwdev->sta_cnt++; rtwdev->beacon_loss = false; #if defined(__linux__) rtw_info(rtwdev, "sta %pM joined with macid %d\n", sta->addr, si->mac_id); #elif defined(__FreeBSD__) rtw_info(rtwdev, "sta %6D joined with macid %d\n", sta->addr, ":", si->mac_id); #endif return 0; } void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_cleanup(rtwdev, sta->txq[i]); kfree(si->mask); rtwdev->sta_cnt--; #if defined(__linux__) rtw_info(rtwdev, "sta %pM with macid %d left\n", sta->addr, si->mac_id); #elif defined(__FreeBSD__) rtw_info(rtwdev, "sta %6D with macid %d left\n", sta->addr, ":", si->mac_id); #endif } struct rtw_fwcd_hdr { u32 item; u32 size; u32 padding1; u32 padding2; } __packed; static int rtw_fwcd_prep(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; const struct rtw_fwcd_segs *segs = chip->fwcd_segs; u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr); u8 i; if (segs) { prep_size += segs->num * sizeof(struct rtw_fwcd_hdr); for (i = 0; i < segs->num; i++) prep_size += segs->segs[i]; } desc->data = vmalloc(prep_size); if (!desc->data) return -ENOMEM; desc->size = prep_size; desc->next = desc->data; return 0; } static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; struct rtw_fwcd_hdr *hdr; u8 *next; if (!desc->data) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n"); return NULL; } next = desc->next + sizeof(struct rtw_fwcd_hdr); if (next - desc->data + size > desc->size) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n"); return NULL; } hdr = (struct rtw_fwcd_hdr *)(desc->next); hdr->item = item; hdr->size = size; hdr->padding1 = 0x01234567; hdr->padding2 = 0x89abcdef; desc->next = next + size; return next; } static void rtw_fwcd_dump(struct rtw_dev *rtwdev) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n"); /* Data will be freed after lifetime of device coredump. 
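 * (When the dump is handed over successfully, rtw_fwcd_free() is later
 * called with free_self == false and only clears the driver's pointers.)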
After calling * dev_coredump, data is supposed to be handled by the device coredump * framework. Note that a new dump will be discarded if a previous one * hasn't been released yet. */ dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL); } static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; if (free_self) { rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n"); vfree(desc->data); } desc->data = NULL; desc->next = NULL; } static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) { u32 size = rtwdev->chip->fw_rxff_size; u32 *buf; u8 seq; buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size); if (!buf) return -ENOMEM; if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) { rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n"); return -EINVAL; } if (GET_FW_DUMP_LEN(buf) == 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n"); return -EINVAL; } seq = GET_FW_DUMP_SEQ(buf); if (seq > 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's seq is wrong: %d\n", seq); return -EINVAL; } return 0; } int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, u32 fwcd_item) { u32 rxff = rtwdev->chip->fw_rxff_size; u32 dump_size, done_size = 0; u8 *buf; int ret; buf = rtw_fwcd_next(rtwdev, fwcd_item, size); if (!buf) return -ENOMEM; while (size) { dump_size = size > rxff ? rxff : size; ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, dump_size); if (ret) { rtw_err(rtwdev, "ddma fw 0x%x [+0x%x] to fw fifo fail\n", ocp_src, done_size); return ret; } ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, dump_size, (u32 *)(buf + done_size)); if (ret) { rtw_err(rtwdev, "dump fw 0x%x [+0x%x] from fw fifo fail\n", ocp_src, done_size); return ret; } size -= dump_size; done_size += dump_size; } return 0; } EXPORT_SYMBOL(rtw_dump_fw); int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size) { u8 *buf; u32 i; if (addr & 0x3) { WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); return -EINVAL; } buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size); if (!buf) return -ENOMEM; for (i = 0; i < size; i += 4) *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); return 0; } EXPORT_SYMBOL(rtw_dump_reg); void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf) { if (conf && conf->assoc) { rtwvif->aid = conf->aid; rtwvif->net_type = RTW_NET_MGD_LINKED; } else { rtwvif->aid = 0; rtwvif->net_type = RTW_NET_NO_LINK; } } static void rtw_reset_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_sec_desc *sec = &rtwdev->sec; rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); } static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; if (rtwdev->sta_cnt == 0) { rtw_warn(rtwdev, "sta count before reset should not be 0\n"); return; } rtw_sta_remove(rtwdev, sta, false); } static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; rtw_bf_disassoc(rtwdev, vif, NULL); rtw_vif_assoc_changed(rtwvif, NULL); rtw_txq_cleanup(rtwdev, vif->txq); } void rtw_fw_recovery(struct rtw_dev *rtwdev) { if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); } static void __fw_recovery_work(struct rtw_dev *rtwdev) 
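/* Summary of the recovery flow implemented below: dump the fw crash log
 * and chip state into a devcoredump, tear down all keys, stations and
 * vifs, then enter IPS; the surrounding work handler afterwards asks
 * mac80211 to restart the hardware via ieee80211_restart_hw().
 */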
{ int ret = 0; set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); ret = rtw_fwcd_prep(rtwdev); if (ret) goto free; ret = rtw_fw_dump_crash_log(rtwdev); if (ret) goto free; ret = rtw_chip_dump_fw_crash(rtwdev); if (ret) goto free; rtw_fwcd_dump(rtwdev); free: rtw_fwcd_free(rtwdev, !!ret); rtw_write8(rtwdev, REG_MCU_TST_CFG, 0); WARN(1, "firmware crash, start reset and recover\n"); rcu_read_lock(); rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); rcu_read_unlock(); rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); rtw_enter_ips(rtwdev); } static void rtw_fw_recovery_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, fw_recovery_work); mutex_lock(&rtwdev->mutex); __fw_recovery_work(rtwdev); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); } struct rtw_txq_ba_iter_data { }; static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int ret; u8 tid; tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); while (tid != IEEE80211_NUM_TIDS) { clear_bit(tid, si->tid_ba); ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EINVAL) { struct ieee80211_txq *txq; struct rtw_txq *rtwtxq; txq = sta->txq[tid]; rtwtxq = (struct rtw_txq *)txq->drv_priv; set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags); } tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); } } static void rtw_txq_ba_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work); struct rtw_txq_ba_iter_data data; rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data); } void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) { if (IS_CH_2G_BAND(channel)) pkt_stat->band = NL80211_BAND_2GHZ; else if (IS_CH_5G_BAND(channel)) pkt_stat->band = NL80211_BAND_5GHZ; else return; pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band); } EXPORT_SYMBOL(rtw_set_rx_freq_band); void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *chan_params) { struct ieee80211_channel *channel = chandef->chan; enum nl80211_chan_width width = chandef->width; u8 *cch_by_bw = chan_params->cch_by_bw; u32 primary_freq, center_freq; u8 center_chan; u8 bandwidth = RTW_CHANNEL_WIDTH_20; u8 primary_chan_idx = 0; u8 i; center_chan = channel->hw_value; primary_freq = channel->center_freq; center_freq = chandef->center_freq1; /* assign the center channel used while 20M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value; switch (width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: bandwidth = RTW_CHANNEL_WIDTH_20; primary_chan_idx = RTW_SC_DONT_CARE; break; case NL80211_CHAN_WIDTH_40: bandwidth = RTW_CHANNEL_WIDTH_40; if (primary_freq > center_freq) { primary_chan_idx = RTW_SC_20_UPPER; center_chan -= 2; } else { primary_chan_idx = RTW_SC_20_LOWER; center_chan += 2; } break; case NL80211_CHAN_WIDTH_80: bandwidth = RTW_CHANNEL_WIDTH_80; if (primary_freq > center_freq) { if (primary_freq - center_freq == 10) { primary_chan_idx = RTW_SC_20_UPPER; center_chan -= 2; } else { primary_chan_idx = RTW_SC_20_UPMOST; center_chan -= 6; } /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4; } else { if (center_freq - primary_freq == 10) { primary_chan_idx = RTW_SC_20_LOWER; center_chan += 2; } else { primary_chan_idx = 
RTW_SC_20_LOWEST; center_chan += 6; } /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4; } break; default: center_chan = 0; break; } chan_params->center_chan = center_chan; chan_params->bandwidth = bandwidth; chan_params->primary_chan_idx = primary_chan_idx; /* assign the center channel used while current bw is selected */ cch_by_bw[bandwidth] = center_chan; for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++) cch_by_bw[i] = 0; } void rtw_set_channel(struct rtw_dev *rtwdev) { struct ieee80211_hw *hw = rtwdev->hw; struct rtw_hal *hal = &rtwdev->hal; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_channel_params ch_param; u8 center_chan, bandwidth, primary_chan_idx; u8 i; rtw_get_channel_params(&hw->conf.chandef, &ch_param); if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) return; center_chan = ch_param.center_chan; bandwidth = ch_param.bandwidth; primary_chan_idx = ch_param.primary_chan_idx; hal->current_band_width = bandwidth; hal->current_channel = center_chan; hal->current_primary_channel_index = primary_chan_idx; hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G; switch (center_chan) { case 1 ... 14: hal->sar_band = RTW_SAR_BAND_0; break; case 36 ... 64: hal->sar_band = RTW_SAR_BAND_1; break; case 100 ... 144: hal->sar_band = RTW_SAR_BAND_3; break; case 149 ... 177: hal->sar_band = RTW_SAR_BAND_4; break; default: WARN(1, "unknown ch(%u) to SAR band\n", center_chan); hal->sar_band = RTW_SAR_BAND_0; break; } for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++) hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx); if (hal->current_band_type == RTW_BAND_5G) { rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); } else { if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G); else rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN); } rtw_phy_set_tx_power_level(rtwdev, center_chan); /* if the channel isn't set for scanning, we will do RF calibration * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration * during scanning on each channel takes too long. 
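 * rtw_chip_prepare_tx() below consumes need_rfk and performs the
 * deferred phy_calibration exactly once.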
*/ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtwdev->need_rfk = true; } void rtw_chip_prepare_tx(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; if (rtwdev->need_rfk) { rtwdev->need_rfk = false; chip->ops->phy_calibration(rtwdev); } } static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) { int i; for (i = 0; i < ETH_ALEN; i++) rtw_write8(rtwdev, start + i, addr[i]); } void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config) { u32 addr, mask; if (config & PORT_SET_MAC_ADDR) { addr = rtwvif->conf->mac_addr.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr); } if (config & PORT_SET_BSSID) { addr = rtwvif->conf->bssid.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid); } if (config & PORT_SET_NET_TYPE) { addr = rtwvif->conf->net_type.addr; mask = rtwvif->conf->net_type.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type); } if (config & PORT_SET_AID) { addr = rtwvif->conf->aid.addr; mask = rtwvif->conf->aid.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid); } if (config & PORT_SET_BCN_CTRL) { addr = rtwvif->conf->bcn_ctrl.addr; mask = rtwvif->conf->bcn_ctrl.mask; rtw_write8_mask(rtwdev, addr, mask, rtwvif->bcn_ctrl); } } static u8 hw_bw_cap_to_bitamp(u8 bw_cap) { u8 bw = 0; switch (bw_cap) { case EFUSE_HW_CAP_IGNORE: case EFUSE_HW_CAP_SUPP_BW80: bw |= BIT(RTW_CHANNEL_WIDTH_80); fallthrough; case EFUSE_HW_CAP_SUPP_BW40: bw |= BIT(RTW_CHANNEL_WIDTH_40); fallthrough; default: bw |= BIT(RTW_CHANNEL_WIDTH_20); break; } return bw; } static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_chip_info *chip = rtwdev->chip; if (hw_ant_num == EFUSE_HW_CAP_IGNORE || hw_ant_num >= hal->rf_path_num) return; switch (hw_ant_num) { case 1: hal->rf_type = RF_1T1R; hal->rf_path_num = 1; if (!chip->fix_rf_phy_num) hal->rf_phy_num = hal->rf_path_num; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; break; default: WARN(1, "invalid hw configuration from efuse\n"); break; } } static u64 get_vht_ra_mask(struct ieee80211_sta *sta) { u64 ra_mask = 0; - u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map); + u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); u8 vht_mcs_cap; int i, nss; /* 4SS, every two bits for MCS7/8/9 */ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) { vht_mcs_cap = mcs_map & 0x3; switch (vht_mcs_cap) { case 2: /* MCS9 */ ra_mask |= 0x3ffULL << nss; break; case 1: /* MCS8 */ ra_mask |= 0x1ffULL << nss; break; case 0: /* MCS7 */ ra_mask |= 0x0ffULL << nss; break; default: break; } } return ra_mask; } static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num) { u8 rate_id = 0; switch (wireless_set) { case WIRELESS_CCK: rate_id = RTW_RATEID_B_20M; break; case WIRELESS_OFDM: rate_id = RTW_RATEID_G; break; case WIRELESS_CCK | WIRELESS_OFDM: rate_id = RTW_RATEID_BG; break; case WIRELESS_OFDM | WIRELESS_HT: if (tx_num == 1) rate_id = RTW_RATEID_GN_N1SS; else if (tx_num == 2) rate_id = RTW_RATEID_GN_N2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT: if (bw_mode == RTW_CHANNEL_WIDTH_40) { if (tx_num == 1) rate_id = RTW_RATEID_BGN_40M_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_BGN_40M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_BGN_20M_1SS; else if (tx_num == 2) rate_id = 
RTW_RATEID_BGN_20M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } break; case WIRELESS_OFDM | WIRELESS_VHT: if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT: if (bw_mode >= RTW_CHANNEL_WIDTH_80) { if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_ARFR2_AC_2G_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR3_AC_2G_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } break; default: break; } return rate_id; } #define RA_MASK_CCK_RATES 0x0000f #define RA_MASK_OFDM_RATES 0x00ff0 #define RA_MASK_HT_RATES_1SS (0xff000ULL << 0) #define RA_MASK_HT_RATES_2SS (0xff000ULL << 8) #define RA_MASK_HT_RATES_3SS (0xff000ULL << 16) #define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \ RA_MASK_HT_RATES_2SS | \ RA_MASK_HT_RATES_3SS) #define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0) #define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10) #define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20) #define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \ RA_MASK_VHT_RATES_2SS | \ RA_MASK_VHT_RATES_3SS) #define RA_MASK_CCK_IN_HT 0x00005 #define RA_MASK_CCK_IN_VHT 0x00005 #define RA_MASK_OFDM_IN_VHT 0x00010 #define RA_MASK_OFDM_IN_HT_2G 0x00010 #define RA_MASK_OFDM_IN_HT_5G 0x00030 static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev, struct rtw_sta_info *si, u64 ra_mask, bool is_vht_enable, u8 wireless_set) { struct rtw_hal *hal = &rtwdev->hal; const struct cfg80211_bitrate_mask *mask = si->mask; u64 cfg_mask = GENMASK_ULL(63, 0); u8 rssi_level, band; if (wireless_set != WIRELESS_CCK) { rssi_level = si->rssi_level; if (rssi_level == 0) ra_mask &= 0xffffffffffffffffULL; else if (rssi_level == 1) ra_mask &= 0xfffffffffffffff0ULL; else if (rssi_level == 2) ra_mask &= 0xffffffffffffefe0ULL; else if (rssi_level == 3) ra_mask &= 0xffffffffffffcfc0ULL; else if (rssi_level == 4) ra_mask &= 0xffffffffffff8f80ULL; else if (rssi_level >= 5) ra_mask &= 0xffffffffffff0f00ULL; } if (!si->use_cfg_mask) return ra_mask; band = hal->current_band_type; if (band == RTW_BAND_2G) { band = NL80211_BAND_2GHZ; cfg_mask = mask->control[band].legacy; } else if (band == RTW_BAND_5G) { band = NL80211_BAND_5GHZ; cfg_mask = u64_encode_bits(mask->control[band].legacy, RA_MASK_OFDM_RATES); } if (!is_vht_enable) { if (ra_mask & RA_MASK_HT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], RA_MASK_HT_RATES_1SS); if (ra_mask & RA_MASK_HT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], RA_MASK_HT_RATES_2SS); } else { if (ra_mask & RA_MASK_VHT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], RA_MASK_VHT_RATES_1SS); if (ra_mask & RA_MASK_VHT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], RA_MASK_VHT_RATES_2SS); } ra_mask &= cfg_mask; return ra_mask; } void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct ieee80211_sta *sta = si->sta; struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_hal *hal = &rtwdev->hal; u8 wireless_set; u8 
bw_mode; u8 rate_id; u8 rf_type = RF_1T1R; u8 stbc_en = 0; u8 ldpc_en = 0; u8 tx_num = 1; u64 ra_mask = 0; bool is_vht_enable = false; bool is_support_sgi = false; - if (sta->vht_cap.vht_supported) { + if (sta->deflink.vht_cap.vht_supported) { is_vht_enable = true; ra_mask |= get_vht_ra_mask(sta); - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = VHT_STBC_EN; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) + if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; - } else if (sta->ht_cap.ht_supported) { - ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | - (sta->ht_cap.mcs.rx_mask[0] << 12); - if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + } else if (sta->deflink.ht_cap.ht_supported) { + ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | + (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) + if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; } if (efuse->hw_cap.nss == 1) ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; if (hal->current_band_type == RTW_BAND_5G) { - ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4; - if (sta->vht_cap.vht_supported) { + ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; + if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_OFDM | WIRELESS_VHT; - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G; wireless_set = WIRELESS_OFDM | WIRELESS_HT; } else { wireless_set = WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_5G; } else if (hal->current_band_type == RTW_BAND_2G) { - ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ]; - if (sta->vht_cap.vht_supported) { + ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; + if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT | WIRELESS_VHT; - } else if (sta->ht_cap.ht_supported) { + } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT | RA_MASK_OFDM_IN_HT_2G; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT; - } else if (sta->supp_rates[0] <= 0xf) { + } else if (sta->deflink.supp_rates[0] <= 0xf) { wireless_set = WIRELESS_CCK; } else { wireless_set = WIRELESS_CCK | WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_2G; } else { rtw_err(rtwdev, "Unknown band type\n"); wireless_set = 0; } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; - is_support_sgi = sta->vht_cap.vht_supported && - (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); + is_support_sgi = sta->deflink.vht_cap.vht_supported && + (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; - is_support_sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); + is_support_sgi = sta->deflink.ht_cap.ht_supported && + (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; - is_support_sgi = sta->ht_cap.ht_supported && - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); + is_support_sgi = sta->deflink.ht_cap.ht_supported && + 
(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } - if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) { + if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) { tx_num = 2; rf_type = RF_2T2R; - } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) { + } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) { tx_num = 2; rf_type = RF_2T2R; } rate_id = get_rate_id(wireless_set, bw_mode, tx_num); ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable, wireless_set); si->bw_mode = bw_mode; si->stbc_en = stbc_en; si->ldpc_en = ldpc_en; si->rf_type = rf_type; si->wireless_set = wireless_set; si->sgi_enable = is_support_sgi; si->vht_enable = is_vht_enable; si->ra_mask = ra_mask; si->rate_id = rate_id; rtw_fw_send_ra_info(rtwdev, si); } static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; fw = &rtwdev->fw; wait_for_completion(&fw->completion); if (!fw->firmware) return -EINVAL; if (chip->wow_fw_name) { fw = &rtwdev->wow_fw; wait_for_completion(&fw->completion); if (!fw->firmware) return -EINVAL; } return 0; } static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { struct rtw_chip_info *chip = rtwdev->chip; if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported || !fw->feature) return LPS_DEEP_MODE_NONE; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) && rtw_fw_feature_check(fw, FW_FEATURE_PG)) return LPS_DEEP_MODE_PG; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) && rtw_fw_feature_check(fw, FW_FEATURE_LCLK)) return LPS_DEEP_MODE_LCLK; return LPS_DEEP_MODE_NONE; } static int rtw_power_on(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw = &rtwdev->fw; bool wifi_only; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } /* power on MAC before firmware downloaded */ ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } ret = rtw_wait_firmware_completion(rtwdev); if (ret) { rtw_err(rtwdev, "failed to wait firmware completion\n"); goto err_off; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } /* config mac after firmware downloaded */ ret = rtw_mac_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to configure mac\n"); goto err_off; } chip->ops->phy_set_param(rtwdev); ret = rtw_hci_start(rtwdev); if (ret) { rtw_err(rtwdev, "failed to start hci\n"); goto err_off; } /* send H2C after HCI has started */ rtw_fw_send_general_info(rtwdev); rtw_fw_send_phydm_info(rtwdev); wifi_only = !rtwdev->efuse.btcoex; rtw_coex_power_on_setting(rtwdev); rtw_coex_init_hw_config(rtwdev, wifi_only); return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) { if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN)) return; if (start) { rtw_fw_scan_notify(rtwdev, true); } else { reinit_completion(&rtwdev->fw_scan_density); rtw_fw_scan_notify(rtwdev, false); if (!wait_for_completion_timeout(&rtwdev->fw_scan_density, SCAN_NOTIFY_TIMEOUT)) rtw_warn(rtwdev, "firmware failed to report density after scan\n"); } } void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, const u8 *mac_addr, bool hw_scan) { u32 config = 0; int ret = 0; rtw_leave_lps(rtwdev); if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) { 
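/* a hw scan requested while not associated must first bring the chip
 * out of idle (IPS) power save
 */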
ret = rtw_leave_ips(rtwdev); if (ret) { rtw_err(rtwdev, "failed to leave idle state\n"); return; } } ether_addr_copy(rtwvif->mac_addr, mac_addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); rtw_core_fw_scan_notify(rtwdev, true); set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); set_bit(RTW_FLAG_SCANNING, rtwdev->flags); } void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; u32 config = 0; clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); rtw_core_fw_scan_notify(rtwdev, false); ether_addr_copy(rtwvif->mac_addr, vif->addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); } int rtw_core_start(struct rtw_dev *rtwdev) { int ret; ret = rtw_power_on(rtwdev); if (ret) return ret; rtw_sec_enable_sec_engine(rtwdev); rtwdev->lps_conf.deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->fw); rtwdev->lps_conf.wow_deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->wow_fw); /* rcr reset after powered on */ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); set_bit(RTW_FLAG_RUNNING, rtwdev->flags); return 0; } static void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } void rtw_core_stop(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; clear_bit(RTW_FLAG_RUNNING, rtwdev->flags); clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); mutex_unlock(&rtwdev->mutex); cancel_work_sync(&rtwdev->c2h_work); cancel_delayed_work_sync(&rtwdev->watch_dog_work); cancel_delayed_work_sync(&coex->bt_relink_work); cancel_delayed_work_sync(&coex->bt_reenable_work); cancel_delayed_work_sync(&coex->defreeze_work); cancel_delayed_work_sync(&coex->wl_remain_work); cancel_delayed_work_sync(&coex->bt_remain_work); cancel_delayed_work_sync(&coex->wl_connecting_work); cancel_delayed_work_sync(&coex->bt_multi_link_remain_work); cancel_delayed_work_sync(&coex->wl_ccklock_work); mutex_lock(&rtwdev->mutex); rtw_power_off(rtwdev); } static void rtw_init_ht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_ht_cap *ht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; ht_cap->ht_supported = true; ht_cap->cap = 0; ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; if (rtw_chip_has_tx_stbc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40)) ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_SGI_40; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (efuse->hw_cap.nss > 1) { ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(300); } else { ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0x00; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(150); } } static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; u16 mcs_map; __le16 highest; if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE 
&& efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) return; vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 0; if (rtwdev->hal.rf_path_num > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; vht_cap->cap |= (rtwdev->hal.bfee_sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; if (efuse->hw_cap.nss > 1) { highest = cpu_to_le16(780); mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; } else { highest = cpu_to_le16(390); mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; vht_cap->vht_mcs.tx_highest = highest; } static void rtw_set_supported_band(struct ieee80211_hw *hw, struct rtw_chip_info *chip) { struct rtw_dev *rtwdev = hw->priv; struct ieee80211_supported_band *sband; if (chip->band & RTW_BAND_2G) { sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } if (chip->band & RTW_BAND_5G) { sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); if (chip->vht_supported) rtw_init_vht_cap(rtwdev, &sband->vht_cap); hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } return; err_out: rtw_err(rtwdev, "failed to set supported band\n"); } static void rtw_unset_supported_band(struct ieee80211_hw *hw, struct rtw_chip_info *chip) { kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); } static void __update_firmware_feature(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { u32 feature; const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; feature = le32_to_cpu(fw_hdr->feature); fw->feature = feature & FW_FEATURE_SIG ? 
feature : 0; } static void __update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver); fw->version = le16_to_cpu(fw_hdr->version); fw->sub_version = fw_hdr->subversion; fw->sub_index = fw_hdr->subindex; __update_firmware_feature(rtwdev, fw); } static void __update_firmware_info_legacy(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { struct rtw_fw_hdr_legacy *legacy = #if defined(__linux__) (struct rtw_fw_hdr_legacy *)fw->firmware->data; #elif defined(__FreeBSD__) __DECONST(struct rtw_fw_hdr_legacy *, fw->firmware->data); #endif fw->h2c_version = 0; fw->version = le16_to_cpu(legacy->version); fw->sub_version = legacy->subversion1; fw->sub_index = legacy->subversion2; } static void update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { if (rtw_chip_wcpu_11n(rtwdev)) __update_firmware_info_legacy(rtwdev, fw); else __update_firmware_info(rtwdev, fw); } static void rtw_load_firmware_cb(const struct firmware *firmware, void *context) { struct rtw_fw_state *fw = context; struct rtw_dev *rtwdev = fw->rtwdev; if (!firmware || !firmware->data) { rtw_err(rtwdev, "failed to request firmware\n"); complete_all(&fw->completion); return; } fw->firmware = firmware; update_firmware_info(rtwdev, fw); complete_all(&fw->completion); rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n", fw->version, fw->sub_version, fw->sub_index, fw->h2c_version); } static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type) { const char *fw_name; struct rtw_fw_state *fw; int ret; switch (type) { case RTW_WOWLAN_FW: fw = &rtwdev->wow_fw; fw_name = rtwdev->chip->wow_fw_name; break; case RTW_NORMAL_FW: fw = &rtwdev->fw; fw_name = rtwdev->chip->fw_name; break; default: rtw_warn(rtwdev, "unsupported firmware type\n"); return -ENOENT; } fw->rtwdev = rtwdev; init_completion(&fw->completion); ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, GFP_KERNEL, fw, rtw_load_firmware_cb); if (ret) { rtw_err(rtwdev, "failed to async firmware request\n"); return ret; } return 0; } static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: rtwdev->hci.rpwm_addr = 0x03d9; rtwdev->hci.cpwm_addr = 0x03da; break; default: rtw_err(rtwdev, "unsupported hci type\n"); return -EINVAL; } hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1); hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version); hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1; if (hal->chip_version & BIT_RF_TYPE_ID) { hal->rf_type = RF_2T2R; hal->rf_path_num = 2; hal->antenna_tx = BB_PATH_AB; hal->antenna_rx = BB_PATH_AB; } else { hal->rf_type = RF_1T1R; hal->rf_path_num = 1; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; } hal->rf_phy_num = chip->fix_rf_phy_num ? 
chip->fix_rf_phy_num : hal->rf_path_num; efuse->physical_size = chip->phy_efuse_size; efuse->logical_size = chip->log_efuse_size; efuse->protect_size = chip->ptct_efuse_size; /* default use ack */ rtwdev->hal.rcr |= BIT_VHT_DACK; hal->bfee_sts_cap = 3; return 0; } static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) { struct rtw_fw_state *fw = &rtwdev->fw; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP); wait_for_completion(&fw->completion); if (!fw->firmware) { ret = -EINVAL; rtw_err(rtwdev, "failed to load firmware\n"); goto err; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } static int rtw_dump_hw_feature(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; u8 hw_feature[HW_FEATURE_LEN]; u8 id; u8 bw; int i; id = rtw_read8(rtwdev, REG_C2HEVT); if (id != C2H_HW_FEATURE_REPORT) { rtw_err(rtwdev, "failed to read hw feature report\n"); return -EBUSY; } for (i = 0; i < HW_FEATURE_LEN; i++) hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i); rtw_write8(rtwdev, REG_C2HEVT, 0); bw = GET_EFUSE_HW_CAP_BW(hw_feature); efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw); efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature); efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature); efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature); efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature); rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num); if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE || efuse->hw_cap.nss > rtwdev->hal.rf_path_num) efuse->hw_cap.nss = rtwdev->hal.rf_path_num; rtw_dbg(rtwdev, RTW_DBG_EFUSE, "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, efuse->hw_cap.ant_num, efuse->hw_cap.nss); return 0; } static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_mac_power_off(rtwdev); } static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; int ret; mutex_lock(&rtwdev->mutex); /* power on mac to read efuse */ ret = rtw_chip_efuse_enable(rtwdev); if (ret) goto out_unlock; ret = rtw_parse_efuse_map(rtwdev); if (ret) goto out_disable; ret = rtw_dump_hw_feature(rtwdev); if (ret) goto out_disable; ret = rtw_check_supported_rfe(rtwdev); if (ret) goto out_disable; if (efuse->crystal_cap == 0xff) efuse->crystal_cap = 0; if (efuse->pa_type_2g == 0xff) efuse->pa_type_2g = 0; if (efuse->pa_type_5g == 0xff) efuse->pa_type_5g = 0; if (efuse->lna_type_2g == 0xff) efuse->lna_type_2g = 0; if (efuse->lna_type_5g == 0xff) efuse->lna_type_5g = 0; if (efuse->channel_plan == 0xff) efuse->channel_plan = 0x7f; if (efuse->rf_board_option == 0xff) efuse->rf_board_option = 0; if (efuse->bt_setting & BIT(0)) efuse->share_ant = true; if (efuse->regd == 0xff) efuse->regd = 0; if (efuse->tx_bb_swing_setting_2g == 0xff) efuse->tx_bb_swing_setting_2g = 0; if (efuse->tx_bb_swing_setting_5g == 0xff) efuse->tx_bb_swing_setting_5g = 0; efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ?
1 : 0; out_disable: rtw_chip_efuse_disable(rtwdev); out_unlock: mutex_unlock(&rtwdev->mutex); return ret; } static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); if (!rfe_def) return -ENODEV; rtw_phy_setup_phy_cond(rtwdev, 0); rtw_phy_init_tx_power(rtwdev); if (rfe_def->agc_btg_tbl) rtw_load_table(rtwdev, rfe_def->agc_btg_tbl); rtw_load_table(rtwdev, rfe_def->phy_pg_tbl); rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl); rtw_phy_tx_power_by_rate_config(hal); rtw_phy_tx_power_limit_config(hal); return 0; } int rtw_chip_info_setup(struct rtw_dev *rtwdev) { int ret; ret = rtw_chip_parameter_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip parameters\n"); goto err_out; } ret = rtw_chip_efuse_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip efuse info\n"); goto err_out; } ret = rtw_chip_board_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip board info\n"); goto err_out; } return 0; err_out: return ret; } EXPORT_SYMBOL(rtw_chip_info_setup); static void rtw_stats_init(struct rtw_dev *rtwdev) { struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_dm_info *dm_info = &rtwdev->dm_info; int i; ewma_tp_init(&stats->tx_ewma_tp); ewma_tp_init(&stats->rx_ewma_tp); for (i = 0; i < RTW_EVM_NUM; i++) ewma_evm_init(&dm_info->ewma_evm[i]); for (i = 0; i < RTW_SNR_NUM; i++) ewma_snr_init(&dm_info->ewma_snr[i]); } int rtw_core_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex *coex = &rtwdev->coex; int ret; INIT_LIST_HEAD(&rtwdev->rsvd_page_list); INIT_LIST_HEAD(&rtwdev->txqs); timer_setup(&rtwdev->tx_report.purge_timer, rtw_tx_report_purge_timer, 0); rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work); INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work); INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work, rtw_coex_bt_multi_link_remain_work); INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work); INIT_WORK(&rtwdev->tx_work, rtw_tx_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work); INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); skb_queue_head_init(&rtwdev->c2h_queue); skb_queue_head_init(&rtwdev->coex.queue); skb_queue_head_init(&rtwdev->tx_report.queue); spin_lock_init(&rtwdev->rf_lock); spin_lock_init(&rtwdev->h2c.lock); spin_lock_init(&rtwdev->txq_lock); spin_lock_init(&rtwdev->tx_report.q_lock); mutex_init(&rtwdev->mutex); mutex_init(&rtwdev->coex.mutex); mutex_init(&rtwdev->hal.tx_power_mutex); init_waitqueue_head(&rtwdev->coex.wait); init_completion(&rtwdev->lps_leave_check); init_completion(&rtwdev->fw_scan_density); rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; rtwdev->dm_info.fix_rate = U8_MAX; set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); rtw_stats_init(rtwdev); /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = rtw_load_firmware(rtwdev, 
int rtw_core_init(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_coex *coex = &rtwdev->coex;
	int ret;

	INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
	INIT_LIST_HEAD(&rtwdev->txqs);

	timer_setup(&rtwdev->tx_report.purge_timer,
		    rtw_tx_report_purge_timer, 0);
	rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);

	INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
	INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
	INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
	INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
	INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work);
	INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work);
	INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work);
	INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work,
			  rtw_coex_bt_multi_link_remain_work);
	INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
	INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
	INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
	INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
	INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
	skb_queue_head_init(&rtwdev->c2h_queue);
	skb_queue_head_init(&rtwdev->coex.queue);
	skb_queue_head_init(&rtwdev->tx_report.queue);

	spin_lock_init(&rtwdev->rf_lock);
	spin_lock_init(&rtwdev->h2c.lock);
	spin_lock_init(&rtwdev->txq_lock);
	spin_lock_init(&rtwdev->tx_report.q_lock);

	mutex_init(&rtwdev->mutex);
	mutex_init(&rtwdev->coex.mutex);
	mutex_init(&rtwdev->hal.tx_power_mutex);

	init_waitqueue_head(&rtwdev->coex.wait);
	init_completion(&rtwdev->lps_leave_check);
	init_completion(&rtwdev->fw_scan_density);

	rtwdev->sec.total_cam_num = 32;
	rtwdev->hal.current_channel = 1;
	rtwdev->dm_info.fix_rate = U8_MAX;
	set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);

	rtw_stats_init(rtwdev);

	/* default rx filter setting */
	rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
			  BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
			  BIT_AB | BIT_AM | BIT_APM;

	ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW);
	if (ret) {
		rtw_warn(rtwdev, "no firmware loaded\n");
		return ret;
	}

	if (chip->wow_fw_name) {
		ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
		if (ret) {
			rtw_warn(rtwdev, "no wow firmware loaded\n");
			wait_for_completion(&rtwdev->fw.completion);
			if (rtwdev->fw.firmware)
				release_firmware(rtwdev->fw.firmware);
			return ret;
		}
	}

#if defined(__FreeBSD__)
	rtw_wait_firmware_completion(rtwdev);
#endif

	return 0;
}
EXPORT_SYMBOL(rtw_core_init);

void rtw_core_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_fw_state *fw = &rtwdev->fw;
	struct rtw_fw_state *wow_fw = &rtwdev->wow_fw;
	struct rtw_rsvd_page *rsvd_pkt, *tmp;
	unsigned long flags;

	rtw_wait_firmware_completion(rtwdev);

	if (fw->firmware)
		release_firmware(fw->firmware);

	if (wow_fw->firmware)
		release_firmware(wow_fw->firmware);

	destroy_workqueue(rtwdev->tx_wq);
	spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
	skb_queue_purge(&rtwdev->tx_report.queue);
	skb_queue_purge(&rtwdev->coex.queue);
	spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}

	mutex_destroy(&rtwdev->mutex);
	mutex_destroy(&rtwdev->coex.mutex);
	mutex_destroy(&rtwdev->hal.tx_power_mutex);
}
EXPORT_SYMBOL(rtw_core_deinit);
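/*
 * Editorial aside (illustrative sketch, not part of the imported driver):
 * rtw_load_firmware(), defined elsewhere in the driver, requests the image
 * asynchronously and signals a completion from its callback; that is why
 * both rtw_core_init() and rtw_core_deinit() above wait on fw.completion
 * before touching fw->firmware.  Schematically, under the assumption that
 * the "example_" names are hypothetical (FW_ACTION_UEVENT is called
 * FW_ACTION_HOTPLUG on older trees):
 */
#if 0	/* never compiled; example only */
static void example_fw_cb(const struct firmware *firmware, void *context)
{
	struct rtw_fw_state *fw = context;

	fw->firmware = firmware;	/* NULL if the request failed */
	complete_all(&fw->completion);
}

static int example_load_fw(struct device *dev, struct rtw_fw_state *fw,
			   const char *name)
{
	init_completion(&fw->completion);
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, name,
				       dev, GFP_KERNEL, fw, example_fw_cb);
}
#endif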
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
	struct rtw_hal *hal = &rtwdev->hal;
	int max_tx_headroom = 0;
	int ret;

	/* TODO: USB & SDIO may need extra room? */
	max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz;
	hw->extra_tx_headroom = max_tx_headroom;

	hw->queues = IEEE80211_NUM_ACS;
	hw->txq_data_size = sizeof(struct rtw_txq);
	hw->sta_data_size = sizeof(struct rtw_sta_info);
	hw->vif_data_size = sizeof(struct rtw_vif);

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_AP) |
				     BIT(NL80211_IFTYPE_ADHOC) |
				     BIT(NL80211_IFTYPE_MESH_POINT);
	hw->wiphy->available_antennas_tx = hal->antenna_tx;
	hw->wiphy->available_antennas_rx = hal->antenna_rx;

	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
			    WIPHY_FLAG_TDLS_EXTERNAL_SETUP;

	hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
	hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN;

	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);

#ifdef CONFIG_PM
	hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
	hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids;
#endif

	rtw_set_supported_band(hw, rtwdev->chip);
	SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);

	hw->wiphy->sar_capa = &rtw_sar_capa;

	ret = rtw_regd_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to init regd\n");
		return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		return ret;
	}

	ret = rtw_regd_hint(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to hint regd\n");
		return ret;
	}

	rtw_debugfs_init(rtwdev);

	rtwdev->bf_info.bfer_mu_cnt = 0;
	rtwdev->bf_info.bfer_su_cnt = 0;

	return 0;
}
EXPORT_SYMBOL(rtw_register_hw);

void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	ieee80211_unregister_hw(hw);
	rtw_unset_supported_band(hw, chip);
}
EXPORT_SYMBOL(rtw_unregister_hw);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sys/contrib/dev/rtw88/tx.c b/sys/contrib/dev/rtw88/tx.c
index c6c43331b140..f405682c9cbf 100644
--- a/sys/contrib/dev/rtw88/tx.c
+++ b/sys/contrib/dev/rtw88/tx.c
@@ -1,695 +1,695 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation */

#include "main.h"
#include "tx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct rtw_vif *rtwvif;

	hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_is_data(hdr->frame_control))
		return;

	if (!is_broadcast_ether_addr(hdr->addr1) &&
	    !is_multicast_ether_addr(hdr->addr1)) {
		rtwdev->stats.tx_unicast += skb->len;
		rtwdev->stats.tx_cnt++;
		if (vif) {
			rtwvif = (struct rtw_vif *)vif->drv_priv;
			rtwvif->stats.tx_unicast += skb->len;
			rtwvif->stats.tx_cnt++;
		}
	}
}

void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
{
	__le32 *txdesc = (__le32 *)skb->data;

	SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
	SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
	SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
	SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
	SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
	SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
	SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
	SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
	SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
	SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
	SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
	SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
	SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
	SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
	SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
	SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
	SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
	SET_TX_DESC_LS(txdesc, pkt_info->ls);
	SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
	SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
	SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
	SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts);
	if (pkt_info->rts) {
		SET_TX_DESC_RTSRATE(txdesc, DESC_RATE24M);
		SET_TX_DESC_DATA_RTS_SHORT(txdesc, 1);
	}
	SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
	SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
	SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
	SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
	SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
}
EXPORT_SYMBOL(rtw_tx_fill_tx_desc);

static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
{
-	u8 exp = sta->ht_cap.ampdu_factor;
+	u8 exp = sta->deflink.ht_cap.ampdu_factor;

	/* the least ampdu factor is 8K, and the value in the tx desc is the
	 * max aggregation num, which represents val * 2 packets can be
	 * aggregated in an AMPDU, so here we should use 8/2=4 as the base
	 */
	return (BIT(2) << exp) - 1;
}

static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
{
-	return sta->ht_cap.ampdu_density;
+	return sta->deflink.ht_cap.ampdu_density;
}
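/*
 * Editorial aside (worked example, not part of the imported driver):
 * in get_tx_ampdu_factor() above, the HT "maximum A-MPDU length
 * exponent" encodes 2^(13 + exp) - 1 bytes, and the descriptor field
 * holds a max-aggregation count where value * 2 MPDUs may be
 * aggregated, hence the 8/2 = 4 (BIT(2)) base:
 *
 *   exp = 0 (8K)  -> (BIT(2) << 0) - 1 =  3
 *   exp = 1 (16K) -> (BIT(2) << 1) - 1 =  7
 *   exp = 2 (32K) -> (BIT(2) << 2) - 1 = 15
 *   exp = 3 (64K) -> (BIT(2) << 3) - 1 = 31
 */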
static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
				 struct ieee80211_sta *sta)
{
	u8 rate;

-	if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
+	if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0)
		rate = DESC_RATEMCS15;
	else
		rate = DESC_RATEMCS7;

	return rate;
}

static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
				  struct ieee80211_sta *sta)
{
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 rate;
	u16 tx_mcs_map;

-	tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
+	tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
	if (efuse->hw_cap.nss == 1) {
		switch (tx_mcs_map & 0x3) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT1SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT1SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT1SS_MCS9;
			break;
		}
	} else if (efuse->hw_cap.nss >= 2) {
		switch ((tx_mcs_map & 0xc) >> 2) {
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			rate = DESC_RATEVHT2SS_MCS7;
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			rate = DESC_RATEVHT2SS_MCS8;
			break;
		default:
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			rate = DESC_RATEVHT2SS_MCS9;
			break;
		}
	} else {
		rate = DESC_RATEVHT1SS_MCS9;
	}

	return rate;
}

static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;

	/* [11:8], reserved, fills with zero
	 * [7:2], tx report sequence number
	 * [1:0], firmware use, fills with zero
	 */
	pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
	pkt_info->report = true;
}
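/*
 * Editorial aside (illustrative sketch, not part of the imported driver):
 * get_highest_vht_tx_rate() above decodes the 16-bit VHT MCS map, which
 * carries two bits per spatial stream (NSS1 in bits 1:0, NSS2 in bits
 * 3:2, and so on), each field holding one of the
 * IEEE80211_VHT_MCS_SUPPORT_0_{7,8,9} codes or 3 for "not supported".
 * A generic extraction would look like this (hypothetical helper):
 */
#if 0	/* never compiled; example only */
static inline u8 example_vht_mcs_for_nss(u16 mcs_map, u8 nss)
{
	/* nss is 1-based; each stream occupies a 2-bit field */
	return (mcs_map >> (2 * (nss - 1))) & 0x3;
}
#endif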
void rtw_tx_report_purge_timer(struct timer_list *t)
{
	struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;

#if defined(__linux__)
	if (skb_queue_len(&tx_report->queue) == 0)
		return;

	rtw_warn(rtwdev, "failed to get tx report from firmware\n");

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_purge(&tx_report->queue);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
#elif defined(__FreeBSD__)
	uint32_t qlen;

	spin_lock_irqsave(&tx_report->q_lock, flags);
	qlen = skb_queue_len(&tx_report->queue);
	if (qlen > 0)
		skb_queue_purge(&tx_report->queue);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);

	/*
	 * XXX Entries may still be on the queue simply because they have
	 * not been processed yet: the timer is updated without locks after
	 * the enqueue in rtw_tx_report_enqueue(), so the counts seen here
	 * can be in the 100s.  We revert to rtw_dbg from
	 * Linux git 584dce175f0461d5d9d63952a1e7955678c91086 .
	 */
	rtw_dbg(rtwdev, RTW_DBG_TX, "failed to get tx report from firmware: "
	    "txreport qlen %u\n", qlen);
#endif
}

void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	unsigned long flags;
	u8 *drv_data;

	/* pass sn to tx report handler through driver data */
	drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
	*drv_data = sn;

	spin_lock_irqsave(&tx_report->q_lock, flags);
	__skb_queue_tail(&tx_report->queue, skb);
	spin_unlock_irqrestore(&tx_report->q_lock, flags);

	mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
}
EXPORT_SYMBOL(rtw_tx_report_enqueue);

static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
				    struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	else
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
}

void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
{
	struct rtw_tx_report *tx_report = &rtwdev->tx_report;
	struct rtw_c2h_cmd *c2h;
	struct sk_buff *cur, *tmp;
	unsigned long flags;
	u8 sn, st;
	u8 *n;

	c2h = get_c2h_from_skb(skb);

	if (src == C2H_CCX_TX_RPT) {
		sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V0(c2h->payload);
	} else {
		sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload);
		st = GET_CCX_REPORT_STATUS_V1(c2h->payload);
	}

	spin_lock_irqsave(&tx_report->q_lock, flags);
	skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
		n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
		if (*n == sn) {
			__skb_unlink(cur, &tx_report->queue);
			rtw_tx_report_tx_status(rtwdev, cur, st == 0);
			break;
		}
	}
	spin_unlock_irqrestore(&tx_report->q_lock, flags);
}

static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb,
			    u8 lowest_rate, bool ignore_rate)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = tx_info->control.vif;
	bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags);

	if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest)
		return lowest_rate;

	return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
}
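/*
 * Editorial aside (worked example, not part of the imported driver):
 * rtw_get_mgmt_rate() above relies on the DESC_RATE* enum being laid
 * out in the same order as the band's bitrate table, so the index of
 * the lowest configured basic rate (__ffs() returns the 0-based index
 * of the least significant set bit) can simply be added to the band's
 * base rate.  E.g. on 2 GHz with basic_rates = 0b0110 (the 2 and
 * 5.5 Mbit/s entries):
 *
 *   __ffs(0b0110) = 1  ->  DESC_RATE1M + 1 = DESC_RATE2M
 */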
static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct sk_buff *skb,
					bool ignore_rate)
{
	if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
		pkt_info->rate_id = RTW_RATEID_B_20M;
		pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M,
						   ignore_rate);
	} else {
		pkt_info->rate_id = RTW_RATEID_G;
		pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M,
						   ignore_rate);
	}

	pkt_info->use_rate = true;
	pkt_info->dis_rate_fallback = true;
}

static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev,
				       struct rtw_tx_pkt_info *pkt_info,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 sec_type = 0;

	if (info && info->control.hw_key) {
		struct ieee80211_key_conf *key = info->control.hw_key;

		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			sec_type = 0x01;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			sec_type = 0x03;
			break;
		default:
			break;
		}
	}

	pkt_info->sec_type = sec_type;
}

static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false);
	pkt_info->dis_qselseq = true;
	pkt_info->en_hwseq = true;
	pkt_info->hw_ssn_sel = 0;
	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
					struct rtw_tx_pkt_info *pkt_info,
					struct ieee80211_sta *sta,
					struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	struct rtw_sta_info *si;
	u8 fix_rate;
	u16 seq;
	u8 ampdu_factor = 0;
	u8 ampdu_density = 0;
	bool ampdu_en = false;
	u8 rate = DESC_RATE6M;
	u8 rate_id = 6;
	u8 bw = RTW_CHANNEL_WIDTH_20;
	bool stbc = false;
	bool ldpc = false;

	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;

	/* for broadcast/multicast, use default values */
	if (!sta)
		goto out;

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		ampdu_en = true;
		ampdu_factor = get_tx_ampdu_factor(sta);
		ampdu_density = get_tx_ampdu_density(sta);
	}

	if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold)
		pkt_info->rts = true;

-	if (sta->vht_cap.vht_supported)
+	if (sta->deflink.vht_cap.vht_supported)
		rate = get_highest_vht_tx_rate(rtwdev, sta);
-	else if (sta->ht_cap.ht_supported)
+	else if (sta->deflink.ht_cap.ht_supported)
		rate = get_highest_ht_tx_rate(rtwdev, sta);
-	else if (sta->supp_rates[0] <= 0xf)
+	else if (sta->deflink.supp_rates[0] <= 0xf)
		rate = DESC_RATE11M;
	else
		rate = DESC_RATE54M;

	si = (struct rtw_sta_info *)sta->drv_priv;

	bw = si->bw_mode;
	rate_id = si->rate_id;
	stbc = si->stbc_en;
	ldpc = si->ldpc_en;

out:
	pkt_info->seq = seq;
	pkt_info->ampdu_factor = ampdu_factor;
	pkt_info->ampdu_density = ampdu_density;
	pkt_info->ampdu_en = ampdu_en;
	pkt_info->rate = rate;
	pkt_info->rate_id = rate_id;
	pkt_info->bw = bw;
	pkt_info->stbc = stbc;
	pkt_info->ldpc = ldpc;

	fix_rate = dm_info->fix_rate;
	if (fix_rate < DESC_RATE_MAX) {
		pkt_info->rate = fix_rate;
		pkt_info->dis_rate_fallback = true;
		pkt_info->use_rate = true;
	}
}

void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw_sta_info *si;
	struct ieee80211_vif *vif = NULL;
	__le16 fc = hdr->frame_control;
	bool bmc;

	if (sta) {
		si = (struct rtw_sta_info *)sta->drv_priv;
		vif = si->vif;
	}

	if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
		rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb);
	else if (ieee80211_is_data(fc))
		rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);

	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
		rtw_tx_report_enable(rtwdev, pkt_info);

	pkt_info->bmc = bmc;
	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->qsel = skb->priority;
	pkt_info->ls = true;

	/* maybe merge with tx status ? */
	rtw_tx_stats(rtwdev, vif, skb);
}
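/*
 * Editorial aside (worked example, not part of the imported driver):
 * the sequence number pulled out in rtw_tx_data_pkt_info_update()
 * comes from the 802.11 Sequence Control field, which packs the
 * fragment number into bits 3:0 and the sequence number into bits
 * 15:4, hence the IEEE80211_SCTL_SEQ (0xfff0) mask and the shift.
 * E.g. for seq_ctrl = 0x1234 (after le16_to_cpu()):
 *
 *   (0x1234 & 0xfff0) >> 4 = 0x123  (sequence 291, fragment 4)
 */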
void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
				      struct rtw_tx_pkt_info *pkt_info,
				      struct sk_buff *skb,
				      enum rtw_rsvd_packet_type type)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool bmc;

	/* A beacon or dummy reserved page packet indicates that it is the
	 * first reserved page, and the qsel of it will be set in each hci.
	 */
	if (type != RSVD_BEACON && type != RSVD_DUMMY)
		pkt_info->qsel = TX_DESC_QSEL_MGMT;

	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);
	pkt_info->bmc = bmc;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->ls = true;
	if (type == RSVD_PS_POLL) {
		pkt_info->nav_use_hdr = true;
	} else {
		pkt_info->dis_qselseq = true;
		pkt_info->en_hwseq = true;
		pkt_info->hw_ssn_sel = 0;
	}
	if (type == RSVD_QOS_NULL)
		pkt_info->bt_null = true;

	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);

	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

struct sk_buff *
rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
				struct rtw_tx_pkt_info *pkt_info,
				u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON);

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);

struct sk_buff *
rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
			  struct rtw_tx_pkt_info *pkt_info,
			  u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);

	pkt_info->tx_pkt_size = size;

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);

void rtw_tx(struct rtw_dev *rtwdev,
	    struct ieee80211_tx_control *control,
	    struct sk_buff *skb)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	rtw_tx_pkt_info_update(rtwdev, &pkt_info, control->sta, skb);
	ret = rtw_hci_tx_write(rtwdev, &pkt_info, skb);
	if (ret) {
#if defined(__linux__)
		rtw_err(rtwdev, "failed to write TX skb to HCI\n");
#elif defined(__FreeBSD__)
		rtw_err(rtwdev, "%s: failed to write TX skb to HCI: %d\n",
		    __func__, ret);
#endif
		goto out;
	}

	rtw_hci_tx_kick_off(rtwdev);

	return;

out:
	ieee80211_free_txskb(rtwdev->hw, skb);
}
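/*
 * Editorial aside (illustrative sketch, not part of the imported driver):
 * both reserved-page/h2c helpers above use the same skb layout trick:
 * allocate payload plus descriptor space, then skb_reserve() the
 * descriptor size so the payload copy lands after the headroom; the
 * HCI layer can later skb_push() the TX descriptor in front without
 * reallocating.  Schematically:
 *
 *   skb = dev_alloc_skb(size + desc_sz);
 *   skb_reserve(skb, desc_sz);      // headroom = desc_sz, len = 0
 *   skb_put_data(skb, buf, size);   // len = size, headroom intact
 *   ...
 *   skb_push(skb, desc_sz);         // descriptor space at skb->data
 */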
%d\n", __func__, ret); #endif return ret; } rtwtxq->last_push = jiffies; return 0; } static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev, struct rtw_txq *rtwtxq) { struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); struct sk_buff *skb; skb = ieee80211_tx_dequeue(rtwdev->hw, txq); if (!skb) return NULL; return skb; } static void rtw_txq_push(struct rtw_dev *rtwdev, struct rtw_txq *rtwtxq, unsigned long frames) { struct sk_buff *skb; int ret; int i; rcu_read_lock(); for (i = 0; i < frames; i++) { skb = rtw_txq_dequeue(rtwdev, rtwtxq); if (!skb) break; ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb); if (ret) { rtw_err(rtwdev, "failed to pusk skb, ret %d\n", ret); break; } } rcu_read_unlock(); } void rtw_tx_work(struct work_struct *w) { struct rtw_dev *rtwdev = container_of(w, struct rtw_dev, tx_work); struct rtw_txq *rtwtxq, *tmp; spin_lock_bh(&rtwdev->txq_lock); list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) { struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); unsigned long frame_cnt; unsigned long byte_cnt; ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); rtw_txq_push(rtwdev, rtwtxq, frame_cnt); list_del_init(&rtwtxq->list); } rtw_hci_tx_kick_off(rtwdev); spin_unlock_bh(&rtwdev->txq_lock); } void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) { struct rtw_txq *rtwtxq; if (!txq) return; rtwtxq = (struct rtw_txq *)txq->drv_priv; INIT_LIST_HEAD(&rtwtxq->list); } void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) { struct rtw_txq *rtwtxq; if (!txq) return; rtwtxq = (struct rtw_txq *)txq->drv_priv; spin_lock_bh(&rtwdev->txq_lock); if (!list_empty(&rtwtxq->list)) list_del_init(&rtwtxq->list); spin_unlock_bh(&rtwdev->txq_lock); }