diff --git a/share/man/man4/rtw88.4 b/share/man/man4/rtw88.4 index 0ace002e4bde..43a51405d1d5 100644 --- a/share/man/man4/rtw88.4 +++ b/share/man/man4/rtw88.4 @@ -1,111 +1,111 @@
.\"-
.\" Copyright (c) 2022 Bjoern A. Zeeb
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\"    notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\"    notice, this list of conditions and the following disclaimer in the
.\"    documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
-.Dd May 5, 2022
+.Dd June 12, 2022
.Dt RTW88 4
.Os
.Sh NAME
.Nm rtw88
.Nd Realtek IEEE 802.11n/ac wireless network driver
.Sh SYNOPSIS
The driver will auto-load without any user interaction using
.Xr devmatch 8
if enabled in
.Xr rc.conf 5 .
.Pp
Only if auto-loading is explicitly disabled, place the following line in
.Xr rc.conf 5
to manually load the driver as a module at boot time:
.Bd -literal -offset indent
-kld_list="${kld_list} if_rtw88"
+kld_list="${kld_list} if_rtw88_pci"
.Ed
.Pp
The driver should automatically load any
.Xr rtw88fw 4
firmware needed for the particular chipset.
.Pp
Loading the driver from
.Xr loader 8
is discouraged.
.Sh DESCRIPTION
The
.Nm
driver is derived from Realtek's Linux rtw88 driver and provides support
for the following chipsets:
.Pp
.Bl -tag -width Ds -offset indent -compact
.It Realtek 802.11n wireless 8723de (RTL8723DE)
.It Realtek 802.11ac wireless 8821ce (RTL8821CE)
.It Realtek 802.11ac wireless 8822be (RTL8822BE)
.It Realtek 802.11ac wireless 8822ce (RTL8822CE)
.El
.Pp
The driver uses the
.\" No LinuxKPI man pages so no .Xr here.
.Em linuxkpi_wlan
and
.Em linuxkpi
compat framework to bridge between the Linux and native FreeBSD driver code
as well as to the native
.Xr net80211 4
wireless stack.
.Pp
While
.Nm
supports all of 802.11 a/b/g/n and ac, the compatibility code currently
only supports 802.11 a/b/g modes.
Support for 802.11 n/ac is yet to come.
.Sh LOADER TUNABLES
.Bl -tag -width indent
.It Va compat.linuxkpi.skb.mem_limit
If you are running a 64-bit system with more than 4GB of main memory,
you need to set this tunable to
.Sy 1
in
.Xr loader.conf 5
and reboot once to make it effective.
This tunable works around a problem with DMA by limiting allocations of
network buffer memory to the lower 32 bits of physical memory, which
makes the driver work.
.El
.Sh BUGS
Certainly.
.Pp
Does not seem to work (reliably) on machines with more than 4GB of
main memory.
See the
.Sx LOADER TUNABLES
section above.
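.Pp
As a concrete example, the workaround described in the
.Sx LOADER TUNABLES
section is enabled with the following
.Xr loader.conf 5
line (the tunable name is the one documented above; a reboot is
required for it to take effect):
.Bd -literal -offset indent
compat.linuxkpi.skb.mem_limit="1"
.Ed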
.Sh SEE ALSO .Xr rtw88fw 4 , .Xr wlan 4 , .Xr ifconfig 8 , .Xr wpa_supplicant 8 .Sh HISTORY The .Nm driver first appeared in .Fx 14.0 . diff --git a/sys/contrib/dev/rtw88/main.c b/sys/contrib/dev/rtw88/main.c index 2f397ace02b3..2522056c3e4b 100644 --- a/sys/contrib/dev/rtw88/main.c +++ b/sys/contrib/dev/rtw88/main.c @@ -1,2122 +1,2130 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX rtw88_ #endif #include #include "main.h" #include "regd.h" #include "fw.h" #include "ps.h" #include "sec.h" #include "mac.h" #include "coex.h" #include "phy.h" #include "reg.h" #include "efuse.h" #include "tx.h" #include "debug.h" #include "bf.h" #include "sar.h" bool rtw_disable_lps_deep_mode; EXPORT_SYMBOL(rtw_disable_lps_deep_mode); bool rtw_bf_support = true; unsigned int rtw_debug_mask; EXPORT_SYMBOL(rtw_debug_mask); /* EDCCA is enabled during normal behavior. For debugging purpose in * a noisy environment, it can be disabled via edcca debugfs. Because * all rtw88 devices will probably be affected if environment is noisy, * rtw_edcca_enabled is just declared by driver instead of by device. * So, turning it off will take effect for all rtw88 devices before * there is a tough reason to maintain rtw_edcca_enabled by device. */ bool rtw_edcca_enabled = true; module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); module_param_named(support_bf, rtw_bf_support, bool, 0644); module_param_named(debug_mask, rtw_debug_mask, uint, 0644); MODULE_PARM_DESC(disable_lps_deep, "Set Y to disable Deep PS"); MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support"); MODULE_PARM_DESC(debug_mask, "Debugging mask"); static struct ieee80211_channel rtw_channeltable_2g[] = { {.center_freq = 2412, .hw_value = 1,}, {.center_freq = 2417, .hw_value = 2,}, {.center_freq = 2422, .hw_value = 3,}, {.center_freq = 2427, .hw_value = 4,}, {.center_freq = 2432, .hw_value = 5,}, {.center_freq = 2437, .hw_value = 6,}, {.center_freq = 2442, .hw_value = 7,}, {.center_freq = 2447, .hw_value = 8,}, {.center_freq = 2452, .hw_value = 9,}, {.center_freq = 2457, .hw_value = 10,}, {.center_freq = 2462, .hw_value = 11,}, {.center_freq = 2467, .hw_value = 12,}, {.center_freq = 2472, .hw_value = 13,}, {.center_freq = 2484, .hw_value = 14,}, }; static struct ieee80211_channel rtw_channeltable_5g[] = { {.center_freq = 5180, .hw_value = 36,}, {.center_freq = 5200, .hw_value = 40,}, {.center_freq = 5220, .hw_value = 44,}, {.center_freq = 5240, .hw_value = 48,}, {.center_freq = 5260, .hw_value = 52,}, {.center_freq = 5280, .hw_value = 56,}, {.center_freq = 5300, .hw_value = 60,}, {.center_freq = 5320, .hw_value = 64,}, {.center_freq = 5500, .hw_value = 100,}, {.center_freq = 5520, .hw_value = 104,}, {.center_freq = 5540, .hw_value = 108,}, {.center_freq = 5560, .hw_value = 112,}, {.center_freq = 5580, .hw_value = 116,}, {.center_freq = 5600, .hw_value = 120,}, {.center_freq = 5620, .hw_value = 124,}, {.center_freq = 5640, .hw_value = 128,}, {.center_freq = 5660, .hw_value = 132,}, {.center_freq = 5680, .hw_value = 136,}, {.center_freq = 5700, .hw_value = 140,}, {.center_freq = 5720, .hw_value = 144,}, {.center_freq = 5745, .hw_value = 149,}, {.center_freq = 5765, .hw_value = 153,}, {.center_freq = 5785, .hw_value = 157,}, {.center_freq = 5805, .hw_value = 161,}, {.center_freq = 5825, .hw_value = 165, .flags = IEEE80211_CHAN_NO_HT40MINUS}, }; static struct ieee80211_rate rtw_ratetable[] = { {.bitrate = 10, .hw_value = 
0x00,}, {.bitrate = 20, .hw_value = 0x01,}, {.bitrate = 55, .hw_value = 0x02,}, {.bitrate = 110, .hw_value = 0x03,}, {.bitrate = 60, .hw_value = 0x04,}, {.bitrate = 90, .hw_value = 0x05,}, {.bitrate = 120, .hw_value = 0x06,}, {.bitrate = 180, .hw_value = 0x07,}, {.bitrate = 240, .hw_value = 0x08,}, {.bitrate = 360, .hw_value = 0x09,}, {.bitrate = 480, .hw_value = 0x0a,}, {.bitrate = 540, .hw_value = 0x0b,}, }; u16 rtw_desc_to_bitrate(u8 desc_rate) { struct ieee80211_rate rate; if (WARN(desc_rate >= ARRAY_SIZE(rtw_ratetable), "invalid desc rate\n")) return 0; rate = rtw_ratetable[desc_rate]; return rate.bitrate; } static struct ieee80211_supported_band rtw_band_2ghz = { .band = NL80211_BAND_2GHZ, .channels = rtw_channeltable_2g, .n_channels = ARRAY_SIZE(rtw_channeltable_2g), .bitrates = rtw_ratetable, .n_bitrates = ARRAY_SIZE(rtw_ratetable), .ht_cap = {0}, .vht_cap = {0}, }; static struct ieee80211_supported_band rtw_band_5ghz = { .band = NL80211_BAND_5GHZ, .channels = rtw_channeltable_5g, .n_channels = ARRAY_SIZE(rtw_channeltable_5g), /* 5G has no CCK rates */ .bitrates = rtw_ratetable + 4, .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4, .ht_cap = {0}, .vht_cap = {0}, }; struct rtw_watch_dog_iter_data { struct rtw_dev *rtwdev; struct rtw_vif *rtwvif; }; static void rtw_dynamic_csi_rate(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct rtw_bf_info *bf_info = &rtwdev->bf_info; u8 fix_rate_enable = 0; u8 new_csi_rate_idx; if (rtwvif->bfee.role != RTW_BFEE_SU && rtwvif->bfee.role != RTW_BFEE_MU) return; rtw_chip_cfg_csi_rate(rtwdev, rtwdev->dm_info.min_rssi, bf_info->cur_csi_rpt_rate, fix_rate_enable, &new_csi_rate_idx); if (new_csi_rate_idx != bf_info->cur_csi_rpt_rate) bf_info->cur_csi_rpt_rate = new_csi_rate_idx; } static void rtw_vif_watch_dog_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_watch_dog_iter_data *iter_data = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; if (vif->type == NL80211_IFTYPE_STATION) if (vif->bss_conf.assoc) iter_data->rtwvif = rtwvif; rtw_dynamic_csi_rate(iter_data->rtwdev, rtwvif); rtwvif->stats.tx_unicast = 0; rtwvif->stats.rx_unicast = 0; rtwvif->stats.tx_cnt = 0; rtwvif->stats.rx_cnt = 0; } /* process TX/RX statistics periodically for hardware, * the information helps hardware to enhance performance */ static void rtw_watch_dog_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, watch_dog_work.work); struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_watch_dog_iter_data data = {}; bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); bool ps_active; mutex_lock(&rtwdev->mutex); if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) goto unlock; ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100) set_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); else clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) rtw_coex_wl_status_change_notify(rtwdev, 0); if (stats->tx_cnt > RTW_LPS_THRESHOLD || stats->rx_cnt > RTW_LPS_THRESHOLD) ps_active = true; else ps_active = false; ewma_tp_add(&stats->tx_ewma_tp, (u32)(stats->tx_unicast >> RTW_TP_SHIFT)); ewma_tp_add(&stats->rx_ewma_tp, (u32)(stats->rx_unicast >> RTW_TP_SHIFT)); stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp); stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp); /* reset tx/rx statictics */ stats->tx_unicast = 0; stats->rx_unicast = 0; 
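The watchdog logic above feeds the per-interval byte counters into an exponentially weighted moving average, derives a busy/idle hint from the frame counters, and then clears everything for the next tick. A minimal self-contained sketch of that pattern follows; the 7/8 smoothing weight and the ">> 10" throughput shift are assumptions standing in for the kernel's ewma_tp helpers and RTW_TP_SHIFT:

#include <stdbool.h>
#include <stdint.h>

struct traffic_stats {
	uint64_t tx_unicast;	/* bytes seen since the last tick */
	uint64_t rx_unicast;
	uint32_t tx_cnt;	/* frames seen since the last tick */
	uint32_t rx_cnt;
	uint32_t tx_ewma_tp;	/* smoothed throughput estimate */
	uint32_t rx_ewma_tp;
};

#define BUSY_FRAME_THRESHOLD 100	/* mirrors the "> 100" busy check */

static uint32_t ewma_add(uint32_t avg, uint32_t sample)
{
	/* new = 7/8 * old + 1/8 * sample (weight is an assumption) */
	return avg - (avg >> 3) + (sample >> 3);
}

static bool watchdog_tick(struct traffic_stats *s)
{
	bool busy = s->tx_cnt > BUSY_FRAME_THRESHOLD ||
		    s->rx_cnt > BUSY_FRAME_THRESHOLD;

	s->tx_ewma_tp = ewma_add(s->tx_ewma_tp, (uint32_t)(s->tx_unicast >> 10));
	s->rx_ewma_tp = ewma_add(s->rx_ewma_tp, (uint32_t)(s->rx_unicast >> 10));

	/* reset tx/rx statistics for the next interval */
	s->tx_unicast = 0;
	s->rx_unicast = 0;
	s->tx_cnt = 0;
	s->rx_cnt = 0;
	return busy;
}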
stats->tx_cnt = 0; stats->rx_cnt = 0; if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) goto unlock; /* make sure BB/RF is working for dynamic mech */ rtw_leave_lps(rtwdev); rtw_phy_dynamic_mechanism(rtwdev); data.rtwdev = rtwdev; /* use atomic version to avoid taking local->iflist_mtx mutex */ rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data); /* fw supports only one station associated to enter lps, if there are * more than two stations associated to the AP, then we can not enter * lps, because fw does not handle the overlapped beacon interval * * mac80211 should iterate vifs and determine if driver can enter * ps by passing IEEE80211_CONF_PS to us, all we need to do is to * get that vif and check if device is having traffic more than the * threshold. */ if (rtwdev->ps_enabled && data.rtwvif && !ps_active && !rtwdev->beacon_loss) rtw_enter_lps(rtwdev, data.rtwvif->port); rtwdev->watch_dog_cnt++; unlock: mutex_unlock(&rtwdev->mutex); } static void rtw_c2h_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work); struct sk_buff *skb, *tmp; skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { skb_unlink(skb, &rtwdev->c2h_queue); rtw_fw_c2h_cmd_handle(rtwdev, skb); dev_kfree_skb_any(skb); } } static u8 rtw_acquire_macid(struct rtw_dev *rtwdev) { unsigned long mac_id; mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); if (mac_id < RTW_MAX_MAC_ID_NUM) set_bit(mac_id, rtwdev->mac_id_map); return mac_id; } int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, struct ieee80211_vif *vif) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; si->mac_id = rtw_acquire_macid(rtwdev); if (si->mac_id >= RTW_MAX_MAC_ID_NUM) return -ENOSPC; si->sta = sta; si->vif = vif; si->init_ra_lv = 1; ewma_rssi_init(&si->avg_rssi); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_init(rtwdev, sta->txq[i]); rtw_update_sta_info(rtwdev, si); rtw_fw_media_status_report(rtwdev, si->mac_id, true); rtwdev->sta_cnt++; rtwdev->beacon_loss = false; #if defined(__linux__) rtw_info(rtwdev, "sta %pM joined with macid %d\n", sta->addr, si->mac_id); #elif defined(__FreeBSD__) rtw_info(rtwdev, "sta %6D joined with macid %d\n", sta->addr, ":", si->mac_id); #endif return 0; } void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int i; rtw_release_macid(rtwdev, si->mac_id); if (fw_exist) rtw_fw_media_status_report(rtwdev, si->mac_id, false); for (i = 0; i < ARRAY_SIZE(sta->txq); i++) rtw_txq_cleanup(rtwdev, sta->txq[i]); kfree(si->mask); rtwdev->sta_cnt--; #if defined(__linux__) rtw_info(rtwdev, "sta %pM with macid %d left\n", sta->addr, si->mac_id); #elif defined(__FreeBSD__) rtw_info(rtwdev, "sta %6D with macid %d left\n", sta->addr, ":", si->mac_id); #endif } struct rtw_fwcd_hdr { u32 item; u32 size; u32 padding1; u32 padding2; } __packed; static int rtw_fwcd_prep(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; const struct rtw_fwcd_segs *segs = chip->fwcd_segs; u32 prep_size = chip->fw_rxff_size + sizeof(struct rtw_fwcd_hdr); u8 i; if (segs) { prep_size += segs->num * sizeof(struct rtw_fwcd_hdr); for (i = 0; i < segs->num; i++) prep_size += segs->segs[i]; } desc->data = vmalloc(prep_size); if (!desc->data) return -ENOMEM; desc->size = prep_size; desc->next = desc->data; return 0; } static u8 *rtw_fwcd_next(struct rtw_dev *rtwdev, u32 item, u32 size) { 
struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; struct rtw_fwcd_hdr *hdr; u8 *next; if (!desc->data) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared successfully\n"); return NULL; } next = desc->next + sizeof(struct rtw_fwcd_hdr); if (next - desc->data + size > desc->size) { rtw_dbg(rtwdev, RTW_DBG_FW, "fwcd isn't prepared enough\n"); return NULL; } hdr = (struct rtw_fwcd_hdr *)(desc->next); hdr->item = item; hdr->size = size; hdr->padding1 = 0x01234567; hdr->padding2 = 0x89abcdef; desc->next = next + size; return next; } static void rtw_fwcd_dump(struct rtw_dev *rtwdev) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; rtw_dbg(rtwdev, RTW_DBG_FW, "dump fwcd\n"); /* Data will be freed after lifetime of device coredump. After calling * dev_coredump, data is supposed to be handled by the device coredump * framework. Note that a new dump will be discarded if a previous one * hasn't been released yet. */ dev_coredumpv(rtwdev->dev, desc->data, desc->size, GFP_KERNEL); } static void rtw_fwcd_free(struct rtw_dev *rtwdev, bool free_self) { struct rtw_fwcd_desc *desc = &rtwdev->fw.fwcd_desc; if (free_self) { rtw_dbg(rtwdev, RTW_DBG_FW, "free fwcd by self\n"); vfree(desc->data); } desc->data = NULL; desc->next = NULL; } static int rtw_fw_dump_crash_log(struct rtw_dev *rtwdev) { u32 size = rtwdev->chip->fw_rxff_size; u32 *buf; u8 seq; buf = (u32 *)rtw_fwcd_next(rtwdev, RTW_FWCD_TLV, size); if (!buf) return -ENOMEM; if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) { rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n"); return -EINVAL; } if (GET_FW_DUMP_LEN(buf) == 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n"); return -EINVAL; } seq = GET_FW_DUMP_SEQ(buf); if (seq > 0) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's seq is wrong: %d\n", seq); return -EINVAL; } return 0; } int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, u32 fwcd_item) { u32 rxff = rtwdev->chip->fw_rxff_size; u32 dump_size, done_size = 0; u8 *buf; int ret; buf = rtw_fwcd_next(rtwdev, fwcd_item, size); if (!buf) return -ENOMEM; while (size) { dump_size = size > rxff ? 
rxff : size; ret = rtw_ddma_to_fw_fifo(rtwdev, ocp_src + done_size, dump_size); if (ret) { rtw_err(rtwdev, "ddma fw 0x%x [+0x%x] to fw fifo fail\n", ocp_src, done_size); return ret; } ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, dump_size, (u32 *)(buf + done_size)); if (ret) { rtw_err(rtwdev, "dump fw 0x%x [+0x%x] from fw fifo fail\n", ocp_src, done_size); return ret; } size -= dump_size; done_size += dump_size; } return 0; } EXPORT_SYMBOL(rtw_dump_fw); int rtw_dump_reg(struct rtw_dev *rtwdev, const u32 addr, const u32 size) { u8 *buf; u32 i; if (addr & 0x3) { WARN(1, "should be 4-byte aligned, addr = 0x%08x\n", addr); return -EINVAL; } buf = rtw_fwcd_next(rtwdev, RTW_FWCD_REG, size); if (!buf) return -ENOMEM; for (i = 0; i < size; i += 4) *(u32 *)(buf + i) = rtw_read32(rtwdev, addr + i); return 0; } EXPORT_SYMBOL(rtw_dump_reg); void rtw_vif_assoc_changed(struct rtw_vif *rtwvif, struct ieee80211_bss_conf *conf) { if (conf && conf->assoc) { rtwvif->aid = conf->aid; rtwvif->net_type = RTW_NET_MGD_LINKED; } else { rtwvif->aid = 0; rtwvif->net_type = RTW_NET_NO_LINK; } } static void rtw_reset_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_sec_desc *sec = &rtwdev->sec; rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx); } static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; if (rtwdev->sta_cnt == 0) { rtw_warn(rtwdev, "sta count before reset should not be 0\n"); return; } rtw_sta_remove(rtwdev, sta, false); } static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = (struct rtw_dev *)data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; rtw_bf_disassoc(rtwdev, vif, NULL); rtw_vif_assoc_changed(rtwvif, NULL); rtw_txq_cleanup(rtwdev, vif->txq); } void rtw_fw_recovery(struct rtw_dev *rtwdev) { if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags)) ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work); } static void __fw_recovery_work(struct rtw_dev *rtwdev) { int ret = 0; set_bit(RTW_FLAG_RESTARTING, rtwdev->flags); clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags); ret = rtw_fwcd_prep(rtwdev); if (ret) goto free; ret = rtw_fw_dump_crash_log(rtwdev); if (ret) goto free; ret = rtw_chip_dump_fw_crash(rtwdev); if (ret) goto free; rtw_fwcd_dump(rtwdev); free: rtw_fwcd_free(rtwdev, !!ret); rtw_write8(rtwdev, REG_MCU_TST_CFG, 0); WARN(1, "firmware crash, start reset and recover\n"); rcu_read_lock(); rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev); rcu_read_unlock(); rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev); rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev); rtw_enter_ips(rtwdev); } static void rtw_fw_recovery_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, fw_recovery_work); mutex_lock(&rtwdev->mutex); __fw_recovery_work(rtwdev); mutex_unlock(&rtwdev->mutex); ieee80211_restart_hw(rtwdev->hw); } struct rtw_txq_ba_iter_data { }; static void rtw_txq_ba_iter(void *data, struct ieee80211_sta *sta) { struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; int ret; u8 tid; tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); while (tid != IEEE80211_NUM_TIDS) { clear_bit(tid, si->tid_ba); ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EINVAL) { struct ieee80211_txq *txq; struct rtw_txq *rtwtxq; txq = sta->txq[tid]; 
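rtw_dump_fw() above drains an arbitrarily large firmware region through a FIFO of fixed capacity, moving min(size, rxff) bytes per pass and tracking the running offset in done_size. A self-contained sketch of that chunking loop, with memcpy() standing in for the driver's rtw_ddma_to_fw_fifo()/rtw_fw_dump_fifo() pair:

#include <stddef.h>
#include <string.h>

static int dump_region(char *dst, const char *src, size_t size, size_t fifo_cap)
{
	size_t done = 0;

	while (size) {
		size_t chunk = size > fifo_cap ? fifo_cap : size;

		/* in the driver: DMA the chunk into the FIFO, then read it out */
		memcpy(dst + done, src + done, chunk);
		size -= chunk;
		done += chunk;
	}
	return 0;
}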
rtwtxq = (struct rtw_txq *)txq->drv_priv; set_bit(RTW_TXQ_BLOCK_BA, &rtwtxq->flags); } tid = find_first_bit(si->tid_ba, IEEE80211_NUM_TIDS); } } static void rtw_txq_ba_work(struct work_struct *work) { struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ba_work); struct rtw_txq_ba_iter_data data; rtw_iterate_stas_atomic(rtwdev, rtw_txq_ba_iter, &data); } void rtw_set_rx_freq_band(struct rtw_rx_pkt_stat *pkt_stat, u8 channel) { if (IS_CH_2G_BAND(channel)) pkt_stat->band = NL80211_BAND_2GHZ; else if (IS_CH_5G_BAND(channel)) pkt_stat->band = NL80211_BAND_5GHZ; else return; pkt_stat->freq = ieee80211_channel_to_frequency(channel, pkt_stat->band); } EXPORT_SYMBOL(rtw_set_rx_freq_band); void rtw_get_channel_params(struct cfg80211_chan_def *chandef, struct rtw_channel_params *chan_params) { struct ieee80211_channel *channel = chandef->chan; enum nl80211_chan_width width = chandef->width; u8 *cch_by_bw = chan_params->cch_by_bw; u32 primary_freq, center_freq; u8 center_chan; u8 bandwidth = RTW_CHANNEL_WIDTH_20; u8 primary_chan_idx = 0; u8 i; center_chan = channel->hw_value; primary_freq = channel->center_freq; center_freq = chandef->center_freq1; /* assign the center channel used while 20M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_20] = channel->hw_value; switch (width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: bandwidth = RTW_CHANNEL_WIDTH_20; primary_chan_idx = RTW_SC_DONT_CARE; break; case NL80211_CHAN_WIDTH_40: bandwidth = RTW_CHANNEL_WIDTH_40; if (primary_freq > center_freq) { primary_chan_idx = RTW_SC_20_UPPER; center_chan -= 2; } else { primary_chan_idx = RTW_SC_20_LOWER; center_chan += 2; } break; case NL80211_CHAN_WIDTH_80: bandwidth = RTW_CHANNEL_WIDTH_80; if (primary_freq > center_freq) { if (primary_freq - center_freq == 10) { primary_chan_idx = RTW_SC_20_UPPER; center_chan -= 2; } else { primary_chan_idx = RTW_SC_20_UPMOST; center_chan -= 6; } /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan + 4; } else { if (center_freq - primary_freq == 10) { primary_chan_idx = RTW_SC_20_LOWER; center_chan += 2; } else { primary_chan_idx = RTW_SC_20_LOWEST; center_chan += 6; } /* assign the center channel used * while 40M bw is selected */ cch_by_bw[RTW_CHANNEL_WIDTH_40] = center_chan - 4; } break; default: center_chan = 0; break; } chan_params->center_chan = center_chan; chan_params->bandwidth = bandwidth; chan_params->primary_chan_idx = primary_chan_idx; /* assign the center channel used while current bw is selected */ cch_by_bw[bandwidth] = center_chan; for (i = bandwidth + 1; i <= RTW_MAX_CHANNEL_WIDTH; i++) cch_by_bw[i] = 0; } void rtw_set_channel(struct rtw_dev *rtwdev) { struct ieee80211_hw *hw = rtwdev->hw; struct rtw_hal *hal = &rtwdev->hal; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_channel_params ch_param; u8 center_chan, bandwidth, primary_chan_idx; u8 i; rtw_get_channel_params(&hw->conf.chandef, &ch_param); if (WARN(ch_param.center_chan == 0, "Invalid channel\n")) return; center_chan = ch_param.center_chan; bandwidth = ch_param.bandwidth; primary_chan_idx = ch_param.primary_chan_idx; hal->current_band_width = bandwidth; hal->current_channel = center_chan; hal->current_primary_channel_index = primary_chan_idx; hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G; switch (center_chan) { case 1 ... 14: hal->sar_band = RTW_SAR_BAND_0; break; case 36 ... 64: hal->sar_band = RTW_SAR_BAND_1; break; case 100 ... 144: hal->sar_band = RTW_SAR_BAND_3; break; case 149 ... 
177: hal->sar_band = RTW_SAR_BAND_4; break; default: WARN(1, "unknown ch(%u) to SAR band\n", center_chan); hal->sar_band = RTW_SAR_BAND_0; break; } for (i = RTW_CHANNEL_WIDTH_20; i <= RTW_MAX_CHANNEL_WIDTH; i++) hal->cch_by_bw[i] = ch_param.cch_by_bw[i]; chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx); if (hal->current_band_type == RTW_BAND_5G) { rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); } else { if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G); else rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN); } rtw_phy_set_tx_power_level(rtwdev, center_chan); /* if the channel isn't set for scanning, we will do RF calibration * in ieee80211_ops::mgd_prepare_tx(). Performing the calibration * during scanning on each channel takes too long. */ if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) rtwdev->need_rfk = true; } void rtw_chip_prepare_tx(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; if (rtwdev->need_rfk) { rtwdev->need_rfk = false; chip->ops->phy_calibration(rtwdev); } } static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr) { int i; for (i = 0; i < ETH_ALEN; i++) rtw_write8(rtwdev, start + i, addr[i]); } void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, u32 config) { u32 addr, mask; if (config & PORT_SET_MAC_ADDR) { addr = rtwvif->conf->mac_addr.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr); } if (config & PORT_SET_BSSID) { addr = rtwvif->conf->bssid.addr; rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid); } if (config & PORT_SET_NET_TYPE) { addr = rtwvif->conf->net_type.addr; mask = rtwvif->conf->net_type.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type); } if (config & PORT_SET_AID) { addr = rtwvif->conf->aid.addr; mask = rtwvif->conf->aid.mask; rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid); } if (config & PORT_SET_BCN_CTRL) { addr = rtwvif->conf->bcn_ctrl.addr; mask = rtwvif->conf->bcn_ctrl.mask; rtw_write8_mask(rtwdev, addr, mask, rtwvif->bcn_ctrl); } } static u8 hw_bw_cap_to_bitamp(u8 bw_cap) { u8 bw = 0; switch (bw_cap) { case EFUSE_HW_CAP_IGNORE: case EFUSE_HW_CAP_SUPP_BW80: bw |= BIT(RTW_CHANNEL_WIDTH_80); fallthrough; case EFUSE_HW_CAP_SUPP_BW40: bw |= BIT(RTW_CHANNEL_WIDTH_40); fallthrough; default: bw |= BIT(RTW_CHANNEL_WIDTH_20); break; } return bw; } static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_chip_info *chip = rtwdev->chip; if (hw_ant_num == EFUSE_HW_CAP_IGNORE || hw_ant_num >= hal->rf_path_num) return; switch (hw_ant_num) { case 1: hal->rf_type = RF_1T1R; hal->rf_path_num = 1; if (!chip->fix_rf_phy_num) hal->rf_phy_num = hal->rf_path_num; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; break; default: WARN(1, "invalid hw configuration from efuse\n"); break; } } static u64 get_vht_ra_mask(struct ieee80211_sta *sta) { u64 ra_mask = 0; u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); u8 vht_mcs_cap; int i, nss; /* 4SS, every two bits for MCS7/8/9 */ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) { vht_mcs_cap = mcs_map & 0x3; switch (vht_mcs_cap) { case 2: /* MCS9 */ ra_mask |= 0x3ffULL << nss; break; case 1: /* MCS8 */ ra_mask |= 0x1ffULL << nss; break; case 0: /* MCS7 */ ra_mask |= 0x0ffULL << nss; break; default: break; } } return ra_mask; } static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num) { u8 rate_id = 0; switch (wireless_set) { case 
WIRELESS_CCK: rate_id = RTW_RATEID_B_20M; break; case WIRELESS_OFDM: rate_id = RTW_RATEID_G; break; case WIRELESS_CCK | WIRELESS_OFDM: rate_id = RTW_RATEID_BG; break; case WIRELESS_OFDM | WIRELESS_HT: if (tx_num == 1) rate_id = RTW_RATEID_GN_N1SS; else if (tx_num == 2) rate_id = RTW_RATEID_GN_N2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT: if (bw_mode == RTW_CHANNEL_WIDTH_40) { if (tx_num == 1) rate_id = RTW_RATEID_BGN_40M_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_BGN_40M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_BGN_20M_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_BGN_20M_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR5_N_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR7_N_4SS; } break; case WIRELESS_OFDM | WIRELESS_VHT: if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; break; case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT: if (bw_mode >= RTW_CHANNEL_WIDTH_80) { if (tx_num == 1) rate_id = RTW_RATEID_ARFR1_AC_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR0_AC_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } else { if (tx_num == 1) rate_id = RTW_RATEID_ARFR2_AC_2G_1SS; else if (tx_num == 2) rate_id = RTW_RATEID_ARFR3_AC_2G_2SS; else if (tx_num == 3) rate_id = RTW_RATEID_ARFR4_AC_3SS; else if (tx_num == 4) rate_id = RTW_RATEID_ARFR6_AC_4SS; } break; default: break; } return rate_id; } #define RA_MASK_CCK_RATES 0x0000f #define RA_MASK_OFDM_RATES 0x00ff0 #define RA_MASK_HT_RATES_1SS (0xff000ULL << 0) #define RA_MASK_HT_RATES_2SS (0xff000ULL << 8) #define RA_MASK_HT_RATES_3SS (0xff000ULL << 16) #define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \ RA_MASK_HT_RATES_2SS | \ RA_MASK_HT_RATES_3SS) #define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0) #define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10) #define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20) #define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \ RA_MASK_VHT_RATES_2SS | \ RA_MASK_VHT_RATES_3SS) #define RA_MASK_CCK_IN_HT 0x00005 #define RA_MASK_CCK_IN_VHT 0x00005 #define RA_MASK_OFDM_IN_VHT 0x00010 #define RA_MASK_OFDM_IN_HT_2G 0x00010 #define RA_MASK_OFDM_IN_HT_5G 0x00030 static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev, struct rtw_sta_info *si, u64 ra_mask, bool is_vht_enable, u8 wireless_set) { struct rtw_hal *hal = &rtwdev->hal; const struct cfg80211_bitrate_mask *mask = si->mask; u64 cfg_mask = GENMASK_ULL(63, 0); u8 rssi_level, band; if (wireless_set != WIRELESS_CCK) { rssi_level = si->rssi_level; if (rssi_level == 0) ra_mask &= 0xffffffffffffffffULL; else if (rssi_level == 1) ra_mask &= 0xfffffffffffffff0ULL; else if (rssi_level == 2) ra_mask &= 0xffffffffffffefe0ULL; else if (rssi_level == 3) ra_mask &= 0xffffffffffffcfc0ULL; else if (rssi_level == 4) ra_mask &= 0xffffffffffff8f80ULL; else if (rssi_level >= 5) ra_mask &= 0xffffffffffff0f00ULL; } if (!si->use_cfg_mask) return ra_mask; band = hal->current_band_type; if (band == RTW_BAND_2G) { band = NL80211_BAND_2GHZ; cfg_mask = mask->control[band].legacy; } else if (band == RTW_BAND_5G) { band = NL80211_BAND_5GHZ; cfg_mask = u64_encode_bits(mask->control[band].legacy, RA_MASK_OFDM_RATES); } if (!is_vht_enable) { if (ra_mask & 
RA_MASK_HT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], RA_MASK_HT_RATES_1SS); if (ra_mask & RA_MASK_HT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], RA_MASK_HT_RATES_2SS); } else { if (ra_mask & RA_MASK_VHT_RATES_1SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], RA_MASK_VHT_RATES_1SS); if (ra_mask & RA_MASK_VHT_RATES_2SS) cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], RA_MASK_VHT_RATES_2SS); } ra_mask &= cfg_mask; return ra_mask; } void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct ieee80211_sta *sta = si->sta; struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_hal *hal = &rtwdev->hal; u8 wireless_set; u8 bw_mode; u8 rate_id; u8 rf_type = RF_1T1R; u8 stbc_en = 0; u8 ldpc_en = 0; u8 tx_num = 1; u64 ra_mask = 0; bool is_vht_enable = false; bool is_support_sgi = false; if (sta->deflink.vht_cap.vht_supported) { is_vht_enable = true; ra_mask |= get_vht_ra_mask(sta); if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) stbc_en = VHT_STBC_EN; if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; } if (efuse->hw_cap.nss == 1) ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; if (hal->current_band_type == RTW_BAND_5G) { ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_OFDM | WIRELESS_VHT; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G; wireless_set = WIRELESS_OFDM | WIRELESS_HT; } else { wireless_set = WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_5G; } else if (hal->current_band_type == RTW_BAND_2G) { ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; if (sta->deflink.vht_cap.vht_supported) { ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT | RA_MASK_OFDM_IN_VHT; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT | WIRELESS_VHT; } else if (sta->deflink.ht_cap.ht_supported) { ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT | RA_MASK_OFDM_IN_HT_2G; wireless_set = WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT; } else if (sta->deflink.supp_rates[0] <= 0xf) { wireless_set = WIRELESS_CCK; } else { wireless_set = WIRELESS_CCK | WIRELESS_OFDM; } dm_info->rrsr_val_init = RRSR_INIT_2G; } else { rtw_err(rtwdev, "Unknown band type\n"); wireless_set = 0; } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; is_support_sgi = sta->deflink.vht_cap.vht_supported && (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; is_support_sgi = sta->deflink.ht_cap.ht_supported && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; is_support_sgi = sta->deflink.ht_cap.ht_supported && (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) { tx_num = 2; rf_type = RF_2T2R; } else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) { tx_num = 2; rf_type = RF_2T2R; } rate_id = 
get_rate_id(wireless_set, bw_mode, tx_num); ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable, wireless_set); si->bw_mode = bw_mode; si->stbc_en = stbc_en; si->ldpc_en = ldpc_en; si->rf_type = rf_type; si->wireless_set = wireless_set; si->sgi_enable = is_support_sgi; si->vht_enable = is_vht_enable; si->ra_mask = ra_mask; si->rate_id = rate_id; rtw_fw_send_ra_info(rtwdev, si); } static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; fw = &rtwdev->fw; wait_for_completion(&fw->completion); if (!fw->firmware) return -EINVAL; if (chip->wow_fw_name) { fw = &rtwdev->wow_fw; wait_for_completion(&fw->completion); if (!fw->firmware) return -EINVAL; } return 0; } static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { struct rtw_chip_info *chip = rtwdev->chip; if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported || !fw->feature) return LPS_DEEP_MODE_NONE; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) && rtw_fw_feature_check(fw, FW_FEATURE_PG)) return LPS_DEEP_MODE_PG; if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) && rtw_fw_feature_check(fw, FW_FEATURE_LCLK)) return LPS_DEEP_MODE_LCLK; return LPS_DEEP_MODE_NONE; } static int rtw_power_on(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw = &rtwdev->fw; bool wifi_only; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } /* power on MAC before firmware downloaded */ ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } ret = rtw_wait_firmware_completion(rtwdev); if (ret) { rtw_err(rtwdev, "failed to wait firmware completion\n"); goto err_off; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } /* config mac after firmware downloaded */ ret = rtw_mac_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to configure mac\n"); goto err_off; } chip->ops->phy_set_param(rtwdev); ret = rtw_hci_start(rtwdev); if (ret) { rtw_err(rtwdev, "failed to start hci\n"); goto err_off; } /* send H2C after HCI has started */ rtw_fw_send_general_info(rtwdev); rtw_fw_send_phydm_info(rtwdev); wifi_only = !rtwdev->efuse.btcoex; rtw_coex_power_on_setting(rtwdev); rtw_coex_init_hw_config(rtwdev, wifi_only); return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) { if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_NOTIFY_SCAN)) return; if (start) { rtw_fw_scan_notify(rtwdev, true); } else { reinit_completion(&rtwdev->fw_scan_density); rtw_fw_scan_notify(rtwdev, false); if (!wait_for_completion_timeout(&rtwdev->fw_scan_density, SCAN_NOTIFY_TIMEOUT)) rtw_warn(rtwdev, "firmware failed to report density after scan\n"); } } void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, const u8 *mac_addr, bool hw_scan) { u32 config = 0; int ret = 0; rtw_leave_lps(rtwdev); if (hw_scan && rtwvif->net_type == RTW_NET_NO_LINK) { ret = rtw_leave_ips(rtwdev); if (ret) { rtw_err(rtwdev, "failed to leave idle state\n"); return; } } ether_addr_copy(rtwvif->mac_addr, mac_addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_START); rtw_core_fw_scan_notify(rtwdev, true); set_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); set_bit(RTW_FLAG_SCANNING, rtwdev->flags); } void 
rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; u32 config = 0; clear_bit(RTW_FLAG_SCANNING, rtwdev->flags); clear_bit(RTW_FLAG_DIG_DISABLE, rtwdev->flags); rtw_core_fw_scan_notify(rtwdev, false); ether_addr_copy(rtwvif->mac_addr, vif->addr); config |= PORT_SET_MAC_ADDR; rtw_vif_port_config(rtwdev, rtwvif, config); rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH); } int rtw_core_start(struct rtw_dev *rtwdev) { int ret; ret = rtw_power_on(rtwdev); if (ret) return ret; rtw_sec_enable_sec_engine(rtwdev); rtwdev->lps_conf.deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->fw); rtwdev->lps_conf.wow_deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->wow_fw); /* rcr reset after powered on */ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work, RTW_WATCH_DOG_DELAY_TIME); set_bit(RTW_FLAG_RUNNING, rtwdev->flags); return 0; } static void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } void rtw_core_stop(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; clear_bit(RTW_FLAG_RUNNING, rtwdev->flags); clear_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags); mutex_unlock(&rtwdev->mutex); cancel_work_sync(&rtwdev->c2h_work); cancel_delayed_work_sync(&rtwdev->watch_dog_work); cancel_delayed_work_sync(&coex->bt_relink_work); cancel_delayed_work_sync(&coex->bt_reenable_work); cancel_delayed_work_sync(&coex->defreeze_work); cancel_delayed_work_sync(&coex->wl_remain_work); cancel_delayed_work_sync(&coex->bt_remain_work); cancel_delayed_work_sync(&coex->wl_connecting_work); cancel_delayed_work_sync(&coex->bt_multi_link_remain_work); cancel_delayed_work_sync(&coex->wl_ccklock_work); mutex_lock(&rtwdev->mutex); rtw_power_off(rtwdev); } static void rtw_init_ht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_ht_cap *ht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; ht_cap->ht_supported = true; ht_cap->cap = 0; ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_MAX_AMSDU | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING; if (rtw_chip_has_tx_stbc(rtwdev)) ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC; if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40)) ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_SGI_40; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (efuse->hw_cap.nss > 1) { ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(300); } else { ht_cap->mcs.rx_mask[0] = 0xFF; ht_cap->mcs.rx_mask[1] = 0x00; ht_cap->mcs.rx_mask[4] = 0x01; ht_cap->mcs.rx_highest = cpu_to_le16(150); } } static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; u16 mcs_map; __le16 highest; if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) return; vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 0; if (rtwdev->hal.rf_path_num > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; vht_cap->cap |= 
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; vht_cap->cap |= (rtwdev->hal.bfee_sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; if (efuse->hw_cap.nss > 1) { highest = cpu_to_le16(780); mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; } else { highest = cpu_to_le16(390); mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; } vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; vht_cap->vht_mcs.tx_highest = highest; } static void rtw_set_supported_band(struct ieee80211_hw *hw, struct rtw_chip_info *chip) { struct rtw_dev *rtwdev = hw->priv; struct ieee80211_supported_band *sband; if (chip->band & RTW_BAND_2G) { sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); hw->wiphy->bands[NL80211_BAND_2GHZ] = sband; } if (chip->band & RTW_BAND_5G) { sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL); if (!sband) goto err_out; if (chip->ht_supported) rtw_init_ht_cap(rtwdev, &sband->ht_cap); if (chip->vht_supported) rtw_init_vht_cap(rtwdev, &sband->vht_cap); hw->wiphy->bands[NL80211_BAND_5GHZ] = sband; } return; err_out: rtw_err(rtwdev, "failed to set supported band\n"); } static void rtw_unset_supported_band(struct ieee80211_hw *hw, struct rtw_chip_info *chip) { kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]); kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); } static void __update_firmware_feature(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { u32 feature; const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; feature = le32_to_cpu(fw_hdr->feature); fw->feature = feature & FW_FEATURE_SIG ? 
feature : 0; } static void __update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)fw->firmware->data; fw->h2c_version = le16_to_cpu(fw_hdr->h2c_fmt_ver); fw->version = le16_to_cpu(fw_hdr->version); fw->sub_version = fw_hdr->subversion; fw->sub_index = fw_hdr->subindex; __update_firmware_feature(rtwdev, fw); } static void __update_firmware_info_legacy(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { struct rtw_fw_hdr_legacy *legacy = #if defined(__linux__) (struct rtw_fw_hdr_legacy *)fw->firmware->data; #elif defined(__FreeBSD__) __DECONST(struct rtw_fw_hdr_legacy *, fw->firmware->data); #endif fw->h2c_version = 0; fw->version = le16_to_cpu(legacy->version); fw->sub_version = legacy->subversion1; fw->sub_index = legacy->subversion2; } static void update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { if (rtw_chip_wcpu_11n(rtwdev)) __update_firmware_info_legacy(rtwdev, fw); else __update_firmware_info(rtwdev, fw); } static void rtw_load_firmware_cb(const struct firmware *firmware, void *context) { struct rtw_fw_state *fw = context; struct rtw_dev *rtwdev = fw->rtwdev; if (!firmware || !firmware->data) { rtw_err(rtwdev, "failed to request firmware\n"); complete_all(&fw->completion); return; } fw->firmware = firmware; update_firmware_info(rtwdev, fw); complete_all(&fw->completion); rtw_info(rtwdev, "Firmware version %u.%u.%u, H2C version %u\n", fw->version, fw->sub_version, fw->sub_index, fw->h2c_version); } static int rtw_load_firmware(struct rtw_dev *rtwdev, enum rtw_fw_type type) { const char *fw_name; struct rtw_fw_state *fw; int ret; switch (type) { case RTW_WOWLAN_FW: fw = &rtwdev->wow_fw; fw_name = rtwdev->chip->wow_fw_name; break; case RTW_NORMAL_FW: fw = &rtwdev->fw; fw_name = rtwdev->chip->fw_name; break; default: rtw_warn(rtwdev, "unsupported firmware type\n"); return -ENOENT; } fw->rtwdev = rtwdev; init_completion(&fw->completion); ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev, GFP_KERNEL, fw, rtw_load_firmware_cb); if (ret) { rtw_err(rtwdev, "failed to async firmware request\n"); return ret; } return 0; } static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; switch (rtw_hci_type(rtwdev)) { case RTW_HCI_TYPE_PCIE: rtwdev->hci.rpwm_addr = 0x03d9; rtwdev->hci.cpwm_addr = 0x03da; break; default: rtw_err(rtwdev, "unsupported hci type\n"); return -EINVAL; } hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1); hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version); hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1; if (hal->chip_version & BIT_RF_TYPE_ID) { hal->rf_type = RF_2T2R; hal->rf_path_num = 2; hal->antenna_tx = BB_PATH_AB; hal->antenna_rx = BB_PATH_AB; } else { hal->rf_type = RF_1T1R; hal->rf_path_num = 1; hal->antenna_tx = BB_PATH_A; hal->antenna_rx = BB_PATH_A; } hal->rf_phy_num = chip->fix_rf_phy_num ? 
chip->fix_rf_phy_num : hal->rf_path_num; efuse->physical_size = chip->phy_efuse_size; efuse->logical_size = chip->log_efuse_size; efuse->protect_size = chip->ptct_efuse_size; /* default use ack */ rtwdev->hal.rcr |= BIT_VHT_DACK; hal->bfee_sts_cap = 3; return 0; } static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev) { struct rtw_fw_state *fw = &rtwdev->fw; int ret; ret = rtw_hci_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup hci\n"); goto err; } ret = rtw_mac_power_on(rtwdev); if (ret) { rtw_err(rtwdev, "failed to power on mac\n"); goto err; } rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP); wait_for_completion(&fw->completion); if (!fw->firmware) { ret = -EINVAL; rtw_err(rtwdev, "failed to load firmware\n"); goto err; } ret = rtw_download_firmware(rtwdev, fw); if (ret) { rtw_err(rtwdev, "failed to download firmware\n"); goto err_off; } return 0; err_off: rtw_mac_power_off(rtwdev); err: return ret; } static int rtw_dump_hw_feature(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; u8 hw_feature[HW_FEATURE_LEN]; u8 id; u8 bw; int i; id = rtw_read8(rtwdev, REG_C2HEVT); if (id != C2H_HW_FEATURE_REPORT) { rtw_err(rtwdev, "failed to read hw feature report\n"); return -EBUSY; } for (i = 0; i < HW_FEATURE_LEN; i++) hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i); rtw_write8(rtwdev, REG_C2HEVT, 0); bw = GET_EFUSE_HW_CAP_BW(hw_feature); efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw); efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature); efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature); efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature); efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature); rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num); if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE || efuse->hw_cap.nss > rtwdev->hal.rf_path_num) efuse->hw_cap.nss = rtwdev->hal.rf_path_num; rtw_dbg(rtwdev, RTW_DBG_EFUSE, "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, efuse->hw_cap.ant_num, efuse->hw_cap.nss); return 0; } static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_mac_power_off(rtwdev); } static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; int ret; mutex_lock(&rtwdev->mutex); /* power on mac to read efuse */ ret = rtw_chip_efuse_enable(rtwdev); if (ret) goto out_unlock; ret = rtw_parse_efuse_map(rtwdev); if (ret) goto out_disable; ret = rtw_dump_hw_feature(rtwdev); if (ret) goto out_disable; ret = rtw_check_supported_rfe(rtwdev); if (ret) goto out_disable; if (efuse->crystal_cap == 0xff) efuse->crystal_cap = 0; if (efuse->pa_type_2g == 0xff) efuse->pa_type_2g = 0; if (efuse->pa_type_5g == 0xff) efuse->pa_type_5g = 0; if (efuse->lna_type_2g == 0xff) efuse->lna_type_2g = 0; if (efuse->lna_type_5g == 0xff) efuse->lna_type_5g = 0; if (efuse->channel_plan == 0xff) efuse->channel_plan = 0x7f; if (efuse->rf_board_option == 0xff) efuse->rf_board_option = 0; if (efuse->bt_setting & BIT(0)) efuse->share_ant = true; if (efuse->regd == 0xff) efuse->regd = 0; if (efuse->tx_bb_swing_setting_2g == 0xff) efuse->tx_bb_swing_setting_2g = 0; if (efuse->tx_bb_swing_setting_5g == 0xff) efuse->tx_bb_swing_setting_5g = 0; efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0; efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0; efuse->ext_lna_2g = efuse->lna_type_5g & BIT(3) ? 
1 : 0; out_disable: rtw_chip_efuse_disable(rtwdev); out_unlock: mutex_unlock(&rtwdev->mutex); return ret; } static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); if (!rfe_def) return -ENODEV; rtw_phy_setup_phy_cond(rtwdev, 0); rtw_phy_init_tx_power(rtwdev); if (rfe_def->agc_btg_tbl) rtw_load_table(rtwdev, rfe_def->agc_btg_tbl); rtw_load_table(rtwdev, rfe_def->phy_pg_tbl); rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl); rtw_phy_tx_power_by_rate_config(hal); rtw_phy_tx_power_limit_config(hal); return 0; } int rtw_chip_info_setup(struct rtw_dev *rtwdev) { int ret; ret = rtw_chip_parameter_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip parameters\n"); goto err_out; } ret = rtw_chip_efuse_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip efuse info\n"); goto err_out; } ret = rtw_chip_board_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip board info\n"); goto err_out; } return 0; err_out: return ret; } EXPORT_SYMBOL(rtw_chip_info_setup); static void rtw_stats_init(struct rtw_dev *rtwdev) { struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_dm_info *dm_info = &rtwdev->dm_info; int i; ewma_tp_init(&stats->tx_ewma_tp); ewma_tp_init(&stats->rx_ewma_tp); for (i = 0; i < RTW_EVM_NUM; i++) ewma_evm_init(&dm_info->ewma_evm[i]); for (i = 0; i < RTW_SNR_NUM; i++) ewma_snr_init(&dm_info->ewma_snr[i]); } int rtw_core_init(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex *coex = &rtwdev->coex; int ret; INIT_LIST_HEAD(&rtwdev->rsvd_page_list); INIT_LIST_HEAD(&rtwdev->txqs); timer_setup(&rtwdev->tx_report.purge_timer, rtw_tx_report_purge_timer, 0); rtwdev->tx_wq = alloc_workqueue("rtw_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work); INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work); INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work, rtw_coex_bt_multi_link_remain_work); INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work); INIT_WORK(&rtwdev->tx_work, rtw_tx_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work); INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); skb_queue_head_init(&rtwdev->c2h_queue); skb_queue_head_init(&rtwdev->coex.queue); skb_queue_head_init(&rtwdev->tx_report.queue); spin_lock_init(&rtwdev->rf_lock); spin_lock_init(&rtwdev->h2c.lock); spin_lock_init(&rtwdev->txq_lock); spin_lock_init(&rtwdev->tx_report.q_lock); mutex_init(&rtwdev->mutex); mutex_init(&rtwdev->coex.mutex); mutex_init(&rtwdev->hal.tx_power_mutex); init_waitqueue_head(&rtwdev->coex.wait); init_completion(&rtwdev->lps_leave_check); init_completion(&rtwdev->fw_scan_density); rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; rtwdev->dm_info.fix_rate = U8_MAX; set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); rtw_stats_init(rtwdev); /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = rtw_load_firmware(rtwdev, 
RTW_NORMAL_FW); if (ret) { rtw_warn(rtwdev, "no firmware loaded\n"); return ret; } if (chip->wow_fw_name) { ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW); if (ret) { rtw_warn(rtwdev, "no wow firmware loaded\n"); wait_for_completion(&rtwdev->fw.completion); if (rtwdev->fw.firmware) release_firmware(rtwdev->fw.firmware); return ret; } } #if defined(__FreeBSD__) rtw_wait_firmware_completion(rtwdev); #endif return 0; } EXPORT_SYMBOL(rtw_core_init); void rtw_core_deinit(struct rtw_dev *rtwdev) { struct rtw_fw_state *fw = &rtwdev->fw; struct rtw_fw_state *wow_fw = &rtwdev->wow_fw; struct rtw_rsvd_page *rsvd_pkt, *tmp; unsigned long flags; rtw_wait_firmware_completion(rtwdev); if (fw->firmware) release_firmware(fw->firmware); if (wow_fw->firmware) release_firmware(wow_fw->firmware); destroy_workqueue(rtwdev->tx_wq); spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags); skb_queue_purge(&rtwdev->tx_report.queue); skb_queue_purge(&rtwdev->coex.queue); spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags); list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, build_list) { list_del(&rsvd_pkt->build_list); kfree(rsvd_pkt); } mutex_destroy(&rtwdev->mutex); mutex_destroy(&rtwdev->coex.mutex); mutex_destroy(&rtwdev->hal.tx_power_mutex); } EXPORT_SYMBOL(rtw_core_deinit); int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) { struct rtw_hal *hal = &rtwdev->hal; int max_tx_headroom = 0; int ret; /* TODO: USB & SDIO may need extra room? */ max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz; hw->extra_tx_headroom = max_tx_headroom; hw->queues = IEEE80211_NUM_ACS; hw->txq_data_size = sizeof(struct rtw_txq); hw->sta_data_size = sizeof(struct rtw_sta_info); hw->vif_data_size = sizeof(struct rtw_vif); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, TX_AMSDU); ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); hw->wiphy->available_antennas_tx = hal->antenna_tx; hw->wiphy->available_antennas_rx = hal->antenna_rx; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_TDLS_EXTERNAL_SETUP; hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS; hw->wiphy->max_scan_ie_len = RTW_SCAN_MAX_IE_LEN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); #ifdef CONFIG_PM hw->wiphy->wowlan = rtwdev->chip->wowlan_stub; hw->wiphy->max_sched_scan_ssids = rtwdev->chip->max_sched_scan_ssids; #endif rtw_set_supported_band(hw, rtwdev->chip); SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr); hw->wiphy->sar_capa = &rtw_sar_capa; ret = rtw_regd_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to init regd\n"); return ret; } ret = ieee80211_register_hw(hw); if (ret) { rtw_err(rtwdev, "failed to register hw\n"); return ret; } ret = rtw_regd_hint(rtwdev); if (ret) { rtw_err(rtwdev, "failed to hint regd\n"); return ret; } rtw_debugfs_init(rtwdev); rtwdev->bf_info.bfer_mu_cnt = 0; 
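Firmware loading above is asynchronous: rtw_load_firmware() fires off a request and returns, while rtw_wait_firmware_completion() blocks until the rtw_load_firmware_cb() callback publishes the result via complete_all(). A rough userland analogue of that completion handshake, sketched with a pthread condition variable rather than the kernel's completion API:

#include <pthread.h>
#include <stddef.h>

struct fw_completion {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	int done;
	const void *firmware;	/* NULL means the request failed */
};

static void fw_completion_init(struct fw_completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cv, NULL);
	c->done = 0;
	c->firmware = NULL;
}

/* called from the asynchronous loader, like rtw_load_firmware_cb() */
static void fw_complete_all(struct fw_completion *c, const void *fw)
{
	pthread_mutex_lock(&c->lock);
	c->firmware = fw;
	c->done = 1;
	pthread_cond_broadcast(&c->cv);
	pthread_mutex_unlock(&c->lock);
}

/* called from the setup path, like rtw_wait_firmware_completion() */
static const void *fw_wait_for_completion(struct fw_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cv, &c->lock);
	pthread_mutex_unlock(&c->lock);
	return c->firmware;	/* caller treats NULL as -EINVAL */
}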
rtwdev->bf_info.bfer_su_cnt = 0; return 0; } EXPORT_SYMBOL(rtw_register_hw); void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw) { struct rtw_chip_info *chip = rtwdev->chip; ieee80211_unregister_hw(hw); rtw_unset_supported_band(hw, chip); } EXPORT_SYMBOL(rtw_unregister_hw); MODULE_AUTHOR("Realtek Corporation"); MODULE_DESCRIPTION("Realtek 802.11ac wireless core module"); MODULE_LICENSE("Dual BSD/GPL"); +#if defined(__FreeBSD__) +MODULE_VERSION(rtw88_core, 1); +MODULE_DEPEND(rtw88_core, linuxkpi, 1, 1, 1); +MODULE_DEPEND(rtw88_core, linuxkpi_wlan, 1, 1, 1); +#ifdef CONFIG_RTW88_DEBUGFS +MODULE_DEPEND(rtw88_core, debugfs, 1, 1, 1); +#endif +#endif diff --git a/sys/contrib/dev/rtw88/pci.c b/sys/contrib/dev/rtw88/pci.c index 431d0c1c8b98..34d58b0f3db3 100644 --- a/sys/contrib/dev/rtw88/pci.c +++ b/sys/contrib/dev/rtw88/pci.c @@ -1,1945 +1,1943 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX rtw88_pci_ #endif #include #include #include "main.h" #include "pci.h" #include "reg.h" #include "tx.h" #include "rx.h" #include "fw.h" #include "ps.h" #include "debug.h" #if defined(__FreeBSD__) #include #endif static bool rtw_disable_msi; static bool rtw_pci_disable_aspm; module_param_named(disable_msi, rtw_disable_msi, bool, 0644); module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support"); static u32 rtw_pci_tx_queue_idx_addr[] = { [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ, [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ, [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ, [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q, [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ, }; static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue) { switch (queue) { case RTW_TX_QUEUE_BCN: return TX_DESC_QSEL_BEACON; case RTW_TX_QUEUE_H2C: return TX_DESC_QSEL_H2C; case RTW_TX_QUEUE_MGMT: return TX_DESC_QSEL_MGMT; case RTW_TX_QUEUE_HI0: return TX_DESC_QSEL_HIGH; default: return skb->priority; } }; static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readb(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u8 val; val = bus_read_1((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); return (val); #endif } static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readw(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u16 val; val = bus_read_2((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); return (val); #endif } static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readl(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u32 val; val = bus_read_4((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); return (val); #endif } static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writeb(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, 
RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val); return (bus_write_1((struct resource *)rtwpci->mmap, addr, val)); #endif } static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writew(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val); return (bus_write_2((struct resource *)rtwpci->mmap, addr, val)); #endif } static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writel(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val); return (bus_write_4((struct resource *)rtwpci->mmap, addr, val)); #endif } #if defined(__linux__) && 0 static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx) { int offset = tx_ring->r.desc_size * idx; return tx_ring->r.head + offset; } #endif static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct rtw_pci_tx_data *tx_data; struct sk_buff *skb, *tmp; dma_addr_t dma; /* free every skb remained in tx list */ skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { __skb_unlink(skb, &tx_ring->queue); tx_data = rtw_pci_get_tx_data(skb); dma = tx_data->dma; dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); u8 *head = tx_ring->r.head; u32 len = tx_ring->r.len; int ring_sz = len * tx_ring->r.desc_size; rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); /* free the ring itself */ dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); tx_ring->r.head = NULL; } static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct sk_buff *skb; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_addr_t dma; int i; for (i = 0; i < rx_ring->r.len; i++) { skb = rx_ring->buf[i]; if (!skb) continue; dma = *((dma_addr_t *)skb->cb); dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); rx_ring->buf[i] = NULL; } } static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); u8 *head = rx_ring->r.head; int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma); } static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; struct rtw_pci_rx_ring *rx_ring; int i; for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { tx_ring = &rtwpci->tx_rings[i]; rtw_pci_free_tx_ring(rtwdev, tx_ring); } for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) { rx_ring = &rtwpci->rx_rings[i]; rtw_pci_free_rx_ring(rtwdev, rx_ring); } } static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring, u8 desc_size, u32 len) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); int ring_sz = desc_size * len; dma_addr_t dma; u8 *head; if (len > TRX_BD_IDX_MASK) { rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len); return -EINVAL; } head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); if (!head) { rtw_err(rtwdev, 
"failed to allocate tx ring\n"); return -ENOMEM; } skb_queue_head_init(&tx_ring->queue); tx_ring->r.head = head; tx_ring->r.dma = dma; tx_ring->r.len = len; tx_ring->r.desc_size = desc_size; tx_ring->r.wp = 0; tx_ring->r.rp = 0; return 0; } static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, struct rtw_pci_rx_ring *rx_ring, u32 idx, u32 desc_sz) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct rtw_pci_rx_buffer_desc *buf_desc; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_addr_t dma; if (!skb) return -EINVAL; dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) return -EBUSY; *((dma_addr_t *)skb->cb) = dma; buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); memset(buf_desc, 0, sizeof(*buf_desc)); buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); buf_desc->dma = cpu_to_le32(dma); return 0; } static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, struct rtw_pci_rx_ring *rx_ring, u32 idx, u32 desc_sz) { struct device *dev = rtwdev->dev; struct rtw_pci_rx_buffer_desc *buf_desc; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); memset(buf_desc, 0, sizeof(*buf_desc)); buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); buf_desc->dma = cpu_to_le32(dma); } static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u8 desc_size, u32 len) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct sk_buff *skb = NULL; dma_addr_t dma; u8 *head; int ring_sz = desc_size * len; int buf_sz = RTK_PCI_RX_BUF_SIZE; int i, allocated; int ret = 0; head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); if (!head) { rtw_err(rtwdev, "failed to allocate rx ring\n"); return -ENOMEM; } rx_ring->r.head = head; for (i = 0; i < len; i++) { skb = dev_alloc_skb(buf_sz); if (!skb) { allocated = i; ret = -ENOMEM; goto err_out; } memset(skb->data, 0, buf_sz); rx_ring->buf[i] = skb; ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size); if (ret) { allocated = i; dev_kfree_skb_any(skb); goto err_out; } } rx_ring->r.dma = dma; rx_ring->r.len = len; rx_ring->r.desc_size = desc_size; rx_ring->r.wp = 0; rx_ring->r.rp = 0; return 0; err_out: for (i = 0; i < allocated; i++) { skb = rx_ring->buf[i]; if (!skb) continue; dma = *((dma_addr_t *)skb->cb); dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); rx_ring->buf[i] = NULL; } dma_free_coherent(&pdev->dev, ring_sz, head, dma); rtw_err(rtwdev, "failed to init rx buffer\n"); return ret; } static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; struct rtw_pci_rx_ring *rx_ring; struct rtw_chip_info *chip = rtwdev->chip; int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0; int tx_desc_size, rx_desc_size; u32 len; int ret; tx_desc_size = chip->tx_buf_desc_sz; for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { tx_ring = &rtwpci->tx_rings[i]; len = max_num_of_tx_queue(i); ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len); if (ret) goto out; } rx_desc_size = chip->rx_buf_desc_sz; for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) { rx_ring = &rtwpci->rx_rings[j]; ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size, RTK_MAX_RX_DESC_NUM); if (ret) goto out; } return 0; out: tx_alloced = i; for (i = 0; i < tx_alloced; i++) { tx_ring = &rtwpci->tx_rings[i]; 
rtw_pci_free_tx_ring(rtwdev, tx_ring); } rx_alloced = j; for (j = 0; j < rx_alloced; j++) { rx_ring = &rtwpci->rx_rings[j]; rtw_pci_free_rx_ring(rtwdev, rx_ring); } return ret; } static void rtw_pci_deinit(struct rtw_dev *rtwdev) { rtw_pci_free_trx_ring(rtwdev); } static int rtw_pci_init(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; int ret = 0; rtwpci->irq_mask[0] = IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_ROK | IMR_BCNDMAINT_E | IMR_C2HCMD | 0; rtwpci->irq_mask[1] = IMR_TXFOVW | 0; rtwpci->irq_mask[3] = IMR_H2CDOK | 0; spin_lock_init(&rtwpci->irq_lock); spin_lock_init(&rtwpci->hwirq_lock); ret = rtw_pci_init_trx_ring(rtwdev); return ret; } static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u32 len; u8 tmp; dma_addr_t dma; tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3); rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7); dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma); if (!rtw_chip_wcpu_11n(rtwdev)) { len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma); } len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma); len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma); /* reset read/write point */ 
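An editorial aside on rtw_pci_reset_buf_desc() above: the queue-by-queue blocks differ only in which ring and which NUM/DESA register pair they touch. Purely as an illustration of that structure, here is a hypothetical table-driven equivalent; this is not part of the change, the sketch_* names are invented, and only the RTW_TX_QUEUE_*, RTK_PCI_TXBD_* macros and ring fields are taken from the code above:

struct sketch_txbd_reg {
	u8 queue;	/* RTW_TX_QUEUE_* index into tx_rings[] */
	u32 num_reg;	/* RTK_PCI_TXBD_NUM_* register */
	u32 desa_reg;	/* RTK_PCI_TXBD_DESA_* register */
};

static const struct sketch_txbd_reg sketch_txbd_regs[] = {
	{ RTW_TX_QUEUE_BK,   RTK_PCI_TXBD_NUM_BKQ,   RTK_PCI_TXBD_DESA_BKQ },
	{ RTW_TX_QUEUE_BE,   RTK_PCI_TXBD_NUM_BEQ,   RTK_PCI_TXBD_DESA_BEQ },
	{ RTW_TX_QUEUE_VO,   RTK_PCI_TXBD_NUM_VOQ,   RTK_PCI_TXBD_DESA_VOQ },
	{ RTW_TX_QUEUE_VI,   RTK_PCI_TXBD_NUM_VIQ,   RTK_PCI_TXBD_DESA_VIQ },
	{ RTW_TX_QUEUE_MGMT, RTK_PCI_TXBD_NUM_MGMTQ, RTK_PCI_TXBD_DESA_MGMTQ },
	{ RTW_TX_QUEUE_HI0,  RTK_PCI_TXBD_NUM_HI0Q,  RTK_PCI_TXBD_DESA_HI0Q },
};

static void sketch_reset_txbd_regs(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(sketch_txbd_regs); i++) {
		const struct sketch_txbd_reg *r = &sketch_txbd_regs[i];
		struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[r->queue];

		/* zero the software read/write points, then program the
		 * ring length and base address, as each block above does */
		ring->r.rp = 0;
		ring->r.wp = 0;
		rtw_write16(rtwdev, r->num_reg, ring->r.len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, r->desa_reg, ring->r.dma);
	}
}

The BCN and H2C queues would stay special-cased, as they are above: BCN programs only a DESA register, and H2C is guarded by rtw_chip_wcpu_11n().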
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff); /* reset H2C Queue index in a single write */ if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX); } static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev) { rtw_pci_reset_buf_desc(rtwdev); } static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, bool exclude_rx) { unsigned long flags; u32 imr0_unmask = exclude_rx ? IMR_ROK : 0; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask); rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]); rtwpci->irq_enabled = true; spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { unsigned long flags; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); if (!rtwpci->irq_enabled) goto out; rtw_write32(rtwdev, RTK_PCI_HIMR0, 0); rtw_write32(rtwdev, RTK_PCI_HIMR1, 0); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HIMR3, 0); rtwpci->irq_enabled = false; out: spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { /* reset dma and rx tag */ rtw_write32_set(rtwdev, RTK_PCI_CTRL, BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN); rtwpci->rx_tag = 0; } static int rtw_pci_setup(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_reset_trx_ring(rtwdev); rtw_pci_dma_reset(rtwdev, rtwpci); return 0; } static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { struct rtw_pci_tx_ring *tx_ring; u8 queue; rtw_pci_reset_trx_ring(rtwdev); for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { tx_ring = &rtwpci->tx_rings[queue]; rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); } } static void rtw_pci_napi_start(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) return; napi_enable(&rtwpci->napi); } static void rtw_pci_napi_stop(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) return; napi_synchronize(&rtwpci->napi); napi_disable(&rtwpci->napi); } static int rtw_pci_start(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_napi_start(rtwdev); spin_lock_bh(&rtwpci->irq_lock); rtwpci->running = true; rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); return 0; } static void rtw_pci_stop(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct pci_dev *pdev = rtwpci->pdev; spin_lock_bh(&rtwpci->irq_lock); rtwpci->running = false; rtw_pci_disable_interrupt(rtwdev, rtwpci); spin_unlock_bh(&rtwpci->irq_lock); synchronize_irq(pdev->irq); rtw_pci_napi_stop(rtwdev); spin_lock_bh(&rtwpci->irq_lock); rtw_pci_dma_release(rtwdev, rtwpci); spin_unlock_bh(&rtwpci->irq_lock); } static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; bool tx_empty = true; u8 queue; if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) goto enter_deep_ps; lockdep_assert_held(&rtwpci->irq_lock); /* Deep PS state is not allowed to TX-DMA */ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; 
queue++) { /* BCN queue is rsvd page, does not have DMA interrupt * H2C queue is managed by firmware */ if (queue == RTW_TX_QUEUE_BCN || queue == RTW_TX_QUEUE_H2C) continue; tx_ring = &rtwpci->tx_rings[queue]; /* check if there is any skb DMAing */ if (skb_queue_len(&tx_ring->queue)) { tx_empty = false; break; } } if (!tx_empty) { rtw_dbg(rtwdev, RTW_DBG_PS, "TX path not empty, cannot enter deep power save state\n"); return; } enter_deep_ps: set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); rtw_power_mode_change(rtwdev, true); } static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev) { #if defined(__linux__) struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; lockdep_assert_held(&rtwpci->irq_lock); #elif defined(__FreeBSD__) lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock); #endif if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_power_mode_change(rtwdev, false); } static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; spin_lock_bh(&rtwpci->irq_lock); if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_pci_deep_ps_enter(rtwdev); if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_pci_deep_ps_leave(rtwdev); spin_unlock_bh(&rtwpci->irq_lock); } static u8 ac_to_hwq[] = { [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO, [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI, [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE, [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK, }; #if defined(__linux__) static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); #elif defined(__FreeBSD__) rtw88_static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); #endif static u8 rtw_hw_queue_mapping(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; u8 q_mapping = skb_get_queue_mapping(skb); u8 queue; if (unlikely(ieee80211_is_beacon(fc))) queue = RTW_TX_QUEUE_BCN; else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))) queue = RTW_TX_QUEUE_MGMT; else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq))) queue = ac_to_hwq[IEEE80211_AC_BE]; else queue = ac_to_hwq[q_mapping]; return queue; } static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci, struct rtw_pci_tx_ring *ring) { struct sk_buff *prev = skb_dequeue(&ring->queue); struct rtw_pci_tx_data *tx_data; dma_addr_t dma; if (!prev) return; tx_data = rtw_pci_get_tx_data(prev); dma = tx_data->dma; dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE); dev_kfree_skb_any(prev); } static void rtw_pci_dma_check(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u32 idx) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci_rx_buffer_desc *buf_desc; u32 desc_sz = chip->rx_buf_desc_sz; u16 total_pkt_size; buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); /* rx tag mismatch, throw a warning */ if (total_pkt_size != rtwpci->rx_tag) rtw_warn(rtwdev, "pci bus timeout, check dma status\n"); rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; } static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) { u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2); return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); } static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring = 
&rtwpci->tx_rings[pci_q]; u32 cur_rp; u8 i; /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a * bit dynamic, it's hard to define a reasonable fixed total timeout to * use a read_poll_timeout* helper. Instead, we bound the number of * polling iterations, so we just use a for loop with udelay here. */ for (i = 0; i < 30; i++) { cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); if (cur_rp == ring->r.wp) return; udelay(1); } if (!drop) rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q); } static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, bool drop) { u8 q; for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { /* It may not be necessary to flush the BCN and H2C tx queues. */ if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C) continue; if (pci_queues & BIT(q)) __pci_flush_queue(rtwdev, q, drop); } } static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) { u32 pci_queues = 0; u8 i; /* If a flush is requested for all of the hardware queues, * flush all of the pci queues. */ if (queues == BIT(rtwdev->hw->queues) - 1) { pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; } else { for (i = 0; i < rtwdev->hw->queues; i++) if (queues & BIT(i)) pci_queues |= BIT(ac_to_hwq[i]); } __rtw_pci_flush_queues(rtwdev, pci_queues, drop); } static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring; u32 bd_idx; ring = &rtwpci->tx_rings[queue]; bd_idx = rtw_pci_tx_queue_idx_addr[queue]; spin_lock_bh(&rtwpci->irq_lock); if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) rtw_pci_deep_ps_leave(rtwdev); rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); spin_unlock_bh(&rtwpci->irq_lock); } static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u8 queue; for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) if (test_and_clear_bit(queue, rtwpci->tx_queued)) rtw_pci_tx_kick_off_queue(rtwdev, queue); } static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci_tx_ring *ring; struct rtw_pci_tx_data *tx_data; dma_addr_t dma; u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; u32 size; u32 psb_len; u8 *pkt_desc; struct rtw_pci_tx_buffer_desc *buf_desc; ring = &rtwpci->tx_rings[queue]; size = skb->len; if (queue == RTW_TX_QUEUE_BCN) rtw_pci_release_rsvd_page(rtwpci, ring); else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len)) return -ENOSPC; pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, tx_pkt_desc_sz); pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); rtw_tx_fill_tx_desc(pkt_info, skb); dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&rtwpci->pdev->dev, dma)) return -EBUSY; /* after this the skb is dma mapped, there is no way back */ buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz); memset(buf_desc, 0, tx_buf_desc_sz); psb_len = (skb->len - 1) / 128 + 1; if (queue == RTW_TX_QUEUE_BCN) psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET; buf_desc[0].psb_len = cpu_to_le16(psb_len); buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz); buf_desc[0].dma = cpu_to_le32(dma); buf_desc[1].buf_size = cpu_to_le16(size); buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz); tx_data = rtw_pci_get_tx_data(skb); tx_data->dma = dma;
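Both halves of the ring code keep their DMA bookkeeping in the skb control block: the rx path stores a bare dma_addr_t in skb->cb, and the tx path goes through rtw_pci_get_tx_data(), whose rtw_pci_tx_data lives in the same cb area (see tx_data->dma just above, and the matching dma_unmap_single() calls in the free and ISR paths). A minimal sketch of the rx-side pattern, assuming only <linux/skbuff.h> and <linux/dma-mapping.h>; the sketch_* names are invented for illustration:

/* Sketch only: stash the mapped address in skb->cb on map and recover
 * it on unmap, as the rx ring code in this file does. skb->cb is
 * scratch space owned by whichever layer currently holds the skb. */
static int sketch_map_rx_buf(struct device *dev, struct sk_buff *skb,
			     int buf_sz)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -EBUSY;
	*((dma_addr_t *)skb->cb) = dma;	/* handle travels with the skb */
	return 0;
}

static void sketch_unmap_rx_buf(struct device *dev, struct sk_buff *skb,
				int buf_sz)
{
	dma_addr_t dma = *((dma_addr_t *)skb->cb);

	dma_unmap_single(dev, dma, buf_sz, DMA_FROM_DEVICE);
}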
tx_data->sn = pkt_info->sn; spin_lock_bh(&rtwpci->irq_lock); skb_queue_tail(&ring->queue, skb); if (queue == RTW_TX_QUEUE_BCN) goto out_unlock; /* update write-index, and kick it off later */ set_bit(queue, rtwpci->tx_queued); if (++ring->r.wp >= ring->r.len) ring->r.wp = 0; out_unlock: spin_unlock_bh(&rtwpci->irq_lock); return 0; } static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { struct sk_buff *skb; struct rtw_tx_pkt_info pkt_info = {0}; u8 reg_bcn_work; int ret; skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size); if (!skb) return -ENOMEM; ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN); if (ret) { #if defined(__FreeBSD__) dev_kfree_skb_any(skb); #endif rtw_err(rtwdev, "failed to write rsvd page data\n"); return ret; } /* reserved pages go through beacon queue */ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK); reg_bcn_work |= BIT_PCI_BCNQ_FLAG; rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work); return 0; } static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size) { struct sk_buff *skb; struct rtw_tx_pkt_info pkt_info = {0}; int ret; skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size); if (!skb) return -ENOMEM; ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C); if (ret) { #if defined(__FreeBSD__) dev_kfree_skb_any(skb); #endif rtw_err(rtwdev, "failed to write h2c data\n"); return ret; } rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C); return 0; } static int rtw_pci_tx_write(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring; u8 queue = rtw_hw_queue_mapping(skb); int ret; ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue); if (ret) return ret; ring = &rtwpci->tx_rings[queue]; spin_lock_bh(&rtwpci->irq_lock); if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); ring->queue_stopped = true; } spin_unlock_bh(&rtwpci->irq_lock); return 0; } static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u8 hw_queue) { struct ieee80211_hw *hw = rtwdev->hw; struct ieee80211_tx_info *info; struct rtw_pci_tx_ring *ring; struct rtw_pci_tx_data *tx_data; struct sk_buff *skb; u32 count; u32 bd_idx_addr; u32 bd_idx, cur_rp, rp_idx; u16 q_map; ring = &rtwpci->tx_rings[hw_queue]; bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue]; bd_idx = rtw_read32(rtwdev, bd_idx_addr); cur_rp = bd_idx >> 16; cur_rp &= TRX_BD_IDX_MASK; rp_idx = ring->r.rp; if (cur_rp >= ring->r.rp) count = cur_rp - ring->r.rp; else count = ring->r.len - (ring->r.rp - cur_rp); while (count--) { skb = skb_dequeue(&ring->queue); if (!skb) { rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", count, hw_queue, bd_idx, ring->r.rp, cur_rp); break; } tx_data = rtw_pci_get_tx_data(skb); dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, DMA_TO_DEVICE); /* just free command packets from host to card */ if (hw_queue == RTW_TX_QUEUE_H2C) { dev_kfree_skb_irq(skb); continue; } if (ring->queue_stopped && avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { q_map = skb_get_queue_mapping(skb); ieee80211_wake_queue(hw, q_map); ring->queue_stopped = false; } if (++rp_idx >= ring->r.len) rp_idx = 0; skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); info = IEEE80211_SKB_CB(skb); /* enqueue to wait for tx report */ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { 
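A reading aid: avail_desc() and get_tx_buffer_desc(), used above, are static inlines from pci.h and are not shown in this diff. The following is a reconstruction of avail_desc() consistent with how it is used here, where wp is the producer index, rp the consumer index, and one descriptor is kept unused so a full ring is distinguishable from an empty one; treat the exact body as an assumption, not the header's verbatim contents:

static inline int sketch_avail_desc(u32 wp, u32 rp, u32 len)
{
	if (rp > wp)
		return rp - wp - 1;		/* free run before the reader */
	else
		return len - wp + rp - 1;	/* wrapped: tail plus head */
}

Under this arithmetic the checks above are consistent: rtw_pci_tx_write() stops the mac80211 queue once fewer than two free descriptors remain, and rtw_pci_tx_isr() wakes it again once more than four are free.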
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); continue; } /* always ACK for others, then they won't be marked as drop */ if (info->flags & IEEE80211_TX_CTL_NO_ACK) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; else info->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_info_clear_status(info); ieee80211_tx_status_irqsafe(hw, skb); } ring->r.rp = cur_rp; } static void rtw_pci_rx_isr(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct napi_struct *napi = &rtwpci->napi; napi_schedule(napi); } static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { struct rtw_pci_rx_ring *ring; int count = 0; u32 tmp, cur_wp; ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ); cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK); if (cur_wp >= ring->r.wp) count = cur_wp - ring->r.wp; else count = ring->r.len - (ring->r.wp - cur_wp); return count; } static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u8 hw_queue, u32 limit) { struct rtw_chip_info *chip = rtwdev->chip; struct napi_struct *napi = &rtwpci->napi; struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; struct rtw_rx_pkt_stat pkt_stat; struct ieee80211_rx_status rx_status; struct sk_buff *skb, *new; u32 cur_rp = ring->r.rp; u32 count, rx_done = 0; u32 pkt_offset; u32 pkt_desc_sz = chip->rx_pkt_desc_sz; u32 buf_desc_sz = chip->rx_buf_desc_sz; u32 new_len; u8 *rx_desc; dma_addr_t dma; count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci); count = min(count, limit); while (count--) { rtw_pci_dma_check(rtwdev, ring, cur_rp); skb = ring->buf[cur_rp]; dma = *((dma_addr_t *)skb->cb); dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, DMA_FROM_DEVICE); rx_desc = skb->data; chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); /* offset from rx_desc to payload */ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + pkt_stat.shift; /* allocate a new skb for this frame, * discard the frame if none available */ new_len = pkt_stat.pkt_len + pkt_offset; new = dev_alloc_skb(new_len); if (WARN_ONCE(!new, "rx routine starvation\n")) goto next_rp; /* put the DMA data including rx_desc from the phy into the new skb */ skb_put_data(new, skb->data, new_len); if (pkt_stat.is_c2h) { rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new); } else { /* remove rx_desc */ skb_pull(new, pkt_offset); rtw_rx_stats(rtwdev, pkt_stat.vif, new); memcpy(new->cb, &rx_status, sizeof(rx_status)); ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); rx_done++; } next_rp: /* new skb delivered to mac80211, re-enable original skb DMA */ rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, buf_desc_sz); /* host reads the next element in the ring */ if (++cur_rp >= ring->r.len) cur_rp = 0; } ring->r.rp = cur_rp; /* 'rp', the last position we have read, is seen as the previous position * of 'wp' that is used to calculate 'count' next time.
*/ ring->r.wp = cur_rp; rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp); return rx_done; } static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u32 *irq_status) { unsigned long flags; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0); irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1); if (rtw_chip_wcpu_11ac(rtwdev)) irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3); else irq_status[3] = 0; irq_status[0] &= rtwpci->irq_mask[0]; irq_status[1] &= rtwpci->irq_mask[1]; irq_status[3] &= rtwpci->irq_mask[3]; rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]); rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]); spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) { struct rtw_dev *rtwdev = dev; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; /* disable RTW PCI interrupt to avoid more interrupts before the end of * thread function * * disable HIMR here to also avoid new HISR flag being raised before * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs * are cleared, the edge-triggered interrupt will not be generated when * a new HISR flag is set. */ rtw_pci_disable_interrupt(rtwdev, rtwpci); return IRQ_WAKE_THREAD; } static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) { struct rtw_dev *rtwdev = dev; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u32 irq_status[4]; bool rx = false; spin_lock_bh(&rtwpci->irq_lock); rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status); if (irq_status[0] & IMR_MGNTDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT); if (irq_status[0] & IMR_HIGHDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0); if (irq_status[0] & IMR_BEDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE); if (irq_status[0] & IMR_BKDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK); if (irq_status[0] & IMR_VODOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO); if (irq_status[0] & IMR_VIDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI); if (irq_status[3] & IMR_H2CDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C); if (irq_status[0] & IMR_ROK) { rtw_pci_rx_isr(rtwdev); rx = true; } if (unlikely(irq_status[0] & IMR_C2HCMD)) rtw_fw_c2h_cmd_isr(rtwdev); /* all of the jobs for this interrupt have been done */ if (rtwpci->running) rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); spin_unlock_bh(&rtwpci->irq_lock); return IRQ_HANDLED; } static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; unsigned long len; u8 bar_id = 2; int ret; ret = pci_request_regions(pdev, KBUILD_MODNAME); if (ret) { rtw_err(rtwdev, "failed to request pci regions\n"); return ret; } #if defined(__FreeBSD__) ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { rtw_err(rtwdev, "failed to set dma mask to 32-bit\n"); goto err_release_regions; } ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); goto err_release_regions; } #endif len = pci_resource_len(pdev, bar_id); #if defined(__FreeBSD__) linuxkpi_pcim_want_to_use_bus_functions(pdev); #endif rtwpci->mmap = pci_iomap(pdev, bar_id, len); if (!rtwpci->mmap) { pci_release_regions(pdev); rtw_err(rtwdev, "failed to map pci memory\n"); return -ENOMEM; } return 0; #if defined(__FreeBSD__) err_release_regions: pci_release_regions(pdev); 
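A reading aid for the paired #if defined(__linux__) / #elif defined(__FreeBSD__) bodies in the register accessors near the top of this file: the same rtwpci->mmap field holds a CPU virtual address on Linux but a struct resource pointer on FreeBSD, which is why rtw_pci_io_mapping() calls linuxkpi_pcim_want_to_use_bus_functions() before pci_iomap() and why the accessors go through bus_read_*/bus_write_* with the extra RTW_DBG_IO_RW tracing. One such pair, condensed from the code above (sketch_read8 is an illustrative name, not a driver function):

static u8 sketch_read8(struct rtw_pci *rtwpci, u32 addr)
{
#if defined(__linux__)
	return readb(rtwpci->mmap + addr);	/* plain MMIO load */
#elif defined(__FreeBSD__)
	/* mmap actually holds a struct resource, so go through newbus */
	return bus_read_1((struct resource *)rtwpci->mmap, addr);
#endif
}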
return ret; #endif } static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (rtwpci->mmap) { pci_iounmap(pdev, rtwpci->mmap); pci_release_regions(pdev); } } static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data) { u16 write_addr; u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK); u8 flag; u8 cnt; write_addr = addr & BITS_DBI_ADDR_MASK; write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN); rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data); rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr); rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); if (flag == 0) return; udelay(10); } WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr); } static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value) { u16 read_addr = addr & BITS_DBI_ADDR_MASK; u8 flag; u8 cnt; rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr); rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); if (flag == 0) { read_addr = REG_DBI_RDATA_V1 + (addr & 3); *value = rtw_read8(rtwdev, read_addr); return 0; } udelay(10); } WARN(1, "failed to read DBI register, addr=0x%04x\n", addr); return -EIO; } static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1) { u8 page; u8 wflag; u8 cnt; rtw_write16(rtwdev, REG_MDIO_V1, data); page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1; page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2; rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK); rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page); rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1); if (wflag == 0) return; udelay(10); } WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr); } static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; if (rtw_pci_disable_aspm) return; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); return; } if (enable) value |= BIT_CLKREQ_SW_EN; else value &= ~BIT_CLKREQ_SW_EN; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); return; } if (enable) value &= ~BIT_CLKREQ_N_PAD; else value |= BIT_CLKREQ_N_PAD; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; if (rtw_pci_disable_aspm) return; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret); return; } if (enable) value |= BIT_L1_SW_EN; else value &= ~BIT_L1_SW_EN; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; /* Like CLKREQ, ASPM is also implemented by two HW modules, and can * only be enabled when host supports it. * * And ASPM mechanism should be enabled when driver/firmware enters * power save mode, without having heavy traffic. 
Because we've * experienced some inter-operability issues that the link tends * to enter L1 state on the fly even when driver is having high * throughput. This is probably because the ASPM behavior slightly * varies from different SOC. */ if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) return; if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) || (!enter && atomic_inc_return(&rtwpci->link_usage) == 1)) rtw_pci_aspm_set(rtwdev, enter); } static void rtw_pci_link_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct pci_dev *pdev = rtwpci->pdev; u16 link_ctrl; int ret; /* RTL8822CE has enabled REFCLK auto calibration, it does not need * to add clock delay to cover the REFCLK timing gap. */ if (chip->id == RTW_CHIP_TYPE_8822C) rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0); /* Though there is standard PCIE configuration space to set the * link control register, but by Realtek's design, driver should * check if host supports CLKREQ/ASPM to enable the HW module. * * These functions are implemented by two HW modules associated, * one is responsible to access PCIE configuration space to * follow the host settings, and another is in charge of doing * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes * the host does not support it, and due to some reasons or wrong * settings (ex. CLKREQ# not Bi-Direction), it could lead to device * loss if HW misbehaves on the link. * * Hence it's designed that driver should first check the PCIE * configuration space is sync'ed and enabled, then driver can turn * on the other module that is actually working on the mechanism. */ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); if (ret) { rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); return; } if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) rtw_pci_clkreq_set(rtwdev, true); rtwpci->link_ctrl = link_ctrl; } static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; switch (chip->id) { case RTW_CHIP_TYPE_8822C: if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG, BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1); break; default: break; } } static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; const struct rtw_intf_phy_para *para; u16 cut; u16 value; u16 offset; int i; cut = BIT(0) << rtwdev->hal.cut_version; for (i = 0; i < chip->intf_table->n_gen1_para; i++) { para = &chip->intf_table->gen1_para[i]; if (!(para->cut_mask & cut)) continue; if (para->offset == 0xffff) break; offset = para->offset; value = para->value; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, true); else rtw_dbi_write8(rtwdev, offset, value); } for (i = 0; i < chip->intf_table->n_gen2_para; i++) { para = &chip->intf_table->gen2_para[i]; if (!(para->cut_mask & cut)) continue; if (para->offset == 0xffff) break; offset = para->offset; value = para->value; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, false); else rtw_dbi_write8(rtwdev, offset, value); } rtw_pci_link_cfg(rtwdev); } static int __maybe_unused rtw_pci_suspend(struct device *dev) { struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) rtw_pci_clkreq_pad_low(rtwdev, true); return 0; } static int __maybe_unused 
rtw_pci_resume(struct device *dev) { struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) rtw_pci_clkreq_pad_low(rtwdev, false); return 0; } SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); EXPORT_SYMBOL(rtw_pm_ops); static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev) { int ret; ret = pci_enable_device(pdev); if (ret) { rtw_err(rtwdev, "failed to enable pci device\n"); return ret; } pci_set_master(pdev); pci_set_drvdata(pdev, rtwdev->hw); SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); return 0; } static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev) { pci_clear_master(pdev); pci_disable_device(pdev); } static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci; int ret; rtwpci = (struct rtw_pci *)rtwdev->priv; rtwpci->pdev = pdev; /* after this driver can access to hw registers */ ret = rtw_pci_io_mapping(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to request pci io region\n"); goto err_out; } ret = rtw_pci_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to allocate pci resources\n"); goto err_io_unmap; } return 0; err_io_unmap: rtw_pci_io_unmapping(rtwdev, pdev); err_out: return ret; } static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) { rtw_pci_deinit(rtwdev); rtw_pci_io_unmapping(rtwdev, pdev); } static struct rtw_hci_ops rtw_pci_ops = { .tx_write = rtw_pci_tx_write, .tx_kick_off = rtw_pci_tx_kick_off, .flush_queues = rtw_pci_flush_queues, .setup = rtw_pci_setup, .start = rtw_pci_start, .stop = rtw_pci_stop, .deep_ps = rtw_pci_deep_ps, .link_ps = rtw_pci_link_ps, .interface_cfg = rtw_pci_interface_cfg, .read8 = rtw_pci_read8, .read16 = rtw_pci_read16, .read32 = rtw_pci_read32, .write8 = rtw_pci_write8, .write16 = rtw_pci_write16, .write32 = rtw_pci_write32, .write_data_rsvd_page = rtw_pci_write_data_rsvd_page, .write_data_h2c = rtw_pci_write_data_h2c, }; static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) { unsigned int flags = PCI_IRQ_LEGACY; int ret; if (!rtw_disable_msi) flags |= PCI_IRQ_MSI; ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); if (ret < 0) { rtw_err(rtwdev, "failed to alloc PCI irq vectors\n"); return ret; } ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, rtw_pci_interrupt_handler, rtw_pci_interrupt_threadfn, IRQF_SHARED, KBUILD_MODNAME, rtwdev); if (ret) { rtw_err(rtwdev, "failed to request irq %d\n", ret); pci_free_irq_vectors(pdev); } return ret; } static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) { devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); pci_free_irq_vectors(pdev); } static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) { struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi); struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev, priv); int work_done = 0; if (rtwpci->rx_no_aspm) rtw_pci_link_ps(rtwdev, false); while (work_done < budget) { u32 work_done_once; work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU, budget - work_done); if (work_done_once == 0) break; work_done += work_done_once; } if (work_done < budget) { napi_complete_done(napi, work_done); spin_lock_bh(&rtwpci->irq_lock); if (rtwpci->running) rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); /* When ISR happens during polling and before napi_complete * 
while no further data is received. Data on the dma_ring will * not be processed immediately. Check whether dma ring is * empty and perform napi_schedule accordingly. */ if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci)) napi_schedule(napi); } if (rtwpci->rx_no_aspm) rtw_pci_link_ps(rtwdev, true); return work_done; } static void rtw_pci_napi_init(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; init_dummy_netdev(&rtwpci->netdev); netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll, RTW_NAPI_WEIGHT_NUM); } static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_napi_stop(rtwdev); netif_napi_del(&rtwpci->napi); } int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pci_dev *bridge = pci_upstream_bridge(pdev); struct ieee80211_hw *hw; struct rtw_dev *rtwdev; struct rtw_pci *rtwpci; int drv_data_size; int ret; drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci); hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops); if (!hw) { dev_err(&pdev->dev, "failed to allocate hw\n"); return -ENOMEM; } rtwdev = hw->priv; rtwdev->hw = hw; rtwdev->dev = &pdev->dev; rtwdev->chip = (struct rtw_chip_info *)id->driver_data; rtwdev->hci.ops = &rtw_pci_ops; rtwdev->hci.type = RTW_HCI_TYPE_PCIE; rtwpci = (struct rtw_pci *)rtwdev->priv; atomic_set(&rtwpci->link_usage, 1); ret = rtw_core_init(rtwdev); if (ret) goto err_release_hw; rtw_dbg(rtwdev, RTW_DBG_PCI, "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n", pdev->vendor, pdev->device, pdev->revision); ret = rtw_pci_claim(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to claim pci device\n"); goto err_deinit_core; } ret = rtw_pci_setup_resource(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to setup pci resources\n"); goto err_pci_declaim; } rtw_pci_napi_init(rtwdev); ret = rtw_chip_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip information\n"); goto err_destroy_pci; } /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL) rtwpci->rx_no_aspm = true; rtw_pci_phy_cfg(rtwdev); ret = rtw_register_hw(rtwdev, hw); if (ret) { rtw_err(rtwdev, "failed to register hw\n"); goto err_destroy_pci; } ret = rtw_pci_request_irq(rtwdev, pdev); if (ret) { ieee80211_unregister_hw(hw); goto err_destroy_pci; } return 0; err_destroy_pci: rtw_pci_napi_deinit(rtwdev); rtw_pci_destroy(rtwdev, pdev); err_pci_declaim: rtw_pci_declaim(rtwdev, pdev); err_deinit_core: rtw_core_deinit(rtwdev); err_release_hw: ieee80211_free_hw(hw); return ret; } EXPORT_SYMBOL(rtw_pci_probe); void rtw_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtw_dev *rtwdev; struct rtw_pci *rtwpci; if (!hw) return; rtwdev = hw->priv; rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_unregister_hw(rtwdev, hw); rtw_pci_disable_interrupt(rtwdev, rtwpci); rtw_pci_napi_deinit(rtwdev); rtw_pci_destroy(rtwdev, pdev); rtw_pci_declaim(rtwdev, pdev); rtw_pci_free_irq(rtwdev, pdev); rtw_core_deinit(rtwdev); ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtw_pci_remove); void rtw_pci_shutdown(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtw_dev *rtwdev; struct rtw_chip_info *chip; if (!hw) return; rtwdev = hw->priv; chip = rtwdev->chip; if (chip->ops->shutdown) chip->ops->shutdown(rtwdev); pci_set_power_state(pdev, PCI_D3hot); } EXPORT_SYMBOL(rtw_pci_shutdown); MODULE_AUTHOR("Realtek Corporation"); 
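A note on the interrupt design visible above: rtw_pci_interrupt_handler() only masks the device, because of the write-1-clear HISR semantics under MSI described in its comment, and returns IRQ_WAKE_THREAD; all ring, C2H and NAPI work happens in rtw_pci_interrupt_threadfn(), registered through devm_request_threaded_irq() in rtw_pci_request_irq(). A skeleton of that pattern, with invented sketch_* names:

/* Hard handler: runs in interrupt context, does the minimum. */
static irqreturn_t sketch_hardirq(int irq, void *data)
{
	/* mask further device interrupts; defer everything else */
	return IRQ_WAKE_THREAD;
}

/* Thread function: runs in a kernel thread, may take bh spinlocks. */
static irqreturn_t sketch_threadfn(int irq, void *data)
{
	/* read and write-1-clear the status registers, dispatch the
	 * per-queue tx completions and the rx NAPI, then unmask */
	return IRQ_HANDLED;
}

/* Registered as rtw_pci_request_irq() does above:
 * devm_request_threaded_irq(dev, irq, sketch_hardirq, sketch_threadfn,
 *			     IRQF_SHARED, KBUILD_MODNAME, rtwdev);
 */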
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); MODULE_LICENSE("Dual BSD/GPL"); #if defined(__FreeBSD__) -MODULE_VERSION(rtw_pci, 1); -MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1); -MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1); -#ifdef CONFIG_RTW88_DEBUGFS -MODULE_DEPEND(rtw_pci, debugfs, 1, 1, 1); -#endif +MODULE_VERSION(rtw88_pci, 1); +MODULE_DEPEND(rtw88_pci, rtw88_core, 1, 1, 1); +MODULE_DEPEND(rtw88_pci, linuxkpi, 1, 1, 1); +MODULE_DEPEND(rtw88_pci, linuxkpi_wlan, 1, 1, 1); #endif diff --git a/sys/modules/rtw88/Makefile b/sys/modules/rtw88/Makefile index a8880256eeae..972065e7f208 100644 --- a/sys/modules/rtw88/Makefile +++ b/sys/modules/rtw88/Makefile @@ -1,36 +1,7 @@ # $FreeBSD$ -DEVRTW88DIR= ${SRCTOP}/sys/contrib/dev/rtw88 +SUBDIR= core +SUBDIR+= pci +#SUBDIR+= usb -.PATH: ${DEVRTW88DIR} - -WITH_CONFIG_PM= 0 - -KMOD= if_rtw88 - -SRCS= main.c -SRCS+= bf.c coex.c debug.c efuse.c fw.c mac.c mac80211.c -SRCS+= pci.c phy.c ps.c regd.c -SRCS+= rtw8723d.c rtw8723d_table.c rtw8723de.c # 11n -SRCS+= rtw8821c.c rtw8821c_table.c rtw8821ce.c # 11ac -SRCS+= rtw8822b.c rtw8822b_table.c rtw8822be.c # 11ac -SRCS+= rtw8822c.c rtw8822c_table.c rtw8822ce.c # 11ac -SRCS+= rx.c sar.c sec.c tx.c util.c - -.if defined(WITH_CONFIG_PM) && ${WITH_CONFIG_PM} > 0 -SRCR+= wow.c -CFLAGS+= -DCONFIG_PM=${WITH_CONFIG_PM} -.endif - -# Other -SRCS+= ${LINUXKPI_GENSRCS} -SRCS+= opt_wlan.h opt_inet6.h opt_inet.h - -CFLAGS+= -DKBUILD_MODNAME='"rtw88"' - -CFLAGS+= -I${DEVRTW88DIR} -CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include -CFLAGS+= -DCONFIG_RTW88_DEBUG -#CFLAGS+= -DCONFIG_RTW88_DEBUGFS - -.include <bsd.kmod.mk> +.include <bsd.subdir.mk> diff --git a/sys/modules/rtw88/Makefile.inc b/sys/modules/rtw88/Makefile.inc new file mode 100644 index 000000000000..b4a682a35519 --- /dev/null +++ b/sys/modules/rtw88/Makefile.inc @@ -0,0 +1,23 @@ +# $FreeBSD$ + +# Common information shared by all submodule builds. + +DEVRTW88DIR= ${SRCTOP}/sys/contrib/dev/rtw88 + +.PATH: ${DEVRTW88DIR} + +WITH_CONFIG_PM= 0 + +# Other +SRCS+= ${LINUXKPI_GENSRCS} +SRCS+= opt_wlan.h opt_inet6.h opt_inet.h + +# Helpful after fresh imports. +CFLAGS+= -ferror-limit=0 + +CFLAGS+= -I${DEVRTW88DIR} +CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include +CFLAGS+= -DCONFIG_RTW88_DEBUG +#CFLAGS+= -DCONFIG_RTW88_DEBUGFS + +# end diff --git a/sys/modules/rtw88/core/Makefile b/sys/modules/rtw88/core/Makefile new file mode 100644 index 000000000000..e19103ce8417 --- /dev/null +++ b/sys/modules/rtw88/core/Makefile @@ -0,0 +1,17 @@ +# $FreeBSD$ + +KMOD= rtw88_core + +SRCS= main.c +SRCS+= bf.c coex.c debug.c efuse.c fw.c mac.c mac80211.c +SRCS+= phy.c ps.c regd.c +SRCS+= rx.c sar.c sec.c tx.c util.c + +.if defined(WITH_CONFIG_PM) && ${WITH_CONFIG_PM} > 0 +SRCS+= wow.c +CFLAGS+= -DCONFIG_PM=${WITH_CONFIG_PM} +.endif + +CFLAGS+= -DKBUILD_MODNAME='"rtw88_core"' + +.include <bsd.kmod.mk> diff --git a/sys/modules/rtw88/pci/Makefile b/sys/modules/rtw88/pci/Makefile new file mode 100644 index 000000000000..070e6c8e2bc0 --- /dev/null +++ b/sys/modules/rtw88/pci/Makefile @@ -0,0 +1,13 @@ +# $FreeBSD$ + +KMOD= if_rtw88_pci + +SRCS+= pci.c +SRCS+= rtw8723d.c rtw8723d_table.c rtw8723de.c # 11n +SRCS+= rtw8821c.c rtw8821c_table.c rtw8821ce.c # 11ac +SRCS+= rtw8822b.c rtw8822b_table.c rtw8822be.c # 11ac +SRCS+= rtw8822c.c rtw8822c_table.c rtw8822ce.c # 11ac + +CFLAGS+= -DKBUILD_MODNAME='"rtw88"' + +.include <bsd.kmod.mk>
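With the build split above, loading if_rtw88_pci.ko alone is enough to bring in the rest of the stack: the MODULE_DEPEND(rtw88_pci, rtw88_core, 1, 1, 1) added to pci.c pulls in rtw88_core.ko, and both modules declare their linuxkpi and linuxkpi_wlan dependencies the same way. The top-level Makefile now only descends into the core and pci subdirectories, with the shared compiler flags and LinuxKPI sources centralized in Makefile.inc, and the commented-out SUBDIR+= usb keeps a slot for a future USB bus attachment built from the same common infrastructure.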