diff --git a/sys/contrib/dev/rtw88/fw.c b/sys/contrib/dev/rtw88/fw.c index 2f7c036f9022..e1837ea6a13c 100644 --- a/sys/contrib/dev/rtw88/fw.c +++ b/sys/contrib/dev/rtw88/fw.c @@ -1,2167 +1,2170 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #include #include "main.h" #include "coex.h" #include "fw.h" #include "tx.h" #include "reg.h" #include "sec.h" #include "debug.h" #include "util.h" #include "wow.h" #include "ps.h" static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb) { struct rtw_c2h_cmd *c2h; u8 sub_cmd_id; c2h = get_c2h_from_skb(skb); sub_cmd_id = c2h->payload[0]; switch (sub_cmd_id) { case C2H_CCX_RPT: rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT); break; case C2H_SCAN_STATUS_RPT: rtw_hw_scan_status_report(rtwdev, skb); break; case C2H_CHAN_SWITCH: rtw_hw_scan_chan_switch(rtwdev, skb); break; default: break; } } static u16 get_max_amsdu_len(u32 bit_rate) { /* lower than ofdm, do not aggregate */ if (bit_rate < 550) return 1; /* lower than 20M 2ss mcs8, make it small */ if (bit_rate < 1800) return 1200; /* lower than 40M 2ss mcs9, make it medium */ if (bit_rate < 4000) return 2600; /* not yet 80M 2ss mcs8/9, make it twice regular packet size */ if (bit_rate < 7000) return 3500; /* unlimited */ return 0; } struct rtw_fw_iter_ra_data { struct rtw_dev *rtwdev; u8 *payload; }; static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta) { struct rtw_fw_iter_ra_data *ra_data = data; struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; u8 mac_id, rate, sgi, bw; u8 mcs, nss; u32 bit_rate; mac_id = GET_RA_REPORT_MACID(ra_data->payload); if (si->mac_id != mac_id) return; si->ra_report.txrate.flags = 0; rate = GET_RA_REPORT_RATE(ra_data->payload); sgi = GET_RA_REPORT_SGI(ra_data->payload); bw = GET_RA_REPORT_BW(ra_data->payload); if (rate < DESC_RATEMCS0) { si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate); goto legacy; } rtw_desc_to_mcsrate(rate, &mcs, &nss); if (rate >= DESC_RATEVHT1SS_MCS0) si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; else if (rate >= DESC_RATEMCS0) si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS; if (rate >= DESC_RATEMCS0) { si->ra_report.txrate.mcs = mcs; si->ra_report.txrate.nss = nss; } if (sgi) si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; if (bw == RTW_CHANNEL_WIDTH_80) si->ra_report.txrate.bw = RATE_INFO_BW_80; else if (bw == RTW_CHANNEL_WIDTH_40) si->ra_report.txrate.bw = RATE_INFO_BW_40; else si->ra_report.txrate.bw = RATE_INFO_BW_20; legacy: bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate); si->ra_report.desc_rate = rate; si->ra_report.bit_rate = bit_rate; sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate); } static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload, u8 length) { struct rtw_fw_iter_ra_data ra_data; if (WARN(length < 7, "invalid ra report c2h length\n")) return; rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload); ra_data.rtwdev = rtwdev; ra_data.payload = payload; rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data); } struct rtw_beacon_filter_iter_data { struct rtw_dev *rtwdev; u8 *payload; }; static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_beacon_filter_iter_data *iter_data = data; struct rtw_dev *rtwdev = iter_data->rtwdev; u8 *payload = iter_data->payload; u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload); u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload); s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload); switch 
(type) { case BCN_FILTER_NOTIFY_SIGNAL_CHANGE: event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH : NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL); break; case BCN_FILTER_CONNECTION_LOSS: ieee80211_connection_loss(vif); break; case BCN_FILTER_CONNECTED: rtwdev->beacon_loss = false; break; case BCN_FILTER_NOTIFY_BEACON_LOSS: rtwdev->beacon_loss = true; rtw_leave_lps(rtwdev); break; } } static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload, u8 length) { struct rtw_beacon_filter_iter_data dev_iter_data; dev_iter_data.rtwdev = rtwdev; dev_iter_data.payload = payload; rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter, &dev_iter_data); } static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload, u8 length) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; dm_info->scan_density = payload[0]; rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n", dm_info->scan_density); } static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload, u8 length) { struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th; struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload; rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n", result->density, result->igi, result->l2h_th_init, result->l2h, result->h2l, result->option); rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n", rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask), rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask)); rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n", rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ? 
"Set" : "Unset"); } void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb) { struct rtw_c2h_cmd *c2h; u32 pkt_offset; u8 len; pkt_offset = *((u32 *)skb->cb); c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); len = skb->len - pkt_offset - 2; mutex_lock(&rtwdev->mutex); if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags)) goto unlock; switch (c2h->id) { case C2H_CCX_TX_RPT: rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT); break; case C2H_BT_INFO: rtw_coex_bt_info_notify(rtwdev, c2h->payload, len); break; case C2H_WLAN_INFO: rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len); break; case C2H_BCN_FILTER_NOTIFY: rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len); break; case C2H_HALMAC: rtw_fw_c2h_cmd_handle_ext(rtwdev, skb); break; case C2H_RA_RPT: rtw_fw_ra_report_handle(rtwdev, c2h->payload, len); break; default: rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id); break; } unlock: mutex_unlock(&rtwdev->mutex); } void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, struct sk_buff *skb) { struct rtw_c2h_cmd *c2h; u8 len; c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset); len = skb->len - pkt_offset - 2; *((u32 *)skb->cb) = pkt_offset; rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n", c2h->id, c2h->seq, len); switch (c2h->id) { case C2H_BT_MP_INFO: rtw_coex_info_response(rtwdev, skb); break; case C2H_WLAN_RFON: complete(&rtwdev->lps_leave_check); dev_kfree_skb_any(skb); break; case C2H_SCAN_RESULT: complete(&rtwdev->fw_scan_density); rtw_fw_scan_result(rtwdev, c2h->payload, len); dev_kfree_skb_any(skb); break; case C2H_ADAPTIVITY: rtw_fw_adaptivity_result(rtwdev, c2h->payload, len); dev_kfree_skb_any(skb); break; default: /* pass offset for further operation */ *((u32 *)skb->cb) = pkt_offset; skb_queue_tail(&rtwdev->c2h_queue, skb); ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); break; } } EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe); void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev) { if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER) rtw_fw_recovery(rtwdev); else rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n"); } EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr); static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c) { u8 box; u8 box_state; u32 box_reg, box_ex_reg; int idx; int ret; rtw_dbg(rtwdev, RTW_DBG_FW, "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n", h2c[3], h2c[2], h2c[1], h2c[0], h2c[7], h2c[6], h2c[5], h2c[4]); spin_lock(&rtwdev->h2c.lock); box = rtwdev->h2c.last_box_num; switch (box) { case 0: box_reg = REG_HMEBOX0; box_ex_reg = REG_HMEBOX0_EX; break; case 1: box_reg = REG_HMEBOX1; box_ex_reg = REG_HMEBOX1_EX; break; case 2: box_reg = REG_HMEBOX2; box_ex_reg = REG_HMEBOX2_EX; break; case 3: box_reg = REG_HMEBOX3; box_ex_reg = REG_HMEBOX3_EX; break; default: WARN(1, "invalid h2c mail box number\n"); goto out; } ret = read_poll_timeout_atomic(rtw_read8, box_state, !((box_state >> box) & 0x1), 100, 3000, false, rtwdev, REG_HMETFR); if (ret) { rtw_err(rtwdev, "failed to send h2c command\n"); goto out; } for (idx = 0; idx < 4; idx++) rtw_write8(rtwdev, box_reg + idx, h2c[idx]); for (idx = 0; idx < 4; idx++) rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]); if (++rtwdev->h2c.last_box_num >= 4) rtwdev->h2c.last_box_num = 0; out: spin_unlock(&rtwdev->h2c.lock); } void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c) { rtw_fw_send_h2c_command(rtwdev, h2c); } static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt) { int ret; spin_lock(&rtwdev->h2c.lock); 
FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq); ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE); if (ret) rtw_err(rtwdev, "failed to send h2c packet\n"); rtwdev->h2c.seq++; spin_unlock(&rtwdev->h2c.lock); } void rtw_fw_send_general_info(struct rtw_dev *rtwdev) { struct rtw_fifo_conf *fifo = &rtwdev->fifo; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + 4; if (rtw_chip_wcpu_11n(rtwdev)) return; rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO); SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, fifo->rsvd_fw_txbuf_addr - fifo->rsvd_boundary); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + 8; u8 fw_rf_type = 0; if (rtw_chip_wcpu_11n(rtwdev)) return; if (hal->rf_type == RF_1T1R) fw_rf_type = FW_RF_1T1R; else if (hal->rf_type == RF_2T2R) fw_rf_type = FW_RF_2T2R; rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO); SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option); PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type); PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version); PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx); PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + 1; rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK); SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); IQK_SET_CLEAR(h2c_pkt, para->clear); IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } EXPORT_SYMBOL(rtw_fw_do_iqk); void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION); RFK_SET_INFORM_START(h2c_pkt, start); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } EXPORT_SYMBOL(rtw_fw_inform_rfk_status); void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO); SET_QUERY_BT_INFO(h2c_pkt, true); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO); SET_WL_CH_INFO_LINK(h2c_pkt, link); SET_WL_CH_INFO_CHNL(h2c_pkt, ch); SET_WL_CH_INFO_BW(h2c_pkt, bw); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev, struct rtw_coex_info_req *req) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO); SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq); SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code); SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1); SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2); SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u8 index = 0 - bt_pwr_dec_lvl; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER); SET_BT_TX_POWER_INDEX(h2c_pkt, index); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION); SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable); 
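	/* Typical pattern for the coex H2C helpers in this file: zero an
	 * 8-byte command buffer, stamp the command ID with
	 * SET_H2C_CMD_ID_CLASS(), pack parameters with the SET_* field
	 * macros, then hand the buffer to the mailbox writer below.
	 */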
rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev, u8 para1, u8 para2, u8 para3, u8 para4, u8 para5) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE); SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1); SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2); SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3); SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4); SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL); SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code); SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data); SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1)); SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2)); SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3)); SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4)); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u8 rssi = ewma_rssi_read(&si->avg_rssi); bool stbc_en = si->stbc_en ? true : false; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR); SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id); SET_RSSI_INFO_RSSI(h2c_pkt, rssi); SET_RSSI_INFO_STBC(h2c_pkt, stbc_en); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; bool no_update = si->updated; bool disable_pt = true; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO); SET_RA_INFO_MACID(h2c_pkt, si->mac_id); SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id); SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff)); SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8); SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16); SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24); si->init_ra_lv = 0; si->updated = true; rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT); MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect); MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev) { struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_dm_info *dm_info = &rtwdev->dm_info; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO); SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput); SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput); SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate); SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate); SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect, struct ieee80211_vif *vif) { struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid); static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100; struct rtw_sta_info *si = sta ? 
(struct rtw_sta_info *)sta->drv_priv : NULL; s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !si) return; if (!connect) { SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); return; } SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0); ether_addr_copy(&h2c_pkt[1], bss_conf->bssid); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); memset(h2c_pkt, 0, sizeof(h2c_pkt)); threshold = clamp_t(s32, threshold, rssi_min, rssi_max); SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1); SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect); SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt, BCN_FILTER_OFFLOAD_MODE_DEFAULT); SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold); SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT); SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id); SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst); SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev) { struct rtw_lps_conf *conf = &rtwdev->lps_conf; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE); SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode); SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm); SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps); SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval); SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id); SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; struct rtw_fw_wow_keep_alive_para mode = { .adopt = true, .pkt_type = KEEP_ALIVE_NULL_PKT, .period = 5, }; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE); SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable); SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt); SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type); SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable) { struct rtw_wow_param *rtw_wow = &rtwdev->wow; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; struct rtw_fw_wow_disconnect_para mode = { .adopt = true, .period = 30, .retry_count = 5, }; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION); if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable); SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt); SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period); SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count); } rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable) { struct rtw_wow_param *rtw_wow = &rtwdev->wow; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN); SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable); if (rtw_wow_mgd_linked(rtwdev)) { if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable); if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable); if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags)) SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable); if (rtw_wow->pattern_cnt) SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable); } rtw_fw_send_h2c_command(rtwdev, 
h2c_pkt); } void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev, u8 pairwise_key_enc, u8 group_key_enc) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO); SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc); SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL); SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable); if (rtw_wow_no_link(rtwdev)) SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type) { struct rtw_rsvd_page *rsvd_pkt; u8 location = 0; list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { if (type == rsvd_pkt->type) location = rsvd_pkt->page; } return location; } void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u8 loc_nlo; loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO); SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO); SET_NLO_FUN_EN(h2c_pkt, enable); if (enable) { if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE) SET_NLO_PS_32K(h2c_pkt, enable); SET_NLO_IGNORE_SECURITY(h2c_pkt, enable); SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo); } rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_set_pg_info(struct rtw_dev *rtwdev) { struct rtw_lps_conf *conf = &rtwdev->lps_conf; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u8 loc_pg, loc_dpk; loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO); loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK); SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO); LPS_PG_INFO_LOC(h2c_pkt, loc_pg); LPS_PG_DPK_LOC(h2c_pkt, loc_dpk); LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup); LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid) { struct rtw_rsvd_page *rsvd_pkt; u8 location = 0; list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { if (rsvd_pkt->type != RSVD_PROBE_REQ) continue; if ((!ssid && !rsvd_pkt->ssid) || rtw_ssid_equal(rsvd_pkt->ssid, ssid)) location = rsvd_pkt->page; } return location; } static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid) { struct rtw_rsvd_page *rsvd_pkt; u16 size = 0; list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { if (rsvd_pkt->type != RSVD_PROBE_REQ) continue; if ((!ssid && !rsvd_pkt->ssid) || rtw_ssid_equal(rsvd_pkt->ssid, ssid)) size = rsvd_pkt->probe_req_size; } return size; } void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev) { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u8 location = 0; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE); location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP); *(h2c_pkt + 1) = location; rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location); location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL); *(h2c_pkt + 2) = location; rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location); location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL); *(h2c_pkt + 3) = location; rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location); location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL); *(h2c_pkt + 4) = location; rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", 
location); rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw) { struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req; struct rtw_nlo_info_hdr *nlo_hdr; struct cfg80211_ssid *ssid; struct sk_buff *skb; u8 *pos, loc; u32 size; int i; if (!pno_req->inited || !pno_req->match_set_cnt) return NULL; size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt * IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz; skb = alloc_skb(size, GFP_KERNEL); if (!skb) return NULL; skb_reserve(skb, chip->tx_pkt_desc_sz); nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr)); nlo_hdr->nlo_count = pno_req->match_set_cnt; nlo_hdr->hidden_ap_count = pno_req->match_set_cnt; /* pattern check for firmware */ memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE); for (i = 0; i < pno_req->match_set_cnt; i++) nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len; for (i = 0; i < pno_req->match_set_cnt; i++) { ssid = &pno_req->match_sets[i].ssid; loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid); if (!loc) { rtw_err(rtwdev, "failed to get probe req rsvd loc\n"); kfree_skb(skb); return NULL; } nlo_hdr->location[i] = loc; } for (i = 0; i < pno_req->match_set_cnt; i++) { pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN); memcpy(pos, pno_req->match_sets[i].ssid.ssid, pno_req->match_sets[i].ssid.ssid_len); } return skb; } static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw) { struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req; struct ieee80211_channel *channels = pno_req->channels; struct sk_buff *skb; int count = pno_req->channel_cnt; u8 *pos; int i = 0; skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL); if (!skb) return NULL; skb_reserve(skb, chip->tx_pkt_desc_sz); for (i = 0; i < count; i++) { pos = skb_put_zero(skb, 4); CHSW_INFO_SET_CH(pos, channels[i].hw_value); if (channels[i].flags & IEEE80211_CHAN_RADAR) CHSW_INFO_SET_ACTION_ID(pos, 0); else CHSW_INFO_SET_ACTION_ID(pos, 1); CHSW_INFO_SET_TIMEOUT(pos, 1); CHSW_INFO_SET_PRI_CH_IDX(pos, 1); CHSW_INFO_SET_BW(pos, 0); } return skb; } static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw) { struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info; struct rtw_lps_pg_dpk_hdr *dpk_hdr; struct sk_buff *skb; u32 size; size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr); skb = alloc_skb(size, GFP_KERNEL); if (!skb) return NULL; skb_reserve(skb, chip->tx_pkt_desc_sz); dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr)); dpk_hdr->dpk_ch = dpk_info->dpk_ch; dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0]; memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2); memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4); memcpy(dpk_hdr->coef, dpk_info->coef, 160); return skb; } static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw) { struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_lps_conf *conf = &rtwdev->lps_conf; struct rtw_lps_pg_info_hdr *pg_info_hdr; struct rtw_wow_param *rtw_wow = &rtwdev->wow; struct sk_buff *skb; u32 size; size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr); skb = alloc_skb(size, GFP_KERNEL); if (!skb) return NULL; skb_reserve(skb, chip->tx_pkt_desc_sz); pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr)); pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num; 
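	/* The LPS PG info page tells the firmware how many reserved driver
	 * pages exist and, in the fields below, which state was backed up
	 * (security CAM entries, WoW patterns) so it can be restored around
	 * LPS power gating.
	 */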
pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM); pg_info_hdr->sec_cam_count = rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam); pg_info_hdr->pattern_count = rtw_wow->pattern_cnt; conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0; conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0; return skb; } static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, struct rtw_rsvd_page *rsvd_pkt) { struct ieee80211_vif *vif; struct rtw_vif *rtwvif; struct sk_buff *skb_new; struct cfg80211_ssid *ssid; if (rsvd_pkt->type == RSVD_DUMMY) { skb_new = alloc_skb(1, GFP_KERNEL); if (!skb_new) return NULL; skb_put(skb_new, 1); return skb_new; } rtwvif = rsvd_pkt->rtwvif; if (!rtwvif) return NULL; vif = rtwvif_to_vif(rtwvif); switch (rsvd_pkt->type) { case RSVD_BEACON: skb_new = ieee80211_beacon_get(hw, vif); break; case RSVD_PS_POLL: skb_new = ieee80211_pspoll_get(hw, vif); break; case RSVD_PROBE_RESP: skb_new = ieee80211_proberesp_get(hw, vif); break; case RSVD_NULL: skb_new = ieee80211_nullfunc_get(hw, vif, false); break; case RSVD_QOS_NULL: skb_new = ieee80211_nullfunc_get(hw, vif, true); break; case RSVD_LPS_PG_DPK: skb_new = rtw_lps_pg_dpk_get(hw); break; case RSVD_LPS_PG_INFO: skb_new = rtw_lps_pg_info_get(hw); break; case RSVD_PROBE_REQ: ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid; if (ssid) skb_new = ieee80211_probereq_get(hw, vif->addr, ssid->ssid, ssid->ssid_len, 0); else skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0); if (skb_new) rsvd_pkt->probe_req_size = (u16)skb_new->len; break; case RSVD_NLO_INFO: skb_new = rtw_nlo_info_get(hw); break; case RSVD_CH_INFO: skb_new = rtw_cs_channel_info_get(hw); break; default: return NULL; } if (!skb_new) return NULL; return skb_new; } static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, enum rtw_rsvd_packet_type type) { struct rtw_tx_pkt_info pkt_info = {0}; struct rtw_chip_info *chip = rtwdev->chip; u8 *pkt_desc; rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type); pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, chip->tx_pkt_desc_sz); rtw_tx_fill_tx_desc(&pkt_info, skb); } static inline u8 rtw_len_to_page(unsigned int len, u8 page_size) { return DIV_ROUND_UP(len, page_size); } static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size, u8 page_margin, u32 page, u8 *buf, struct rtw_rsvd_page *rsvd_pkt) { struct sk_buff *skb = rsvd_pkt->skb; if (page >= 1) memcpy(buf + page_margin + page_size * (page - 1), skb->data, skb->len); else memcpy(buf, skb->data, skb->len); } static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type, bool txdesc) { struct rtw_rsvd_page *rsvd_pkt = NULL; rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL); if (!rsvd_pkt) return NULL; INIT_LIST_HEAD(&rsvd_pkt->vif_list); INIT_LIST_HEAD(&rsvd_pkt->build_list); rsvd_pkt->type = type; rsvd_pkt->add_txdesc = txdesc; return rsvd_pkt; } static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, struct rtw_rsvd_page *rsvd_pkt) { lockdep_assert_held(&rtwdev->mutex); list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list); } static void rtw_add_rsvd_page(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, enum rtw_rsvd_packet_type type, bool txdesc) { struct rtw_rsvd_page *rsvd_pkt; rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc); if (!rsvd_pkt) { rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type); return; } rsvd_pkt->rtwvif = rtwvif; rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt); } 
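/* Reserved-page bookkeeping uses two list memberships per packet:
 * vif_list chains the packet on its owning vif's rsvd_page_list for
 * lifetime management, while build_list is used transiently when the
 * driver gathers pages from all vifs into rtwdev->rsvd_page_list to
 * lay out the reserved-page buffer (beacon first, then the rest).
 */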
static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, struct cfg80211_ssid *ssid) { struct rtw_rsvd_page *rsvd_pkt; rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true); if (!rsvd_pkt) { rtw_err(rtwdev, "failed to alloc probe req rsvd page\n"); return; } rsvd_pkt->rtwvif = rtwvif; rsvd_pkt->ssid = ssid; rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt); } void rtw_remove_rsvd_page(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct rtw_rsvd_page *rsvd_pkt, *tmp; lockdep_assert_held(&rtwdev->mutex); /* remove all of the rsvd pages for vif */ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list, vif_list) { list_del(&rsvd_pkt->vif_list); if (!list_empty(&rsvd_pkt->build_list)) list_del(&rsvd_pkt->build_list); kfree(rsvd_pkt); } } void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_MESH_POINT) { rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n", vif->type); return; } rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false); } void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct rtw_wow_param *rtw_wow = &rtwdev->wow; struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req; struct cfg80211_ssid *ssid; int i; if (vif->type != NL80211_IFTYPE_STATION) { rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n", vif->type); return; } for (i = 0 ; i < rtw_pno_req->match_set_cnt; i++) { ssid = &rtw_pno_req->match_sets[i].ssid; rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid); } rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true); } void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); if (vif->type != NL80211_IFTYPE_STATION) { rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n", vif->type); return; } rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true); rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true); } int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr, u8 *buf, u32 size) { u8 bckp[2]; u8 val; u16 rsvd_pg_head; u32 bcn_valid_addr; u32 bcn_valid_mask; int ret; lockdep_assert_held(&rtwdev->mutex); if (!size) return -EINVAL; if (rtw_chip_wcpu_11n(rtwdev)) { rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID); } else { pg_addr &= BIT_MASK_BCN_HEAD_1_V1; pg_addr |= BIT_BCN_VALID_V1; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr); } val = rtw_read8(rtwdev, REG_CR + 1); bckp[0] = val; val |= BIT_ENSWBCN >> 8; rtw_write8(rtwdev, REG_CR + 1, val); val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2); bckp[1] = val; val &= ~(BIT_EN_BCNQ_DL >> 16); rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val); ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size); if (ret) { rtw_err(rtwdev, "failed to write data to rsvd page\n"); goto restore; } if (rtw_chip_wcpu_11n(rtwdev)) { bcn_valid_addr = REG_DWBCN0_CTRL; bcn_valid_mask = BIT_BCN_VALID; } else { bcn_valid_addr = REG_FIFOPAGE_CTRL_2; bcn_valid_mask = BIT_BCN_VALID_V1; } if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) { rtw_err(rtwdev, "error beacon valid\n"); ret = -EBUSY; 
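		/* fall through to "restore" so the CR and FWHW_TXQ_CTRL
		 * bits backed up in bckp[] are re-written even on failure
		 */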
} restore: rsvd_pg_head = rtwdev->fifo.rsvd_boundary; rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, rsvd_pg_head | BIT_BCN_VALID_V1); rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]); rtw_write8(rtwdev, REG_CR + 1, bckp[0]); return ret; } static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { u32 pg_size; u32 pg_num = 0; u16 pg_addr = 0; pg_size = rtwdev->chip->page_size; pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0); if (pg_num > rtwdev->fifo.rsvd_drv_pg_num) return -ENOMEM; pg_addr = rtwdev->fifo.rsvd_drv_addr; return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size); } static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev) { struct rtw_rsvd_page *rsvd_pkt, *tmp; list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, build_list) { list_del_init(&rsvd_pkt->build_list); /* Don't free except for the dummy rsvd page, * others will be freed when removing vif */ if (rsvd_pkt->type == RSVD_DUMMY) kfree(rsvd_pkt); } } static void rtw_build_rsvd_page_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw_dev *rtwdev = data; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct rtw_rsvd_page *rsvd_pkt; list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) { if (rsvd_pkt->type == RSVD_BEACON) list_add(&rsvd_pkt->build_list, &rtwdev->rsvd_page_list); else list_add_tail(&rsvd_pkt->build_list, &rtwdev->rsvd_page_list); } } static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev) { struct rtw_rsvd_page *rsvd_pkt; __rtw_build_rsvd_page_reset(rtwdev); /* gather rsvd page from vifs */ rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev); rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list, struct rtw_rsvd_page, build_list); if (!rsvd_pkt) { WARN(1, "Should not have an empty reserved page\n"); return -EINVAL; } /* the first rsvd should be beacon, otherwise add a dummy one */ if (rsvd_pkt->type != RSVD_BEACON) { struct rtw_rsvd_page *dummy_pkt; dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false); if (!dummy_pkt) { rtw_err(rtwdev, "failed to alloc dummy rsvd page\n"); return -ENOMEM; } list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list); } return 0; } static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size) { struct ieee80211_hw *hw = rtwdev->hw; struct rtw_chip_info *chip = rtwdev->chip; struct sk_buff *iter; struct rtw_rsvd_page *rsvd_pkt; u32 page = 0; u8 total_page = 0; u8 page_size, page_margin, tx_desc_sz; u8 *buf; int ret; page_size = chip->page_size; tx_desc_sz = chip->tx_pkt_desc_sz; page_margin = page_size - tx_desc_sz; ret = __rtw_build_rsvd_page_from_vifs(rtwdev); if (ret) { rtw_err(rtwdev, "failed to build rsvd page from vifs, ret %d\n", ret); return NULL; } list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt); if (!iter) { rtw_err(rtwdev, "failed to build rsvd packet\n"); goto release_skb; } /* Fill the tx_desc for the rsvd pkt that requires one. * And iter->len will be added with size of tx_desc_sz. */ if (rsvd_pkt->add_txdesc) rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type); rsvd_pkt->skb = iter; rsvd_pkt->page = total_page; /* Reserved page is downloaded via TX path, and TX path will * generate a tx_desc at the header to describe length of * the buffer. 
If we are not counting page numbers with the * size of tx_desc added at the first rsvd_pkt (usually a * beacon, firmware default refer to the first page as the * content of beacon), we could generate a buffer which size * is smaller than the actual size of the whole rsvd_page */ if (total_page == 0) { if (rsvd_pkt->type != RSVD_BEACON && rsvd_pkt->type != RSVD_DUMMY) { rtw_err(rtwdev, "first page should be a beacon\n"); goto release_skb; } total_page += rtw_len_to_page(iter->len + tx_desc_sz, page_size); } else { total_page += rtw_len_to_page(iter->len, page_size); } } if (total_page > rtwdev->fifo.rsvd_drv_pg_num) { rtw_err(rtwdev, "rsvd page over size: %d\n", total_page); goto release_skb; } *size = (total_page - 1) * page_size + page_margin; buf = kzalloc(*size, GFP_KERNEL); if (!buf) goto release_skb; /* Copy the content of each rsvd_pkt to the buf, and they should * be aligned to the pages. * * Note that the first rsvd_pkt is a beacon no matter what vif->type. * And that rsvd_pkt does not require tx_desc because when it goes * through TX path, the TX path will generate one for it. */ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin, page, buf, rsvd_pkt); if (page == 0) page += rtw_len_to_page(rsvd_pkt->skb->len + tx_desc_sz, page_size); else page += rtw_len_to_page(rsvd_pkt->skb->len, page_size); kfree_skb(rsvd_pkt->skb); rsvd_pkt->skb = NULL; } return buf; release_skb: list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) { kfree_skb(rsvd_pkt->skb); rsvd_pkt->skb = NULL; } return NULL; } static int rtw_download_beacon(struct rtw_dev *rtwdev) { struct ieee80211_hw *hw = rtwdev->hw; struct rtw_rsvd_page *rsvd_pkt; struct sk_buff *skb; int ret = 0; rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list, struct rtw_rsvd_page, build_list); if (!rsvd_pkt) { rtw_err(rtwdev, "failed to get rsvd page from build list\n"); return -ENOENT; } if (rsvd_pkt->type != RSVD_BEACON && rsvd_pkt->type != RSVD_DUMMY) { rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n", rsvd_pkt->type); return -EINVAL; } skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt); if (!skb) { rtw_err(rtwdev, "failed to get beacon skb\n"); return -ENOMEM; } ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len); if (ret) rtw_err(rtwdev, "failed to download drv rsvd page\n"); dev_kfree_skb(skb); return ret; } int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev) { u8 *buf; u32 size; int ret; buf = rtw_build_rsvd_page(rtwdev, &size); if (!buf) { rtw_err(rtwdev, "failed to build rsvd page pkt\n"); return -ENOMEM; } ret = rtw_download_drv_rsvd_page(rtwdev, buf, size); if (ret) { rtw_err(rtwdev, "failed to download drv rsvd page\n"); goto free; } /* The last thing is to download the *ONLY* beacon again, because * the previous tx_desc is to describe the total rsvd page. Download * the beacon again to replace the TX desc header, and we will get * a correct tx_desc for the beacon in the rsvd page. 
*/ ret = rtw_download_beacon(rtwdev); if (ret) { rtw_err(rtwdev, "failed to download beacon\n"); goto free; } free: kfree(buf); return ret; } static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size, u32 *buf, u32 residue, u16 start_pg) { u32 i; u16 idx = 0; u16 ctl; ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000; /* disable rx clock gate */ rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK); do { rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl); for (i = FIFO_DUMP_ADDR + residue; i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) { buf[idx++] = rtw_read32(rtwdev, i); size -= 4; if (size == 0) goto out; } residue = 0; start_pg++; } while (size); out: rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl); /* restore rx clock gate */ rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK); } static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel, u32 offset, u32 size, u32 *buf) { struct rtw_chip_info *chip = rtwdev->chip; u32 start_pg, residue; if (sel >= RTW_FW_FIFO_MAX) { rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n"); return; } if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE) offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT; residue = offset & (FIFO_PAGE_SIZE - 1); start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel]; rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg); } static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel, u32 start_addr, u32 size) { switch (sel) { case RTW_FW_FIFO_SEL_TX: case RTW_FW_FIFO_SEL_RX: if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel]) return false; fallthrough; default: return true; } } int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size, u32 *buffer) { if (!rtwdev->chip->fw_fifo_addr[0]) { rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n"); return -ENOTSUPP; } if (size == 0 || !buffer) return -EINVAL; if (size & 0x3) { rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n"); return -EINVAL; } if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) { rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n"); return -EINVAL; } rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer); return 0; } static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size, u8 location) { struct rtw_chip_info *chip = rtwdev->chip; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN; rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT); SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size); UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id); UPDATE_PKT_SET_LOCATION(h2c_pkt, location); /* include txdesc size */ size += chip->tx_pkt_desc_sz; UPDATE_PKT_SET_SIZE(h2c_pkt, size); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid) { u8 loc; u16 size; loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid); if (!loc) { rtw_err(rtwdev, "failed to get probe_req rsvd loc\n"); return; } size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid); if (!size) { rtw_err(rtwdev, "failed to get probe_req rsvd size\n"); return; } __rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc); } void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable) { struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN; u8 loc_ch_info; const struct rtw_ch_switch_option cs_option = { .dest_ch_en = 1, .dest_ch = 1, .periodic_option = 2, .normal_period = 5, 
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev,
				    struct sk_buff *skb,
				    struct sk_buff_head *list,
				    struct rtw_vif *rtwvif)
{
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *new;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(BIT(idx) & chip->band))
			continue;
		new = skb_copy(skb, GFP_KERNEL);
		if (!new)	/* skb_copy() can fail; skip this band */
			continue;
		skb_put_data(new, ies->ies[idx], ies->len[idx]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);
		skb_queue_tail(list, new);
	}
}

static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
					 struct sk_buff_head *probe_req_list)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb, *tmp;
	u8 page_offset = 1, *buf, page_size = chip->page_size;
	u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
	u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
	u16 buf_offset = page_size * page_offset;
	u8 tx_desc_sz = chip->tx_pkt_desc_sz;
	unsigned int pkt_len;
	int ret;

	buf = kzalloc(page_size * pages, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf_offset -= tx_desc_sz;
	skb_queue_walk_safe(probe_req_list, skb, tmp) {
		skb_unlink(skb, probe_req_list);
		rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
		if (skb->len > page_size * RTW_PROBE_PG_CNT) {
+#if defined(__FreeBSD__)
+			kfree_skb(skb);
+#endif
			ret = -EINVAL;
			goto out;
		}

		memcpy(buf + buf_offset, skb->data, skb->len);
		pkt_len = skb->len - tx_desc_sz;
		loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
		__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);

		buf_offset += RTW_PROBE_PG_CNT * page_size;
		page_offset += RTW_PROBE_PG_CNT;
		kfree_skb(skb);
	}

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
	if (ret) {
		rtw_err(rtwdev,
"Download probe request to firmware failed\n"); goto out; } rtwdev->scan_info.probe_pg_size = page_offset; out: kfree(buf); return ret; } static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif) { struct cfg80211_scan_request *req = rtwvif->scan_req; struct sk_buff_head list; struct sk_buff *skb; u8 num = req->n_ssids, i; skb_queue_head_init(&list); for (i = 0; i < num; i++) { skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, req->ssids[i].ssid, req->ssids[i].ssid_len, req->ie_len); rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif); kfree_skb(skb); } return _rtw_hw_scan_update_probe_req(rtwdev, num, &list); } static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info, struct rtw_chan_list *list, u8 *buf) { u8 *chan = &buf[list->size]; u8 info_size = RTW_CH_INFO_SIZE; if (list->size > list->buf_size) return -ENOMEM; CH_INFO_SET_CH(chan, info->channel); CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx); CH_INFO_SET_BW(chan, info->bw); CH_INFO_SET_TIMEOUT(chan, info->timeout); CH_INFO_SET_ACTION_ID(chan, info->action_id); CH_INFO_SET_EXTRA_INFO(chan, info->extra_info); if (info->extra_info) { EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS); EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN); EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE - RTW_EX_CH_INFO_HDR_SIZE); EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME); info_size += RTW_EX_CH_INFO_SIZE; } list->size += info_size; list->ch_num++; return 0; } static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, struct rtw_chan_list *list, u8 *buf) { struct cfg80211_scan_request *req = rtwvif->scan_req; struct rtw_fifo_conf *fifo = &rtwdev->fifo; struct ieee80211_channel *channel; int i, ret = 0; for (i = 0; i < req->n_channels; i++) { struct rtw_chan_info ch_info = {0}; channel = req->channels[i]; ch_info.channel = channel->hw_value; ch_info.bw = RTW_SCAN_WIDTH; ch_info.pri_ch_idx = RTW_PRI_CH_IDX; ch_info.timeout = req->duration_mandatory ? req->duration : RTW_CHANNEL_TIME; if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) { ch_info.action_id = RTW_CHANNEL_RADAR; ch_info.extra_info = 1; /* Overwrite duration for passive scans if necessary */ ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ? 
ch_info.timeout : RTW_PASS_CHAN_TIME; } else { ch_info.action_id = RTW_CHANNEL_ACTIVE; } ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf); if (ret) return ret; } if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) { rtw_err(rtwdev, "List exceeds rsvd page total size\n"); return -EINVAL; } list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size; ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size); if (ret) rtw_err(rtwdev, "Download channel list failed\n"); return ret; } static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev, struct rtw_ch_switch_option *opt, struct rtw_vif *rtwvif, struct rtw_chan_list *list) { struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; struct cfg80211_scan_request *req = rtwvif->scan_req; struct rtw_fifo_conf *fifo = &rtwdev->fifo; /* reserve one dummy page at the beginning for tx descriptor */ u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1; bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; u8 h2c_pkt[H2C_PKT_SIZE] = {0}; rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD); SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN); SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en); SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en); SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq); SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck); SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num); SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size); SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary); SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan); SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx); SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw); SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port); SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ? 
req->duration : RTW_CHANNEL_TIME); SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME); SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids); SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc); rtw_fw_send_h2c_packet(rtwdev, h2c_pkt); } void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_scan_request *scan_req) { struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; struct cfg80211_scan_request *req = &scan_req->req; u8 mac_addr[ETH_ALEN]; rtwdev->scan_info.scanning_vif = vif; rtwvif->scan_ies = &scan_req->ies; rtwvif->scan_req = req; ieee80211_stop_queues(rtwdev->hw); if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(mac_addr, req->mac_addr, req->mac_addr_mask); else ether_addr_copy(mac_addr, vif->addr); rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true); rtwdev->hal.rcr &= ~BIT_CBSSID_BCN; rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); } void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, bool aborted) { struct cfg80211_scan_info info = { .aborted = aborted, }; struct rtw_vif *rtwvif; if (!vif) return; rtwdev->hal.rcr |= BIT_CBSSID_BCN; rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); rtw_core_scan_complete(rtwdev, vif); ieee80211_wake_queues(rtwdev->hw); ieee80211_scan_completed(rtwdev->hw, &info); rtwvif = (struct rtw_vif *)vif->drv_priv; rtwvif->scan_req = NULL; rtwvif->scan_ies = NULL; rtwdev->scan_info.scanning_vif = NULL; } static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, struct rtw_chan_list *list) { struct cfg80211_scan_request *req = rtwvif->scan_req; int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE); u8 *buf; int ret; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif); if (ret) { rtw_err(rtwdev, "Update probe request failed\n"); goto out; } list->buf_size = size; list->size = 0; list->ch_num = 0; ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf); out: kfree(buf); return ret; } int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, bool enable) { struct rtw_vif *rtwvif = vif ? 
(struct rtw_vif *)vif->drv_priv : NULL; struct rtw_ch_switch_option cs_option = {0}; struct rtw_chan_list chan_list = {0}; int ret = 0; if (!rtwvif) return -EINVAL; cs_option.switch_en = enable; cs_option.back_op_en = rtwvif->net_type == RTW_NET_MGD_LINKED; if (enable) { ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list); if (ret) goto out; } rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list); out: return ret; } void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif) { if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD)) return; rtw_hw_scan_offload(rtwdev, vif, false); rtw_hw_scan_complete(rtwdev, vif, true); } void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb) { struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; struct rtw_c2h_cmd *c2h; bool aborted; u8 rc; if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) return; c2h = get_c2h_from_skb(skb); rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload); aborted = rc != RTW_SCAN_REPORT_SUCCESS; rtw_hw_scan_complete(rtwdev, vif, aborted); if (aborted) rtw_info(rtwdev, "HW scan aborted with code: %d\n", rc); } void rtw_store_op_chan(struct rtw_dev *rtwdev) { struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw_hal *hal = &rtwdev->hal; scan_info->op_chan = hal->current_channel; scan_info->op_bw = hal->current_band_width; scan_info->op_pri_ch_idx = hal->current_primary_channel_index; } static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel) { struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info; return channel == scan_info->op_chan; } void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_c2h_cmd *c2h; enum rtw_scan_notify_id id; u8 chan, status; c2h = get_c2h_from_skb(skb); chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload); id = GET_CHAN_SWITCH_ID(c2h->payload); status = GET_CHAN_SWITCH_STATUS(c2h->payload); if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) { if (rtw_is_op_chan(rtwdev, chan)) ieee80211_wake_queues(rtwdev->hw); hal->current_channel = chan; hal->current_band_type = chan > 14 ? 
RTW_BAND_5G : RTW_BAND_2G; } else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) { if (IS_CH_5G_BAND(chan)) { rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G); } else if (IS_CH_2G_BAND(chan)) { u8 chan_type; if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) chan_type = COEX_SWITCH_TO_24G; else chan_type = COEX_SWITCH_TO_24G_NOFORSCAN; rtw_coex_switchband_notify(rtwdev, chan_type); } if (rtw_is_op_chan(rtwdev, chan)) ieee80211_stop_queues(rtwdev->hw); } rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "Chan switch: %x, id: %x, status: %x\n", chan, id, status); } diff --git a/sys/contrib/dev/rtw88/pci.c b/sys/contrib/dev/rtw88/pci.c index 13e71f44d84f..431d0c1c8b98 100644 --- a/sys/contrib/dev/rtw88/pci.c +++ b/sys/contrib/dev/rtw88/pci.c @@ -1,1939 +1,1945 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #if defined(__FreeBSD__) #define LINUXKPI_PARAM_PREFIX rtw88_pci_ #endif #include #include #include "main.h" #include "pci.h" #include "reg.h" #include "tx.h" #include "rx.h" #include "fw.h" #include "ps.h" #include "debug.h" #if defined(__FreeBSD__) #include #endif static bool rtw_disable_msi; static bool rtw_pci_disable_aspm; module_param_named(disable_msi, rtw_disable_msi, bool, 0644); module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support"); static u32 rtw_pci_tx_queue_idx_addr[] = { [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ, [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ, [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ, [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q, [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ, }; static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue) { switch (queue) { case RTW_TX_QUEUE_BCN: return TX_DESC_QSEL_BEACON; case RTW_TX_QUEUE_H2C: return TX_DESC_QSEL_H2C; case RTW_TX_QUEUE_MGMT: return TX_DESC_QSEL_MGMT; case RTW_TX_QUEUE_HI0: return TX_DESC_QSEL_HIGH; default: return skb->priority; } }; static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readb(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u8 val; val = bus_read_1((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); return (val); #endif } static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readw(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u16 val; val = bus_read_2((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); return (val); #endif } static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) return readl(rtwpci->mmap + addr); #elif defined(__FreeBSD__) u32 val; val = bus_read_4((struct resource *)rtwpci->mmap, addr); rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); return (val); #endif } static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writeb(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val); return (bus_write_1((struct resource *)rtwpci->mmap, addr, val)); 
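	/* bus_write_1() is void on FreeBSD; returning a void expression is
	 * valid C and appears to be purely stylistic, mirroring the Linux
	 * writeb() branch. The RTW_DBG_IO_RW trace above is what the
	 * FreeBSD accessors add over the Linux mmap path.
	 */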
#endif } static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writew(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val); return (bus_write_2((struct resource *)rtwpci->mmap, addr, val)); #endif } static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; #if defined(__linux__) writel(val, rtwpci->mmap + addr); #elif defined(__FreeBSD__) rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val); return (bus_write_4((struct resource *)rtwpci->mmap, addr, val)); #endif } #if defined(__linux__) && 0 static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx) { int offset = tx_ring->r.desc_size * idx; return tx_ring->r.head + offset; } #endif static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct rtw_pci_tx_data *tx_data; struct sk_buff *skb, *tmp; dma_addr_t dma; /* free every skb remained in tx list */ skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { __skb_unlink(skb, &tx_ring->queue); tx_data = rtw_pci_get_tx_data(skb); dma = tx_data->dma; dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); u8 *head = tx_ring->r.head; u32 len = tx_ring->r.len; int ring_sz = len * tx_ring->r.desc_size; rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); /* free the ring itself */ dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); tx_ring->r.head = NULL; } static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct sk_buff *skb; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_addr_t dma; int i; for (i = 0; i < rx_ring->r.len; i++) { skb = rx_ring->buf[i]; if (!skb) continue; dma = *((dma_addr_t *)skb->cb); dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); rx_ring->buf[i] = NULL; } } static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); u8 *head = rx_ring->r.head; int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring); dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma); } static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; struct rtw_pci_rx_ring *rx_ring; int i; for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { tx_ring = &rtwpci->tx_rings[i]; rtw_pci_free_tx_ring(rtwdev, tx_ring); } for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) { rx_ring = &rtwpci->rx_rings[i]; rtw_pci_free_rx_ring(rtwdev, rx_ring); } } static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev, struct rtw_pci_tx_ring *tx_ring, u8 desc_size, u32 len) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); int ring_sz = desc_size * len; dma_addr_t dma; u8 *head; if (len > TRX_BD_IDX_MASK) { rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len); return -EINVAL; } head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); if (!head) { rtw_err(rtwdev, "failed to allocate tx ring\n"); return -ENOMEM; } skb_queue_head_init(&tx_ring->queue); tx_ring->r.head = head; 
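/*
 * rtw_pci_free_tx_ring_skbs() above recovers the DMA address with
 * rtw_pci_get_tx_data(), which, per the pattern used elsewhere in this
 * driver, stashes per-skb TX state in the ieee80211 tx info driver data.
 * A sketch of that idiom, assuming the state fits the driver-data area;
 * the example_* names are hypothetical.
 */
struct example_tx_data {
        dma_addr_t dma;
        u8 sn;
};

static struct example_tx_data *example_get_tx_data(struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        /* the scratch state must never outgrow the driver-private area */
        BUILD_BUG_ON(sizeof(struct example_tx_data) >
                     sizeof(info->status.status_driver_data));

        return (struct example_tx_data *)info->status.status_driver_data;
}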
tx_ring->r.dma = dma; tx_ring->r.len = len; tx_ring->r.desc_size = desc_size; tx_ring->r.wp = 0; tx_ring->r.rp = 0; return 0; } static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, struct rtw_pci_rx_ring *rx_ring, u32 idx, u32 desc_sz) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct rtw_pci_rx_buffer_desc *buf_desc; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_addr_t dma; if (!skb) return -EINVAL; dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, dma)) return -EBUSY; *((dma_addr_t *)skb->cb) = dma; buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); memset(buf_desc, 0, sizeof(*buf_desc)); buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); buf_desc->dma = cpu_to_le32(dma); return 0; } static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, struct rtw_pci_rx_ring *rx_ring, u32 idx, u32 desc_sz) { struct device *dev = rtwdev->dev; struct rtw_pci_rx_buffer_desc *buf_desc; int buf_sz = RTK_PCI_RX_BUF_SIZE; dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); memset(buf_desc, 0, sizeof(*buf_desc)); buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); buf_desc->dma = cpu_to_le32(dma); } static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u8 desc_size, u32 len) { struct pci_dev *pdev = to_pci_dev(rtwdev->dev); struct sk_buff *skb = NULL; dma_addr_t dma; u8 *head; int ring_sz = desc_size * len; int buf_sz = RTK_PCI_RX_BUF_SIZE; int i, allocated; int ret = 0; head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); if (!head) { rtw_err(rtwdev, "failed to allocate rx ring\n"); return -ENOMEM; } rx_ring->r.head = head; for (i = 0; i < len; i++) { skb = dev_alloc_skb(buf_sz); if (!skb) { allocated = i; ret = -ENOMEM; goto err_out; } memset(skb->data, 0, buf_sz); rx_ring->buf[i] = skb; ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size); if (ret) { allocated = i; dev_kfree_skb_any(skb); goto err_out; } } rx_ring->r.dma = dma; rx_ring->r.len = len; rx_ring->r.desc_size = desc_size; rx_ring->r.wp = 0; rx_ring->r.rp = 0; return 0; err_out: for (i = 0; i < allocated; i++) { skb = rx_ring->buf[i]; if (!skb) continue; dma = *((dma_addr_t *)skb->cb); dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); rx_ring->buf[i] = NULL; } dma_free_coherent(&pdev->dev, ring_sz, head, dma); rtw_err(rtwdev, "failed to init rx buffer\n"); return ret; } static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; struct rtw_pci_rx_ring *rx_ring; struct rtw_chip_info *chip = rtwdev->chip; int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0; int tx_desc_size, rx_desc_size; u32 len; int ret; tx_desc_size = chip->tx_buf_desc_sz; for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) { tx_ring = &rtwpci->tx_rings[i]; len = max_num_of_tx_queue(i); ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len); if (ret) goto out; } rx_desc_size = chip->rx_buf_desc_sz; for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) { rx_ring = &rtwpci->rx_rings[j]; ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size, RTK_MAX_RX_DESC_NUM); if (ret) goto out; } return 0; out: tx_alloced = i; for (i = 0; i < tx_alloced; i++) { tx_ring = &rtwpci->tx_rings[i]; rtw_pci_free_tx_ring(rtwdev, tx_ring); } rx_alloced = j; for (j = 0; j < rx_alloced; j++) { rx_ring = 
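/*
 * The RX buffers above are mapped once and recycled: rtw_pci_reset_rx_desc()
 * creates the streaming mapping, the poll loop claims it for the CPU before
 * parsing, and rtw_pci_sync_rx_desc_device() hands it back to the NIC.
 * A condensed sketch of that ownership handshake; example_* is hypothetical.
 */
static void example_rx_buf_cycle(struct device *dev, struct sk_buff *skb,
                                 dma_addr_t dma, size_t buf_sz)
{
        /* the device owns the buffer until the CPU syncs it for itself */
        dma_sync_single_for_cpu(dev, dma, buf_sz, DMA_FROM_DEVICE);

        /* ... parse skb->data and copy out what is needed ... */

        /* return ownership so the NIC may DMA into the buffer again */
        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
}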
&rtwpci->rx_rings[j]; rtw_pci_free_rx_ring(rtwdev, rx_ring); } return ret; } static void rtw_pci_deinit(struct rtw_dev *rtwdev) { rtw_pci_free_trx_ring(rtwdev); } static int rtw_pci_init(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; int ret = 0; rtwpci->irq_mask[0] = IMR_HIGHDOK | IMR_MGNTDOK | IMR_BKDOK | IMR_BEDOK | IMR_VIDOK | IMR_VODOK | IMR_ROK | IMR_BCNDMAINT_E | IMR_C2HCMD | 0; rtwpci->irq_mask[1] = IMR_TXFOVW | 0; rtwpci->irq_mask[3] = IMR_H2CDOK | 0; spin_lock_init(&rtwpci->irq_lock); spin_lock_init(&rtwpci->hwirq_lock); ret = rtw_pci_init_trx_ring(rtwdev); return ret; } static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u32 len; u8 tmp; dma_addr_t dma; tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3); rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7); dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma); if (!rtw_chip_wcpu_11n(rtwdev)) { len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma); } len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma); len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma); len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK); rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma); /* reset read/write point */ rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff); /* reset H2C Queue index in a single write */ if 
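/*
 * rtw_pci_reset_buf_desc() above programs every TX ring the same way:
 * zero rp/wp, write the ring length to the *_NUM register and the ring
 * base to the *_DESA register. A table-driven sketch of the same steps,
 * reusing the register names from above; the example_* table is
 * hypothetical, and the long-hand form in the driver behaves identically.
 */
struct example_ring_reg {
        u8 queue;
        u32 num_reg;    /* ring length register */
        u32 desa_reg;   /* ring base (descriptor start address) register */
};

static const struct example_ring_reg example_ring_regs[] = {
        { RTW_TX_QUEUE_BK, RTK_PCI_TXBD_NUM_BKQ, RTK_PCI_TXBD_DESA_BKQ },
        { RTW_TX_QUEUE_BE, RTK_PCI_TXBD_NUM_BEQ, RTK_PCI_TXBD_DESA_BEQ },
        { RTW_TX_QUEUE_VO, RTK_PCI_TXBD_NUM_VOQ, RTK_PCI_TXBD_DESA_VOQ },
        { RTW_TX_QUEUE_VI, RTK_PCI_TXBD_NUM_VIQ, RTK_PCI_TXBD_DESA_VIQ },
};

static void example_reset_tx_rings(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci)
{
        struct rtw_pci_tx_ring *ring;
        int i;

        for (i = 0; i < ARRAY_SIZE(example_ring_regs); i++) {
                ring = &rtwpci->tx_rings[example_ring_regs[i].queue];
                ring->r.rp = 0;
                ring->r.wp = 0;
                rtw_write16(rtwdev, example_ring_regs[i].num_reg,
                            ring->r.len & TRX_BD_IDX_MASK);
                rtw_write32(rtwdev, example_ring_regs[i].desa_reg,
                            ring->r.dma);
        }
}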
(rtw_chip_wcpu_11ac(rtwdev)) rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX); } static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev) { rtw_pci_reset_buf_desc(rtwdev); } static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, bool exclude_rx) { unsigned long flags; u32 imr0_unmask = exclude_rx ? IMR_ROK : 0; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask); rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]); rtwpci->irq_enabled = true; spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { unsigned long flags; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); if (!rtwpci->irq_enabled) goto out; rtw_write32(rtwdev, RTK_PCI_HIMR0, 0); rtw_write32(rtwdev, RTK_PCI_HIMR1, 0); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HIMR3, 0); rtwpci->irq_enabled = false; out: spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { /* reset dma and rx tag */ rtw_write32_set(rtwdev, RTK_PCI_CTRL, BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN); rtwpci->rx_tag = 0; } static int rtw_pci_setup(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_reset_trx_ring(rtwdev); rtw_pci_dma_reset(rtwdev, rtwpci); return 0; } static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { struct rtw_pci_tx_ring *tx_ring; u8 queue; rtw_pci_reset_trx_ring(rtwdev); for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { tx_ring = &rtwpci->tx_rings[queue]; rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring); } } static void rtw_pci_napi_start(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) return; napi_enable(&rtwpci->napi); } static void rtw_pci_napi_stop(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) return; napi_synchronize(&rtwpci->napi); napi_disable(&rtwpci->napi); } static int rtw_pci_start(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_napi_start(rtwdev); spin_lock_bh(&rtwpci->irq_lock); rtwpci->running = true; rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); return 0; } static void rtw_pci_stop(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct pci_dev *pdev = rtwpci->pdev; spin_lock_bh(&rtwpci->irq_lock); rtwpci->running = false; rtw_pci_disable_interrupt(rtwdev, rtwpci); spin_unlock_bh(&rtwpci->irq_lock); synchronize_irq(pdev->irq); rtw_pci_napi_stop(rtwdev); spin_lock_bh(&rtwpci->irq_lock); rtw_pci_dma_release(rtwdev, rtwpci); spin_unlock_bh(&rtwpci->irq_lock); } static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *tx_ring; bool tx_empty = true; u8 queue; if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) goto enter_deep_ps; lockdep_assert_held(&rtwpci->irq_lock); /* Deep PS state is not allowed to TX-DMA */ for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) { /* BCN queue is rsvd page, does not have DMA interrupt * H2C queue is managed by firmware */ if 
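/*
 * rtw_pci_deep_ps_enter() must prove that no TX DMA is in flight before
 * entering deep power save: every ring except the beacon queue (reserved
 * pages, no DMA-done interrupt) and the H2C queue (owned by firmware) has
 * to be empty. A compact sketch of that predicate; example_* is
 * hypothetical.
 */
static bool example_tx_path_empty(struct rtw_pci *rtwpci)
{
        u8 queue;

        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                /* BCN holds reserved pages; H2C is managed by firmware */
                if (queue == RTW_TX_QUEUE_BCN || queue == RTW_TX_QUEUE_H2C)
                        continue;
                /* a queued skb may still be DMAing */
                if (skb_queue_len(&rtwpci->tx_rings[queue].queue))
                        return false;
        }
        return true;
}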
(queue == RTW_TX_QUEUE_BCN || queue == RTW_TX_QUEUE_H2C) continue; tx_ring = &rtwpci->tx_rings[queue]; /* check if there is any skb DMAing */ if (skb_queue_len(&tx_ring->queue)) { tx_empty = false; break; } } if (!tx_empty) { rtw_dbg(rtwdev, RTW_DBG_PS, "TX path not empty, cannot enter deep power save state\n"); return; } enter_deep_ps: set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); rtw_power_mode_change(rtwdev, true); } static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev) { #if defined(__linux__) struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; lockdep_assert_held(&rtwpci->irq_lock); #elif defined(__FreeBSD__) lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock); #endif if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_power_mode_change(rtwdev, false); } static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; spin_lock_bh(&rtwpci->irq_lock); if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_pci_deep_ps_enter(rtwdev); if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) rtw_pci_deep_ps_leave(rtwdev); spin_unlock_bh(&rtwpci->irq_lock); } static u8 ac_to_hwq[] = { [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO, [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI, [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE, [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK, }; #if defined(__linux__) static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); #elif defined(__FreeBSD__) rtw88_static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS); #endif static u8 rtw_hw_queue_mapping(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; u8 q_mapping = skb_get_queue_mapping(skb); u8 queue; if (unlikely(ieee80211_is_beacon(fc))) queue = RTW_TX_QUEUE_BCN; else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))) queue = RTW_TX_QUEUE_MGMT; else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq))) queue = ac_to_hwq[IEEE80211_AC_BE]; else queue = ac_to_hwq[q_mapping]; return queue; } static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci, struct rtw_pci_tx_ring *ring) { struct sk_buff *prev = skb_dequeue(&ring->queue); struct rtw_pci_tx_data *tx_data; dma_addr_t dma; if (!prev) return; tx_data = rtw_pci_get_tx_data(prev); dma = tx_data->dma; dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE); dev_kfree_skb_any(prev); } static void rtw_pci_dma_check(struct rtw_dev *rtwdev, struct rtw_pci_rx_ring *rx_ring, u32 idx) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci_rx_buffer_desc *buf_desc; u32 desc_sz = chip->rx_buf_desc_sz; u16 total_pkt_size; buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + idx * desc_sz); total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); /* rx tag mismatch, throw a warning */ if (total_pkt_size != rtwpci->rx_tag) rtw_warn(rtwdev, "pci bus timeout, check dma status\n"); rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; } static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q) { u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q]; u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2); return FIELD_GET(TRX_BD_IDX_MASK, bd_idx); } static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; u32 cur_rp; u8 i; /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a * bit
dynamic, it's hard to define a reasonable fixed total timeout for the * read_poll_timeout* helpers. Instead, we bound the number of polls, so * we just use a for loop with udelay here. */ for (i = 0; i < 30; i++) { cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q); if (cur_rp == ring->r.wp) return; udelay(1); } if (!drop) rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q); } static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues, bool drop) { u8 q; for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) { /* It may not be necessary to flush BCN and H2C tx queues. */ if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C) continue; if (pci_queues & BIT(q)) __pci_flush_queue(rtwdev, q, drop); } } static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop) { u32 pci_queues = 0; u8 i; /* If all of the hardware queues are requested to flush, * flush all of the pci queues. */ if (queues == BIT(rtwdev->hw->queues) - 1) { pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; } else { for (i = 0; i < rtwdev->hw->queues; i++) if (queues & BIT(i)) pci_queues |= BIT(ac_to_hwq[i]); } __rtw_pci_flush_queues(rtwdev, pci_queues, drop); } static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring; u32 bd_idx; ring = &rtwpci->tx_rings[queue]; bd_idx = rtw_pci_tx_queue_idx_addr[queue]; spin_lock_bh(&rtwpci->irq_lock); if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) rtw_pci_deep_ps_leave(rtwdev); rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); spin_unlock_bh(&rtwpci->irq_lock); } static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u8 queue; for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) if (test_and_clear_bit(queue, rtwpci->tx_queued)) rtw_pci_tx_kick_off_queue(rtwdev, queue); } static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb, u8 queue) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci_tx_ring *ring; struct rtw_pci_tx_data *tx_data; dma_addr_t dma; u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; u32 size; u32 psb_len; u8 *pkt_desc; struct rtw_pci_tx_buffer_desc *buf_desc; ring = &rtwpci->tx_rings[queue]; size = skb->len; if (queue == RTW_TX_QUEUE_BCN) rtw_pci_release_rsvd_page(rtwpci, ring); else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len)) return -ENOSPC; pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, tx_pkt_desc_sz); pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); rtw_tx_fill_tx_desc(pkt_info, skb); dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&rtwpci->pdev->dev, dma)) return -EBUSY; /* after this we got dma mapped, there is no way back */ buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz); memset(buf_desc, 0, tx_buf_desc_sz); psb_len = (skb->len - 1) / 128 + 1; if (queue == RTW_TX_QUEUE_BCN) psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET; buf_desc[0].psb_len = cpu_to_le16(psb_len); buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz); buf_desc[0].dma = cpu_to_le32(dma); buf_desc[1].buf_size = cpu_to_le16(size); buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz); tx_data = rtw_pci_get_tx_data(skb); tx_data->dma = dma; tx_data->sn = pkt_info->sn; spin_lock_bh(&rtwpci->irq_lock); skb_queue_tail(&ring->queue, skb); if (queue == RTW_TX_QUEUE_BCN)
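/*
 * The flush comment above is why __pci_flush_queue() uses a plain bounded
 * loop instead of a read_poll_timeout*() helper: the per-iteration register
 * read has variable latency, so only the number of polls is fixed. A
 * generic sketch of the pattern; example_* is hypothetical.
 */
static int example_poll_rp_bounded(struct rtw_dev *rtwdev, u8 pci_q,
                                   u32 want_rp, unsigned int max_polls)
{
        unsigned int i;

        for (i = 0; i < max_polls; i++) {
                /* each read may take a variable amount of bus time */
                if (__pci_get_hw_tx_ring_rp(rtwdev, pci_q) == want_rp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;      /* bounded by poll count, not wall-clock time */
}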
goto out_unlock; /* update write-index, and kick it off later */ set_bit(queue, rtwpci->tx_queued); if (++ring->r.wp >= ring->r.len) ring->r.wp = 0; out_unlock: spin_unlock_bh(&rtwpci->irq_lock); return 0; } static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size) { struct sk_buff *skb; struct rtw_tx_pkt_info pkt_info = {0}; u8 reg_bcn_work; int ret; skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size); if (!skb) return -ENOMEM; ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN); if (ret) { +#if defined(__FreeBSD__) + dev_kfree_skb_any(skb); +#endif rtw_err(rtwdev, "failed to write rsvd page data\n"); return ret; } /* reserved pages go through beacon queue */ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK); reg_bcn_work |= BIT_PCI_BCNQ_FLAG; rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work); return 0; } static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size) { struct sk_buff *skb; struct rtw_tx_pkt_info pkt_info = {0}; int ret; skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size); if (!skb) return -ENOMEM; ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C); if (ret) { +#if defined(__FreeBSD__) + dev_kfree_skb_any(skb); +#endif rtw_err(rtwdev, "failed to write h2c data\n"); return ret; } rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C); return 0; } static int rtw_pci_tx_write(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct rtw_pci_tx_ring *ring; u8 queue = rtw_hw_queue_mapping(skb); int ret; ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue); if (ret) return ret; ring = &rtwpci->tx_rings[queue]; spin_lock_bh(&rtwpci->irq_lock); if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); ring->queue_stopped = true; } spin_unlock_bh(&rtwpci->irq_lock); return 0; } static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u8 hw_queue) { struct ieee80211_hw *hw = rtwdev->hw; struct ieee80211_tx_info *info; struct rtw_pci_tx_ring *ring; struct rtw_pci_tx_data *tx_data; struct sk_buff *skb; u32 count; u32 bd_idx_addr; u32 bd_idx, cur_rp, rp_idx; u16 q_map; ring = &rtwpci->tx_rings[hw_queue]; bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue]; bd_idx = rtw_read32(rtwdev, bd_idx_addr); cur_rp = bd_idx >> 16; cur_rp &= TRX_BD_IDX_MASK; rp_idx = ring->r.rp; if (cur_rp >= ring->r.rp) count = cur_rp - ring->r.rp; else count = ring->r.len - (ring->r.rp - cur_rp); while (count--) { skb = skb_dequeue(&ring->queue); if (!skb) { rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", count, hw_queue, bd_idx, ring->r.rp, cur_rp); break; } tx_data = rtw_pci_get_tx_data(skb); dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, DMA_TO_DEVICE); /* just free command packets from host to card */ if (hw_queue == RTW_TX_QUEUE_H2C) { dev_kfree_skb_irq(skb); continue; } if (ring->queue_stopped && avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { q_map = skb_get_queue_mapping(skb); ieee80211_wake_queue(hw, q_map); ring->queue_stopped = false; } if (++rp_idx >= ring->r.len) rp_idx = 0; skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); info = IEEE80211_SKB_CB(skb); /* enqueue to wait for tx report */ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); continue; } /* always ACK for others, then they won't be marked as drop */ if (info->flags 
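/*
 * The two FreeBSD hunks above fix an skb leak: rtw_pci_tx_write_data()
 * takes ownership of the skb only once it is queued on the ring, so on
 * failure (-ENOSPC, -EBUSY) the caller still owns it and must free it.
 * A sketch of the ownership contract the fix relies on; example_* is
 * hypothetical.
 */
static int example_write_owned_buf(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct rtw_tx_pkt_info pkt_info = {0};
        struct sk_buff *skb;
        int ret;

        skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
        if (!skb)
                return -ENOMEM;

        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
        if (ret) {
                /* failed before queueing: the skb is still ours to free */
                dev_kfree_skb_any(skb);
                return ret;
        }

        /* success: the skb now belongs to the ring / completion path */
        return 0;
}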
& IEEE80211_TX_CTL_NO_ACK) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; else info->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_info_clear_status(info); ieee80211_tx_status_irqsafe(hw, skb); } ring->r.rp = cur_rp; } static void rtw_pci_rx_isr(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct napi_struct *napi = &rtwpci->napi; napi_schedule(napi); } static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci) { struct rtw_pci_rx_ring *ring; int count = 0; u32 tmp, cur_wp; ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ); cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK); if (cur_wp >= ring->r.wp) count = cur_wp - ring->r.wp; else count = ring->r.len - (ring->r.wp - cur_wp); return count; } static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u8 hw_queue, u32 limit) { struct rtw_chip_info *chip = rtwdev->chip; struct napi_struct *napi = &rtwpci->napi; struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; struct rtw_rx_pkt_stat pkt_stat; struct ieee80211_rx_status rx_status; struct sk_buff *skb, *new; u32 cur_rp = ring->r.rp; u32 count, rx_done = 0; u32 pkt_offset; u32 pkt_desc_sz = chip->rx_pkt_desc_sz; u32 buf_desc_sz = chip->rx_buf_desc_sz; u32 new_len; u8 *rx_desc; dma_addr_t dma; count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci); count = min(count, limit); while (count--) { rtw_pci_dma_check(rtwdev, ring, cur_rp); skb = ring->buf[cur_rp]; dma = *((dma_addr_t *)skb->cb); dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, DMA_FROM_DEVICE); rx_desc = skb->data; chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); /* offset from rx_desc to payload */ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz + pkt_stat.shift; /* allocate a new skb for this frame, * discard the frame if none available */ new_len = pkt_stat.pkt_len + pkt_offset; new = dev_alloc_skb(new_len); if (WARN_ONCE(!new, "rx routine starvation\n")) goto next_rp; /* put the DMA data including rx_desc from phy to new skb */ skb_put_data(new, skb->data, new_len); if (pkt_stat.is_c2h) { rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new); } else { /* remove rx_desc */ skb_pull(new, pkt_offset); rtw_rx_stats(rtwdev, pkt_stat.vif, new); memcpy(new->cb, &rx_status, sizeof(rx_status)); ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); rx_done++; } next_rp: /* new skb delivered to mac80211, re-enable original skb DMA */ rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, buf_desc_sz); /* host reads the next element in the ring */ if (++cur_rp >= ring->r.len) cur_rp = 0; } ring->r.rp = cur_rp; /* 'rp', the last position we have read, is seen as the previous position * of 'wp' that is used to calculate 'count' next time.
*/ ring->r.wp = cur_rp; rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp); return rx_done; } static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci, u32 *irq_status) { unsigned long flags; spin_lock_irqsave(&rtwpci->hwirq_lock, flags); irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0); irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1); if (rtw_chip_wcpu_11ac(rtwdev)) irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3); else irq_status[3] = 0; irq_status[0] &= rtwpci->irq_mask[0]; irq_status[1] &= rtwpci->irq_mask[1]; irq_status[3] &= rtwpci->irq_mask[3]; rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]); rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]); if (rtw_chip_wcpu_11ac(rtwdev)) rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]); spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); } static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev) { struct rtw_dev *rtwdev = dev; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; /* disable RTW PCI interrupt to avoid more interrupts before the end of * thread function * * disable HIMR here to also avoid new HISR flag being raised before * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs * are cleared, the edge-triggered interrupt will not be generated when * a new HISR flag is set. */ rtw_pci_disable_interrupt(rtwdev, rtwpci); return IRQ_WAKE_THREAD; } static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev) { struct rtw_dev *rtwdev = dev; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; u32 irq_status[4]; bool rx = false; spin_lock_bh(&rtwpci->irq_lock); rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status); if (irq_status[0] & IMR_MGNTDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT); if (irq_status[0] & IMR_HIGHDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0); if (irq_status[0] & IMR_BEDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE); if (irq_status[0] & IMR_BKDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK); if (irq_status[0] & IMR_VODOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO); if (irq_status[0] & IMR_VIDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI); if (irq_status[3] & IMR_H2CDOK) rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C); if (irq_status[0] & IMR_ROK) { rtw_pci_rx_isr(rtwdev); rx = true; } if (unlikely(irq_status[0] & IMR_C2HCMD)) rtw_fw_c2h_cmd_isr(rtwdev); /* all of the jobs for this interrupt have been done */ if (rtwpci->running) rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); spin_unlock_bh(&rtwpci->irq_lock); return IRQ_HANDLED; } static int rtw_pci_io_mapping(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; unsigned long len; u8 bar_id = 2; int ret; ret = pci_request_regions(pdev, KBUILD_MODNAME); if (ret) { rtw_err(rtwdev, "failed to request pci regions\n"); return ret; } #if defined(__FreeBSD__) ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { rtw_err(rtwdev, "failed to set dma mask to 32-bit\n"); goto err_release_regions; } ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); goto err_release_regions; } #endif len = pci_resource_len(pdev, bar_id); #if defined(__FreeBSD__) linuxkpi_pcim_want_to_use_bus_functions(pdev); #endif rtwpci->mmap = pci_iomap(pdev, bar_id, len); if (!rtwpci->mmap) { pci_release_regions(pdev); rtw_err(rtwdev, "failed to map pci memory\n"); return -ENOMEM; } return 0; #if defined(__FreeBSD__) err_release_regions: pci_release_regions(pdev); 
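/*
 * rtw_pci_irq_recognized() above acknowledges interrupts by writing the
 * latched HISR bits back (write-1-to-clear) after masking them with the
 * enabled HIMR set; with edge-triggered MSI, leaving a bit set would
 * suppress the next edge. A minimal sketch of the W1C step; example_*
 * is hypothetical.
 */
static u32 example_ack_irqs(struct rtw_dev *rtwdev, u32 hisr_reg,
                            u32 enabled_mask)
{
        u32 status = rtw_pci_read32(rtwdev, hisr_reg);

        status &= enabled_mask;                    /* service only what we enabled */
        rtw_pci_write32(rtwdev, hisr_reg, status); /* write-1-to-clear */

        return status;
}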
return ret; #endif } static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; if (rtwpci->mmap) { pci_iounmap(pdev, rtwpci->mmap); pci_release_regions(pdev); } } static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data) { u16 write_addr; u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK); u8 flag; u8 cnt; write_addr = addr & BITS_DBI_ADDR_MASK; write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN); rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data); rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr); rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); if (flag == 0) return; udelay(10); } WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr); } static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value) { u16 read_addr = addr & BITS_DBI_ADDR_MASK; u8 flag; u8 cnt; rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr); rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2); if (flag == 0) { read_addr = REG_DBI_RDATA_V1 + (addr & 3); *value = rtw_read8(rtwdev, read_addr); return 0; } udelay(10); } WARN(1, "failed to read DBI register, addr=0x%04x\n", addr); return -EIO; } static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1) { u8 page; u8 wflag; u8 cnt; rtw_write16(rtwdev, REG_MDIO_V1, data); page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1; page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2; rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK); rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page); rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1); for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1); if (wflag == 0) return; udelay(10); } WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr); } static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; if (rtw_pci_disable_aspm) return; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); return; } if (enable) value |= BIT_CLKREQ_SW_EN; else value &= ~BIT_CLKREQ_SW_EN; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); return; } if (enable) value &= ~BIT_CLKREQ_N_PAD; else value |= BIT_CLKREQ_N_PAD; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) { u8 value; int ret; if (rtw_pci_disable_aspm) return; ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret); return; } if (enable) value |= BIT_L1_SW_EN; else value &= ~BIT_L1_SW_EN; rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value); } static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; /* Like CLKREQ, ASPM is also implemented by two HW modules, and can * only be enabled when host supports it. * * And ASPM mechanism should be enabled when driver/firmware enters * power save mode, without having heavy traffic. 
Because we've * experienced some inter-operability issues that the link tends * to enter L1 state on the fly even when driver is having high * throughput. This is probably because the ASPM behavior slightly * varies from different SOC. */ if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) return; if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) || (!enter && atomic_inc_return(&rtwpci->link_usage) == 1)) rtw_pci_aspm_set(rtwdev, enter); } static void rtw_pci_link_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; struct pci_dev *pdev = rtwpci->pdev; u16 link_ctrl; int ret; /* RTL8822CE has enabled REFCLK auto calibration, it does not need * to add clock delay to cover the REFCLK timing gap. */ if (chip->id == RTW_CHIP_TYPE_8822C) rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0); /* Though there is standard PCIE configuration space to set the * link control register, but by Realtek's design, driver should * check if host supports CLKREQ/ASPM to enable the HW module. * * These functions are implemented by two HW modules associated, * one is responsible to access PCIE configuration space to * follow the host settings, and another is in charge of doing * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes * the host does not support it, and due to some reasons or wrong * settings (ex. CLKREQ# not Bi-Direction), it could lead to device * loss if HW misbehaves on the link. * * Hence it's designed that driver should first check the PCIE * configuration space is sync'ed and enabled, then driver can turn * on the other module that is actually working on the mechanism. */ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); if (ret) { rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); return; } if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) rtw_pci_clkreq_set(rtwdev, true); rtwpci->link_ctrl = link_ctrl; } static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; switch (chip->id) { case RTW_CHIP_TYPE_8822C: if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG, BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1); break; default: break; } } static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; const struct rtw_intf_phy_para *para; u16 cut; u16 value; u16 offset; int i; cut = BIT(0) << rtwdev->hal.cut_version; for (i = 0; i < chip->intf_table->n_gen1_para; i++) { para = &chip->intf_table->gen1_para[i]; if (!(para->cut_mask & cut)) continue; if (para->offset == 0xffff) break; offset = para->offset; value = para->value; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, true); else rtw_dbi_write8(rtwdev, offset, value); } for (i = 0; i < chip->intf_table->n_gen2_para; i++) { para = &chip->intf_table->gen2_para[i]; if (!(para->cut_mask & cut)) continue; if (para->offset == 0xffff) break; offset = para->offset; value = para->value; if (para->ip_sel == RTW_IP_SEL_PHY) rtw_mdio_write(rtwdev, offset, value, false); else rtw_dbi_write8(rtwdev, offset, value); } rtw_pci_link_cfg(rtwdev); } static int __maybe_unused rtw_pci_suspend(struct device *dev) { struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) rtw_pci_clkreq_pad_low(rtwdev, true); return 0; } static int __maybe_unused 
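/*
 * rtw_pci_phy_cfg() above selects PHY parameters per chip cut by turning
 * the cut version into a one-hot bit and testing it against each entry's
 * cut_mask (cut version 2, i.e. C-cut, becomes BIT(2)). A small sketch;
 * example_* is hypothetical.
 */
static bool example_para_matches_cut(u16 cut_mask, u8 cut_version)
{
        /* cut_version 0 (A-cut) -> BIT(0), 1 (B-cut) -> BIT(1), ... */
        u16 cut = BIT(0) << cut_version;

        return !!(cut_mask & cut);
}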
rtw_pci_resume(struct device *dev) { struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rtw_dev *rtwdev = hw->priv; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) rtw_pci_clkreq_pad_low(rtwdev, false); return 0; } SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume); EXPORT_SYMBOL(rtw_pm_ops); static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev) { int ret; ret = pci_enable_device(pdev); if (ret) { rtw_err(rtwdev, "failed to enable pci device\n"); return ret; } pci_set_master(pdev); pci_set_drvdata(pdev, rtwdev->hw); SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); return 0; } static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev) { pci_clear_master(pdev); pci_disable_device(pdev); } static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev) { struct rtw_pci *rtwpci; int ret; rtwpci = (struct rtw_pci *)rtwdev->priv; rtwpci->pdev = pdev; /* after this driver can access to hw registers */ ret = rtw_pci_io_mapping(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to request pci io region\n"); goto err_out; } ret = rtw_pci_init(rtwdev); if (ret) { rtw_err(rtwdev, "failed to allocate pci resources\n"); goto err_io_unmap; } return 0; err_io_unmap: rtw_pci_io_unmapping(rtwdev, pdev); err_out: return ret; } static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) { rtw_pci_deinit(rtwdev); rtw_pci_io_unmapping(rtwdev, pdev); } static struct rtw_hci_ops rtw_pci_ops = { .tx_write = rtw_pci_tx_write, .tx_kick_off = rtw_pci_tx_kick_off, .flush_queues = rtw_pci_flush_queues, .setup = rtw_pci_setup, .start = rtw_pci_start, .stop = rtw_pci_stop, .deep_ps = rtw_pci_deep_ps, .link_ps = rtw_pci_link_ps, .interface_cfg = rtw_pci_interface_cfg, .read8 = rtw_pci_read8, .read16 = rtw_pci_read16, .read32 = rtw_pci_read32, .write8 = rtw_pci_write8, .write16 = rtw_pci_write16, .write32 = rtw_pci_write32, .write_data_rsvd_page = rtw_pci_write_data_rsvd_page, .write_data_h2c = rtw_pci_write_data_h2c, }; static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) { unsigned int flags = PCI_IRQ_LEGACY; int ret; if (!rtw_disable_msi) flags |= PCI_IRQ_MSI; ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); if (ret < 0) { rtw_err(rtwdev, "failed to alloc PCI irq vectors\n"); return ret; } ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, rtw_pci_interrupt_handler, rtw_pci_interrupt_threadfn, IRQF_SHARED, KBUILD_MODNAME, rtwdev); if (ret) { rtw_err(rtwdev, "failed to request irq %d\n", ret); pci_free_irq_vectors(pdev); } return ret; } static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev) { devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); pci_free_irq_vectors(pdev); } static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) { struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi); struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev, priv); int work_done = 0; if (rtwpci->rx_no_aspm) rtw_pci_link_ps(rtwdev, false); while (work_done < budget) { u32 work_done_once; work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU, budget - work_done); if (work_done_once == 0) break; work_done += work_done_once; } if (work_done < budget) { napi_complete_done(napi, work_done); spin_lock_bh(&rtwpci->irq_lock); if (rtwpci->running) rtw_pci_enable_interrupt(rtwdev, rtwpci, false); spin_unlock_bh(&rtwpci->irq_lock); /* When ISR happens during polling and before napi_complete * 
while no further data is received. Data on the dma_ring will * not be processed immediately. Check whether dma ring is * empty and perform napi_schedule accordingly. */ if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci)) napi_schedule(napi); } if (rtwpci->rx_no_aspm) rtw_pci_link_ps(rtwdev, true); return work_done; } static void rtw_pci_napi_init(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; init_dummy_netdev(&rtwpci->netdev); netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll, RTW_NAPI_WEIGHT_NUM); } static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_pci_napi_stop(rtwdev); netif_napi_del(&rtwpci->napi); } int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pci_dev *bridge = pci_upstream_bridge(pdev); struct ieee80211_hw *hw; struct rtw_dev *rtwdev; struct rtw_pci *rtwpci; int drv_data_size; int ret; drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci); hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops); if (!hw) { dev_err(&pdev->dev, "failed to allocate hw\n"); return -ENOMEM; } rtwdev = hw->priv; rtwdev->hw = hw; rtwdev->dev = &pdev->dev; rtwdev->chip = (struct rtw_chip_info *)id->driver_data; rtwdev->hci.ops = &rtw_pci_ops; rtwdev->hci.type = RTW_HCI_TYPE_PCIE; rtwpci = (struct rtw_pci *)rtwdev->priv; atomic_set(&rtwpci->link_usage, 1); ret = rtw_core_init(rtwdev); if (ret) goto err_release_hw; rtw_dbg(rtwdev, RTW_DBG_PCI, "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n", pdev->vendor, pdev->device, pdev->revision); ret = rtw_pci_claim(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to claim pci device\n"); goto err_deinit_core; } ret = rtw_pci_setup_resource(rtwdev, pdev); if (ret) { rtw_err(rtwdev, "failed to setup pci resources\n"); goto err_pci_declaim; } rtw_pci_napi_init(rtwdev); ret = rtw_chip_info_setup(rtwdev); if (ret) { rtw_err(rtwdev, "failed to setup chip information\n"); goto err_destroy_pci; } /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL) rtwpci->rx_no_aspm = true; rtw_pci_phy_cfg(rtwdev); ret = rtw_register_hw(rtwdev, hw); if (ret) { rtw_err(rtwdev, "failed to register hw\n"); goto err_destroy_pci; } ret = rtw_pci_request_irq(rtwdev, pdev); if (ret) { ieee80211_unregister_hw(hw); goto err_destroy_pci; } return 0; err_destroy_pci: rtw_pci_napi_deinit(rtwdev); rtw_pci_destroy(rtwdev, pdev); err_pci_declaim: rtw_pci_declaim(rtwdev, pdev); err_deinit_core: rtw_core_deinit(rtwdev); err_release_hw: ieee80211_free_hw(hw); return ret; } EXPORT_SYMBOL(rtw_pci_probe); void rtw_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtw_dev *rtwdev; struct rtw_pci *rtwpci; if (!hw) return; rtwdev = hw->priv; rtwpci = (struct rtw_pci *)rtwdev->priv; rtw_unregister_hw(rtwdev, hw); rtw_pci_disable_interrupt(rtwdev, rtwpci); rtw_pci_napi_deinit(rtwdev); rtw_pci_destroy(rtwdev, pdev); rtw_pci_declaim(rtwdev, pdev); rtw_pci_free_irq(rtwdev, pdev); rtw_core_deinit(rtwdev); ieee80211_free_hw(hw); } EXPORT_SYMBOL(rtw_pci_remove); void rtw_pci_shutdown(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct rtw_dev *rtwdev; struct rtw_chip_info *chip; if (!hw) return; rtwdev = hw->priv; chip = rtwdev->chip; if (chip->ops->shutdown) chip->ops->shutdown(rtwdev); pci_set_power_state(pdev, PCI_D3hot); } EXPORT_SYMBOL(rtw_pci_shutdown); MODULE_AUTHOR("Realtek Corporation"); 
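/*
 * The NAPI poll above drains RX in budget-bounded slices: loop until
 * rtw_pci_rx_napi() returns 0 or the budget is spent, complete and re-arm
 * interrupts only when under budget, then re-schedule if frames raced in
 * before completion. A skeleton of that shape; the example_* stubs are
 * hypothetical stand-ins for the driver's RX drain and ring-empty check.
 */
static u32 example_rx_chunk(struct napi_struct *napi, u32 limit);
static bool example_rx_pending(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        u32 once;

        while (work_done < budget) {
                once = example_rx_chunk(napi, budget - work_done);
                if (!once)
                        break;
                work_done += once;
        }

        if (work_done < budget) {
                /* under budget: stop polling and re-enable the RX interrupt */
                napi_complete_done(napi, work_done);
                /* close the race with an ISR that fired before completion */
                if (example_rx_pending(napi))
                        napi_schedule(napi);
        }

        return work_done;
}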
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver"); MODULE_LICENSE("Dual BSD/GPL"); #if defined(__FreeBSD__) MODULE_VERSION(rtw_pci, 1); MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1); MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1); #ifdef CONFIG_RTW88_DEBUGFS MODULE_DEPEND(rtw_pci, debugfs, 1, 1, 1); #endif #endif diff --git a/sys/contrib/dev/rtw88/tx.c b/sys/contrib/dev/rtw88/tx.c index f405682c9cbf..a85f09a8d7b2 100644 --- a/sys/contrib/dev/rtw88/tx.c +++ b/sys/contrib/dev/rtw88/tx.c @@ -1,695 +1,700 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright(c) 2018-2019 Realtek Corporation */ #include "main.h" #include "tx.h" #include "fw.h" #include "ps.h" #include "debug.h" static void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_hdr *hdr; struct rtw_vif *rtwvif; hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_data(hdr->frame_control)) return; if (!is_broadcast_ether_addr(hdr->addr1) && !is_multicast_ether_addr(hdr->addr1)) { rtwdev->stats.tx_unicast += skb->len; rtwdev->stats.tx_cnt++; if (vif) { rtwvif = (struct rtw_vif *)vif->drv_priv; rtwvif->stats.tx_unicast += skb->len; rtwvif->stats.tx_cnt++; } } } void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) { __le32 *txdesc = (__le32 *)skb->data; SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size); SET_TX_DESC_OFFSET(txdesc, pkt_info->offset); SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset); SET_TX_DESC_QSEL(txdesc, pkt_info->qsel); SET_TX_DESC_BMC(txdesc, pkt_info->bmc); SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id); SET_TX_DESC_DATARATE(txdesc, pkt_info->rate); SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback); SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate); SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type); SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw); SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq); SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor); SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density); SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc); SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc); SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en); SET_TX_DESC_LS(txdesc, pkt_info->ls); SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi); SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report); SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn); SET_TX_DESC_USE_RTS(txdesc, pkt_info->rts); if (pkt_info->rts) { SET_TX_DESC_RTSRATE(txdesc, DESC_RATE24M); SET_TX_DESC_DATA_RTS_SHORT(txdesc, 1); } SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq); SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq); SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel); SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr); SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null); } EXPORT_SYMBOL(rtw_tx_fill_tx_desc); static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta) { u8 exp = sta->deflink.ht_cap.ampdu_factor; /* the least ampdu factor is 8K, and the value in the tx desc is the * max aggregation num, which represents val * 2 packets can be * aggregated in an AMPDU, so here we should use 8/2=4 as the base */ return (BIT(2) << exp) - 1; } static u8 get_tx_ampdu_density(struct ieee80211_sta *sta) { return sta->deflink.ht_cap.ampdu_density; } static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev, struct ieee80211_sta *sta) { u8 rate; if (rtwdev->hal.rf_type == RF_2T2R && sta->deflink.ht_cap.mcs.rx_mask[1] != 0) rate = DESC_RATEMCS15; else rate = DESC_RATEMCS7; return rate; } static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev, struct ieee80211_sta 
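/*
 * A worked reading of the A-MPDU factor comment above: the HT
 * ampdu_factor exponent implies a byte limit of 8 KiB << exp, and the
 * descriptor field counts in units of two packets, hence the 8/2 = 4
 * base. For exp = 3 (a 64 KiB A-MPDU) this yields (BIT(2) << 3) - 1 = 31.
 * example_* is hypothetical.
 */
static u8 example_max_agg_num(u8 ampdu_exp)
{
        /* 8 KiB << exp limit; the descriptor value represents val * 2 packets */
        return (BIT(2) << ampdu_exp) - 1;
}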
*sta) { struct rtw_efuse *efuse = &rtwdev->efuse; u8 rate; u16 tx_mcs_map; tx_mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map); if (efuse->hw_cap.nss == 1) { switch (tx_mcs_map & 0x3) { case IEEE80211_VHT_MCS_SUPPORT_0_7: rate = DESC_RATEVHT1SS_MCS7; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: rate = DESC_RATEVHT1SS_MCS8; break; default: case IEEE80211_VHT_MCS_SUPPORT_0_9: rate = DESC_RATEVHT1SS_MCS9; break; } } else if (efuse->hw_cap.nss >= 2) { switch ((tx_mcs_map & 0xc) >> 2) { case IEEE80211_VHT_MCS_SUPPORT_0_7: rate = DESC_RATEVHT2SS_MCS7; break; case IEEE80211_VHT_MCS_SUPPORT_0_8: rate = DESC_RATEVHT2SS_MCS8; break; default: case IEEE80211_VHT_MCS_SUPPORT_0_9: rate = DESC_RATEVHT2SS_MCS9; break; } } else { rate = DESC_RATEVHT1SS_MCS9; } return rate; } static void rtw_tx_report_enable(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info) { struct rtw_tx_report *tx_report = &rtwdev->tx_report; /* [11:8], reserved, fills with zero * [7:2], tx report sequence number * [1:0], firmware use, fills with zero */ pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc; pkt_info->report = true; } void rtw_tx_report_purge_timer(struct timer_list *t) { struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer); struct rtw_tx_report *tx_report = &rtwdev->tx_report; unsigned long flags; #if defined(__linux__) if (skb_queue_len(&tx_report->queue) == 0) return; rtw_warn(rtwdev, "failed to get tx report from firmware\n"); spin_lock_irqsave(&tx_report->q_lock, flags); skb_queue_purge(&tx_report->queue); spin_unlock_irqrestore(&tx_report->q_lock, flags); #elif defined(__FreeBSD__) uint32_t qlen; spin_lock_irqsave(&tx_report->q_lock, flags); qlen = skb_queue_len(&tx_report->queue); if (qlen > 0) skb_queue_purge(&tx_report->queue); spin_unlock_irqrestore(&tx_report->q_lock, flags); /* * XXX while there could be a new enqueue in the queue * simply not yet processed given the timer is updated without * locks after enqueue in rtw_tx_report_enqueue(), the numbers * seen can be in the 100s. We revert to rtw_dbg from * Linux git 584dce175f0461d5d9d63952a1e7955678c91086 . 
*/ rtw_dbg(rtwdev, RTW_DBG_TX, "failed to get tx report from firmware: " "txreport qlen %u\n", qlen); #endif } void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn) { struct rtw_tx_report *tx_report = &rtwdev->tx_report; unsigned long flags; u8 *drv_data; /* pass sn to tx report handler through driver data */ drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data; *drv_data = sn; spin_lock_irqsave(&tx_report->q_lock, flags); __skb_queue_tail(&tx_report->queue, skb); spin_unlock_irqrestore(&tx_report->q_lock, flags); mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT); } EXPORT_SYMBOL(rtw_tx_report_enqueue); static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev, struct sk_buff *skb, bool acked) { struct ieee80211_tx_info *info; info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); if (acked) info->flags |= IEEE80211_TX_STAT_ACK; else info->flags &= ~IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(rtwdev->hw, skb); } void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src) { struct rtw_tx_report *tx_report = &rtwdev->tx_report; struct rtw_c2h_cmd *c2h; struct sk_buff *cur, *tmp; unsigned long flags; u8 sn, st; u8 *n; c2h = get_c2h_from_skb(skb); if (src == C2H_CCX_TX_RPT) { sn = GET_CCX_REPORT_SEQNUM_V0(c2h->payload); st = GET_CCX_REPORT_STATUS_V0(c2h->payload); } else { sn = GET_CCX_REPORT_SEQNUM_V1(c2h->payload); st = GET_CCX_REPORT_STATUS_V1(c2h->payload); } spin_lock_irqsave(&tx_report->q_lock, flags); skb_queue_walk_safe(&tx_report->queue, cur, tmp) { n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data; if (*n == sn) { __skb_unlink(cur, &tx_report->queue); rtw_tx_report_tx_status(rtwdev, cur, st == 0); break; } } spin_unlock_irqrestore(&tx_report->q_lock, flags); } static u8 rtw_get_mgmt_rate(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 lowest_rate, bool ignore_rate) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = tx_info->control.vif; bool force_lowest = test_bit(RTW_FLAG_FORCE_LOWEST_RATE, rtwdev->flags); if (!vif || !vif->bss_conf.basic_rates || ignore_rate || force_lowest) return lowest_rate; return __ffs(vif->bss_conf.basic_rates) + lowest_rate; } static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb, bool ignore_rate) { if (rtwdev->hal.current_band_type == RTW_BAND_2G) { pkt_info->rate_id = RTW_RATEID_B_20M; pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE1M, ignore_rate); } else { pkt_info->rate_id = RTW_RATEID_G; pkt_info->rate = rtw_get_mgmt_rate(rtwdev, skb, DESC_RATE6M, ignore_rate); } pkt_info->use_rate = true; pkt_info->dis_rate_fallback = true; } static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u8 sec_type = 0; if (info && info->control.hw_key) { struct ieee80211_key_conf *key = info->control.hw_key; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: sec_type = 0x01; break; case WLAN_CIPHER_SUITE_CCMP: sec_type = 0x03; break; default: break; } } pkt_info->sec_type = sec_type; } static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct ieee80211_sta *sta, struct sk_buff *skb) { rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, false); pkt_info->dis_qselseq = true; pkt_info->en_hwseq = true; pkt_info->hw_ssn_sel = 0; /* TODO: 
need to change hw port and hw ssn sel for multiple vifs */ } static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct ieee80211_sta *sta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hw *hw = rtwdev->hw; struct rtw_dm_info *dm_info = &rtwdev->dm_info; struct rtw_sta_info *si; u8 fix_rate; u16 seq; u8 ampdu_factor = 0; u8 ampdu_density = 0; bool ampdu_en = false; u8 rate = DESC_RATE6M; u8 rate_id = 6; u8 bw = RTW_CHANNEL_WIDTH_20; bool stbc = false; bool ldpc = false; seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; /* for broadcast/multicast, use default values */ if (!sta) goto out; if (info->flags & IEEE80211_TX_CTL_AMPDU) { ampdu_en = true; ampdu_factor = get_tx_ampdu_factor(sta); ampdu_density = get_tx_ampdu_density(sta); } if (info->control.use_rts || skb->len > hw->wiphy->rts_threshold) pkt_info->rts = true; if (sta->deflink.vht_cap.vht_supported) rate = get_highest_vht_tx_rate(rtwdev, sta); else if (sta->deflink.ht_cap.ht_supported) rate = get_highest_ht_tx_rate(rtwdev, sta); else if (sta->deflink.supp_rates[0] <= 0xf) rate = DESC_RATE11M; else rate = DESC_RATE54M; si = (struct rtw_sta_info *)sta->drv_priv; bw = si->bw_mode; rate_id = si->rate_id; stbc = si->stbc_en; ldpc = si->ldpc_en; out: pkt_info->seq = seq; pkt_info->ampdu_factor = ampdu_factor; pkt_info->ampdu_density = ampdu_density; pkt_info->ampdu_en = ampdu_en; pkt_info->rate = rate; pkt_info->rate_id = rate_id; pkt_info->bw = bw; pkt_info->stbc = stbc; pkt_info->ldpc = ldpc; fix_rate = dm_info->fix_rate; if (fix_rate < DESC_RATE_MAX) { pkt_info->rate = fix_rate; pkt_info->dis_rate_fallback = true; pkt_info->use_rate = true; } } void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct ieee80211_sta *sta, struct sk_buff *skb) { struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rtw_sta_info *si; struct ieee80211_vif *vif = NULL; __le16 fc = hdr->frame_control; bool bmc; if (sta) { si = (struct rtw_sta_info *)sta->drv_priv; vif = si->vif; } if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc)) rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, sta, skb); else if (ieee80211_is_data(fc)) rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb); bmc = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) rtw_tx_report_enable(rtwdev, pkt_info); pkt_info->bmc = bmc; rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); pkt_info->tx_pkt_size = skb->len; pkt_info->offset = chip->tx_pkt_desc_sz; pkt_info->qsel = skb->priority; pkt_info->ls = true; /* maybe merge with tx status ? */ rtw_tx_stats(rtwdev, vif, skb); } void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb, enum rtw_rsvd_packet_type type) { struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; bool bmc; /* A beacon or dummy reserved page packet indicates that it is the first * reserved page, and the qsel of it will be set in each hci. 
void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
				      struct rtw_tx_pkt_info *pkt_info,
				      struct sk_buff *skb,
				      enum rtw_rsvd_packet_type type)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	bool bmc;

	/* A beacon or dummy reserved page packet indicates that it is the
	 * first reserved page, and the qsel of it will be set in each hci.
	 */
	if (type != RSVD_BEACON && type != RSVD_DUMMY)
		pkt_info->qsel = TX_DESC_QSEL_MGMT;

	rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb, true);

	bmc = is_broadcast_ether_addr(hdr->addr1) ||
	      is_multicast_ether_addr(hdr->addr1);
	pkt_info->bmc = bmc;
	pkt_info->tx_pkt_size = skb->len;
	pkt_info->offset = chip->tx_pkt_desc_sz;
	pkt_info->ls = true;
	if (type == RSVD_PS_POLL) {
		pkt_info->nav_use_hdr = true;
	} else {
		pkt_info->dis_qselseq = true;
		pkt_info->en_hwseq = true;
		pkt_info->hw_ssn_sel = 0;
	}
	if (type == RSVD_QOS_NULL)
		pkt_info->bt_null = true;

	rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);

	/* TODO: need to change hw port and hw ssn sel for multiple vifs */
}

struct sk_buff *
rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
				struct rtw_tx_pkt_info *pkt_info,
				u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data rsvd page skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON);

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_rsvd_page_get);

struct sk_buff *
rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
			  struct rtw_tx_pkt_info *pkt_info,
			  u8 *buf, u32 size)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb) {
		rtw_err(rtwdev, "failed to alloc write data h2c skb\n");
		return NULL;
	}

	skb_reserve(skb, tx_pkt_desc_sz);
	skb_put_data(skb, buf, size);
	pkt_info->tx_pkt_size = size;

	return skb;
}
EXPORT_SYMBOL(rtw_tx_write_data_h2c_get);
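/*
 * Editor's note: a hedged usage sketch for the two *_get() helpers above;
 * the caller context (buf, size, error handling) is assumed, not taken
 * from this file.  Both helpers allocate size + chip->tx_pkt_desc_sz and
 * skb_reserve() the descriptor size, so a bus backend can later skb_push()
 * the TX descriptor in front of the payload without reallocating.
 */
#if 0
	struct rtw_tx_pkt_info pkt_info = {0};
	struct sk_buff *skb;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;
	/* hand off to the bus-specific writer, which fills the
	 * tx_pkt_desc_sz-byte descriptor into the reserved headroom */
#endif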
%d\n", __func__, ret); #endif return ret; } rtwtxq->last_push = jiffies; return 0; } static struct sk_buff *rtw_txq_dequeue(struct rtw_dev *rtwdev, struct rtw_txq *rtwtxq) { struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); struct sk_buff *skb; skb = ieee80211_tx_dequeue(rtwdev->hw, txq); if (!skb) return NULL; return skb; } static void rtw_txq_push(struct rtw_dev *rtwdev, struct rtw_txq *rtwtxq, unsigned long frames) { struct sk_buff *skb; int ret; int i; rcu_read_lock(); for (i = 0; i < frames; i++) { skb = rtw_txq_dequeue(rtwdev, rtwtxq); if (!skb) break; ret = rtw_txq_push_skb(rtwdev, rtwtxq, skb); if (ret) { +#if defined(__FreeBSD__) + dev_kfree_skb_any(skb); + rtw_err(rtwdev, "failed to push skb, ret %d\n", ret); +#else rtw_err(rtwdev, "failed to pusk skb, ret %d\n", ret); +#endif break; } } rcu_read_unlock(); } void rtw_tx_work(struct work_struct *w) { struct rtw_dev *rtwdev = container_of(w, struct rtw_dev, tx_work); struct rtw_txq *rtwtxq, *tmp; spin_lock_bh(&rtwdev->txq_lock); list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->txqs, list) { struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); unsigned long frame_cnt; unsigned long byte_cnt; ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt); rtw_txq_push(rtwdev, rtwtxq, frame_cnt); list_del_init(&rtwtxq->list); } rtw_hci_tx_kick_off(rtwdev); spin_unlock_bh(&rtwdev->txq_lock); } void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) { struct rtw_txq *rtwtxq; if (!txq) return; rtwtxq = (struct rtw_txq *)txq->drv_priv; INIT_LIST_HEAD(&rtwtxq->list); } void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) { struct rtw_txq *rtwtxq; if (!txq) return; rtwtxq = (struct rtw_txq *)txq->drv_priv; spin_lock_bh(&rtwdev->txq_lock); if (!list_empty(&rtwtxq->list)) list_del_init(&rtwtxq->list); spin_unlock_bh(&rtwdev->txq_lock); }