diff --git a/ahb.c b/ahb.c
index 50809cc1dad4..8dfe9b40c126 100644
--- a/ahb.c
+++ b/ahb.c
@@ -1,1330 +1,1319 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
#include <linux/iommu.h>

#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include "qmi.h"
#include <linux/remoteproc.h>
#include "pcic.h"
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ .compatible = "qcom,ipq5018-wifi",
	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);

#define ATH11K_IRQ_CE0_OFFSET 4

static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

static int ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int
vector) { return ab->pci.msi.irqs[vector]; } static inline u32 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset) { u32 window_start = 0; /* If offset lies within DP register range, use 1st window */ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) window_start = ATH11K_PCI_WINDOW_START; /* If offset lies within CE register range, use 2nd window */ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) < ATH11K_PCI_WINDOW_RANGE_MASK) window_start = 2 * ATH11K_PCI_WINDOW_START; return window_start; } static void ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value) { u32 window_start; /* WCN6750 uses static window based register access*/ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); iowrite32(value, ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset) { u32 window_start; u32 val; /* WCN6750 uses static window based register access */ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); val = ioread32(ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); return val; } static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = { .wakeup = NULL, .release = NULL, .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750, .window_write32 = ath11k_ahb_window_write32_wcn6750, .window_read32 = ath11k_ahb_window_read32_wcn6750, }; static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) { return ioread32(ab->mem + offset); } static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value) { iowrite32(value, ab->mem + offset); } static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; tasklet_kill(&ce_pipe->intr_tq); } } static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) { int i; for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) { int i; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath11k_ahb_ext_grp_disable(irq_grp); if (irq_grp->napi_enabled) { napi_synchronize(&irq_grp->napi); napi_disable(&irq_grp->napi); irq_grp->napi_enabled = false; } } } static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) { int i; for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset) { u32 val; val = ath11k_ahb_read32(ab, offset); ath11k_ahb_write32(ab, offset, val | BIT(bit)); } static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) { u32 val; val = ath11k_ahb_read32(ab, offset); ath11k_ahb_write32(ab, offset, val & ~BIT(bit)); } static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr); if 
(ce_attr->dest_nentries) { ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, ie3_reg_addr); } } static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr); if (ce_attr->dest_nentries) { ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, ie3_reg_addr); } } static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab) { int i; int irq_idx; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; synchronize_irq(ab->irq_num[irq_idx]); } } static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab) { int i, j; int irq_idx; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) { irq_idx = irq_grp->irqs[j]; synchronize_irq(ab->irq_num[irq_idx]); } } } static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_ahb_ce_irq_enable(ab, i); } } static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_ahb_ce_irq_disable(ab, i); } } static int ath11k_ahb_start(struct ath11k_base *ab) { ath11k_ahb_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); return 0; } static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) { int i; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; if (!irq_grp->napi_enabled) { napi_enable(&irq_grp->napi); irq_grp->napi_enabled = true; } ath11k_ahb_ext_grp_enable(irq_grp); } } static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) { __ath11k_ahb_ext_irq_disable(ab); ath11k_ahb_sync_ext_irqs(ab); } static void ath11k_ahb_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_ahb_ce_irqs_disable(ab); ath11k_ahb_sync_ce_irqs(ab); ath11k_ahb_kill_tasklets(ab); timer_delete_sync(&ab->rx_replenish_retry); ath11k_ce_cleanup_pipes(ab); } static int ath11k_ahb_power_up(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); int ret; ret = rproc_boot(ab_ahb->tgt_rproc); if (ret) ath11k_err(ab, "failed to boot the remote processor Q6\n"); return ret; } static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); rproc_shutdown(ab_ahb->tgt_rproc); } static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; cfg->tgt_ce_len = ab->hw_params.target_ce_count; cfg->tgt_ce = ab->hw_params.target_ce_config; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; } static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) { int i, j; for (i 
= 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); free_netdev(irq_grp->napi_ndev); } } static void ath11k_ahb_free_irq(struct ath11k_base *ab) { int irq_idx; int i; if (ab->hw_params.hybrid_bus_type) return ath11k_pcic_free_irq(ab); for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); } ath11k_ahb_free_ext_irq(ab); } static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t) { struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); } static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; /* last interrupt received for this CE */ ce_pipe->timestamp = jiffies; ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); tasklet_schedule(&ce_pipe->intr_tq); return IRQ_HANDLED; } static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget) { struct ath11k_ext_irq_grp *irq_grp = container_of(napi, struct ath11k_ext_irq_grp, napi); struct ath11k_base *ab = irq_grp->ab; int work_done; work_done = ath11k_dp_service_srng(ab, irq_grp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); ath11k_ahb_ext_grp_enable(irq_grp); } if (work_done > budget) work_done = budget; return work_done; } static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg) { struct ath11k_ext_irq_grp *irq_grp = arg; /* last interrupt received for this group */ irq_grp->timestamp = jiffies; ath11k_ahb_ext_grp_disable(irq_grp); napi_schedule(&irq_grp->napi); return IRQ_HANDLED; } static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab) { struct ath11k_hw_params *hw = &ab->hw_params; int i, j; int irq; int ret; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; irq_grp->napi_ndev = alloc_netdev_dummy(0); if (!irq_grp->napi_ndev) return -ENOMEM; netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_ahb_ext_grp_napi_poll); for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) { if (ab->hw_params.ring_mask->tx[i] & BIT(j)) { irq_grp->irqs[num_irq++] = wbm2host_tx_completions_ring1 - j; } if (ab->hw_params.ring_mask->rx[i] & BIT(j)) { irq_grp->irqs[num_irq++] = reo2host_destination_ring1 - j; } if (ab->hw_params.ring_mask->rx_err[i] & BIT(j)) irq_grp->irqs[num_irq++] = reo2host_exception; if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j)) irq_grp->irqs[num_irq++] = wbm2host_rx_release; if (ab->hw_params.ring_mask->reo_status[i] & BIT(j)) irq_grp->irqs[num_irq++] = reo2host_status; if (j < ab->hw_params.max_radios) { if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) { irq_grp->irqs[num_irq++] = rxdma2host_destination_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) { irq_grp->irqs[num_irq++] = host2rxdma_host_buf_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) { irq_grp->irqs[num_irq++] = ppdu_end_interrupts_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); irq_grp->irqs[num_irq++] = rxdma2host_monitor_status_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } 
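			/* The ring-mask walk above maps datapath rings onto
			 * the named lines in irq_name[] by counting backwards
			 * from each block's ring1 entry. As an illustration
			 * only, with a hypothetical tx mask of 0x1 for this
			 * group, j == 0 selects
			 *
			 *	wbm2host_tx_completions_ring1 - 0
			 *
			 * i.e. "wbm2host-tx-completions-ring1", while bit 1
			 * would land on ...ring2 and bit 2 on ...ring3.
			 */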
} } irq_grp->num_irq = num_irq; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); ab->irq_num[irq_idx] = irq; irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler, IRQF_TRIGGER_RISING, irq_name[irq_idx], irq_grp); if (ret) { ath11k_err(ab, "failed request_irq for %d\n", irq); } } } return 0; } static int ath11k_ahb_config_irq(struct ath11k_base *ab) { int irq, irq_idx, i; int ret; if (ab->hw_params.hybrid_bus_type) return ath11k_pcic_config_irq(ab); /* Configure CE irqs */ for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet); irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler, IRQF_TRIGGER_RISING, irq_name[irq_idx], ce_pipe); if (ret) return ret; ab->irq_num[irq_idx] = irq; } /* Configure external interrupts */ ret = ath11k_ahb_config_ext_irq(ab); return ret; } static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; int i; for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { entry = &ab->hw_params.svc_to_ce_map[i]; if (__le32_to_cpu(entry->service_id) != service_id) continue; switch (__le32_to_cpu(entry->pipedir)) { case PIPEDIR_NONE: break; case PIPEDIR_IN: WARN_ON(dl_set); *dl_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; break; case PIPEDIR_OUT: WARN_ON(ul_set); *ul_pipe = __le32_to_cpu(entry->pipenum); ul_set = true; break; case PIPEDIR_INOUT: WARN_ON(dl_set); WARN_ON(ul_set); *dl_pipe = __le32_to_cpu(entry->pipenum); *ul_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; ul_set = true; break; } } if (WARN_ON(!ul_set || !dl_set)) return -ENOENT; return 0; } static int ath11k_ahb_hif_suspend(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); u32 wake_irq; u32 value = 0; int ret; if (!device_may_wakeup(ab->dev)) return -EPERM; wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; ret = enable_irq_wake(wake_irq); if (ret) { ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret); return ret; } value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, ATH11K_AHB_SMP2P_SMEM_SEQ_NO); value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER, ATH11K_AHB_SMP2P_SMEM_MSG); ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); if (ret) { ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n"); return ret; } static int ath11k_ahb_hif_resume(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); u32 wake_irq; u32 value = 0; int ret; if (!device_may_wakeup(ab->dev)) return -EPERM; wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; ret = disable_irq_wake(wake_irq); if (ret) { ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret); return ret; } reinit_completion(&ab->wow.wakeup_completed); value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, ATH11K_AHB_SMP2P_SMEM_SEQ_NO); value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT, ATH11K_AHB_SMP2P_SMEM_MSG); ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, 
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); if (ret) { ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret); return ret; } ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); if (ret == 0) { ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n"); return -ETIMEDOUT; } ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n"); return 0; } static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = { .start = ath11k_ahb_start, .stop = ath11k_ahb_stop, .read32 = ath11k_ahb_read32, .write32 = ath11k_ahb_write32, .read = NULL, .irq_enable = ath11k_ahb_ext_irq_enable, .irq_disable = ath11k_ahb_ext_irq_disable, .map_service_to_pipe = ath11k_ahb_map_service_to_pipe, .power_down = ath11k_ahb_power_down, .power_up = ath11k_ahb_power_up, }; static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = { .start = ath11k_pcic_start, .stop = ath11k_pcic_stop, .read32 = ath11k_pcic_read32, .write32 = ath11k_pcic_write32, .read = NULL, .irq_enable = ath11k_pcic_ext_irq_enable, .irq_disable = ath11k_pcic_ext_irq_disable, .get_msi_address = ath11k_pcic_get_msi_address, .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, .power_down = ath11k_ahb_power_down, .power_up = ath11k_ahb_power_up, .suspend = ath11k_ahb_hif_suspend, .resume = ath11k_ahb_hif_resume, .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq, .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq, }; static int ath11k_core_get_rproc(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct device *dev = ab->dev; struct rproc *prproc; phandle rproc_phandle; if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { ath11k_err(ab, "failed to get q6_rproc handle\n"); return -ENOENT; } prproc = rproc_get_by_phandle(rproc_phandle); if (!prproc) { ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n"); return -EPROBE_DEFER; } ab_ahb->tgt_rproc = prproc; return 0; } static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab) { struct platform_device *pdev = ab->pdev; phys_addr_t msi_addr_pa; dma_addr_t msi_addr_iova; struct resource *res; int int_prop; int ret; int i; ret = ath11k_pcic_init_msi_config(ab); if (ret) { ath11k_err(ab, "failed to init msi config: %d\n", ret); return ret; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ath11k_err(ab, "failed to fetch msi_addr\n"); return -ENOENT; } msi_addr_pa = res->start; msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE, DMA_FROM_DEVICE, 0); if (dma_mapping_error(ab->dev, msi_addr_iova)) return -ENOMEM; ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova); ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova); ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop); if (ret) return ret; ab->pci.msi.ep_base_data = int_prop + 32; for (i = 0; i < ab->pci.msi.config->total_vectors; i++) { ret = platform_get_irq(pdev, i); if (ret < 0) return ret; ab->pci.msi.irqs[i] = ret; } set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); return 0; } static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); if (!ab->hw_params.smp2p_wow_exit) return 0; ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out", &ab_ahb->smp2p_info.smem_bit); if (IS_ERR(ab_ahb->smp2p_info.smem_state)) { ath11k_err(ab, "failed to fetch smem state: %ld\n", PTR_ERR(ab_ahb->smp2p_info.smem_state)); return 
PTR_ERR(ab_ahb->smp2p_info.smem_state);
	}

	return 0;
}

static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return;

	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
}

static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	void __iomem *mem;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_ahb_setup_msi_resources(ab);

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(mem)) {
		dev_err(&pdev->dev, "ioremap error\n");
		return PTR_ERR(mem);
	}

	ab->mem = mem;
	ab->mem_len = resource_size(mem_res);

	return 0;
}

static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
-	struct device_node *node;
	struct resource r;
	int ret;

-	node = of_parse_phandle(dev->of_node, "memory-region", 0);
-	if (!node)
-		return -ENOENT;
-
-	ret = of_address_to_resource(node, 0, &r);
-	of_node_put(node);
+	ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
	if (ret) {
		dev_err(dev, "failed to resolve msa fixed region\n");
		return ret;
	}

	ab_ahb->fw.msa_paddr = r.start;
	ab_ahb->fw.msa_size = resource_size(&r);

-	node = of_parse_phandle(dev->of_node, "memory-region", 1);
-	if (!node)
-		return -ENOENT;
-
-	ret = of_address_to_resource(node, 0, &r);
-	of_node_put(node);
+	ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
	if (ret) {
		dev_err(dev, "failed to resolve ce fixed region\n");
		return ret;
	}

	ab_ahb->fw.ce_paddr = r.start;
	ab_ahb->fw.ce_size = resource_size(&r);

	return 0;
}

static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
{
	const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
	struct platform_device *pdev = ab->pdev;

	if (!ce_remap) {
		/* no separate CE register space */
		ab->mem_ce = ab->mem;
		return 0;
	}

	/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
	 * and the space is not contiguous, hence remapping the CE registers
	 * to a new space for accessing them.
	 */
	ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
	if (!ab->mem_ce) {
		dev_err(&pdev->dev, "ce ioremap error\n");
		return -ENOMEM;
	}

	return 0;
}

static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
{
	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);
}

static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
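	 *
	 * ath11k_ahb_setup_msa_resources() above now resolves both reserved
	 * regions with of_reserved_mem_region_to_resource(), which collapses
	 * the removed sequence (restated from the deleted lines, with "i"
	 * standing for region index 0 (MSA) or 1 (CE)):
	 *
	 *	node = of_parse_phandle(dev->of_node, "memory-region", i);
	 *	ret = of_address_to_resource(node, 0, &r);
	 *	of_node_put(node);
	 *
	 * into a single call; the phandle node never reaches the caller, so
	 * no of_node_put() is needed there.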
*/ if (!ab->hw_params.fixed_fw_mem) return 0; node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); if (!node) { ab_ahb->fw.use_tz = true; return 0; } ret = ath11k_ahb_setup_msa_resources(ab); if (ret) { ath11k_err(ab, "failed to setup msa resources\n"); return ret; } info.fwnode = &node->fwnode; info.parent = host_dev; info.name = node->name; info.dma_mask = DMA_BIT_MASK(32); pdev = platform_device_register_full(&info); if (IS_ERR(pdev)) { of_node_put(node); return PTR_ERR(pdev); } ret = of_dma_configure(&pdev->dev, node, true); if (ret) { ath11k_err(ab, "dma configure fail: %d\n", ret); goto err_unregister; } ab_ahb->fw.dev = &pdev->dev; iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev); if (IS_ERR(iommu_dom)) { ath11k_err(ab, "failed to allocate iommu domain\n"); ret = PTR_ERR(iommu_dom); goto err_unregister; } ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev); if (ret) { ath11k_err(ab, "could not attach device: %d\n", ret); goto err_iommu_free; } ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); if (ret) { ath11k_err(ab, "failed to map firmware region: %d\n", ret); goto err_iommu_detach; } ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); if (ret) { ath11k_err(ab, "failed to map firmware CE region: %d\n", ret); goto err_iommu_unmap; } ab_ahb->fw.use_tz = false; ab_ahb->fw.iommu_domain = iommu_dom; of_node_put(node); return 0; err_iommu_unmap: iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); err_iommu_detach: iommu_detach_device(iommu_dom, ab_ahb->fw.dev); err_iommu_free: iommu_domain_free(iommu_dom); err_unregister: platform_device_unregister(pdev); of_node_put(node); return ret; } static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct iommu_domain *iommu; size_t unmapped_size; /* Chipsets not requiring MSA would have not initialized * MSA resources, return success in such cases. 
*/ if (!ab->hw_params.fixed_fw_mem) return 0; if (ab_ahb->fw.use_tz) return 0; iommu = ab_ahb->fw.iommu_domain; unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); if (unmapped_size != ab_ahb->fw.msa_size) ath11k_err(ab, "failed to unmap firmware: %zu\n", unmapped_size); unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size); if (unmapped_size != ab_ahb->fw.ce_size) ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n", unmapped_size); iommu_detach_device(iommu, ab_ahb->fw.dev); iommu_domain_free(iommu); platform_device_unregister(to_platform_device(ab_ahb->fw.dev)); return 0; } static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; const struct ath11k_hif_ops *hif_ops; const struct ath11k_pci_ops *pci_ops; enum ath11k_hw_rev hw_rev; int ret; hw_rev = (uintptr_t)device_get_match_data(&pdev->dev); switch (hw_rev) { case ATH11K_HW_IPQ8074: case ATH11K_HW_IPQ6018_HW10: case ATH11K_HW_IPQ5018_HW10: hif_ops = &ath11k_ahb_hif_ops_ipq8074; pci_ops = NULL; break; case ATH11K_HW_WCN6750_HW10: hif_ops = &ath11k_ahb_hif_ops_wcn6750; pci_ops = &ath11k_ahb_pci_ops_wcn6750; break; default: dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev); return -EOPNOTSUPP; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n"); return ret; } ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb), ATH11K_BUS_AHB); if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; } ab->hif.ops = hif_ops; ab->pdev = pdev; ab->hw_rev = hw_rev; ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL; platform_set_drvdata(pdev, ab); ret = ath11k_pcic_register_pci_ops(ab, pci_ops); if (ret) { ath11k_err(ab, "failed to register PCI ops: %d\n", ret); goto err_core_free; } ret = ath11k_core_pre_init(ab); if (ret) goto err_core_free; ret = ath11k_ahb_setup_resources(ab); if (ret) goto err_core_free; ret = ath11k_ahb_ce_remap(ab); if (ret) goto err_core_free; ret = ath11k_ahb_fw_resources_init(ab); if (ret) goto err_ce_unmap; ret = ath11k_ahb_setup_smp2p_handle(ab); if (ret) goto err_fw_deinit; ret = ath11k_hal_srng_init(ab); if (ret) goto err_release_smp2p_handle; ret = ath11k_ce_alloc_pipes(ab); if (ret) { ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); goto err_hal_srng_deinit; } ath11k_ahb_init_qmi_ce_config(ab); ret = ath11k_core_get_rproc(ab); if (ret) { ath11k_err(ab, "failed to get rproc: %d\n", ret); goto err_ce_free; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); goto err_ce_free; } ret = ath11k_ahb_config_irq(ab); if (ret) { ath11k_err(ab, "failed to configure irq: %d\n", ret); goto err_ce_free; } ath11k_qmi_fwreset_from_cold_boot(ab); return 0; err_ce_free: ath11k_ce_free_pipes(ab); err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); err_release_smp2p_handle: ath11k_ahb_release_smp2p_handle(ab); err_fw_deinit: ath11k_ahb_fw_resource_deinit(ab); err_ce_unmap: ath11k_ahb_ce_unmap(ab); err_core_free: ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); return ret; } static void ath11k_ahb_remove_prepare(struct ath11k_base *ab) { unsigned long left; if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) { left = wait_for_completion_timeout(&ab->driver_recovery, ATH11K_AHB_RECOVERY_TIMEOUT); if (!left) ath11k_warn(ab, "failed to receive recovery response completion\n"); } set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); cancel_work_sync(&ab->restart_work); 
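	/* wait_for_completion_timeout() above returns the number of jiffies
	 * left on success and 0 on timeout, so a zero "left" means no
	 * recovery response arrived within ATH11K_AHB_RECOVERY_TIMEOUT.
	 * A minimal sketch of the same pattern, with a hypothetical
	 * completion that is not part of this driver:
	 *
	 *	DECLARE_COMPLETION_ONSTACK(done);
	 *
	 *	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(100)))
	 *		pr_warn("timed out\n");
	 */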
cancel_work_sync(&ab->qmi.event_work); } static void ath11k_ahb_free_resources(struct ath11k_base *ab) { struct platform_device *pdev = ab->pdev; ath11k_ahb_free_irq(ab); ath11k_hal_srng_deinit(ab); ath11k_ahb_release_smp2p_handle(ab); ath11k_ahb_fw_resource_deinit(ab); ath11k_ce_free_pipes(ab); ath11k_ahb_ce_unmap(ab); ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); } static void ath11k_ahb_remove(struct platform_device *pdev) { struct ath11k_base *ab = platform_get_drvdata(pdev); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_ahb_power_down(ab, false); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; } ath11k_ahb_remove_prepare(ab); ath11k_core_deinit(ab); qmi_fail: ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } static void ath11k_ahb_shutdown(struct platform_device *pdev) { struct ath11k_base *ab = platform_get_drvdata(pdev); /* platform shutdown() & remove() are mutually exclusive. * remove() is invoked during rmmod & shutdown() during * system reboot/shutdown. */ ath11k_ahb_remove_prepare(ab); if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) goto free_resources; ath11k_core_deinit(ab); free_resources: ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } static struct platform_driver ath11k_ahb_driver = { .driver = { .name = "ath11k", .of_match_table = ath11k_ahb_of_match, }, .probe = ath11k_ahb_probe, .remove = ath11k_ahb_remove, .shutdown = ath11k_ahb_shutdown, }; module_platform_driver(ath11k_ahb_driver); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/ce.c b/ce.c index c65fc9fb539e..a7a163621b21 100644 --- a/ce.c +++ b/ce.c @@ -1,1079 +1,1080 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include #include "dp_rx.h" #include "debug.h" #include "hif.h" const struct ce_attr ath11k_host_ce_config_ipq8074[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE3: host->target WMI (mac0) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 2048, .src_sz_max = 256, .dest_nentries = 0, }, /* CE5: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, }, /* CE6: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE7: host->target WMI (mac1) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE9: host->target WMI (mac2) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE10: target->host HTT */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE11: Not used */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, }; const struct ce_attr ath11k_host_ce_config_qca6390[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE3: host->target WMI (mac0) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 2048, .src_sz_max = 256, .dest_nentries = 0, }, /* CE5: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, }, /* CE6: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE7: host->target WMI (mac1) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, }; const struct ce_attr ath11k_host_ce_config_qcn9074[] = { /* CE0: host->target 
HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;

	return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
-			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+			ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
+				   ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
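	/* CE ring sizes are rounded up to powers of two, so advancing an
	 * index is a mask rather than a modulo. Assuming the usual
	 * definition of the increment helper:
	 *
	 *	#define CE_RING_IDX_INCR(nentries_mask, idx) \
	 *		(((idx) + 1) & (nentries_mask))
	 *
	 * a 512-entry ring has nentries_mask == 511 and wraps 511 -> 0.
	 */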
u32 *desc; int ret = 0; spin_lock_bh(&ab->ce.ce_lock); sw_index = pipe->dest_ring->sw_index; nentries_mask = pipe->dest_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); if (!desc) { ret = -EIO; goto err; } *nbytes = ath11k_hal_ce_dst_status_get_length(desc); *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); pipe->dest_ring->sw_index = sw_index; pipe->rx_buf_needed++; err: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return ret; } static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; struct sk_buff_head list; unsigned int nbytes, max_nbytes; int ret; __skb_queue_head_init(&list); while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) { max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); if (unlikely(max_nbytes < nbytes || nbytes == 0)) { ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; } skb_put(skb, nbytes); __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) { ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->recv_cb(ab, skb); } ret = ath11k_ce_rx_post_pipe(pipe); if (ret && ret != -ENOSPC) { ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", pipe->pipe_num, ret); mod_timer(&ab->rx_replenish_retry, jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES); } } static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct hal_srng *srng; unsigned int sw_index; unsigned int nentries_mask; struct sk_buff *skb; u32 *desc; spin_lock_bh(&ab->ce.ce_lock); sw_index = pipe->src_ring->sw_index; nentries_mask = pipe->src_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_src_reap_next(ab, srng); if (!desc) { skb = ERR_PTR(-EIO); goto err_unlock; } skb = pipe->src_ring->skb[sw_index]; pipe->src_ring->skb[sw_index] = NULL; sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); pipe->src_ring->sw_index = sw_index; err_unlock: spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return skb; } static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; struct sk_buff_head list; __skb_queue_head_init(&list); while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) { if (!skb) continue; dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); if ((!pipe->send_cb) || ab->hw_params.credit_flow) { dev_kfree_skb_any(skb); continue; } __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) { ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->send_cb(ab, skb); } } static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id, struct hal_srng_params *ring_params) { u32 msi_data_start; u32 msi_data_count, msi_data_idx; u32 msi_irq_start; u32 addr_lo; u32 addr_hi; int ret; ret = ath11k_get_user_msi_vector(ab, "CE", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return; ath11k_get_msi_address(ab, &addr_lo, 
&addr_hi); ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx); ring_params->msi_addr = addr_lo; ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start; ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; } static int ath11k_ce_init_ring(struct ath11k_base *ab, struct ath11k_ce_ring *ce_ring, int ce_id, enum hal_ring_type type) { struct hal_srng_params params = {}; int ret; params.ring_base_paddr = ce_ring->base_addr_ce_space; params.ring_base_vaddr = ce_ring->base_addr_owner_space; params.num_entries = ce_ring->nentries; if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags)) ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, ¶ms); switch (type) { case HAL_CE_SRC: if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags)) params.intr_batch_cntr_thres_entries = 1; break; case HAL_CE_DST: params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max; if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { params.intr_timer_thres_us = 1024; params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; params.low_threshold = ce_ring->nentries - 3; } break; case HAL_CE_DST_STATUS: if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { params.intr_batch_cntr_thres_entries = 1; params.intr_timer_thres_us = 0x1000; } break; default: ath11k_warn(ab, "Invalid CE ring type %d\n", type); return -EINVAL; } /* TODO: Init other params needed by HAL to init the ring */ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, ¶ms); if (ret < 0) { ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n", ret, ce_id); return ret; } ce_ring->hal_ring_id = ret; if (ab->hw_params.supports_shadow_regs && ath11k_ce_need_shadow_fix(ce_id)) ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id], ATH11K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id); return 0; } static struct ath11k_ce_ring * ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz) { struct ath11k_ce_ring *ce_ring; dma_addr_t base_addr; ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL); if (ce_ring == NULL) return ERR_PTR(-ENOMEM); ce_ring->nentries = nentries; ce_ring->nentries_mask = nentries - 1; /* Legacy platforms that do not support cache * coherent DMA are unsupported */ ce_ring->base_addr_owner_space_unaligned = dma_alloc_coherent(ab->dev, nentries * desc_sz + CE_DESC_RING_ALIGN, &base_addr, GFP_KERNEL); if (!ce_ring->base_addr_owner_space_unaligned) { kfree(ce_ring); return ERR_PTR(-ENOMEM); } ce_ring->base_addr_ce_space_unaligned = base_addr; ce_ring->base_addr_owner_space = PTR_ALIGN( ce_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); ce_ring->base_addr_ce_space = ALIGN( ce_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); return ce_ring; } static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; struct ath11k_ce_ring *ring; int nentries; int desc_sz; pipe->attr_flags = attr->flags; if (attr->src_nentries) { pipe->send_cb = attr->send_cb; nentries = roundup_pow_of_two(attr->src_nentries); desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->src_ring = ring; } if (attr->dest_nentries) { pipe->recv_cb = attr->recv_cb; nentries = roundup_pow_of_two(attr->dest_nentries); desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); ring = 
ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->dest_ring = ring; desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->status_ring = ring; } return 0; } void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; if (attr->src_nentries) ath11k_ce_tx_process_cb(pipe); if (pipe->recv_cb) ath11k_ce_recv_process_cb(pipe); } void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id]; if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries) ath11k_ce_tx_process_cb(pipe); } EXPORT_SYMBOL(ath11k_ce_per_engine_service); int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id, u16 transfer_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; struct hal_srng *srng; u32 *desc; unsigned int write_index, sw_index; unsigned int nentries_mask; int ret = 0; u8 byte_swap_data = 0; int num_used; /* Check if some entries could be regained by handling tx completion if * the CE has interrupts disabled and the used entries is more than the * defined usage threshold. */ if (pipe->attr_flags & CE_ATTR_DIS_INTR) { spin_lock_bh(&ab->ce.ce_lock); write_index = pipe->src_ring->write_index; sw_index = pipe->src_ring->sw_index; if (write_index >= sw_index) num_used = write_index - sw_index; else num_used = pipe->src_ring->nentries - sw_index + write_index; spin_unlock_bh(&ab->ce.ce_lock); if (num_used > ATH11K_CE_USAGE_THRESHOLD) ath11k_ce_poll_send_completed(ab, pipe->pipe_num); } if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) return -ESHUTDOWN; spin_lock_bh(&ab->ce.ce_lock); write_index = pipe->src_ring->write_index; nentries_mask = pipe->src_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) { ath11k_hal_srng_access_end(ab, srng); ret = -ENOBUFS; goto err_unlock; } desc = ath11k_hal_srng_src_get_next_reaped(ab, srng); if (!desc) { ath11k_hal_srng_access_end(ab, srng); ret = -ENOBUFS; goto err_unlock; } if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA) byte_swap_data = 1; ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr, skb->len, transfer_id, byte_swap_data); pipe->src_ring->skb[write_index] = skb; pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask, write_index); ath11k_hal_srng_access_end(ab, srng); if (ath11k_ce_need_shadow_fix(pipe_id)) ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]); spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return 0; err_unlock: spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return ret; } static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct ath11k_ce_ring *ring = pipe->dest_ring; struct sk_buff *skb; int i; if (!(ring && pipe->buf_sz)) return; for (i = 0; i < ring->nentries; i++) { skb = ring->skb[i]; if (!skb) continue; ring->skb[i] = NULL; dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } } static void ath11k_ce_shadow_config(struct ath11k_base *ab) { int i; for (i = 0; i < 
ab->hw_params.ce_count; i++) { if (ab->hw_params.host_ce_config[i].src_nentries) ath11k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i); if (ab->hw_params.host_ce_config[i].dest_nentries) { ath11k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i); ath11k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i); } } } void ath11k_ce_get_shadow_config(struct ath11k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len) { if (!ab->hw_params.supports_shadow_regs) return; ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); /* shadow is already configured */ if (*shadow_cfg_len) return; /* shadow isn't configured yet, configure now. * non-CE srngs are configured firstly, then * all CE srngs. */ ath11k_hal_srng_shadow_config(ab); ath11k_ce_shadow_config(ab); /* get the shadow configuration */ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); } EXPORT_SYMBOL(ath11k_ce_get_shadow_config); void ath11k_ce_cleanup_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int pipe_num; ath11k_ce_stop_shadow_timers(ab); for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) { pipe = &ab->ce.ce_pipe[pipe_num]; ath11k_ce_rx_pipe_cleanup(pipe); /* Cleanup any src CE's which have interrupts disabled */ ath11k_ce_poll_send_completed(ab, pipe_num); /* NOTE: Should we also clean up tx buffer in all pipes? */ } } EXPORT_SYMBOL(ath11k_ce_cleanup_pipes); void ath11k_ce_rx_post_buf(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; ret = ath11k_ce_rx_post_pipe(pipe); if (ret) { if (ret == -ENOSPC) continue; ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", i, ret); mod_timer(&ab->rx_replenish_retry, jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES); return; } } } EXPORT_SYMBOL(ath11k_ce_rx_post_buf); void ath11k_ce_rx_replenish_retry(struct timer_list *t) { struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry); ath11k_ce_rx_post_buf(ab); } int ath11k_ce_init_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; if (pipe->src_ring) { ret = ath11k_ce_init_ring(ab, pipe->src_ring, i, HAL_CE_SRC); if (ret) { ath11k_warn(ab, "failed to init src ring: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->src_ring->write_index = 0; pipe->src_ring->sw_index = 0; } if (pipe->dest_ring) { ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i, HAL_CE_DST); if (ret) { ath11k_warn(ab, "failed to init dest ring: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->rx_buf_needed = pipe->dest_ring->nentries ? 
pipe->dest_ring->nentries - 2 : 0; pipe->dest_ring->write_index = 0; pipe->dest_ring->sw_index = 0; } if (pipe->status_ring) { ret = ath11k_ce_init_ring(ab, pipe->status_ring, i, HAL_CE_DST_STATUS); if (ret) { ath11k_warn(ab, "failed to init dest status ing: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->status_ring->write_index = 0; pipe->status_ring->sw_index = 0; } } return 0; } void ath11k_ce_free_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; struct ath11k_ce_ring *ce_ring; int desc_sz; int i; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_need_shadow_fix(i)) ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]); if (pipe->src_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ce_ring = pipe->src_ring; dma_free_coherent(ab->dev, pipe->src_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->src_ring); pipe->src_ring = NULL; } if (pipe->dest_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); ce_ring = pipe->dest_ring; dma_free_coherent(ab->dev, pipe->dest_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->dest_ring); pipe->dest_ring = NULL; } if (pipe->status_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); ce_ring = pipe->status_ring; dma_free_coherent(ab->dev, pipe->status_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->status_ring); pipe->status_ring = NULL; } } } EXPORT_SYMBOL(ath11k_ce_free_pipes); int ath11k_ce_alloc_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; const struct ce_attr *attr; spin_lock_init(&ab->ce.ce_lock); for (i = 0; i < ab->hw_params.ce_count; i++) { attr = &ab->hw_params.host_ce_config[i]; pipe = &ab->ce.ce_pipe[i]; pipe->pipe_num = i; pipe->ab = ab; pipe->buf_sz = attr->src_sz_max; ret = ath11k_ce_alloc_pipe(ab, i); if (ret) { /* Free any partial successful allocation */ ath11k_ce_free_pipes(ab); return ret; } } return 0; } EXPORT_SYMBOL(ath11k_ce_alloc_pipes); /* For Big Endian Host, Copy Engine byte_swap is enabled * When Copy Engine does byte_swap, need to byte swap again for the * Host to get/put buffer content in the correct byte order */ void ath11k_ce_byte_swap(void *mem, u32 len) { int i; if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { if (!mem) return; for (i = 0; i < (len / 4); i++) { *(u32 *)mem = swab32(*(u32 *)mem); mem += 4; } } } int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id) { if (ce_id >= ab->hw_params.ce_count) return -EINVAL; return ab->hw_params.host_ce_config[ce_id].flags; } EXPORT_SYMBOL(ath11k_ce_get_attr_flags); diff --git a/core.c b/core.c index d49353b6b2e7..812686173ac8 100644 --- a/core.c +++ b/core.c @@ -1,2677 +1,2715 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include #include #include #include #include #include #include "core.h" #include "dp_tx.h" #include "dp_rx.h" #include "debug.h" #include "hif.h" #include "wow.h" #include "fw.h" unsigned int ath11k_debug_mask; EXPORT_SYMBOL(ath11k_debug_mask); module_param_named(debug_mask, ath11k_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); static unsigned int ath11k_crypto_mode; module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644); MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software"); /* frame mode values are mapped as per enum ath11k_hw_txrx_mode */ unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI; module_param_named(frame_mode, ath11k_frame_mode, uint, 0644); MODULE_PARM_DESC(frame_mode, "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)"); bool ath11k_ftm_mode; module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444); MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode"); static const struct ath11k_hw_params ath11k_hw_params[] = { { .hw_rev = ATH11K_HW_IPQ8074, .name = "ipq8074 hw2.0", .fw = { .dir = "IPQ8074/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &ipq8074_ops, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074, .svc_to_ce_map_len = 21, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 2, /* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes. 
* so added pad size as 2 bytes to compensate the BIN size */ .fft_pad_sz = 2, .summary_pad_sz = 0, .fft_hdr_len = 16, .max_fft_bins = 512, .fragment_160mhz = true, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_dual_stations = false, .pdev_suspend = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, .name = "ipq6018 hw1.0", .fw = { .dir = "IPQ6018/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 2, .bdf_addr = 0x4ABC0000, .hw_ops = &ipq6018_ops, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018, .svc_to_ce_map_len = 19, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 4, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 16, .max_fft_bins = 512, .fragment_160mhz = true, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = 
"qca6390 hw2.0", .hw_rev = ATH11K_HW_QCA6390_HW20, .fw = { .dir = "QCA6390/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &qca6390_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &qca6390_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = NULL, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0171ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "qcn9074 hw1.0", .hw_rev = ATH11K_HW_QCN9074_HW10, .fw = { .dir = "QCN9074/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 1, .single_pdev_only = false, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074, .hw_ops = &qcn9074_ops, .ring_mask = &ath11k_hw_ring_mask_qcn9074, .internal_sleep_clock = false, .regs = &qcn9074_regs, .host_ce_config = ath11k_host_ce_config_qcn9074, .ce_count = 6, .target_ce_config = ath11k_target_ce_config_wlan_qcn9074, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, .svc_to_ce_map_len = 18, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 2, .fft_pad_sz = 0, .summary_pad_sz = 16, .fft_hdr_len = 24, .max_fft_bins = 1024, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = true, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = false, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 2, .num_vdevs = 8, .num_peers = 128, 
.supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = true, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = "wcn6855 hw2.0", .hw_rev = ATH11K_HW_WCN6855_HW20, .fw = { .dir = "WCN6855/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "wcn6855 hw2.1", .hw_rev = ATH11K_HW_WCN6855_HW21, .fw = { .dir = "WCN6855/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = 
ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "wcn6750 hw1.0", .hw_rev = ATH11K_HW_WCN6750_HW10, .fw = { .dir = "WCN6750/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 1, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6750_ops, .ring_mask = &ath11k_hw_ring_mask_wcn6750, .internal_sleep_clock = false, .regs = &wcn6750_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 3, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_wcn6750, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = false, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support 
= false, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = true, .fixed_fw_mem = true, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = {}, .tcl_ring_retry = false, .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750, .smp2p_wow_exit = true, .support_fw_mac_sequence = true, .support_dual_stations = false, .pdev_suspend = true, }, { .hw_rev = ATH11K_HW_IPQ5018_HW10, .name = "ipq5018 hw1.0", .fw = { .dir = "IPQ5018/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = MAX_RADIOS_5018, .bdf_addr = 0x4BA00000, /* hal_desc_sz and hw ops are similar to qcn9074 */ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .credit_flow = false, .max_tx_ring = 1, .spectral = { .fft_sz = 2, .fft_pad_sz = 0, .summary_pad_sz = 16, .fft_hdr_len = 24, .max_fft_bins = 1024, }, .internal_sleep_clock = false, .regs = &ipq5018_regs, .hw_ops = &ipq5018_ops, .host_ce_config = ath11k_host_ce_config_qcn9074, .ce_count = CE_CNT_5018, .target_ce_config = ath11k_target_ce_config_wlan_ipq5018, .target_ce_count = TARGET_CE_CNT_5018, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018, .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018, .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018, .ce_remap = &ath11k_ce_remap_ipq5018, .rxdma1_enable = true, .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .supports_sta_ps = false, .supports_shadow_regs = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_regdb = false, .idle_ps = false, .supports_suspend = false, .hal_params = &ath11k_hw_hal_params_ipq8074, .single_pdev_only = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fix_l1ss = true, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = "qca2066 hw2.1", .hw_rev = ATH11K_HW_QCA2066_HW21, .fw = { .dir = "QCA2066/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, 
.max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, }, { .name = "qca6698aq hw2.1", .hw_rev = ATH11K_HW_QCA6698AQ_HW21, .fw = { .dir = "QCA6698AQ/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = 
true, .support_dual_stations = true, .pdev_suspend = false, }, }; static const struct dmi_system_id ath11k_pm_quirk_table[] = { { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* X13 G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21J3"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* X13 G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T14 G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K3"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T14 G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* P14s G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K5"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* P14s G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T16 G2 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K7"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T16 G2 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* P16s G2 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K9"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* P16s G2 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T14s G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21F8"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T14s G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), }, }, {} }; void ath11k_fw_stats_pdevs_free(struct list_head *head) { struct ath11k_fw_stats_pdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_vdevs_free(struct list_head *head) { struct ath11k_fw_stats_vdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_bcn_free(struct list_head *head) { struct ath11k_fw_stats_bcn *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_init(struct ath11k *ar) { INIT_LIST_HEAD(&ar->fw_stats.pdevs); INIT_LIST_HEAD(&ar->fw_stats.vdevs); INIT_LIST_HEAD(&ar->fw_stats.bcn); init_completion(&ar->fw_stats_complete); init_completion(&ar->fw_stats_done); } void ath11k_fw_stats_free(struct ath11k_fw_stats *stats) { ath11k_fw_stats_pdevs_free(&stats->pdevs); ath11k_fw_stats_vdevs_free(&stats->vdevs); ath11k_fw_stats_bcn_free(&stats->bcn); } bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab) { if (!ath11k_cold_boot_cal) return false; if (ath11k_ftm_mode) return ab->hw_params.coldboot_cal_ftm; else return ab->hw_params.coldboot_cal_mm; } /* Check if we need to continue with suspend/resume operation. * Return: * a negative value: error happens and don't continue. * 0: no error but don't continue. * positive value: no error and do continue. 
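 *
 * All callers below follow the same pattern; an illustrative sketch of
 * this convention (mirroring ath11k_core_suspend() and friends):
 *
 *	ret = ath11k_core_continue_suspend_resume(ab);
 *	if (ret <= 0)
 *		return ret;	// error, or nothing to do
 *	// positive: proceed with the actual suspend/resume work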
*/ static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab) { struct ath11k *ar; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; /* so far single_pdev_only chips have supports_suspend as true * so pass 0 as a dummy pdev_id here. */ ar = ab->pdevs[0].ar; if (!ar || ar->state != ATH11K_STATE_OFF) return 0; return 1; } static int ath11k_core_suspend_wow(struct ath11k_base *ab) { int ret; ret = ath11k_dp_rx_pktlog_stop(ab, true); if (ret) { ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", ret); return ret; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev. */ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar); if (ret) { ath11k_warn(ab, "failed to wait tx complete: %d\n", ret); return ret; } ret = ath11k_wow_enable(ab); if (ret) { ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); return ret; } ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", ret); return ret; } ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ret = ath11k_hif_suspend(ab); if (ret) { ath11k_warn(ab, "failed to suspend hif: %d\n", ret); return ret; } return 0; } static int ath11k_core_suspend_default(struct ath11k_base *ab) { int ret; ret = ath11k_dp_rx_pktlog_stop(ab, true); if (ret) { ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", ret); return ret; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev. */ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar); if (ret) { ath11k_warn(ab, "failed to wait tx complete: %d\n", ret); return ret; } ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", ret); return ret; } ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); /* The PM framework skips the suspend_late/resume_early callbacks * if other devices report errors in their suspend callbacks. * However, ath11k_core_resume() would still be called, because we * return success here and the kernel therefore keeps us on the * dpm_suspended_list. Since we won't go through a power down/up * cycle, there is no chance for complete(&ab->restart_completed) * to be called in ath11k_core_restart(), making ath11k_core_resume() * time out. So call it here to avoid this issue. This is also safe * when no error happens and suspend_late/resume_early do get called, * because the completion is reinitialized in ath11k_core_resume_early().
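 *
 * Sketch of the completion handshake across the two paths, using only
 * the functions defined in this file:
 *
 *   suspend (default policy):   complete(&ab->restart_completed)
 *   ath11k_core_resume_early(): reinit_completion(&ab->restart_completed),
 *                               then ath11k_hif_power_up()
 *   resume (default policy):    wait_for_completion_timeout(
 *                                   &ab->restart_completed, ...)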
*/ complete(&ab->restart_completed); return 0; } int ath11k_core_suspend(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return ath11k_core_suspend_wow(ab); return ath11k_core_suspend_default(ab); } EXPORT_SYMBOL(ath11k_core_suspend); int ath11k_core_suspend_late(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return 0; ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ath11k_hif_power_down(ab, true); return 0; } EXPORT_SYMBOL(ath11k_core_suspend_late); int ath11k_core_resume_early(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return 0; reinit_completion(&ab->restart_completed); ret = ath11k_hif_power_up(ab); if (ret) ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret); return ret; } EXPORT_SYMBOL(ath11k_core_resume_early); static int ath11k_core_resume_default(struct ath11k_base *ab) { struct ath11k *ar; long time_left; int ret; time_left = wait_for_completion_timeout(&ab->restart_completed, ATH11K_RESET_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ab, "timeout while waiting for restart complete"); return -ETIMEDOUT; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev. */ ar = ab->pdevs[0].ar; if (ab->hw_params.current_cc_support && ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { ret = ath11k_reg_set_cc(ar); if (ret) { ath11k_warn(ab, "failed to set country code during resume: %d\n", ret); return ret; } } ret = ath11k_dp_rx_pktlog_start(ab); if (ret) ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); return ret; } static int ath11k_core_resume_wow(struct ath11k_base *ab) { int ret; ret = ath11k_hif_resume(ab); if (ret) { ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); return ret; } ath11k_hif_ce_irq_enable(ab); ath11k_hif_irq_enable(ab); ret = ath11k_dp_rx_pktlog_start(ab); if (ret) { ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); return ret; } ret = ath11k_wow_wakeup(ab); if (ret) { ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret); return ret; } return 0; } int ath11k_core_resume(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return ath11k_core_resume_wow(ab); return ath11k_core_resume_default(ab); } EXPORT_SYMBOL(ath11k_core_resume); static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data) { struct ath11k_base *ab = data; const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC; struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr; ssize_t copied; size_t len; int i; if (ab->qmi.target.bdf_ext[0] != '\0') return; if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE) return; if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "wrong smbios bdf ext type length (%d).\n", hdr->length); return; } spin_lock_bh(&ab->base_lock); switch (smbios->country_code_flag) { case ATH11K_SMBIOS_CC_ISO: ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff; ab->new_alpha2[1] = smbios->cc_code & 0xff; ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios cc_code %c%c\n", ab->new_alpha2[0], ab->new_alpha2[1]); break; case ATH11K_SMBIOS_CC_WW: ab->new_alpha2[0] = '0'; ab->new_alpha2[1] = '0'; 
ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios worldwide regdomain\n"); break; default: ath11k_dbg(ab, ATH11K_DBG_BOOT, "ignore smbios country code setting %d\n", smbios->country_code_flag); break; } spin_unlock_bh(&ab->base_lock); if (!smbios->bdf_enabled) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n"); return; } /* Only one string exists (per spec) */ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant magic does not match.\n"); return; } len = min_t(size_t, strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); for (i = 0; i < len; i++) { if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name contains non ascii chars.\n"); return; } } /* Copy extension name without magic prefix */ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), sizeof(ab->qmi.target.bdf_ext)); if (copied < 0) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate\n"); return; } ath11k_dbg(ab, ATH11K_DBG_BOOT, "found and validated bdf variant smbios_type 0x%x bdf %s\n", ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext); } int ath11k_core_check_smbios(struct ath11k_base *ab) { ab->qmi.target.bdf_ext[0] = '\0'; dmi_walk(ath11k_core_check_cc_code_bdfext, ab); if (ab->qmi.target.bdf_ext[0] == '\0') return -ENODATA; return 0; } int ath11k_core_check_dt(struct ath11k_base *ab) { size_t max_len = sizeof(ab->qmi.target.bdf_ext); const char *variant = NULL; struct device_node *node; node = ab->dev->of_node; if (!node) return -ENOENT; of_property_read_string(node, "qcom,calibration-variant", &variant); if (!variant) of_property_read_string(node, "qcom,ath11k-calibration-variant", &variant); if (!variant) return -ENODATA; if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0) ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", variant); return 0; } enum ath11k_bdf_name_type { ATH11K_BDF_NAME_FULL, ATH11K_BDF_NAME_BUS_NAME, ATH11K_BDF_NAME_CHIP_ID, }; static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len, bool with_variant, enum ath11k_bdf_name_type name_type) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = {}; if (with_variant && ab->qmi.target.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ab->qmi.target.bdf_ext); switch (ab->id.bdf_search) { case ATH11K_BDF_SEARCH_BUS_AND_BOARD: switch (name_type) { case ATH11K_BDF_NAME_FULL: scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), ab->id.vendor, ab->id.device, ab->id.subsystem_vendor, ab->id.subsystem_device, ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); break; case ATH11K_BDF_NAME_BUS_NAME: scnprintf(name, name_len, "bus=%s", ath11k_bus_str(ab->hif.bus)); break; case ATH11K_BDF_NAME_CHIP_ID: scnprintf(name, name_len, "bus=%s,qmi-chip-id=%d", ath11k_bus_str(ab->hif.bus), ab->qmi.target.chip_id); break; } break; default: scnprintf(name, name_len, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); break; } ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board name '%s'\n", name); return 0; } static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return 
__ath11k_core_create_board_name(ab, name, name_len, true, ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_BUS_NAME); } static int ath11k_core_create_chip_id_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_CHIP_ID); } const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *file) { const struct firmware *fw; char path[100]; int ret; if (file == NULL) return ERR_PTR(-ENOENT); ath11k_core_create_firmware_path(ab, file, path, sizeof(path)); ret = firmware_request_nowarn(&fw, path, ab->dev); if (ret) return ERR_PTR(ret); ath11k_dbg(ab, ATH11K_DBG_BOOT, "firmware request %s size %zu\n", path, fw->size); return fw; } void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { if (!IS_ERR(bd->fw)) release_firmware(bd->fw); memset(bd, 0, sizeof(*bd)); } static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, struct ath11k_board_data *bd, const void *buf, size_t buf_len, const char *boardname, int ie_id, int name_id, int data_id) { const struct ath11k_fw_ie *hdr; bool name_match_found; int ret, board_ie_id; size_t board_ie_len; const void *board_ie_data; name_match_found = false; /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */ while (buf_len > sizeof(struct ath11k_fw_ie)) { hdr = buf; board_ie_id = le32_to_cpu(hdr->id); board_ie_len = le32_to_cpu(hdr->len); board_ie_data = hdr->data; buf_len -= sizeof(*hdr); buf += sizeof(*hdr); if (buf_len < ALIGN(board_ie_len, 4)) { ath11k_err(ab, "invalid %s length: %zu < %zu\n", ath11k_bd_ie_type_str(ie_id), buf_len, ALIGN(board_ie_len, 4)); ret = -EINVAL; goto out; } if (board_ie_id == name_id) { ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "", board_ie_data, board_ie_len); if (board_ie_len != strlen(boardname)) goto next; ret = memcmp(board_ie_data, boardname, strlen(boardname)); if (ret) goto next; name_match_found = true; ath11k_dbg(ab, ATH11K_DBG_BOOT, "found match %s for name '%s'", ath11k_bd_ie_type_str(ie_id), boardname); } else if (board_ie_id == data_id) { if (!name_match_found) /* no match found */ goto next; ath11k_dbg(ab, ATH11K_DBG_BOOT, "found %s for '%s'", ath11k_bd_ie_type_str(ie_id), boardname); bd->data = board_ie_data; bd->len = board_ie_len; ret = 0; goto out; } else { ath11k_warn(ab, "unknown %s id found: %d\n", ath11k_bd_ie_type_str(ie_id), board_ie_id); } next: /* jump over the padding */ board_ie_len = ALIGN(board_ie_len, 4); buf_len -= board_ie_len; buf += board_ie_len; } /* no match found */ ret = -ENOENT; out: return ret; } static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, struct ath11k_board_data *bd, const char *boardname, int ie_id_match, int name_id, int data_id) { size_t len, magic_len; const u8 *data; char *filename, filepath[100]; size_t ie_len; struct ath11k_fw_ie *hdr; int ret, ie_id; filename = ATH11K_BOARD_API2_FILE; if (!bd->fw) bd->fw = ath11k_core_firmware_request(ab, filename); if (IS_ERR(bd->fw)) return PTR_ERR(bd->fw); data = bd->fw->data; len = bd->fw->size; ath11k_core_create_firmware_path(ab, filename, filepath, sizeof(filepath)); /* magic has extra null byte 
padded */ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1; if (len < magic_len) { ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n", filepath, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) { ath11k_err(ab, "found invalid board magic\n"); ret = -EINVAL; goto err; } /* magic is padded to 4 bytes */ magic_len = ALIGN(magic_len, 4); if (len < magic_len) { ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n", filepath, len); ret = -EINVAL; goto err; } data += magic_len; len -= magic_len; while (len > sizeof(struct ath11k_fw_ie)) { hdr = (struct ath11k_fw_ie *)data; ie_id = le32_to_cpu(hdr->id); ie_len = le32_to_cpu(hdr->len); len -= sizeof(*hdr); data = hdr->data; if (len < ALIGN(ie_len, 4)) { ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n", ie_id, ie_len, len); ret = -EINVAL; goto err; } if (ie_id == ie_id_match) { ret = ath11k_core_parse_bd_ie_board(ab, bd, data, ie_len, boardname, ie_id_match, name_id, data_id); if (ret == -ENOENT) /* no match found, continue */ goto next; else if (ret) /* there was an error, bail out */ goto err; /* either found or error, so stop searching */ goto out; } next: /* jump over the padding */ ie_len = ALIGN(ie_len, 4); len -= ie_len; data += ie_len; } out: if (!bd->data || !bd->len) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s for %s from %s\n", ath11k_bd_ie_type_str(ie_id_match), boardname, filepath); ret = -ENODATA; goto err; } return 0; err: ath11k_core_free_bdf(ab, bd); return ret; } int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, struct ath11k_board_data *bd, const char *name) { bd->fw = ath11k_core_firmware_request(ab, name); if (IS_ERR(bd->fw)) return PTR_ERR(bd->fw); bd->data = bd->fw->data; bd->len = bd->fw->size; return 0; } #define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { char *boardname = NULL, *fallback_boardname = NULL, *chip_id_boardname = NULL; char *filename, filepath[100]; int bd_api; int ret = 0; filename = ATH11K_BOARD_API2_FILE; boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create board name: %d", ret); goto exit; } bd_api = 2; ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; fallback_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!fallback_boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create fallback board name: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; chip_id_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!chip_id_boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_chip_id_board_name(ab, chip_id_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create chip id board name: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, chip_id_boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; bd_api = 1; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { ath11k_core_create_firmware_path(ab, filename, filepath, 
sizeof(filepath)); ath11k_err(ab, "failed to fetch board data for %s from %s\n", boardname, filepath); if (memcmp(boardname, fallback_boardname, strlen(boardname))) ath11k_err(ab, "failed to fetch board data for %s from %s\n", fallback_boardname, filepath); ath11k_err(ab, "failed to fetch board data for %s from %s\n", chip_id_boardname, filepath); ath11k_err(ab, "failed to fetch board.bin from %s\n", ab->hw_params.fw.dir); } exit: kfree(boardname); kfree(fallback_boardname); kfree(chip_id_boardname); if (!ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", bd_api); return ret; } int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) { char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE]; int ret; ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to create board name for regdb: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_REGDB, ATH11K_BD_IE_REGDB_NAME, ATH11K_BD_IE_REGDB_DATA); if (!ret) goto exit; ret = ath11k_core_create_bus_type_board_name(ab, default_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to create default board name for regdb: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname, ATH11K_BD_IE_REGDB, ATH11K_BD_IE_REGDB_NAME, ATH11K_BD_IE_REGDB_DATA); if (!ret) goto exit; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME); if (ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n", ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir); exit: if (!ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n"); return ret; } static void ath11k_core_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_qmi_firmware_stop(ab); ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); /* De-Init of components as needed */ } static int ath11k_core_soc_create(struct ath11k_base *ab) { int ret; if (ath11k_ftm_mode) { ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM; ath11k_info(ab, "Booting in factory test mode\n"); } ret = ath11k_qmi_init_service(ab); if (ret) { ath11k_err(ab, "failed to initialize qmi: %d\n", ret); return ret; } ret = ath11k_debugfs_soc_create(ab); if (ret) { ath11k_err(ab, "failed to create ath11k debugfs\n"); goto err_qmi_deinit; } ret = ath11k_hif_power_up(ab); if (ret) { ath11k_err(ab, "failed to power up: %d\n", ret); goto err_debugfs_reg; } return 0; err_debugfs_reg: ath11k_debugfs_soc_destroy(ab); err_qmi_deinit: ath11k_qmi_deinit_service(ab); return ret; } static void ath11k_core_soc_destroy(struct ath11k_base *ab) { ath11k_debugfs_soc_destroy(ab); ath11k_dp_free(ab); ath11k_reg_free(ab); ath11k_qmi_deinit_service(ab); } static int ath11k_core_pdev_create(struct ath11k_base *ab) { int ret; ret = ath11k_debugfs_pdev_create(ab); if (ret) { ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret); return ret; } ret = ath11k_dp_pdev_alloc(ab); if (ret) { ath11k_err(ab, "failed to attach DP pdev: %d\n", ret); goto err_pdev_debug; } ret = ath11k_mac_register(ab); if (ret) { ath11k_err(ab, "failed to register the radio with mac80211: %d\n", ret); goto err_dp_pdev_free; } ret = ath11k_thermal_register(ab); if (ret) { ath11k_err(ab, "could not register thermal device: %d\n", ret); goto err_mac_unregister; } ret = ath11k_spectral_init(ab); if (ret) { ath11k_err(ab, "failed to init spectral: %d\n", ret); goto err_thermal_unregister; } return 0;
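	/* Error unwind: each label below releases what was set up before
	 * the failing step, in reverse order of creation
	 * (thermal -> mac -> dp pdev -> pdev debugfs).
	 */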
err_thermal_unregister: ath11k_thermal_unregister(ab); err_mac_unregister: ath11k_mac_unregister(ab); err_dp_pdev_free: ath11k_dp_pdev_free(ab); err_pdev_debug: ath11k_debugfs_pdev_destroy(ab); return ret; } static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; unsigned long time_left; int ret; int i; if (!ab->hw_params.pdev_suspend) return; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; reinit_completion(&ab->htc_suspend); ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR, pdev->pdev_id); if (ret) { ath11k_warn(ab, "could not suspend target: %d\n", ret); /* pointless to try other pdevs */ return; } time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); if (!time_left) { ath11k_warn(ab, "suspend timed out - target pause event never came\n"); /* pointless to try other pdevs */ return; } } } static void ath11k_core_pdev_destroy(struct ath11k_base *ab) { ath11k_spectral_deinit(ab); ath11k_thermal_unregister(ab); ath11k_mac_unregister(ab); ath11k_core_pdev_suspend_target(ab); ath11k_hif_irq_disable(ab); ath11k_dp_pdev_free(ab); ath11k_debugfs_pdev_destroy(ab); } static int ath11k_core_start(struct ath11k_base *ab) { int ret; ret = ath11k_wmi_attach(ab); if (ret) { ath11k_err(ab, "failed to attach wmi: %d\n", ret); return ret; } ret = ath11k_htc_init(ab); if (ret) { ath11k_err(ab, "failed to init htc: %d\n", ret); goto err_wmi_detach; } ret = ath11k_hif_start(ab); if (ret) { ath11k_err(ab, "failed to start HIF: %d\n", ret); goto err_wmi_detach; } ret = ath11k_htc_wait_target(&ab->htc); if (ret) { ath11k_err(ab, "failed to connect to HTC: %d\n", ret); goto err_hif_stop; } ret = ath11k_dp_htt_connect(&ab->dp); if (ret) { ath11k_err(ab, "failed to connect to HTT: %d\n", ret); goto err_hif_stop; } ret = ath11k_wmi_connect(ab); if (ret) { ath11k_err(ab, "failed to connect wmi: %d\n", ret); goto err_hif_stop; } ret = ath11k_htc_start(&ab->htc); if (ret) { ath11k_err(ab, "failed to start HTC: %d\n", ret); goto err_hif_stop; } ret = ath11k_wmi_wait_for_service_ready(ab); if (ret) { ath11k_err(ab, "failed to receive wmi service ready event: %d\n", ret); goto err_hif_stop; } ret = ath11k_mac_allocate(ab); if (ret) { ath11k_err(ab, "failed to create new hw device with mac80211: %d\n", ret); goto err_hif_stop; } ath11k_dp_pdev_pre_alloc(ab); ret = ath11k_dp_pdev_reo_setup(ab); if (ret) { ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret); goto err_mac_destroy; } ret = ath11k_wmi_cmd_init(ab); if (ret) { ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret); goto err_reo_cleanup; } ret = ath11k_wmi_wait_for_unified_ready(ab); if (ret) { ath11k_err(ab, "failed to receive wmi unified ready event: %d\n", ret); goto err_reo_cleanup; } /* put hardware into DBS mode */ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) { ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); if (ret) { ath11k_err(ab, "failed to send dbs mode: %d\n", ret); goto err_hif_stop; } } ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab); if (ret) { ath11k_err(ab, "failed to send htt version request message: %d\n", ret); goto err_reo_cleanup; } return 0; err_reo_cleanup: ath11k_dp_pdev_reo_cleanup(ab); err_mac_destroy: ath11k_mac_destroy(ab); err_hif_stop: ath11k_hif_stop(ab); err_wmi_detach: ath11k_wmi_detach(ab); return ret; } static int ath11k_core_start_firmware(struct ath11k_base *ab, enum ath11k_firmware_mode mode) { int ret; ath11k_ce_get_shadow_config(ab,
&ab->qmi.ce_cfg.shadow_reg_v2, &ab->qmi.ce_cfg.shadow_reg_v2_len); ret = ath11k_qmi_firmware_start(ab, mode); if (ret) { ath11k_err(ab, "failed to send firmware start: %d\n", ret); return ret; } return ret; } int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) { int ret; switch (ath11k_crypto_mode) { case ATH11K_CRYPT_MODE_SW: set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); break; case ATH11K_CRYPT_MODE_HW: clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); break; default: ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode); return -EINVAL; } ret = ath11k_core_start_firmware(ab, ab->fw_mode); if (ret) { ath11k_err(ab, "failed to start firmware: %d\n", ret); return ret; } ret = ath11k_ce_init_pipes(ab); if (ret) { ath11k_err(ab, "failed to initialize CE: %d\n", ret); goto err_firmware_stop; } ret = ath11k_dp_alloc(ab); if (ret) { ath11k_err(ab, "failed to init DP: %d\n", ret); goto err_firmware_stop; } if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW) set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); mutex_lock(&ab->core_lock); ret = ath11k_core_start(ab); if (ret) { ath11k_err(ab, "failed to start core: %d\n", ret); goto err_dp_free; } ret = ath11k_core_pdev_create(ab); if (ret) { ath11k_err(ab, "failed to create pdev core: %d\n", ret); goto err_core_stop; } ath11k_hif_irq_enable(ab); mutex_unlock(&ab->core_lock); return 0; err_core_stop: ath11k_core_stop(ab); ath11k_mac_destroy(ab); err_dp_free: ath11k_dp_free(ab); mutex_unlock(&ab->core_lock); err_firmware_stop: ath11k_qmi_firmware_stop(ab); return ret; } static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) { int ret; mutex_lock(&ab->core_lock); ath11k_thermal_unregister(ab); ath11k_dp_pdev_free(ab); ath11k_spectral_deinit(ab); ath11k_ce_cleanup_pipes(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); mutex_unlock(&ab->core_lock); ath11k_dp_free(ab); - ath11k_hal_srng_deinit(ab); + ath11k_hal_srng_clear(ab); ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; - ret = ath11k_hal_srng_init(ab); - if (ret) - return ret; - clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); if (ret) goto err_hal_srng_deinit; clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); return 0; err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); return ret; } void ath11k_core_halt(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct list_head *pos, *n; lockdep_assert_held(&ar->conf_mutex); ar->num_created_vdevs = 0; ar->allocated_vdev_map = 0; ath11k_mac_scan_finish(ar); ath11k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ab->update_11d_work); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); spin_lock_bh(&ar->data_lock); list_for_each_safe(pos, n, &ar->arvifs) list_del_init(pos); spin_unlock_bh(&ar->data_lock); idr_init(&ar->txmgmt_idr); } static void ath11k_update_11d(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work); struct ath11k *ar; struct ath11k_pdev *pdev; int ret, i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; spin_lock_bh(&ab->base_lock); memcpy(&ar->alpha2, &ab->new_alpha2, 2); spin_unlock_bh(&ab->base_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n", ar->alpha2[0], 
ar->alpha2[1], i); ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "pdev id %d failed to set current country code: %d\n", i, ret); } } void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; spin_lock_bh(&ab->base_lock); ab->stats.fw_crash_counter++; spin_unlock_bh(&ab->base_lock); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar || ar->state == ATH11K_STATE_OFF || ar->state == ATH11K_STATE_FTM) continue; ieee80211_stop_queues(ar->hw); ath11k_mac_drain_tx(ar); ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); complete(&ar->scan.started); complete_all(&ar->scan.completed); complete(&ar->scan.on_channel); complete(&ar->peer_assoc_done); complete(&ar->peer_delete_done); complete(&ar->install_key_done); complete(&ar->vdev_setup_done); complete(&ar->vdev_delete_done); complete(&ar->bss_survey_done); complete(&ar->thermal.wmi_sync); wake_up(&ar->dp.tx_empty_waitq); idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); wake_up(&ar->txmgmt_empty_waitq); ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); } wake_up(&ab->wmi_ab.tx_credits_wq); wake_up(&ab->peer_mapping_wq); reinit_completion(&ab->driver_recovery); } static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar || ar->state == ATH11K_STATE_OFF) continue; mutex_lock(&ar->conf_mutex); switch (ar->state) { case ATH11K_STATE_ON: ar->state = ATH11K_STATE_RESTARTING; ath11k_core_halt(ar); ieee80211_restart_hw(ar->hw); break; case ATH11K_STATE_OFF: ath11k_warn(ab, "cannot restart radio %d that hasn't been started\n", i); break; case ATH11K_STATE_RESTARTING: break; case ATH11K_STATE_RESTARTED: ar->state = ATH11K_STATE_WEDGED; fallthrough; case ATH11K_STATE_WEDGED: ath11k_warn(ab, "device is wedged, will not restart radio %d\n", i); break; case ATH11K_STATE_FTM: ath11k_dbg(ab, ATH11K_DBG_TESTMODE, "fw mode reset done radio %d\n", i); break; } mutex_unlock(&ar->conf_mutex); } complete(&ab->driver_recovery); } static void ath11k_core_restart(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); int ret; ret = ath11k_core_reconfigure_on_crash(ab); if (ret) { ath11k_err(ab, "failed to reconfigure driver on crash recovery\n"); return; } if (ab->is_reset) complete_all(&ab->reconfigure_complete); if (!ab->is_reset) ath11k_core_post_reconfigure_recovery(ab); complete(&ab->restart_completed); } static void ath11k_core_reset(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work); int reset_count, fail_cont_count; long time_left; if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) { ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags); return; } /* Sometimes a recovery attempt fails and then every subsequent attempt * also fails; bail out here to avoid an endless recovery loop that can * never succeed.
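 *
 * Gating sketch (the thresholds are the ATH11K_RESET_MAX_FAIL_COUNT_*
 * constants checked right below):
 *
 *   fail_cont_count >= FINAL                        -> give up for good
 *   fail_cont_count >= FIRST && reset_fail_timeout
 *   not yet expired                                 -> skip this attempt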
*/ fail_cont_count = atomic_read(&ab->fail_cont_count); if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL) return; if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST && time_before(jiffies, ab->reset_fail_timeout)) return; reset_count = atomic_inc_return(&ab->reset_count); if (reset_count > 1) { /* Another reset worker can be scheduled before the previous one has * completed; the new worker would then destroy state the previous one * is still using, so wait for the previous reset to finish here. */ ath11k_warn(ab, "already resetting count %d\n", reset_count); reinit_completion(&ab->reset_complete); time_left = wait_for_completion_timeout(&ab->reset_complete, ATH11K_RESET_TIMEOUT_HZ); if (time_left) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "skipping reset\n"); atomic_dec(&ab->reset_count); return; } ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ; /* Record the continuous recovery failure count when recovery fails */ atomic_inc(&ab->fail_cont_count); } ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; atomic_set(&ab->recovery_count, 0); reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_start_count, 0); ath11k_coredump_collect(ab); ath11k_core_pre_reconfigure_recovery(ab); reinit_completion(&ab->reconfigure_complete); ath11k_core_post_reconfigure_recovery(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n"); time_left = wait_for_completion_timeout(&ab->recovery_start, ATH11K_RECOVER_START_TIMEOUT_HZ); ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); } static int ath11k_init_hw_params(struct ath11k_base *ab) { const struct ath11k_hw_params *hw_params = NULL; int i; for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) { hw_params = &ath11k_hw_params[i]; if (hw_params->hw_rev == ab->hw_rev) break; } if (i == ARRAY_SIZE(ath11k_hw_params)) { ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev); return -EINVAL; } ab->hw_params = *hw_params; ath11k_info(ab, "%s\n", ab->hw_params.name); return 0; } int ath11k_core_pre_init(struct ath11k_base *ab) { int ret; ret = ath11k_init_hw_params(ab); if (ret) { ath11k_err(ab, "failed to get hw params: %d\n", ret); return ret; } ret = ath11k_fw_pre_init(ab); if (ret) { ath11k_err(ab, "failed to pre init firmware: %d\n", ret); return ret; } return 0; } EXPORT_SYMBOL(ath11k_core_pre_init); static int ath11k_core_pm_notify(struct notifier_block *nb, unsigned long action, void *nouse) { struct ath11k_base *ab = container_of(nb, struct ath11k_base, pm_nb); switch (action) { case PM_SUSPEND_PREPARE: ab->actual_pm_policy = ab->pm_policy; break; case PM_HIBERNATION_PREPARE: ab->actual_pm_policy = ATH11K_PM_DEFAULT; break; default: break; } return NOTIFY_OK; } static int ath11k_core_pm_notifier_register(struct ath11k_base *ab) { ab->pm_nb.notifier_call = ath11k_core_pm_notify; return register_pm_notifier(&ab->pm_nb); } void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab) { int ret; ret = unregister_pm_notifier(&ab->pm_nb); if (ret) /* just warn here, there is nothing more we can do on failure */ ath11k_warn(ab, "failed to unregister PM notifier: %d\n", ret); } EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister); int ath11k_core_init(struct ath11k_base *ab) { const struct dmi_system_id *dmi_id; int ret; dmi_id = dmi_first_match(ath11k_pm_quirk_table); if (dmi_id) ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data; else ab->pm_policy = ATH11K_PM_DEFAULT; ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm
policy %u\n", ab->pm_policy); ret = ath11k_core_pm_notifier_register(ab); if (ret) { ath11k_err(ab, "failed to register PM notifier: %d\n", ret); return ret; } ret = ath11k_core_soc_create(ab); if (ret) { ath11k_err(ab, "failed to create soc core: %d\n", ret); goto err_unregister_pm_notifier; } return 0; err_unregister_pm_notifier: ath11k_core_pm_notifier_unregister(ab); return ret; } EXPORT_SYMBOL(ath11k_core_init); void ath11k_core_deinit(struct ath11k_base *ab) { mutex_lock(&ab->core_lock); ath11k_core_pdev_destroy(ab); ath11k_core_stop(ab); mutex_unlock(&ab->core_lock); ath11k_hif_power_down(ab, false); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); ath11k_core_pm_notifier_unregister(ab); } EXPORT_SYMBOL(ath11k_core_deinit); void ath11k_core_free(struct ath11k_base *ab) { destroy_workqueue(ab->workqueue_aux); destroy_workqueue(ab->workqueue); kfree(ab); } EXPORT_SYMBOL(ath11k_core_free); struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, enum ath11k_bus bus) { struct ath11k_base *ab; ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL); if (!ab) return NULL; init_completion(&ab->driver_recovery); ab->workqueue = create_singlethread_workqueue("ath11k_wq"); if (!ab->workqueue) goto err_sc_free; ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq"); if (!ab->workqueue_aux) goto err_free_wq; mutex_init(&ab->core_lock); mutex_init(&ab->tbl_mtx_lock); spin_lock_init(&ab->base_lock); mutex_init(&ab->vdev_id_11d_lock); init_completion(&ab->reset_complete); init_completion(&ab->reconfigure_complete); init_completion(&ab->recovery_start); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); init_waitqueue_head(&ab->qmi.cold_boot_waitq); INIT_WORK(&ab->restart_work, ath11k_core_restart); INIT_WORK(&ab->update_11d_work, ath11k_update_11d); INIT_WORK(&ab->reset_work, ath11k_core_reset); INIT_WORK(&ab->dump_work, ath11k_coredump_upload); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); init_completion(&ab->restart_completed); ab->dev = dev; ab->hif.bus = bus; return ab; err_free_wq: destroy_workqueue(ab->workqueue); err_sc_free: kfree(ab); return NULL; } EXPORT_SYMBOL(ath11k_core_alloc); MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/dp_rx.c b/dp_rx.c index ffc7482c77b6..b9e976ddcbbf 100644 --- a/dp_rx.c +++ b/dp_rx.c @@ -1,5795 +1,5794 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include #include #include #include "core.h" #include "debug.h" #include "debugfs_htt_stats.h" #include "debugfs_sta.h" #include "hal_desc.h" #include "hw.h" #include "dp_rx.h" #include "hal_rx.h" #include "dp_tx.h" #include "peer.h" #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) static inline u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } static inline enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, struct hal_rx_desc *desc) { if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } static inline bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, struct sk_buff *skb) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return ieee80211_has_morefrags(hdr->frame_control); } static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, struct sk_buff *skb) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_attention(desc); } static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, __le32_to_cpu(attn->info2)); } static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, __le32_to_cpu(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK); } static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) { u32 info = __le32_to_cpu(attn->info1); u32 errmap = 0; if (info & RX_ATTENTION_INFO1_FCS_ERR) errmap |= DP_RX_MPDU_ERR_FCS; if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) errmap |= DP_RX_MPDU_ERR_DECRYPT; if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) errmap |= DP_RX_MPDU_ERR_TKIP_MIC; if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; if 
(info & RX_ATTENTION_INFO1_OVERFLOW_ERR) errmap |= DP_RX_MPDU_ERR_OVERFLOW; if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) errmap |= DP_RX_MPDU_ERR_MSDU_LEN; if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) errmap |= DP_RX_MPDU_ERR_MPDU_LEN; return errmap; } static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, struct hal_rx_desc *desc) { struct rx_attention *rx_attention; u32 errmap; rx_attention = ath11k_dp_rx_get_attention(ab, desc); errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); return errmap & DP_RX_MPDU_ERR_MSDU_LEN; } static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, struct hal_rx_desc *desc) { return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc); } static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, __le32_to_cpu(attn->info1)); } static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); return rx_pkt_hdr; } static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { u32 tlv_tag; tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); return tlv_tag == HAL_RX_MPDU_START; } static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); } static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, struct hal_rx_desc *desc, 
u16 len) { ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab, struct hal_rx_desc *desc) { struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc); return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) && (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST, __le32_to_cpu(attn->info1))); } static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc); } static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc); } static void ath11k_dp_service_mon_ring(struct timer_list *t) { struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer); int i; for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); mod_timer(&ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); } static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) { int i, reaped = 0; unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); do { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) reaped += ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); /* nothing more to reap */ if (reaped < DP_MON_SERVICE_BUDGET) return 0; } while (time_before(jiffies, timeout)); ath11k_warn(ab, "dp mon ring purge timeout"); return -ETIMEDOUT; } /* Returns number of Rx buffers replenished */ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; struct sk_buff *skb; int num_free; int num_remain; int buf_id; u32 cookie; dma_addr_t paddr; req_entries = min(req_entries, rx_ring->bufs_max); srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); num_free = ath11k_hal_srng_src_num_free(ab, srng, true); if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) req_entries = num_free; req_entries = min(num_free, req_entries); num_remain = req_entries; while (num_remain > 0) { skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); if (!skb) break; if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) { skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - skb->data); } paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (dma_mapping_error(ab->dev, paddr)) goto fail_free_skb; spin_lock_bh(&rx_ring->idr_lock); buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1, (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (buf_id <= 0) goto fail_dma_unmap; desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) goto fail_idr_remove; ATH11K_SKB_RXCB(skb)->paddr = paddr; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); num_remain--; ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; fail_idr_remove: spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); ath11k_hal_srng_access_end(ab, srng); 
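	/* Even on failure, finish SRNG access so that the entries queued so
	 * far are committed to the ring; the caller learns how many buffers
	 * were actually replenished from the return value below.
	 */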
spin_unlock_bh(&srng->lock); return req_entries - num_remain; } static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, struct dp_rxdma_ring *rx_ring) { struct sk_buff *skb; int buf_id; spin_lock_bh(&rx_ring->idr_lock); idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { idr_remove(&rx_ring->bufs_idr, buf_id); /* TODO: Understand where internal driver does this dma_unmap * of rxdma_buffer. */ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } idr_destroy(&rx_ring->bufs_idr); spin_unlock_bh(&rx_ring->idr_lock); return 0; } static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; int i; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); rx_ring = &dp->rxdma_mon_buf_ring; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); } return 0; } static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, struct dp_rxdma_ring *rx_ring, u32 ringtype) { struct ath11k_pdev_dp *dp = &ar->dp; int num_entries; num_entries = rx_ring->refill_buf_ring.size / ath11k_hal_srng_get_entrysize(ar->ab, ringtype); rx_ring->bufs_max = num_entries; ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, ar->ab->hw_params.hal_params->rx_buf_rbm); return 0; } static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; int i; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); if (ar->ab->hw_params.rxdma1_enable) { rx_ring = &dp->rxdma_mon_buf_ring; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); } return 0; } static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; int i; ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { if (ab->hw_params.rx_mac_buf_ring) ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); ath11k_dp_srng_cleanup(ab, &dp->rx_mon_status_refill_ring[i].refill_buf_ring); } ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); } void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int i; for (i = 0; i < DP_REO_DST_RING_MAX; i++) ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); } int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int ret; int i; for (i = 0; i < DP_REO_DST_RING_MAX; i++) { ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to setup reo_dst_ring\n"); goto err_reo_cleanup; } } return 0; err_reo_cleanup: ath11k_dp_pdev_reo_cleanup(ab); return ret; } static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_srng *srng = NULL; int i; int ret; ret = ath11k_dp_srng_setup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring, HAL_RXDMA_BUF, 0, dp->mac_id, 
DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ar->ab->hw_params.rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ret = ath11k_dp_srng_setup(ar->ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   dp->mac_id + i, 1024);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, dp->mac_id + i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
		ret = ath11k_dp_srng_setup(ar->ab, srng,
					   HAL_RXDMA_MONITOR_STATUS, 0,
					   dp->mac_id + i,
					   DP_RXDMA_MON_STATUS_RING_SIZE);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup rx_mon_status_refill_ring %d\n",
				    i);
			return ret;
		}
	}

	/* If rxdma1_enable is false, there is no need to set up
	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
	 * only the reap timer needs to be initialized (e.g. for QCA6390).
	 */
	if (!ar->ab->hw_params.rxdma1_enable) {
		/* init mon status buffer reap timer */
		timer_setup(&ar->ab->mon_reap_timer,
			    ath11k_dp_service_mon_ring, 0);
		return 0;
	}

	ret = ath11k_dp_srng_setup(ar->ab,
				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DST_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DST\n");
		return ret;
	}

	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DESC\n");
		return ret;
	}

	return 0;
}

void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *cmd, *tmp;
	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_rx_tid *rx_tid;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		rx_tid = &cmd->data;
		if (rx_tid->vaddr_unaligned) {
			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
					     rx_tid->vaddr_unaligned,
					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
			rx_tid->vaddr_unaligned = NULL;
		}
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		rx_tid = &cmd_cache->data;
		if (rx_tid->vaddr_unaligned) {
			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
					     rx_tid->vaddr_unaligned,
					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
			rx_tid->vaddr_unaligned = NULL;
		}
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	if (rx_tid->vaddr_unaligned) {
		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
				     rx_tid->vaddr_unaligned,
				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
		rx_tid->vaddr_unaligned = NULL;
	}
}

static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
				      struct dp_rx_tid *rx_tid)
{
	struct
ath11k_hal_reo_cmd cmd = {}; unsigned long tot_desc_sz, desc_sz; int ret; tot_desc_sz = rx_tid->size; desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); while (tot_desc_sz > desc_sz) { tot_desc_sz -= desc_sz; cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); cmd.addr_hi = upper_32_bits(rx_tid->paddr); ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL); if (ret) ath11k_warn(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", rx_tid->tid, ret); } memset(&cmd, 0, sizeof(cmd)); cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE, &cmd, ath11k_dp_reo_cmd_free); if (ret) { ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", rx_tid->tid, ret); dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } } static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, enum hal_reo_cmd_status status) { struct ath11k_base *ab = dp->ab; struct dp_rx_tid *rx_tid = ctx; struct dp_reo_cache_flush_elem *elem, *tmp; if (status == HAL_REO_CMD_DRAIN) { goto free_desc; } else if (status != HAL_REO_CMD_SUCCESS) { /* Shouldn't happen! Cleanup in case of other failure? */ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", rx_tid->tid, status); return; } elem = kzalloc(sizeof(*elem), GFP_ATOMIC); if (!elem) goto free_desc; elem->ts = jiffies; memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); spin_lock_bh(&dp->reo_cmd_lock); list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); dp->reo_cmd_cache_flush_count++; /* Flush and invalidate aged REO desc from HW cache */ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, list) { if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || time_after(jiffies, elem->ts + msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { list_del(&elem->list); dp->reo_cmd_cache_flush_count--; spin_unlock_bh(&dp->reo_cmd_lock); ath11k_dp_reo_cache_flush(ab, &elem->data); kfree(elem); spin_lock_bh(&dp->reo_cmd_lock); } } spin_unlock_bh(&dp->reo_cmd_lock); return; free_desc: dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } void ath11k_peer_rx_tid_delete(struct ath11k *ar, struct ath11k_peer *peer, u8 tid) { struct ath11k_hal_reo_cmd cmd = {}; struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; int ret; if (!rx_tid->active) return; rx_tid->active = false; cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, ath11k_dp_rx_tid_del_func); if (ret) { if (ret != -ESHUTDOWN) ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", tid, ret); dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } rx_tid->paddr = 0; rx_tid->paddr_unaligned = 0; rx_tid->size = 0; rx_tid->unaligned_size = 0; } static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, u32 *link_desc, enum hal_wbm_rel_bm_act action) { struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng; u32 *desc; int ret = 0; srng = 
&ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) { ret = -ENOBUFS; goto exit; } ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, action); exit: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return ret; } static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) { struct ath11k_base *ab = rx_tid->ab; lockdep_assert_held(&ab->base_lock); if (rx_tid->dst_ring_desc) { if (rel_link_desc) ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); kfree(rx_tid->dst_ring_desc); rx_tid->dst_ring_desc = NULL; } rx_tid->cur_sn = 0; rx_tid->last_frag_no = 0; rx_tid->rx_frag_bitmap = 0; __skb_queue_purge(&rx_tid->rx_frags); } void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer) { struct dp_rx_tid *rx_tid; int i; lockdep_assert_held(&ar->ab->base_lock); for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; spin_unlock_bh(&ar->ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ar->ab->base_lock); ath11k_dp_rx_frags_cleanup(rx_tid, true); } } void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) { struct dp_rx_tid *rx_tid; int i; lockdep_assert_held(&ar->ab->base_lock); for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; ath11k_peer_rx_tid_delete(ar, peer, i); ath11k_dp_rx_frags_cleanup(rx_tid, true); spin_unlock_bh(&ar->ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ar->ab->base_lock); } } static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, struct ath11k_peer *peer, struct dp_rx_tid *rx_tid, u32 ba_win_sz, u16 ssn, bool update_ssn) { struct ath11k_hal_reo_cmd cmd = {}; int ret; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; cmd.ba_window_size = ba_win_sz; if (update_ssn) { cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); } ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL); if (ret) { ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", rx_tid->tid, ret); return ret; } rx_tid->ba_win_sz = ba_win_sz; return 0; } static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, const u8 *peer_mac, int vdev_id, u8 tid) { struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); goto unlock_exit; } rx_tid = &peer->rx_tid[tid]; if (!rx_tid->active) goto unlock_exit; dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; rx_tid->active = false; unlock_exit: spin_unlock_bh(&ab->base_lock); } int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, u8 tid, u32 ba_win_sz, u16 ssn, enum hal_pn_type pn_type) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; u32 hw_desc_sz, *vaddr; void *vaddr_unaligned; dma_addr_t paddr; int ret; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n", peer_mac); spin_unlock_bh(&ab->base_lock); return 
-ENOENT;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		paddr = rx_tid->paddr;
		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n",
				    peer_mac, tid, ret);
			return ret;
		}

		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
							     peer_mac, paddr,
							     tid, 1, ba_win_sz);
		if (ret)
			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
				    peer_mac, tid, ret);
		return ret;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	/* TODO: Optimize the memory allocation for qos tid based on
	 * the actual BA window size in REO tid update path.
	 */
	if (tid == HAL_DESC_REO_NON_QOS_TID)
		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
	else
		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
						DMA_BIDIRECTIONAL, GFP_ATOMIC);
	if (!vaddr_unaligned) {
		spin_unlock_bh(&ab->base_lock);
		return -ENOMEM;
	}

	rx_tid->vaddr_unaligned = vaddr_unaligned;
	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
	rx_tid->paddr_unaligned = paddr;
	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
			(unsigned long)rx_tid->vaddr_unaligned);
	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
	rx_tid->size = hw_desc_sz;
	rx_tid->active = true;

	/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
	 * Since these changes are not reflected in the device, driver now needs to
	 * explicitly call dma_sync_single_for_device.
	 */
	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
				   rx_tid->size,
				   DMA_TO_DEVICE);
	spin_unlock_bh(&ab->base_lock);

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
						     rx_tid->paddr, tid, 1, ba_win_sz);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
			    peer_mac, tid, ret);
		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
	}

	return ret;
}

int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
			     struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	int ret;

	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
				       params->tid, params->buf_size,
				       params->ssn, arsta->pn_type);
	if (ret)
		ath11k_warn(ab, "failed to setup rx tid: %d\n", ret);

	return ret;
}

int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
			    struct ieee80211_ampdu_params *params)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
	int vdev_id = arsta->arvif->vdev_id;
	dma_addr_t paddr;
	bool active;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	paddr = peer->rx_tid[params->tid].paddr;
	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
						     params->sta->addr, paddr,
						     params->tid, 1, 1);
	if (ret)
		ath11k_warn(ab, "failed to send wmi to 
delete rx tid %d\n", ret); return ret; } int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ath11k_hal_reo_cmd cmd = {}; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; u8 tid; int ret = 0; /* NOTE: Enable PN/TSC replay check offload only for unicast frames. * We use mac80211 PN/TSC replay check functionality for bcast/mcast * for now. */ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return 0; cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; cmd.upd0 |= HAL_REO_CMD_UPD0_PN | HAL_REO_CMD_UPD0_PN_SIZE | HAL_REO_CMD_UPD0_PN_VALID | HAL_REO_CMD_UPD0_PN_CHECK | HAL_REO_CMD_UPD0_SVLD; switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (key_cmd == SET_KEY) { cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; cmd.pn_size = 48; } break; default: break; } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); if (!peer) { ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); spin_unlock_bh(&ab->base_lock); return -ENOENT; } for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { rx_tid = &peer->rx_tid[tid]; if (!rx_tid->active) continue; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL); if (ret) { ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", tid, ret); break; } } spin_unlock_bh(&ab->base_lock); return ret; } static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, u16 peer_id) { int i; for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { if (ppdu_stats->user_stats[i].is_valid_peer_id) { if (peer_id == ppdu_stats->user_stats[i].peer_id) return i; } else { return i; } } return -EINVAL; } static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct htt_ppdu_stats_info *ppdu_info; struct htt_ppdu_user_stats *user_stats; int cur_user; u16 peer_id; ppdu_info = data; switch (tag) { case HTT_PPDU_STATS_TAG_COMMON: if (len < sizeof(struct htt_ppdu_stats_common)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, sizeof(struct htt_ppdu_stats_common)); break; case HTT_PPDU_STATS_TAG_USR_RATE: if (len < sizeof(struct htt_ppdu_stats_user_rate)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->rate, ptr, sizeof(struct htt_ppdu_stats_user_rate)); user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; 
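		/* The first TLV seen for a peer claims a free user-stats slot;
		 * ath11k_get_ppdu_user_index() returns that same slot for any
		 * later TLV carrying the same sw_peer_id.
		 */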
user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->cmpltn_cmn, ptr, sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->ack_ba, ptr, sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); user_stats->tlv_flags |= BIT(tag); break; } return 0; } int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, const void *ptr, void *data), void *data) { const struct htt_tlv *tlv; const void *begin = ptr; u16 tlv_tag, tlv_len; int ret = -EINVAL; while (len > 0) { if (len < sizeof(*tlv)) { ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", ptr - begin, len, sizeof(*tlv)); return -EINVAL; } tlv = (struct htt_tlv *)ptr; tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); ptr += sizeof(*tlv); len -= sizeof(*tlv); if (tlv_len > len) { ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", tlv_tag, ptr - begin, len, tlv_len); return -EINVAL; } ret = iter(ab, tlv_tag, tlv_len, ptr, data); if (ret == -ENOMEM) return ret; ptr += tlv_len; len -= tlv_len; } return 0; } static void ath11k_update_per_peer_tx_stats(struct ath11k *ar, struct htt_ppdu_stats *ppdu_stats, u8 user) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct ieee80211_sta *sta; struct ath11k_sta *arsta; struct htt_ppdu_stats_user_rate *user_rate; struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; struct htt_ppdu_stats_common *common = &ppdu_stats->common; int ret; u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; u32 succ_bytes = 0; u16 rate = 0, succ_pkts = 0; u32 tx_duration = 0; u8 tid = HTT_PPDU_STATS_NON_QOS_TID; bool is_ampdu = false; if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) return; if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) is_ampdu = HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { succ_bytes = usr_stats->ack_ba.success_bytes; succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, usr_stats->ack_ba.info); tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, usr_stats->ack_ba.info); } if (common->fes_duration_us) tx_duration = common->fes_duration_us; user_rate = &usr_stats->rate; flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); sgi = HTT_USR_RATE_GI(user_rate->rate_flags); dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); /* Note: If host configured fixed rates and in some other special * cases, the broadcast/management frames are sent in different rates. * Firmware rate's control to be skipped for this? 
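	 * The MCS/NSS range checks below at least keep such frames from
	 * indexing outside the expected rate tables.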
*/ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); return; } if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); return; } if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats", mcs, nss); return; } if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, flags, &rate_idx, &rate); if (ret < 0) return; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); if (!peer || !peer->sta) { spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); return; } sta = peer->sta; arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); switch (flags) { case WMI_RATE_PREAMBLE_OFDM: arsta->txrate.legacy = rate; break; case WMI_RATE_PREAMBLE_CCK: arsta->txrate.legacy = rate; break; case WMI_RATE_PREAMBLE_HT: arsta->txrate.mcs = mcs + 8 * (nss - 1); arsta->txrate.flags = RATE_INFO_FLAGS_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; break; case WMI_RATE_PREAMBLE_VHT: arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; break; case WMI_RATE_PREAMBLE_HE: arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; arsta->txrate.he_dcm = dcm; arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc ((user_rate->ru_end - user_rate->ru_start) + 1); break; } arsta->txrate.nss = nss; arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); arsta->tx_duration += tx_duration; memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. * So skip peer stats update for mgmt packets. 
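	 * tid stays at HTT_PPDU_STATS_NON_QOS_TID unless the ACK/BA status
	 * TLV reported a data TID, so the check below filters mgmt PPDUs.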
*/ if (tid < HTT_PPDU_STATS_NON_QOS_TID) { memset(peer_stats, 0, sizeof(*peer_stats)); peer_stats->succ_pkts = succ_pkts; peer_stats->succ_bytes = succ_bytes; peer_stats->is_ampdu = is_ampdu; peer_stats->duration = tx_duration; peer_stats->ba_fails = HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); } spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); } static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, struct htt_ppdu_stats *ppdu_stats) { u8 user; for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); } static struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, u32 ppdu_id) { struct htt_ppdu_stats_info *ppdu_info; lockdep_assert_held(&ar->data_lock); if (!list_empty(&ar->ppdu_stats_info)) { list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { if (ppdu_info->ppdu_id == ppdu_id) return ppdu_info; } if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { ppdu_info = list_first_entry(&ar->ppdu_stats_info, typeof(*ppdu_info), list); list_del(&ppdu_info->list); ar->ppdu_stat_list_depth--; ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); kfree(ppdu_info); } } ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); if (!ppdu_info) return NULL; list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); ar->ppdu_stat_list_depth++; return ppdu_info; } static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_htt_ppdu_stats_msg *msg; struct htt_ppdu_stats_info *ppdu_info; struct ath11k *ar; int ret; u8 pdev_id; u32 ppdu_id, len; msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); ppdu_id = msg->ppdu_id; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ret = -EINVAL; goto out; } if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) trace_ath11k_htt_ppdu_stats(ar, skb->data, len); spin_lock_bh(&ar->data_lock); ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); if (!ppdu_info) { ret = -EINVAL; goto out_unlock_data; } ppdu_info->ppdu_id = ppdu_id; ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, ath11k_htt_tlv_ppdu_stats_parse, (void *)ppdu_info); if (ret) { ath11k_warn(ab, "Failed to parse tlv %d\n", ret); goto out_unlock_data; } out_unlock_data: spin_unlock_bh(&ar->data_lock); out: rcu_read_unlock(); return ret; } static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) { struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; struct ath11k *ar; u8 pdev_id; pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); goto out; } trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, ar->ab->pktlog_defs_checksum); out: rcu_read_unlock(); } static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, struct sk_buff *skb) { u32 *data = (u32 *)skb->data; u8 pdev_id, ring_type, ring_id, pdev_idx; u16 hp, tp; u32 backpressure_time; struct ath11k_bp_stats *bp_stats; pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); ring_id 
= FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); ++data; hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); ++data; backpressure_time = *data; ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", pdev_id, ring_type, ring_id, hp, tp, backpressure_time); if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) return; bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { pdev_idx = DP_HW2SW_MACID(pdev_id); if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) return; bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; } else { ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", ring_type); return; } spin_lock_bh(&ab->base_lock); bp_stats->hp = hp; bp_stats->tp = tp; bp_stats->count++; bp_stats->jiffies = jiffies; spin_unlock_bh(&ab->base_lock); } void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_dp *dp = &ab->dp; struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); u16 peer_id; u8 vdev_id; u8 mac_addr[ETH_ALEN]; u16 peer_mac_h16; u16 ast_hash; u16 hw_peer_id; ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); switch (type) { case HTT_T2H_MSG_TYPE_VERSION_CONF: dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, resp->version_msg.version); dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, resp->version_msg.version); complete(&dp->htt_tgt_version_received); break; case HTT_T2H_MSG_TYPE_PEER_MAP: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, resp->peer_map_ev.info1); ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, peer_mac_h16, mac_addr); ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); break; case HTT_T2H_MSG_TYPE_PEER_MAP2: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, resp->peer_map_ev.info1); ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, peer_mac_h16, mac_addr); ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, resp->peer_map_ev.info2); hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID, resp->peer_map_ev.info1); ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, hw_peer_id); break; case HTT_T2H_MSG_TYPE_PEER_UNMAP: case HTT_T2H_MSG_TYPE_PEER_UNMAP2: peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, resp->peer_unmap_ev.info); ath11k_peer_unmap_event(ab, peer_id); break; case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: ath11k_htt_pull_ppdu_stats(ab, skb); break; case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: ath11k_debugfs_htt_ext_stats_handler(ab, skb); break; case HTT_T2H_MSG_TYPE_PKTLOG: ath11k_htt_pktlog(ab, skb); break; case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: ath11k_htt_backpressure_event_handler(ab, skb); break; default: ath11k_warn(ab, "htt event %d not handled\n", type); break; } dev_kfree_skb_any(skb); } static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, struct sk_buff_head *msdu_list, struct sk_buff *first, struct sk_buff *last, u8 l3pad_bytes, int 
msdu_len) { struct ath11k_base *ab = ar->ab; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); int buf_first_hdr_len, buf_first_len; struct hal_rx_desc *ldesc; int space_extra, rem_len, buf_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing * the length of the msdu in the first buffer. */ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { skb_put(first, buf_first_hdr_len + msdu_len); skb_pull(first, buf_first_hdr_len); return 0; } ldesc = (struct hal_rx_desc *)last->data; rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); /* MSDU spans over multiple buffers because the length of the MSDU * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. */ skb_put(first, DP_RX_BUFFER_SIZE); skb_pull(first, buf_first_hdr_len); /* When an MSDU spread over multiple buffers attention, MSDU_END and * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. */ ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); if (space_extra > 0 && (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { /* Free up all buffers of the MSDU */ while ((skb = __skb_dequeue(msdu_list)) != NULL) { rxcb = ATH11K_SKB_RXCB(skb); if (!rxcb->is_continuation) { dev_kfree_skb_any(skb); break; } dev_kfree_skb_any(skb); } return -ENOMEM; } rem_len = msdu_len - buf_first_len; while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->is_continuation) buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; else buf_len = rem_len; if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { WARN_ON_ONCE(1); dev_kfree_skb_any(skb); return -EINVAL; } skb_put(skb, buf_len + hal_rx_desc_sz); skb_pull(skb, hal_rx_desc_sz); skb_copy_from_linear_data(skb, skb_put(first, buf_len), buf_len); dev_kfree_skb_any(skb); rem_len -= buf_len; if (!rxcb->is_continuation) break; } return 0; } static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, struct sk_buff *first) { struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); if (!rxcb->is_continuation) return first; skb_queue_walk(msdu_list, skb) { rxcb = ATH11K_SKB_RXCB(skb); if (!rxcb->is_continuation) return skb; } return NULL; } static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct rx_attention *rx_attention; bool ip_csum_fail, l4_csum_fail; rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return 0; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_MIC_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); return 0; } static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_IV_LEN; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_HDR_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0; } static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_CCMP_128: case HAL_ENCRYPT_TYPE_CCMP_256: case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_ICV_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0; } static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; struct ieee80211_hdr *hdr; size_t hdr_len; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; u16 qos_ctl = 0; u8 *qos; /* copy SA & DA and pull decapped header */ hdr = (struct ieee80211_hdr *)msdu->data; hdr_len = ieee80211_hdrlen(hdr->frame_control); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); if (rxcb->is_first_msdu) { /* original 802.11 header is valid for the first msdu * hence we can reuse the same header */ hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); /* Each A-MSDU subframe will be reported as a separate MSDU, * so strip the A-MSDU bit from QoS Ctl. 
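		 * Leaving the bit set would cause mac80211 to attempt A-MSDU
		 * deaggregation on a subframe the hardware already split out.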
*/ if (ieee80211_is_data_qos(hdr->frame_control)) { qos = ieee80211_get_qos_ctl(hdr); qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } } else { /* Rebuild qos header if this is a middle/last msdu */ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); /* Reset the order bit as the HT_Control header is stripped */ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); qos_ctl = rxcb->tid; if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; /* TODO Add other QoS ctl fields when required */ /* copy decap header before overwriting for reuse below */ memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); } if (!(status->flag & RX_FLAG_IV_STRIPPED)) { memcpy(skb_push(msdu, ath11k_dp_rx_crypto_param_len(ar, enctype)), (void *)hdr + hdr_len, ath11k_dp_rx_crypto_param_len(ar, enctype)); } if (!rxcb->is_first_msdu) { memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN), &qos_ctl, IEEE80211_QOS_CTL_LEN); memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); return; } memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); /* original 802.11 header has a different DA and in * case of 4addr it may also have different SA */ hdr = (struct ieee80211_hdr *)msdu->data; ether_addr_copy(ieee80211_get_DA(hdr), da); ether_addr_copy(ieee80211_get_SA(hdr), sa); } static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status, bool decrypted) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; if (!rxcb->is_first_msdu || !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { WARN_ON_ONCE(1); return; } skb_trim(msdu, msdu->len - FCS_LEN); if (!decrypted) return; hdr = (void *)msdu->data; /* Tail */ if (status->flag & RX_FLAG_IV_STRIPPED) { skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); } else { /* MIC */ if (status->flag & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); /* ICV */ if (status->flag & RX_FLAG_ICV_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); } /* MMIC */ if ((status->flag & RX_FLAG_MMIC_STRIPPED) && !ieee80211_has_morefrags(hdr->frame_control) && enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); /* Head */ if (status->flag & RX_FLAG_IV_STRIPPED) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); memmove((void *)msdu->data + crypto_len, (void *)msdu->data, hdr_len); skb_pull(msdu, crypto_len); } } static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_hdr *hdr; size_t hdr_len, crypto_len; void *rfc1042; bool is_amsdu; is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); rfc1042 = hdr; if (rxcb->is_first_msdu) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); rfc1042 += hdr_len + crypto_len; } if (is_amsdu) rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); return rfc1042; } static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status) { struct ieee80211_hdr *hdr; struct ethhdr 
*eth; size_t hdr_len; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; void *rfc1042; rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); if (WARN_ON_ONCE(!rfc1042)) return; /* pull decapped header and copy SA & DA */ eth = (struct ethhdr *)msdu->data; ether_addr_copy(da, eth->h_dest); ether_addr_copy(sa, eth->h_source); skb_pull(msdu, sizeof(struct ethhdr)); /* push rfc1042/llc/snap */ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, sizeof(struct ath11k_dp_rfc1042_hdr)); /* push original 802.11 header */ hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); if (!(status->flag & RX_FLAG_IV_STRIPPED)) { memcpy(skb_push(msdu, ath11k_dp_rx_crypto_param_len(ar, enctype)), (void *)hdr + hdr_len, ath11k_dp_rx_crypto_param_len(ar, enctype)); } memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); /* original 802.11 header has a different DA and in * case of 4addr it may also have different SA */ hdr = (struct ieee80211_hdr *)msdu->data; ether_addr_copy(ieee80211_get_DA(hdr), da); ether_addr_copy(ieee80211_get_SA(hdr), sa); } static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status, bool decrypted) { u8 *first_hdr; u8 decap; struct ethhdr *ehdr; first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); switch (decap) { case DP_RX_DECAP_TYPE_NATIVE_WIFI: ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, enctype, status); break; case DP_RX_DECAP_TYPE_RAW: ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, decrypted); break; case DP_RX_DECAP_TYPE_ETHERNET2_DIX: ehdr = (struct ethhdr *)msdu->data; /* mac80211 allows fast path only for authorized STA */ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { ATH11K_SKB_RXCB(msdu)->is_eapol = true; ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, enctype, status); break; } /* PN for mcast packets will be validated in mac80211; * remove eth header and add 802.11 header. 
*/ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, enctype, status); break; case DP_RX_DECAP_TYPE_8023: /* TODO: Handle undecap for these formats */ break; } } static struct ath11k_peer * ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct hal_rx_desc *rx_desc = rxcb->rx_desc; struct ath11k_peer *peer = NULL; lockdep_assert_held(&ab->base_lock); if (rxcb->peer_id) peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); if (peer) return peer; if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) return NULL; peer = ath11k_peer_find_by_addr(ab, ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); return peer; } static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { bool fill_crypto_hdr; enum hal_encrypt_type enctype; bool is_decrypted = false; struct ath11k_skb_rxcb *rxcb; struct ieee80211_hdr *hdr; struct ath11k_peer *peer; struct rx_attention *rx_attention; u32 err_bitmap; /* PN for multicast packets will be checked in mac80211 */ rxcb = ATH11K_SKB_RXCB(msdu); fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); rxcb->is_mcbc = fill_crypto_hdr; if (rxcb->is_mcbc) { rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); } spin_lock_bh(&ar->ab->base_lock); peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); if (peer) { if (rxcb->is_mcbc) enctype = peer->sec_type_grp; else enctype = peer->sec_type; } else { enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); } spin_unlock_bh(&ar->ab->base_lock); rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); /* Clear per-MPDU flags while leaving per-PPDU flags intact */ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED); if (err_bitmap & DP_RX_MPDU_ERR_FCS) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) rx_status->flag |= RX_FLAG_MMIC_ERROR; if (is_decrypted) { rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; if (fill_crypto_hdr) rx_status->flag |= RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; else rx_status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_PN_VALIDATED; } ath11k_dp_rx_h_csum_offload(ar, msdu); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, enctype, rx_status, is_decrypted); if (!is_decrypted || fill_crypto_hdr) return; if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { hdr = (void *)msdu->data; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); } } static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { struct ieee80211_supported_band *sband; enum rx_msdu_start_pkt_type pkt_type; u8 bw; u8 rate_mcs, nss; u8 sgi; bool is_cck, is_ldpc; pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); switch (pkt_type) { case RX_MSDU_START_PKT_TYPE_11A: case RX_MSDU_START_PKT_TYPE_11B: is_cck = 
(pkt_type == RX_MSDU_START_PKT_TYPE_11B); sband = &ar->mac.sbands[rx_status->band]; rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, is_cck); break; case RX_MSDU_START_PKT_TYPE_11N: rx_status->encoding = RX_ENC_HT; if (rate_mcs > ATH11K_HT_MCS_MAX) { ath11k_warn(ar->ab, "received invalid mcs %d in HT mode\n", rate_mcs); break; } rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); if (sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); break; case RX_MSDU_START_PKT_TYPE_11AC: rx_status->encoding = RX_ENC_VHT; rx_status->rate_idx = rate_mcs; if (rate_mcs > ATH11K_VHT_MCS_MAX) { ath11k_warn(ar->ab, "received invalid mcs %d in VHT mode\n", rate_mcs); break; } rx_status->nss = nss; if (sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc); if (is_ldpc) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; break; case RX_MSDU_START_PKT_TYPE_11AX: rx_status->rate_idx = rate_mcs; if (rate_mcs > ATH11K_HE_MCS_MAX) { ath11k_warn(ar->ab, "received invalid mcs %d in HE mode\n", rate_mcs); break; } rx_status->encoding = RX_ENC_HE; rx_status->nss = nss; rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); break; } } static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { u8 channel_num; u32 center_freq, meta_data; struct ieee80211_channel *channel; rx_status->freq = 0; rx_status->rate_idx = 0; rx_status->nss = 0; rx_status->encoding = RX_ENC_LEGACY; rx_status->bw = RATE_INFO_BW_20; rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); channel_num = meta_data; center_freq = meta_data >> 16; if (center_freq >= ATH11K_MIN_6G_FREQ && center_freq <= ATH11K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 177) { rx_status->band = NL80211_BAND_5GHZ; } else { spin_lock_bh(&ar->data_lock); channel = ar->rx_channel; if (channel) { rx_status->band = channel->band; channel_num = ieee80211_frequency_to_channel(channel->center_freq); } spin_unlock_bh(&ar->data_lock); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", rx_desc, sizeof(struct hal_rx_desc)); } if (rx_status->band != NL80211_BAND_6GHZ) rx_status->freq = ieee80211_channel_to_frequency(channel_num, rx_status->band); ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); } static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, struct sk_buff *msdu, struct ieee80211_rx_status *status) { static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), }; struct ieee80211_rx_status *rx_status; struct ieee80211_radiotap_he *he = NULL; struct ieee80211_sta *pubsta = NULL; struct ath11k_peer *peer; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u8 decap = DP_RX_DECAP_TYPE_RAW; bool is_mcbc = rxcb->is_mcbc; bool is_eapol = rxcb->is_eapol; if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && !(status->flag & RX_FLAG_SKIP_MONITOR)) { he = skb_push(msdu, sizeof(known)); memcpy(he, &known, sizeof(known)); status->flag |= RX_FLAG_RADIOTAP_HE; }
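/* Informational note: the radiotap HE struct pushed above is only a
 * template. Its data1/data2 "known" bits advertise which fields are
 * valid, and mac80211 is expected to fill in the actual MCS, BW/RU
 * allocation and GI values from rx_status when it builds the radiotap
 * header for monitor interfaces (per the RX_FLAG_RADIOTAP_HE contract).
 */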
if (!(status->flag & RX_FLAG_ONLY_MONITOR)) decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); if (peer && peer->sta) pubsta = peer->sta; spin_unlock_bh(&ar->ab->base_lock); ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? peer->addr : NULL, rxcb->tid, is_mcbc ? "mcast" : "ucast", rxcb->seq_no, (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", (status->encoding == RX_ENC_HT) ? "ht" : "", (status->encoding == RX_ENC_VHT) ? "vht" : "", (status->encoding == RX_ENC_HE) ? "he" : "", (status->bw == RATE_INFO_BW_40) ? "40" : "", (status->bw == RATE_INFO_BW_80) ? "80" : "", (status->bw == RATE_INFO_BW_160) ? "160" : "", status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", status->rate_idx, status->nss, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), !!(status->flag & RX_FLAG_MMIC_ERROR), !!(status->flag & RX_FLAG_AMSDU_MORE)); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", msdu->data, msdu->len); rx_status = IEEE80211_SKB_RXCB(msdu); *rx_status = *status; /* TODO: trace rx packet */ /* PN for multicast packets is not validated in HW, * so skip the 802.3 rx path. * Also, fast_rx expects the STA to be authorized, hence * eapol packets are sent via the slow path. */ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) rx_status->flag |= RX_FLAG_8023; ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); } static int ath11k_dp_rx_process_msdu(struct ath11k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list, struct ieee80211_rx_status *rx_status) { struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc, *lrx_desc; struct rx_attention *rx_attention; struct ath11k_skb_rxcb *rxcb; struct sk_buff *last_buf; u8 l3_pad_bytes; u8 *hdr_status; u16 msdu_len; int ret; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); if (!last_buf) { ath11k_warn(ab, "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); ret = -EIO; goto free_out; } rx_desc = (struct hal_rx_desc *)msdu->data; if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { ath11k_warn(ar->ab, "msdu len not valid\n"); ret = -EIO; goto free_out; } lrx_desc = (struct hal_rx_desc *)last_buf->data; rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ab, "msdu_done bit in attention is not set\n"); ret = -EIO; goto free_out; } rxcb = ATH11K_SKB_RXCB(msdu); rxcb->rx_desc = rx_desc; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); if (rxcb->is_frag) { skb_pull(msdu, hal_rx_desc_sz); } else if (!rxcb->is_continuation) { if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); ret = -EINVAL; ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); goto free_out; } skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); } else { ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, msdu, last_buf, l3_pad_bytes, msdu_len); if (ret) {
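/* Informational: this error path is reached when a scattered MSDU
 * (is_continuation set in the REO descriptor) could not be merged
 * into a single skb by ath11k_dp_rx_msdu_coalesce(); the frame is
 * then dropped by the caller.
 */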
ath11k_warn(ab, "failed to coalesce msdu rx buffer%d\n", ret); goto free_out; } } ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; return 0; free_out: return ret; } static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, struct napi_struct *napi, struct sk_buff_head *msdu_list, int mac_id) { struct sk_buff *msdu; struct ath11k *ar; struct ieee80211_rx_status rx_status = {}; int ret; if (skb_queue_empty(msdu_list)) return; if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { __skb_queue_purge(msdu_list); return; } ar = ab->pdevs[mac_id].ar; if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { __skb_queue_purge(msdu_list); return; } while ((msdu = __skb_dequeue(msdu_list))) { ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); if (unlikely(ret)) { ath11k_dbg(ab, ATH11K_DBG_DATA, "Unable to process msdu %d", ret); dev_kfree_skb_any(msdu); continue; } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); } } int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct napi_struct *napi, int budget) { struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; int num_buffs_reaped[MAX_RADIOS] = {}; struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; int total_msdu_reaped = 0; struct hal_srng *srng; struct sk_buff *msdu; bool done = false; int buf_id, mac_id; struct ath11k *ar; struct hal_reo_dest_ring *desc; enum hal_reo_dest_ring_push_reason push_reason; u32 cookie; int i; for (i = 0; i < MAX_RADIOS; i++) __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; spin_lock_bh(&srng->lock); try_again: ath11k_hal_srng_access_begin(ab, srng); while (likely(desc = (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, desc->buf_addr_info.info1); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); if (unlikely(buf_id == 0)) continue; ar = ab->pdevs[mac_id].ar; rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (unlikely(!msdu)) { ath11k_warn(ab, "frame rx with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, desc->info0); if (unlikely(push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); ab->soc_stats.hal_reo_error[ring_id]++; continue; } rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, desc->rx_mpdu_info.meta_data); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, desc->rx_mpdu_info.info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, desc->info0); rxcb->mac_id = mac_id; __skb_queue_tail(&msdu_list[mac_id], msdu); if (rxcb->is_continuation) { done = false; } else { total_msdu_reaped++; done = true; } if (total_msdu_reaped >= 
budget) break; } /* HW might have updated the head pointer after we cached it. * In this case, even though there are entries in the ring, we'll * get rx_desc NULL. Give the read another try with the updated cached * head pointer so that we can reap the complete MPDU in the current * rx processing. */ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { ath11k_hal_srng_access_end(ab, srng); goto try_again; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (unlikely(!total_msdu_reaped)) goto exit; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } exit: return total_msdu_reaped; } static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, struct hal_rx_mon_ppdu_info *ppdu_info) { struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; u32 num_msdu; int i; if (!rx_stats) return; arsta->rssi_comb = ppdu_info->rssi_comb; ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; rx_stats->num_msdu += num_msdu; rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count; rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; rx_stats->other_msdu_count += ppdu_info->other_msdu_count; if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { ppdu_info->nss = 1; ppdu_info->mcs = HAL_RX_MAX_MCS; ppdu_info->tid = IEEE80211_NUM_TIDS; } if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; if (ppdu_info->mcs <= HAL_RX_MAX_MCS) rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; if (ppdu_info->gi < HAL_RX_GI_MAX) rx_stats->gi_count[ppdu_info->gi] += num_msdu; if (ppdu_info->bw < HAL_RX_BW_MAX) rx_stats->bw_count[ppdu_info->bw] += num_msdu; if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; if (ppdu_info->tid <= IEEE80211_NUM_TIDS) rx_stats->tid_count[ppdu_info->tid] += num_msdu; if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; if (ppdu_info->is_stbc) rx_stats->stbc_count += num_msdu; if (ppdu_info->beamformed) rx_stats->beamformed_count += num_msdu; if (ppdu_info->num_mpdu_fcs_ok > 1) rx_stats->ampdu_msdu_count += num_msdu; else rx_stats->non_ampdu_msdu_count += num_msdu; rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; rx_stats->dcm_count += ppdu_info->dcm; rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++) arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i]; rx_stats->rx_duration += ppdu_info->rx_duration; arsta->rx_duration = rx_stats->rx_duration; } static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, struct dp_rxdma_ring *rx_ring, int *buf_id) { struct sk_buff *skb; dma_addr_t paddr; skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); if (!skb) goto fail_alloc_skb;
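/* Informational sketch of the alignment fix-up below: the allocation
 * above over-allocates by DP_RX_BUFFER_ALIGN_SIZE so that skb->data
 * can be advanced to the next aligned address before DMA mapping.
 * E.g., assuming a 128-byte alignment and data at ...0x48, PTR_ALIGN()
 * yields ...0x80 and skb_pull() advances by 0x38 (56) bytes.
 */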
if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) { skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - skb->data); } paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ab->dev, paddr))) goto fail_free_skb; spin_lock_bh(&rx_ring->idr_lock); *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, rx_ring->bufs_max, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (*buf_id < 0) goto fail_dma_unmap; ATH11K_SKB_RXCB(skb)->paddr = paddr; return skb; fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); fail_alloc_skb: return NULL; } int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; struct sk_buff *skb; int num_free; int num_remain; int buf_id; u32 cookie; dma_addr_t paddr; req_entries = min(req_entries, rx_ring->bufs_max); srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); num_free = ath11k_hal_srng_src_num_free(ab, srng, true); req_entries = min(num_free, req_entries); num_remain = req_entries; while (num_remain > 0) { skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, &buf_id); if (!skb) break; paddr = ATH11K_SKB_RXCB(skb)->paddr; desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) goto fail_desc_get; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); num_remain--; ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; fail_desc_get: spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; } #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535 static void ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon, struct hal_tlv_hdr *tlv) { struct hal_rx_ppdu_start *ppdu_start; u16 ppdu_id_diff, ppdu_id, tlv_len; u8 *ptr; /* PPDU id is part of second tlv, move ptr to second tlv */ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl); ptr = (u8 *)tlv; ptr += sizeof(*tlv) + tlv_len; tlv = (struct hal_tlv_hdr *)ptr; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START) return; ptr += sizeof(*tlv); ppdu_start = (struct hal_rx_ppdu_start *)ptr; ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID, __le32_to_cpu(ppdu_start->info0)); if (pmon->sw_mon_entries.ppdu_id < ppdu_id) { pmon->buf_state = DP_MON_STATUS_LEAD; ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id; if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) pmon->buf_state = DP_MON_STATUS_LAG; } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) { pmon->buf_state = DP_MON_STATUS_LAG; ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id; if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) pmon->buf_state = DP_MON_STATUS_LEAD; } } static enum dp_mon_status_buf_state ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng, struct dp_rxdma_ring *rx_ring) { struct ath11k_skb_rxcb *rxcb; struct hal_tlv_hdr *tlv; struct sk_buff *skb; void *status_desc; dma_addr_t paddr; u32 cookie; int 
buf_id; u8 rbm; status_desc = ath11k_hal_srng_src_next_peek(ab, srng); if (!status_desc) return DP_MON_STATUS_NO_DMA; ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!skb) return DP_MON_STATUS_NO_DMA; rxcb = ATH11K_SKB_RXCB(skb); dma_sync_single_for_cpu(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) return DP_MON_STATUS_NO_DMA; return DP_MON_STATUS_REPLINISH; } static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, int *budget, struct sk_buff_head *skb_list) { struct ath11k *ar; const struct ath11k_hw_hal_params *hal_params; enum dp_mon_status_buf_state reap_status; struct ath11k_pdev_dp *dp; struct dp_rxdma_ring *rx_ring; struct ath11k_mon_data *pmon; struct hal_srng *srng; void *rx_mon_status_desc; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb; struct hal_tlv_hdr *tlv; u32 cookie; int buf_id, srng_id; dma_addr_t paddr; u8 rbm; int num_buffs_reaped = 0; ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; dp = &ar->dp; pmon = &dp->mon_data; srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (*budget) { *budget -= 1; rx_mon_status_desc = ath11k_hal_srng_src_peek(ab, srng); if (!rx_mon_status_desc) { pmon->buf_state = DP_MON_STATUS_REPLINISH; break; } ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, &cookie, &rbm); if (paddr) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!skb) { ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", buf_id); pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } rxcb = ATH11K_SKB_RXCB(skb); dma_sync_single_for_cpu(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) { ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n", FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl), buf_id); /* The RxDMA status done bit might not be set even * though the tail pointer has been moved by HW. */ /* If the done status is missing: * 1. As per the MAC team's suggestion, when the HP + 1 entry is * peeked and its DMA is not done but the HP + 2 entry's DMA done * is set, skip the HP + 1 entry and start processing it in the * next interrupt. * 2. If the HP + 2 entry's DMA done is not set either, keep * polling the HP + 1 entry, i.e. check the status of the same * buffer again the next time dp_rx_mon_status_srng_process runs. */ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng, rx_ring); if (reap_status == DP_MON_STATUS_NO_DMA) continue; spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (ab->hw_params.full_monitor_mode) { ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); if (paddr == pmon->mon_status_paddr) pmon->buf_state = DP_MON_STATUS_MATCH; } dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); __skb_queue_tail(skb_list, skb); } else { pmon->buf_state = DP_MON_STATUS_REPLINISH; } move_next: skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, &buf_id); if (!skb) { hal_params = ab->hw_params.hal_params; ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, hal_params->rx_buf_rbm); num_buffs_reaped++; break; } rxcb = ATH11K_SKB_RXCB(skb); cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); ath11k_hal_srng_src_get_next_entry(ab, srng); num_buffs_reaped++; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return num_buffs_reaped; } static void ath11k_dp_rx_frag_timer(struct timer_list *timer) { struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, frag_timer); spin_lock_bh(&rx_tid->ab->base_lock); if (rx_tid->last_frag_no && rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { spin_unlock_bh(&rx_tid->ab->base_lock); return; } ath11k_dp_rx_frags_cleanup(rx_tid, true); spin_unlock_bh(&rx_tid->ab->base_lock); } int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) { struct ath11k_base *ab = ar->ab; struct crypto_shash *tfm; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; int i; tfm = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(tfm)) { ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); spin_unlock_bh(&ab->base_lock); crypto_free_shash(tfm); return -ENOENT; } for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; rx_tid->ab = ab; timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); skb_queue_head_init(&rx_tid->rx_frags); } peer->tfm_mmic = tfm; peer->dp_setup_done = true; spin_unlock_bh(&ab->base_lock); return 0; } static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, struct ieee80211_hdr *hdr, u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm); u8 mic_hdr[16] = {}; u8 tid = 0; int ret; if (!tfm) return -EINVAL; desc->tfm = tfm; ret = crypto_shash_setkey(tfm, key, 8); if (ret) goto out; ret = crypto_shash_init(desc); if (ret) goto out; /* TKIP MIC header */ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); mic_hdr[12] = tid; ret = crypto_shash_update(desc, mic_hdr, 16); if (ret) goto out; ret = crypto_shash_update(desc, 
data, data_len); if (ret) goto out; ret = crypto_shash_final(desc, mic); out: shash_desc_zero(desc); return ret; } static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, struct sk_buff *msdu) { struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); struct ieee80211_key_conf *key_conf; struct ieee80211_hdr *hdr; u8 mic[IEEE80211_CCMP_MIC_LEN]; int head_len, tail_len, ret; size_t data_len; u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; u8 *key, *data; u8 key_idx; if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) return 0; hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); hdr_len = ieee80211_hdrlen(hdr->frame_control); head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; if (!is_multicast_ether_addr(hdr->addr1)) key_idx = peer->ucast_keyidx; else key_idx = peer->mcast_keyidx; key_conf = peer->keys[key_idx]; data = msdu->data + head_len; data_len = msdu->len - head_len - tail_len; key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) goto mic_fail; return 0; mic_fail: (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; skb_pull(msdu, hal_rx_desc_sz); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); ieee80211_rx(ar->hw, msdu); return -EINVAL; } static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype, u32 flags) { struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; if (!flags) return; hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); if (flags & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); if (flags & RX_FLAG_ICV_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); if (flags & RX_FLAG_IV_STRIPPED) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, (void *)msdu->data + hal_rx_desc_sz, hdr_len); skb_pull(msdu, crypto_len); } } static int ath11k_dp_rx_h_defrag(struct ath11k *ar, struct ath11k_peer *peer, struct dp_rx_tid *rx_tid, struct sk_buff **defrag_skb) { struct hal_rx_desc *rx_desc; struct sk_buff *skb, *first_frag, *last_frag; struct ieee80211_hdr *hdr; struct rx_attention *rx_attention; enum hal_encrypt_type enctype; bool is_decrypted = false; int msdu_len = 0; int extra_space; u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; first_frag = skb_peek(&rx_tid->rx_frags); last_frag = skb_peek_tail(&rx_tid->rx_frags); skb_queue_walk(&rx_tid->rx_frags, skb) { flags = 0; rx_desc = (struct hal_rx_desc *)skb->data; hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); if (enctype != HAL_ENCRYPT_TYPE_OPEN) { rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); } if (is_decrypted) { if (skb != first_frag) flags |= RX_FLAG_IV_STRIPPED; if (skb != last_frag) 
flags |= RX_FLAG_ICV_STRIPPED | RX_FLAG_MIC_STRIPPED; } /* RX fragments are always raw packets */ if (skb != last_frag) skb_trim(skb, skb->len - FCS_LEN); ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); if (skb != first_frag) skb_pull(skb, hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control)); msdu_len += skb->len; } extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); if (extra_space > 0 && (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) return -ENOMEM; __skb_unlink(first_frag, &rx_tid->rx_frags); while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { skb_put_data(first_frag, skb->data, skb->len); dev_kfree_skb_any(skb); } hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); ATH11K_SKB_RXCB(first_frag)->is_frag = 1; if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) first_frag = NULL; *defrag_skb = first_frag; return 0; } static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, struct sk_buff *defrag_skb) { struct ath11k_base *ab = ar->ab; struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; struct hal_reo_entrance_ring *reo_ent_ring; struct hal_reo_dest_ring *reo_dest_ring; struct dp_link_desc_bank *link_desc_banks; struct hal_rx_msdu_link *msdu_link; struct hal_rx_msdu_details *msdu0; struct hal_srng *srng; dma_addr_t paddr; u32 desc_bank, msdu_info, mpdu_info; u32 dst_idx, cookie, hal_rx_desc_sz; int ret, buf_id; hal_rx_desc_sz = ab->hw_params.hal_desc_sz; link_desc_banks = ab->dp.link_desc_banks; reo_dest_ring = rx_tid->dst_ring_desc; ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr)); msdu0 = &msdu_link->msdu_link[0]; dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); memset(msdu0, 0, sizeof(*msdu0)); msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, defrag_skb->len - hal_rx_desc_sz) | FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); msdu0->rx_msdu_info.info0 = msdu_info; /* change msdu len in hal rx desc */ ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, paddr)) return -ENOMEM; spin_lock_bh(&rx_refill_ring->idr_lock); buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, rx_refill_ring->bufs_max * 3, GFP_ATOMIC); spin_unlock_bh(&rx_refill_ring->idr_lock); if (buf_id < 0) { ret = -ENOMEM; goto err_unmap_dma; } ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); reo_ent_ring = (struct hal_reo_entrance_ring *) ath11k_hal_srng_src_get_next_entry(ab, 
srng); if (!reo_ent_ring) { ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); ret = -ENOSPC; goto err_free_idr; } memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, reo_dest_ring->info0)) | FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return 0; err_free_idr: spin_lock_bh(&rx_refill_ring->idr_lock); idr_remove(&rx_refill_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_refill_ring->idr_lock); err_unmap_dma: dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), DMA_TO_DEVICE); return ret; } static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, struct sk_buff *a, struct sk_buff *b) { int frag1, frag2; frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); return frag1 - frag2; } static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, struct sk_buff_head *frag_list, struct sk_buff *cur_frag) { struct sk_buff *skb; int cmp; skb_queue_walk(frag_list, skb) { cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); if (cmp < 0) continue; __skb_queue_before(frag_list, skb, cur_frag); return; } __skb_queue_tail(frag_list, cur_frag); } static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u64 pn = 0; u8 *ehdr; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); pn = ehdr[0]; pn |= (u64)ehdr[1] << 8; pn |= (u64)ehdr[4] << 16; pn |= (u64)ehdr[5] << 24; pn |= (u64)ehdr[6] << 32; pn |= (u64)ehdr[7] << 40; return pn; } static bool ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) { enum hal_encrypt_type encrypt_type; struct sk_buff *first_frag, *skb; struct hal_rx_desc *desc; u64 last_pn; u64 cur_pn; first_frag = skb_peek(&rx_tid->rx_frags); desc = (struct hal_rx_desc *)first_frag->data; encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) return true; last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); skb_queue_walk(&rx_tid->rx_frags, skb) { if (skb == first_frag) continue; cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); if (cur_pn != last_pn + 1) return false; last_pn = cur_pn; } return true; } static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, struct sk_buff *msdu, u32 *ring_desc) { struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; struct sk_buff *defrag_skb = NULL; u32 peer_id; u16 
seqno, frag_no; u8 tid; int ret = 0; bool more_frags; bool is_mcbc; rx_desc = (struct hal_rx_desc *)msdu->data; peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); /* Multicast/Broadcast fragments are not expected */ if (is_mcbc) return -EINVAL; if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || tid > IEEE80211_NUM_TIDS) return -EINVAL; /* Received an unfragmented packet in the REO exception ring; * this shouldn't happen, as these packets typically come from * reo2sw srngs. */ if (WARN_ON_ONCE(!frag_no && !more_frags)) return -EINVAL; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, peer_id); if (!peer) { ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", peer_id); ret = -ENOENT; goto out_unlock; } if (!peer->dp_setup_done) { ath11k_warn(ab, "peer %pM [%d] has an uninitialized datapath\n", peer->addr, peer_id); ret = -ENOENT; goto out_unlock; } rx_tid = &peer->rx_tid[tid]; if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || skb_queue_empty(&rx_tid->rx_frags)) { /* Flush stored fragments and start a new sequence */ ath11k_dp_rx_frags_cleanup(rx_tid, true); rx_tid->cur_sn = seqno; } if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { /* Fragment already present */ ret = -EINVAL; goto out_unlock; } if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); rx_tid->rx_frag_bitmap |= BIT(frag_no); if (!more_frags) rx_tid->last_frag_no = frag_no; if (frag_no == 0) { rx_tid->dst_ring_desc = kmemdup(ring_desc, sizeof(*rx_tid->dst_ring_desc), GFP_ATOMIC); if (!rx_tid->dst_ring_desc) { ret = -ENOMEM; goto out_unlock; } } else { ath11k_dp_rx_link_desc_return(ab, ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } if (!rx_tid->last_frag_no || rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { mod_timer(&rx_tid->frag_timer, jiffies + ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); goto out_unlock; } spin_unlock_bh(&ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, peer_id); if (!peer) goto err_frags_cleanup; if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) goto err_frags_cleanup; if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) goto err_frags_cleanup; if (!defrag_skb) goto err_frags_cleanup; if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) goto err_frags_cleanup; ath11k_dp_rx_frags_cleanup(rx_tid, false); goto out_unlock; err_frags_cleanup: dev_kfree_skb_any(defrag_skb); ath11k_dp_rx_frags_cleanup(rx_tid, true); out_unlock: spin_unlock_bh(&ab->base_lock); return ret; } static int ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) { struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; struct sk_buff *msdu; struct ath11k_skb_rxcb *rxcb; struct hal_rx_desc *rx_desc; u8 *hdr_status; u16 msdu_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
buf_id); spin_unlock_bh(&rx_ring->idr_lock); return -EINVAL; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); if (drop) { dev_kfree_skb_any(msdu); return 0; } rcu_read_lock(); if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { dev_kfree_skb_any(msdu); goto exit; } if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { dev_kfree_skb_any(msdu); goto exit; } rx_desc = (struct hal_rx_desc *)msdu->data; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); ath11k_warn(ar->ab, "invalid msdu len %u\n", msdu_len); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); dev_kfree_skb_any(msdu); goto exit; } skb_put(msdu, hal_rx_desc_sz + msdu_len); if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { dev_kfree_skb_any(msdu); ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } exit: rcu_read_unlock(); return 0; } int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, int budget) { u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; struct dp_link_desc_bank *link_desc_banks; enum hal_rx_buf_return_buf_manager rbm; int tot_n_bufs_reaped, quota, ret, i; int n_bufs_reaped[MAX_RADIOS] = {}; struct dp_rxdma_ring *rx_ring; struct dp_srng *reo_except; u32 desc_bank, num_msdus; struct hal_srng *srng; struct ath11k_dp *dp; void *link_desc_va; int buf_id, mac_id; struct ath11k *ar; dma_addr_t paddr; u32 *desc; bool is_frag; u8 drop = 0; tot_n_bufs_reaped = 0; quota = budget; dp = &ab->dp; reo_except = &dp->reo_except_ring; link_desc_banks = dp->link_desc_banks; srng = &ab->hal.srng_list[reo_except->ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (budget && (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; ab->soc_stats.err_ring_pkts++; ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, &desc_bank); if (ret) { ath11k_warn(ab, "failed to parse error reo desc %d\n", ret); continue; } link_desc_va = link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr); ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && rbm != HAL_RX_BUF_RBM_SW1_BM && rbm != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_REL_MSDU); continue; } is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); /* Process only rx fragments with one msdu per link desc below, and drop * msdus indicated due to error reasons.
*/ if (!is_frag || num_msdus > 1) { drop = 1; /* Return the link desc back to wbm idle list */ ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_cookies[i]); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, msdu_cookies[i]); ar = ab->pdevs[mac_id].ar; if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { n_bufs_reaped[mac_id]++; tot_n_bufs_reaped++; } } if (tot_n_bufs_reaped >= quota) { tot_n_bufs_reaped = quota; goto exit; } budget = quota - tot_n_bufs_reaped; } exit: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); for (i = 0; i < ab->num_radios; i++) { if (!n_bufs_reaped[i]) continue; ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } return tot_n_bufs_reaped; } static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, int msdu_len, struct sk_buff_head *msdu_list) { struct sk_buff *skb, *tmp; struct ath11k_skb_rxcb *rxcb; int n_buffs; n_buffs = DIV_ROUND_UP(msdu_len, (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); skb_queue_walk_safe(msdu_list, skb, tmp) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { if (!n_buffs) break; __skb_unlink(skb, msdu_list); dev_kfree_skb_any(skb); n_buffs--; } } } static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status, struct sk_buff_head *msdu_list) { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; struct rx_attention *rx_attention; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct its length */ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); return -EINVAL; } rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ar->ab, "msdu_done bit not set in null_q_desc processing\n"); __skb_queue_purge(msdu_list); return -EIO; } /* Handle NULL queue descriptor violations arising out of a missing * REO queue for a given peer or a given TID. This typically * may happen if a packet is received on a QoS enabled TID before the * ADDBA negotiation for that TID has set up the TID queue. It may * also happen for MC/BC frames if they are not routed to the * non-QoS TID queue, in the absence of any other default TID queue. * This error can show up in both a REO destination ring and the WBM * release ring. */
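/* Buffer layout handled below for the non-fragment case:
 * [hal rx desc | l3 pad | msdu payload]. skb_put() first exposes
 * hal_rx_desc_sz + l3pad_bytes + msdu_len bytes, then skb_pull()
 * drops the descriptor and pad so exactly msdu_len bytes of payload
 * remain for mac80211.
 */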
rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); if (rxcb->is_frag) { skb_pull(msdu, hal_rx_desc_sz); } else { l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) return -EINVAL; skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } ath11k_dp_rx_h_ppdu(ar, desc, status); ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); /* Note that the caller will have access to the msdu and complete * rx with mac80211; there is no need to worry about cleaning up * amsdu_list. */ return 0; } static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status, struct sk_buff_head *msdu_list) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); bool drop = false; ar->ab->soc_stats.reo_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) drop = true; break; case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: /* TODO: Do not drop PN failed packets in the driver; * instead, such packets should be dropped in mac80211 * after incrementing the replay counters. */ fallthrough; default: /* TODO: Review other errors and report them to mac80211 * as appropriate. */ drop = true; break; } return drop; } static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); ath11k_dp_rx_h_ppdu(ar, desc, status); status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED); ath11k_dp_rx_h_undecap(ar, msdu, desc, HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); } static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); bool drop = false; ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); break; default: /* TODO: Review other rxdma error codes to check if anything is * worth reporting to mac80211 */ drop = true; break; } return drop; } static void ath11k_dp_rx_wbm_err(struct ath11k *ar, struct napi_struct *napi, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_rx_status rxs = {}; bool drop = true; switch (rxcb->err_rel_src) { case HAL_WBM_REL_SRC_MODULE_REO: drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); break; case HAL_WBM_REL_SRC_MODULE_RXDMA: drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); break; default: /* msdu will get freed */ break; } if (drop) { dev_kfree_skb_any(msdu); return; } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); } int ath11k_dp_rx_process_wbm_err(struct 
ath11k_base *ab, struct napi_struct *napi, int budget) { struct ath11k *ar; struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; struct hal_rx_wbm_rel_info err_info; struct hal_srng *srng; struct sk_buff *msdu; struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; u32 *rx_desc; int buf_id, mac_id; int num_buffs_reaped[MAX_RADIOS] = {}; int total_num_buffs_reaped = 0; int ret, i; for (i = 0; i < ab->num_radios; i++) __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (budget) { rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); if (!rx_desc) break; ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); if (ret) { ath11k_warn(ab, "failed to parse rx error in wbm_rel ring desc %d\n", ret); continue; } buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); ar = ab->pdevs[mac_id].ar; rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", buf_id, mac_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; total_num_buffs_reaped++; budget--; if (err_info.push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { dev_kfree_skb_any(msdu); continue; } rxcb->err_rel_src = err_info.err_rel_src; rxcb->err_code = err_info.err_code; rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; __skb_queue_tail(&msdu_list[mac_id], msdu); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (!total_num_buffs_reaped) goto done; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } rcu_read_lock(); for (i = 0; i < ab->num_radios; i++) { if (!rcu_dereference(ab->pdevs_active[i])) { __skb_queue_purge(&msdu_list[i]); continue; } ar = ab->pdevs[i].ar; if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { __skb_queue_purge(&msdu_list[i]); continue; } while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); } rcu_read_unlock(); done: return total_num_buffs_reaped; } int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) { struct ath11k *ar; struct dp_srng *err_ring; struct dp_rxdma_ring *rx_ring; struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; struct hal_srng *srng; u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; enum hal_rx_buf_return_buf_manager rbm; enum hal_reo_entr_rxdma_ecode rxdma_err_code; struct ath11k_skb_rxcb *rxcb; struct sk_buff *skb; struct hal_reo_entrance_ring *entr_ring; void *desc; int num_buf_freed = 0; int quota = budget; dma_addr_t paddr; u32 desc_bank; void *link_desc_va; int num_msdus; int i; int buf_id; ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id)]; rx_ring = &ar->dp.rx_refill_buf_ring; srng = &ab->hal.srng_list[err_ring->ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (quota-- && 
(desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); entr_ring = (struct hal_reo_entrance_ring *)desc; rxdma_err_code = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, entr_ring->info1); ab->soc_stats.rxdma_error[rxdma_err_code]++; link_desc_va = link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr); ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_cookies[i]); spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); if (!skb) { ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(skb); dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); num_buf_freed++; } ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (num_buf_freed) ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, ab->hw_params.hal_params->rx_buf_rbm); return budget - quota; } void ath11k_dp_process_reo_status(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng; struct dp_reo_cmd *cmd, *tmp; bool found = false; u32 *reo_desc; u16 tag; struct hal_reo_status reo_status; srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; memset(&reo_status, 0, sizeof(reo_status)); spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); switch (tag) { case HAL_REO_GET_QUEUE_STATS_STATUS: ath11k_hal_reo_status_queue_stats(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_QUEUE_STATUS: ath11k_hal_reo_flush_queue_status(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_CACHE_STATUS: ath11k_hal_reo_flush_cache_status(ab, reo_desc, &reo_status); break; case HAL_REO_UNBLOCK_CACHE_STATUS: ath11k_hal_reo_unblk_cache_status(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, &reo_status); break; case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, &reo_status); break; case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, &reo_status); break; default: ath11k_warn(ab, "Unknown reo status type %d\n", tag); continue; } spin_lock_bh(&dp->reo_cmd_lock); list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { found = true; list_del(&cmd->list); break; } } spin_unlock_bh(&dp->reo_cmd_lock); if (found) { cmd->handler(dp, (void *)&cmd->data, reo_status.uniform_hdr.cmd_status); kfree(cmd); } found = false; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); } void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) { struct ath11k *ar = ab->pdevs[mac_id].ar; ath11k_dp_rx_pdev_srng_free(ar); ath11k_dp_rxdma_pdev_buf_free(ar); } int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; u32 ring_id; int i; int ret; ret = ath11k_dp_rx_pdev_srng_alloc(ar); if (ret) { ath11k_warn(ab, "failed to setup rx srngs\n"); return ret; 
} ret = ath11k_dp_rxdma_pdev_buf_setup(ar); if (ret) { ath11k_warn(ab, "failed to setup rxdma ring\n"); return ret; } ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); if (ret) { ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", ret); return ret; } if (ab->hw_params.rx_mac_buf_ring) { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_BUF); if (ret) { ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", i, ret); return ret; } } } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rxdma_err_dst_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_DST); if (ret) { ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", i, ret); return ret; } } if (!ab->hw_params.rxdma1_enable) goto config_refill_ring; ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_MONITOR_BUF); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", ret); return ret; } ret = ath11k_dp_tx_htt_srng_setup(ab, dp->rxdma_mon_dst_ring.ring_id, mac_id, HAL_RXDMA_MONITOR_DST); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", ret); return ret; } ret = ath11k_dp_tx_htt_srng_setup(ab, dp->rxdma_mon_desc_ring.ring_id, mac_id, HAL_RXDMA_MONITOR_DESC); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n", ret); return ret; } config_refill_ring: for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_MONITOR_STATUS); if (ret) { ath11k_warn(ab, "failed to configure mon_status_refill_ring%d %d\n", i, ret); return ret; } } return 0; } static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) { if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); *total_len -= *frag_len; } else { *frag_len = *total_len; *total_len = 0; } } static int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, void *p_last_buf_addr_info, u8 mac_id) { struct ath11k_pdev_dp *dp = &ar->dp; struct dp_srng *dp_srng; void *hal_srng; void *src_srng_desc; int ret = 0; if (ar->ab->hw_params.rxdma1_enable) { dp_srng = &dp->rxdma_mon_desc_ring; hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; } else { dp_srng = &ar->ab->dp.wbm_desc_rel_ring; hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; } ath11k_hal_srng_access_begin(ar->ab, hal_srng); src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); if (src_srng_desc) { struct ath11k_buffer_addr *src_desc = src_srng_desc; *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); } else { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "Monitor Link Desc Ring %d Full", mac_id); ret = -ENOMEM; } ath11k_hal_srng_access_end(ar->ab, hal_srng); return ret; } static void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm, void **pp_buf_addr_info) { struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc; struct ath11k_buffer_addr *buf_addr_info; buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); *pp_buf_addr_info = (void *)buf_addr_info; }
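/* The monitor destination path below walks MSDU link descriptors as a
 * chain: each hal_rx_msdu_link holds a batch of MSDU buffer pointers
 * plus a buf_addr_info pointing at the next link descriptor. A minimal
 * sketch of the traversal pattern (bank_vaddr(), bank_paddr() and
 * consume_msdus() are hypothetical placeholders for the
 * link_desc_banks lookup and the per-MSDU handling done inline in
 * ath11k_dp_rx_mon_mpdu_pop()):
 *
 *	static void walk_msdu_link_chain(struct ath11k *ar, dma_addr_t paddr,
 *					 u32 sw_cookie)
 *	{
 *		void *link_va, *next_buf_addr_info;
 *		u8 rbm;
 *
 *		while (paddr) {
 *			// Translate the DMA address to the CPU view of the bank
 *			link_va = bank_vaddr(sw_cookie) +
 *				  (paddr - bank_paddr(sw_cookie));
 *			consume_msdus(ar, link_va);
 *			// Fetch the address/cookie of the next link descriptor
 *			ath11k_dp_rx_mon_next_link_desc_get(link_va, &paddr,
 *							    &sw_cookie, &rbm,
 *							    &next_buf_addr_info);
 *		}
 *	}
 */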
static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) { if (skb->len > len) { skb_trim(skb, len); } else { if (skb_tailroom(skb) < len - skb->len) { if ((pskb_expand_head(skb, 0, len - skb->len - skb_tailroom(skb), GFP_ATOMIC))) { dev_kfree_skb_any(skb); return -ENOMEM; } } skb_put(skb, (len - skb->len)); } return 0; } static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, void *msdu_link_desc, struct hal_rx_msdu_list *msdu_list, u16 *num_msdus) { struct hal_rx_msdu_details *msdu_details = NULL; struct rx_msdu_desc *msdu_desc_info = NULL; struct hal_rx_msdu_link *msdu_link = NULL; int i; u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); u8 tmp = 0; msdu_link = msdu_link_desc; msdu_details = &msdu_link->msdu_link[0]; for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, msdu_details[i].buf_addr_info.info0) == 0) { msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; msdu_desc_info->info0 |= last; break; } msdu_desc_info = &msdu_details[i].rx_msdu_info; if (!i) msdu_desc_info->info0 |= first; else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) msdu_desc_info->info0 |= last; msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; msdu_list->msdu_info[i].msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); msdu_list->sw_cookie[i] = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, msdu_details[i].buf_addr_info.info1); tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, msdu_details[i].buf_addr_info.info1); msdu_list->rbm[i] = tmp; } *num_msdus = i; } static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, u32 *rx_bufs_used) { u32 ret = 0; if ((*ppdu_id < msdu_ppdu_id) && ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { *ppdu_id = msdu_ppdu_id; ret = msdu_ppdu_id; } else if ((*ppdu_id > msdu_ppdu_id) && ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { /* mon_dst is lagging behind mon_status, * so skip this dst_ring entry and free it */ *rx_bufs_used += 1; *ppdu_id = msdu_ppdu_id; ret = msdu_ppdu_id; } return ret; } static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, bool *is_frag, u32 *total_len, u32 *frag_len, u32 *msdu_cnt) { if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { if (!*is_frag) { *total_len = info->msdu_len; *is_frag = true; } ath11k_dp_mon_set_frag_len(total_len, frag_len); } else { if (*is_frag) { ath11k_dp_mon_set_frag_len(total_len, frag_len); } else { *frag_len = info->msdu_len; } *is_frag = false; *msdu_cnt -= 1; } } /* clang stack usage explodes if this is inlined */ static noinline_for_stack u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, void *ring_entry, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, u32 *npackets, u32 *ppdu_id) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; struct sk_buff *msdu = NULL, *last = NULL; struct hal_rx_msdu_list msdu_list; void *p_buf_addr_info, *p_last_buf_addr_info; struct hal_rx_desc *rx_desc; void *rx_msdu_link_desc; dma_addr_t paddr; u16 num_msdus = 0; u32 rx_buf_size, rx_pkt_offset, sw_cookie; u32 rx_bufs_used = 0, i = 0; u32 msdu_ppdu_id = 0, msdu_cnt = 0; u32 total_len = 0, frag_len = 0; bool is_frag, is_first_msdu; bool drop_mpdu = false; struct ath11k_skb_rxcb *rxcb; struct hal_reo_entrance_ring *ent_desc = ring_entry; int buf_id; u32 rx_link_buf_info[2]; u8 rbm; if (!ar->ab->hw_params.rxdma1_enable) rx_ring =
&dp->rx_refill_buf_ring; ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, &sw_cookie, &p_last_buf_addr_info, &rbm, &msdu_cnt); if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, ent_desc->info1) == HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { u8 rxdma_err = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, ent_desc->info1); if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { drop_mpdu = true; pmon->rx_mon_stats.dest_mpdu_drop++; } } is_frag = false; is_first_msdu = true; do { if (pmon->mon_last_linkdesc_paddr == paddr) { pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; return rx_bufs_used; } if (ar->ab->hw_params.rxdma1_enable) rx_msdu_link_desc = (void *)pmon->link_desc_banks[sw_cookie].vaddr + (paddr - pmon->link_desc_banks[sw_cookie].paddr); else rx_msdu_link_desc = (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, &num_msdus); for (i = 0; i < num_msdus; i++) { u32 l2_hdr_offset; if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "i %d last_cookie %d is same\n", i, pmon->mon_last_buf_cookie); drop_mpdu = true; pmon->rx_mon_stats.dup_mon_buf_cnt++; continue; } buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_list.sw_cookie[i]); spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "msdu_pop: invalid buf_id %d\n", buf_id); goto next_msdu; } rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); rxcb->unmapped = 1; } if (drop_mpdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "i %d drop msdu %p *ppdu_id %x\n", i, msdu, *ppdu_id); dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; pmon->mon_last_linkdesc_paddr = paddr; goto next_msdu; } msdu_ppdu_id = ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, ppdu_id, &rx_bufs_used)) { if (rx_bufs_used) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } return rx_bufs_used; } pmon->mon_last_linkdesc_paddr = paddr; is_first_msdu = false; } ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], &is_frag, &total_len, &frag_len, &msdu_cnt); rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); if (!(*head_msdu)) *head_msdu = msdu; else if (last) last->next = msdu; last = msdu; next_msdu: pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; rx_bufs_used++; spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); } ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, &sw_cookie, &rbm, &p_buf_addr_info); if (ar->ab->hw_params.rxdma1_enable) { if (ath11k_dp_rx_monitor_link_desc_return(ar, p_last_buf_addr_info, dp->mac_id)) ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "dp_rx_monitor_link_desc_return failed"); } else { 
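/* Without a dedicated monitor DMA engine (rxdma1_enable == false) the
 * consumed link descriptor goes back to the common WBM idle list
 * instead of the per-pdev monitor descriptor ring; rx_link_buf_info
 * was repacked above via ath11k_hal_rx_buf_addr_info_set() for
 * exactly this purpose.
 */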
ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } p_last_buf_addr_info = p_buf_addr_info; } while (paddr && msdu_cnt); if (last) last->next = NULL; *tail_msdu = msdu; if (msdu_cnt == 0) *npackets = 1; return rx_bufs_used; } static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) { u32 rx_pkt_offset, l2_hdr_offset; rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, (struct hal_rx_desc *)msdu->data); skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); } static struct sk_buff * ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct sk_buff *last_msdu, struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *prev_buf; struct hal_rx_desc *rx_desc; char *hdr_desc; u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; struct rx_attention *rx_attention; u32 err_bitmap; if (!head_msdu) goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (err_bitmap & DP_RX_MPDU_ERR_FCS) *fcs_err = true; if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { ath11k_dp_rx_msdus_set_payload(ar, head_msdu); prev_buf = head_msdu; msdu = head_msdu->next; while (msdu) { ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = msdu->next; } prev_buf->next = NULL; skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { u8 qos_pkt = 0; rx_desc = (struct hal_rx_desc *)head_msdu->data; hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); /* Base size */ wh = (struct ieee80211_hdr_3addr *)hdr_desc; if (ieee80211_is_data_qos(wh->frame_control)) qos_pkt = 1; msdu = head_msdu; while (msdu) { ath11k_dp_rx_msdus_set_payload(ar, msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) goto err_merge_fail; memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr)); } prev_buf = msdu; msdu = msdu->next; } dest = skb_put(prev_buf, HAL_RX_FCS_LEN); if (!dest) goto err_merge_fail; ath11k_dbg(ab, ATH11K_DBG_DATA, "mpdu_buf %p mpdu_buf->len %u", prev_buf, prev_buf->len); } else { ath11k_dbg(ab, ATH11K_DBG_DATA, "decap format %d is not supported!\n", decap_format); goto err_merge_fail; } return head_msdu; err_merge_fail: return NULL; } static void ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status, u8 *rtap_buf) { u32 rtap_len = 0; put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); } static void ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status, u8 *rtap_buf) { u32 rtap_len = 0; put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); rtap_len += 2; rtap_buf[rtap_len] = rx_status->he_RU[0]; rtap_len += 1; rtap_buf[rtap_len] = rx_status->he_RU[1]; rtap_len += 1; 
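/* Radiotap fields are little-endian and the headroom this is written
 * into carries no alignment guarantee, hence put_unaligned_le16() for
 * the 16-bit words above (e.g. he_flags1 == 0x1234 lands in the buffer
 * as the byte sequence 0x34, 0x12) while the per-RU bytes below can be
 * stored directly.
 */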
rtap_buf[rtap_len] = rx_status->he_RU[2]; rtap_len += 1; rtap_buf[rtap_len] = rx_status->he_RU[3]; } static void ath11k_update_radiotap(struct ath11k *ar, struct hal_rx_mon_ppdu_info *ppduinfo, struct sk_buff *mon_skb, struct ieee80211_rx_status *rxs) { struct ieee80211_supported_band *sband; u8 *ptr = NULL; rxs->flag |= RX_FLAG_MACTIME_START; rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; if (ppduinfo->nss) rxs->nss = ppduinfo->nss; if (ppduinfo->he_mu_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr); } else if (ppduinfo->he_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he)); ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr); rxs->rate_idx = ppduinfo->rate; } else if (ppduinfo->vht_flags) { rxs->encoding = RX_ENC_VHT; rxs->rate_idx = ppduinfo->rate; } else if (ppduinfo->ht_flags) { rxs->encoding = RX_ENC_HT; rxs->rate_idx = ppduinfo->rate; } else { rxs->encoding = RX_ENC_LEGACY; sband = &ar->mac.sbands[rxs->band]; rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate, ppduinfo->cck_flag); } rxs->mactime = ppduinfo->tsft; } static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct sk_buff *tail_msdu, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; struct ieee80211_rx_status *rxs = &dp->rx_status; bool fcs_err = false; mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; header = mon_skb; rxs->flag = 0; if (fcs_err) rxs->flag = RX_FLAG_FAILED_FCS_CRC; do { skb_next = mon_skb->next; if (!skb_next) rxs->flag &= ~RX_FLAG_AMSDU_MORE; else rxs->flag |= RX_FLAG_AMSDU_MORE; if (mon_skb == header) { header = NULL; rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; } else { rxs->flag |= RX_FLAG_ALLOW_SAME_PN; } rxs->flag |= RX_FLAG_ONLY_MONITOR; ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs); ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); mon_skb = skb_next; } while (mon_skb); rxs->flag = 0; return 0; mon_deliver_fail: mon_skb = head_msdu; while (mon_skb) { skb_next = mon_skb->next; dev_kfree_skb_any(mon_skb); mon_skb = skb_next; } return -EINVAL; } /* The destination ring processing is stuck if the destination is not * moving while the status ring moves 16 PPDUs. The destination ring processing * skips this destination ring PPDU as a workaround.
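 * For example: if the destination ring yields no buffers
 * (mpdu_rx_bufs_used == 0) for more than MON_DEST_RING_STUCK_MAX_CNT
 * consecutive status-ring PPDUs, mon_ppdu_info.ppdu_id is resynced to
 * the destination ring's ppdu_id so that processing can make progress
 * again.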
*/ #define MON_DEST_RING_STUCK_MAX_CNT 16 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, u32 quota, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; const struct ath11k_hw_hal_params *hal_params; void *ring_entry; struct hal_srng *mon_dst_srng; u32 ppdu_id; u32 rx_bufs_used; u32 ring_id; struct ath11k_pdev_mon_stats *rx_mon_stats; u32 npackets = 0; u32 mpdu_rx_bufs_used; if (ar->ab->hw_params.rxdma1_enable) ring_id = dp->rxdma_mon_dst_ring.ring_id; else ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; spin_lock_bh(&pmon->mon_lock); spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); ppdu_id = pmon->mon_ppdu_info.ppdu_id; rx_bufs_used = 0; rx_mon_stats = &pmon->rx_mon_stats; while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { struct sk_buff *head_msdu, *tail_msdu; head_msdu = NULL; tail_msdu = NULL; mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, &head_msdu, &tail_msdu, &npackets, &ppdu_id); rx_bufs_used += mpdu_rx_bufs_used; if (mpdu_rx_bufs_used) { dp->mon_dest_ring_stuck_cnt = 0; } else { dp->mon_dest_ring_stuck_cnt++; rx_mon_stats->dest_mon_not_reaped++; } if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) { rx_mon_stats->dest_mon_stuck++; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n", pmon->mon_ppdu_info.ppdu_id, ppdu_id, dp->mon_dest_ring_stuck_cnt, rx_mon_stats->dest_mon_not_reaped, rx_mon_stats->dest_mon_stuck); pmon->mon_ppdu_info.ppdu_id = ppdu_id; continue; } if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { pmon->mon_ppdu_status = DP_PPDU_STATUS_START; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n", ppdu_id, pmon->mon_ppdu_info.ppdu_id, rx_mon_stats->dest_mon_not_reaped, rx_mon_stats->dest_mon_stuck); break; } if (head_msdu && tail_msdu) { ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, &pmon->mon_ppdu_info, tail_msdu, napi); rx_mon_stats->dest_mpdu_done++; } ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, mon_dst_srng); } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); if (rx_bufs_used) { rx_mon_stats->dest_ppdu_done++; hal_params = ar->ab->hw_params.hal_params; if (ar->ab->hw_params.rxdma1_enable) ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rxdma_mon_buf_ring, rx_bufs_used, hal_params->rx_buf_rbm); else ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rx_refill_buf_ring, rx_bufs_used, hal_params->rx_buf_rbm); } } int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); enum hal_rx_mon_status hal_status; struct sk_buff *skb; struct sk_buff_head skb_list; struct ath11k_peer *peer; struct ath11k_sta *arsta; int num_buffs_reaped = 0; u32 rx_buf_sz; u16 log_type; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data; struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; __skb_queue_head_init(&skb_list); num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, &skb_list); if (!num_buffs_reaped) goto exit; memset(ppdu_info, 0, sizeof(*ppdu_info)); 
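/* The loop below is a small state machine driven by the status-ring
 * TLVs: mon_ppdu_status moves from DP_PPDU_STATUS_START to
 * DP_PPDU_STATUS_DONE once a HAL_TLV_STATUS_PPDU_DONE TLV is parsed,
 * at which point the destination ring is reaped for the matching PPDU
 * and the state is reset to START for the next PPDU:
 *
 *	START --(HAL_TLV_STATUS_PPDU_DONE parsed)--> DONE
 *	DONE  --(dest ring processed)--------------> START
 */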
ppdu_info->peer_id = HAL_INVALID_PEERID; while ((skb = __skb_dequeue(&skb_list))) { if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { log_type = ATH11K_PKTLOG_TYPE_LITE_RX; rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; rx_buf_sz = DP_RX_BUFFER_SIZE; } else { log_type = ATH11K_PKTLOG_TYPE_INVALID; rx_buf_sz = 0; } if (log_type != ATH11K_PKTLOG_TYPE_INVALID) trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); memset(ppdu_info, 0, sizeof(*ppdu_info)); ppdu_info->peer_id = HAL_INVALID_PEERID; hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb); if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && pmon->mon_ppdu_status == DP_PPDU_STATUS_START && hal_status == HAL_TLV_STATUS_PPDU_DONE) { rx_mon_stats->status_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; if (!ab->hw_params.full_monitor_mode) { ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi); pmon->mon_ppdu_status = DP_PPDU_STATUS_START; } } if (ppdu_info->peer_id == HAL_INVALID_PEERID || hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { dev_kfree_skb_any(skb); continue; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id); if (!peer || !peer->sta) { ath11k_dbg(ab, ATH11K_DBG_DATA, "failed to find the peer with peer_id %d\n", ppdu_info->peer_id); goto next_skb; } arsta = ath11k_sta_to_arsta(peer->sta); ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); next_skb: spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); dev_kfree_skb_any(skb); memset(ppdu_info, 0, sizeof(*ppdu_info)); ppdu_info->peer_id = HAL_INVALID_PEERID; } exit: return num_buffs_reaped; } static u32 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, void *ring_entry, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, struct hal_sw_mon_ring_entries *sw_mon_entries) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; struct sk_buff *msdu = NULL, *last = NULL; struct hal_sw_monitor_ring *sw_desc = ring_entry; struct hal_rx_msdu_list msdu_list; struct hal_rx_desc *rx_desc; struct ath11k_skb_rxcb *rxcb; void *rx_msdu_link_desc; void *p_buf_addr_info, *p_last_buf_addr_info; int buf_id, i = 0; u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset; u32 rx_bufs_used = 0, msdu_cnt = 0; u32 total_len = 0, frag_len = 0, sw_cookie; u16 num_msdus = 0; u8 rxdma_err, rbm; bool is_frag, is_first_msdu; bool drop_mpdu = false; ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries); sw_cookie = sw_mon_entries->mon_dst_sw_cookie; sw_mon_entries->end_of_ppdu = false; sw_mon_entries->drop_ppdu = false; p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info; msdu_cnt = sw_mon_entries->msdu_cnt; sw_mon_entries->end_of_ppdu = FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0); if (sw_mon_entries->end_of_ppdu) return rx_bufs_used; if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON, sw_desc->info0) == HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { rxdma_err = FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE, sw_desc->info0); if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { pmon->rx_mon_stats.dest_mpdu_drop++; drop_mpdu = true; } } is_frag = false; is_first_msdu 
= true; do { rx_msdu_link_desc = (u8 *)pmon->link_desc_banks[sw_cookie].vaddr + (sw_mon_entries->mon_dst_paddr - pmon->link_desc_banks[sw_cookie].paddr); ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, &num_msdus); for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_list.sw_cookie[i]); spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon msdu_pop: invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); goto next_msdu; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); rxcb->unmapped = 1; } if (drop_mpdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: i %d drop msdu %p *ppdu_id %x\n", i, msdu, sw_mon_entries->ppdu_id); dev_kfree_skb_any(msdu); msdu_cnt--; goto next_msdu; } rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } is_first_msdu = false; } ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], &is_frag, &total_len, &frag_len, &msdu_cnt); rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); if (!(*head_msdu)) *head_msdu = msdu; else if (last) last->next = msdu; last = msdu; next_msdu: rx_bufs_used++; } ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &sw_mon_entries->mon_dst_paddr, &sw_mon_entries->mon_dst_sw_cookie, &rbm, &p_buf_addr_info); if (ath11k_dp_rx_monitor_link_desc_return(ar, p_last_buf_addr_info, dp->mac_id)) ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: dp_rx_monitor_link_desc_return failed\n"); p_last_buf_addr_info = p_buf_addr_info; } while (sw_mon_entries->mon_dst_paddr && msdu_cnt); if (last) last->next = NULL; *tail_msdu = msdu; return rx_bufs_used; } static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp, struct dp_full_mon_mpdu *mon_mpdu, struct sk_buff *head, struct sk_buff *tail) { mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); if (!mon_mpdu) return -ENOMEM; list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list); mon_mpdu->head = head; mon_mpdu->tail = tail; return 0; } static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp, struct dp_full_mon_mpdu *mon_mpdu) { struct dp_full_mon_mpdu *tmp; struct sk_buff *tmp_msdu, *skb_next; if (list_empty(&dp->dp_full_mon_mpdu_list)) return; list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { list_del(&mon_mpdu->list); tmp_msdu = mon_mpdu->head; while (tmp_msdu) { skb_next = tmp_msdu->next; dev_kfree_skb_any(tmp_msdu); tmp_msdu = skb_next; } kfree(mon_mpdu); } } static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar, int mac_id, struct ath11k_mon_data *pmon, struct napi_struct *napi) { struct ath11k_pdev_mon_stats *rx_mon_stats; struct dp_full_mon_mpdu *tmp; struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu; struct sk_buff *head_msdu, *tail_msdu; struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; int ret = 0; rx_mon_stats = &pmon->rx_mon_stats; list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { list_del(&mon_mpdu->list); head_msdu = mon_mpdu->head; tail_msdu = mon_mpdu->tail; if (head_msdu && tail_msdu) { ret =
ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu, &pmon->mon_ppdu_info, tail_msdu, napi); rx_mon_stats->dest_mpdu_done++; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n"); } kfree(mon_mpdu); } return ret; } static int ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_sw_mon_ring_entries *sw_mon_entries; int quota = 0, work = 0, count; sw_mon_entries = &pmon->sw_mon_entries; while (pmon->hold_mon_dst_ring) { quota = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, 1); if (pmon->buf_state == DP_MON_STATUS_MATCH) { count = sw_mon_entries->status_buf_count; if (count > 1) { quota += ath11k_dp_rx_process_mon_status(ab, mac_id, napi, count); } ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id, pmon, napi); pmon->hold_mon_dst_ring = false; } else if (!pmon->mon_status_paddr || pmon->buf_state == DP_MON_STATUS_LEAD) { sw_mon_entries->drop_ppdu = true; pmon->hold_mon_dst_ring = false; } if (!quota) break; work += quota; } if (sw_mon_entries->drop_ppdu) ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu); return work; } static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_sw_mon_ring_entries *sw_mon_entries; struct ath11k_pdev_mon_stats *rx_mon_stats; struct sk_buff *head_msdu, *tail_msdu; struct hal_srng *mon_dst_srng; void *ring_entry; u32 rx_bufs_used = 0, mpdu_rx_bufs_used; int quota = 0, ret; bool break_dst_ring = false; spin_lock_bh(&pmon->mon_lock); sw_mon_entries = &pmon->sw_mon_entries; rx_mon_stats = &pmon->rx_mon_stats; if (pmon->hold_mon_dst_ring) { spin_unlock_bh(&pmon->mon_lock); goto reap_status_ring; } mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { head_msdu = NULL; tail_msdu = NULL; mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry, &head_msdu, &tail_msdu, sw_mon_entries); rx_bufs_used += mpdu_rx_bufs_used; if (!sw_mon_entries->end_of_ppdu) { if (head_msdu) { ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp, pmon->mon_mpdu, head_msdu, tail_msdu); if (ret) break_dst_ring = true; } goto next_entry; } else { if (!sw_mon_entries->ppdu_id && !sw_mon_entries->mon_status_paddr) { break_dst_ring = true; goto next_entry; } } rx_mon_stats->dest_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_START; pmon->buf_state = DP_MON_STATUS_LAG; pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr; pmon->hold_mon_dst_ring = true; next_entry: ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, mon_dst_srng); if (break_dst_ring) break; } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); if (rx_bufs_used) { ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rxdma_mon_buf_ring, rx_bufs_used, HAL_RX_BUF_RBM_SW3_BM); } reap_status_ring: quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id, napi, budget); return quota; } int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); int ret = 0; if 
(test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && ab->hw_params.full_monitor_mode) ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget); else ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); return ret; } static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; pmon->mon_ppdu_status = DP_PPDU_STATUS_START; memset(&pmon->rx_mon_stats, 0, sizeof(pmon->rx_mon_stats)); return 0; } int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_srng *mon_desc_srng = NULL; struct dp_srng *dp_srng; int ret = 0; u32 n_link_desc = 0; ret = ath11k_dp_rx_pdev_mon_status_attach(ar); if (ret) { ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); return ret; } /* if rxdma1_enable is false, no need to setup * rxdma_mon_desc_ring. */ if (!ar->ab->hw_params.rxdma1_enable) return 0; dp_srng = &dp->rxdma_mon_desc_ring; n_link_desc = dp_srng->size / ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); mon_desc_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, HAL_RXDMA_MONITOR_DESC, mon_desc_srng, n_link_desc); if (ret) { ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); return ret; } pmon->mon_last_linkdesc_paddr = 0; pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; spin_lock_init(&pmon->mon_lock); return 0; } static int ath11k_dp_mon_link_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, HAL_RXDMA_MONITOR_DESC, &dp->rxdma_mon_desc_ring); return 0; } int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) { ath11k_dp_mon_link_free(ar); return 0; } int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) { /* start reap timer */ mod_timer(&ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); return 0; } int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) { int ret; if (stop_timer) timer_delete_sync(&ab->mon_reap_timer); /* reap all the monitor related rings */ ret = ath11k_dp_purge_mon_ring(ab); if (ret) { ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); return ret; } return 0; } diff --git a/hal.c b/hal.c index 0c3ce7509ab8..0c797b8d0a27 100644 --- a/hal.c +++ b/hal.c @@ -1,1439 +1,1455 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include #include #include "hal_tx.h" #include "debug.h" #include "hal_desc.h" #include "hif.h" static const struct hal_srng_config hw_srng_config_template[] = { /* TODO: max_rings can be populated by querying HW capabilities */ { /* REO_DST */ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, .max_rings = 4, .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, }, { /* REO_EXCEPTION */ /* Designating REO2TCL ring as exception ring. This ring is * similar to other REO2SW rings though it is named as REO2TCL. * Any of the REO2SW rings can be used as exception ring.
*/ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL, .max_rings = 1, .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE, }, { /* REO_REINJECT */ .start_ring_id = HAL_SRNG_RING_ID_SW2REO, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, }, { /* REO_CMD */ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_reo_get_queue_stats)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, }, { /* REO_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* TCL_DATA */ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, .max_rings = 3, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_data_cmd)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, }, { /* TCL_CMD */ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_gse_cmd)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, }, { /* TCL_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_status_ring)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* CE_SRC */ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, .max_rings = 12, .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST */ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, .max_rings = 12, .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, .max_rings = 12, .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* WBM_IDLE_LINK */ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, }, { /* SW2WBM_RELEASE */ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* WBM2SW_RELEASE */ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, .max_rings = 5, .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* RXDMA_BUF */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF, .max_rings = 2, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_DST */ 
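/* As elsewhere in this template, entry_size is in units of 32-bit
 * words, hence the sizeof() >> 2 pattern (e.g. a 64-byte descriptor
 * yields an entry_size of 16). Rings with lmac_ring set are serviced
 * by the firmware LMAC, so their head/tail pointer updates go through
 * shared memory rather than MMIO (see ath11k_hal_srng_access_end()).
 */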
.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_BUF */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_DST */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_DESC */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA DIR BUF */ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, .max_rings = 1, .entry_size = 8 >> 2, /* TODO: Define the struct */ .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, }; static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr, GFP_KERNEL); if (!hal->rdp.vaddr) return -ENOMEM; return 0; } static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; if (!hal->rdp.vaddr) return; size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; dma_free_coherent(ab->dev, size, hal->rdp.vaddr, hal->rdp.paddr); hal->rdp.vaddr = NULL; } static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS; hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr, GFP_KERNEL); if (!hal->wrp.vaddr) return -ENOMEM; return 0; } static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; if (!hal->wrp.vaddr) return; size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS; dma_free_coherent(ab->dev, size, hal->wrp.vaddr, hal->wrp.paddr); hal->wrp.vaddr = NULL; } static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab, struct hal_srng *srng, int ring_num) { struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST]; u32 addr; u32 val; addr = HAL_CE_DST_RING_CTRL + srng_config->reg_start[HAL_SRNG_REG_GRP_R0] + ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0]; val = ath11k_hif_read32(ab, addr); val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN; val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN, srng->u.dst_ring.max_buffer_length); ath11k_hif_write32(ab, addr, val); } static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, struct hal_srng *srng) { struct ath11k_hal *hal = &ab->hal; u32 val; u64 hp_addr; u32 reg_base; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab), srng->msi_addr); val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE; 
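/* DMA addresses wider than 32 bits are split across an LSB register
 * and an MSB field packed into a control word. A worked example,
 * assuming the usual 32-bit split (HAL_ADDR_MSB_REG_SHIFT == 32):
 * msi_addr == 0x123456780 is programmed as LSB 0x23456780 with 0x1
 * placed in the MSI1_BASE_MSB_ADDR field prepared above.
 */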
ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab), srng->msi_data); } ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val); val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) | FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val); /* interrupt setup */ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD, (srng->intr_timer_thres_us >> 3)); val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab), val); hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr - (unsigned long)hal->rdp.vaddr); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab), hp_addr & HAL_ADDR_LSB_REG_MASK); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab), hp_addr >> HAL_ADDR_MSB_REG_SHIFT); /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; ath11k_hif_write32(ab, reg_base, 0); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0); *srng->u.dst_ring.hp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; val = 0; if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP; if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) val |= HAL_REO1_RING_MISC_HOST_FW_SWAP; if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) val |= HAL_REO1_RING_MISC_MSI_SWAP; val |= HAL_REO1_RING_MISC_SRNG_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val); } static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, struct hal_srng *srng) { struct ath11k_hal *hal = &ab->hal; u32 val; u64 tp_addr; u32 reg_base; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab), srng->msi_addr); val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab), val); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(ab), srng->msi_data); } ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val); val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val); if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) { ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val); } /* 
interrupt setup */ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in * units of 8 usecs instead of 1 usec (as required by v1). */ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD, srng->intr_timer_thres_us); val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab), val); val = 0; if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) { val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD, srng->u.src_ring.low_threshold); } ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab), val); if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) { tp_addr = hal->rdp.paddr + ((unsigned long)srng->u.src_ring.tp_addr - (unsigned long)hal->rdp.vaddr); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab), tp_addr & HAL_ADDR_LSB_REG_MASK); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab), tp_addr >> HAL_ADDR_MSB_REG_SHIFT); } /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; ath11k_hif_write32(ab, reg_base, 0); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0); *srng->u.src_ring.tp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; val = 0; if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP; if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP; if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) val |= HAL_TCL1_RING_MISC_MSI_SWAP; /* Loop count is not used for SRC rings */ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE; val |= HAL_TCL1_RING_MISC_SRNG_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val); } static void ath11k_hal_srng_hw_init(struct ath11k_base *ab, struct hal_srng *srng) { if (srng->ring_dir == HAL_SRNG_DIR_SRC) ath11k_hal_srng_src_hw_init(ab, srng); else ath11k_hal_srng_dst_hw_init(ab, srng); } static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab, enum hal_ring_type type, int ring_num, int mac_id) { struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; int ring_id; if (ring_num >= srng_config->max_rings) { ath11k_warn(ab, "invalid ring number: %d\n", ring_num); return -EINVAL; } ring_id = srng_config->start_ring_id + ring_num; if (srng_config->lmac_ring) ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC; if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX)) return -EINVAL; return ring_id; } int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type) { struct hal_srng_config *srng_config; if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) return -EINVAL; srng_config = &ab->hal.srng_config[ring_type]; return (srng_config->entry_size << 2); } int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type) { struct hal_srng_config *srng_config; if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) return -EINVAL; srng_config = &ab->hal.srng_config[ring_type]; return (srng_config->max_size / srng_config->entry_size); } void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, struct hal_srng_params *params) { params->ring_base_paddr = srng->ring_base_paddr; params->ring_base_vaddr = srng->ring_base_vaddr; params->num_entries = srng->num_entries; params->intr_timer_thres_us = srng->intr_timer_thres_us; params->intr_batch_cntr_thres_entries = srng->intr_batch_cntr_thres_entries; params->low_threshold = srng->u.src_ring.low_threshold;
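/* low_threshold lives in the src_ring arm of the ring-state union and
 * is only meaningful when ring_dir == HAL_SRNG_DIR_SRC; for
 * destination rings this snapshot simply copies don't-care bytes.
 */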
params->msi_addr = srng->msi_addr; params->msi_data = srng->msi_data; params->flags = srng->flags; } dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab, struct hal_srng *srng) { if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) return 0; if (srng->ring_dir == HAL_SRNG_DIR_SRC) return ab->hal.wrp.paddr + ((unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->hal.wrp.vaddr); else return ab->hal.rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr - (unsigned long)ab->hal.rdp.vaddr); } dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab, struct hal_srng *srng) { if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) return 0; if (srng->ring_dir == HAL_SRNG_DIR_SRC) return ab->hal.rdp.paddr + ((unsigned long)srng->u.src_ring.tp_addr - (unsigned long)ab->hal.rdp.vaddr); else return ab->hal.wrp.paddr + ((unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->hal.wrp.vaddr); } u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type) { switch (type) { case HAL_CE_DESC_SRC: return sizeof(struct hal_ce_srng_src_desc); case HAL_CE_DESC_DST: return sizeof(struct hal_ce_srng_dest_desc); case HAL_CE_DESC_DST_STATUS: return sizeof(struct hal_ce_srng_dst_status_desc); } return 0; } void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data) { struct hal_ce_srng_src_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP, byte_swap_data) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len); desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id); } void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) { struct hal_ce_srng_dest_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)); } u32 ath11k_hal_ce_dst_status_get_length(void *buf) { struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN; return len; } void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, dma_addr_t paddr) { desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, (paddr & HAL_ADDR_LSB_REG_MASK)); desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) | FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie); } u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) return (srng->ring_base_vaddr + srng->u.dst_ring.tp); return NULL; } static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab, struct hal_srng *srng, dma_addr_t *paddr) { lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) { *paddr = srng->ring_base_paddr + sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp; return srng->ring_base_vaddr + srng->u.dst_ring.tp; } return NULL; } static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab, struct hal_srng *srng) { dma_addr_t desc_paddr; u32 *desc; /* prefetch only if desc is available */ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr); if (likely(desc)) { dma_sync_single_for_cpu(ab->dev, desc_paddr, 
(srng->entry_size * sizeof(u32)), DMA_FROM_DEVICE); prefetch(desc); } } u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp) return NULL; desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; srng->u.dst_ring.tp += srng->entry_size; /* wrap around to start of ring */ if (srng->u.dst_ring.tp == srng->ring_size) srng->u.dst_ring.tp = 0; /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) ath11k_hal_srng_prefetch_desc(ab, srng); return desc; } int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr) { u32 tp, hp; lockdep_assert_held(&srng->lock); tp = srng->u.dst_ring.tp; if (sync_hw_ptr) { hp = *srng->u.dst_ring.hp_addr; srng->u.dst_ring.cached_hp = hp; } else { hp = srng->u.dst_ring.cached_hp; } if (hp >= tp) return (hp - tp) / srng->entry_size; else return (srng->ring_size - tp + hp) / srng->entry_size; } /* Returns number of available entries in src ring */ int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr) { u32 tp, hp; lockdep_assert_held(&srng->lock); hp = srng->u.src_ring.hp; if (sync_hw_ptr) { tp = *srng->u.src_ring.tp_addr; srng->u.src_ring.cached_tp = tp; } else { tp = srng->u.src_ring.cached_tp; } if (tp > hp) return ((tp - hp) / srng->entry_size) - 1; else return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; } u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; u32 next_hp; lockdep_assert_held(&srng->lock); /* TODO: Using % is expensive, but we have to do this since the size of some * SRNG rings is not a power of 2 (due to descriptor sizes). Need to see * if a separate function can be defined for rings with a power-of-2 ring size * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the * overhead of % by using a mask (with &). */ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; if (next_hp == srng->u.src_ring.cached_tp) return NULL; desc = srng->ring_base_vaddr + srng->u.src_ring.hp; srng->u.src_ring.hp = next_hp; /* TODO: Reap functionality is not used by all rings. If a particular * ring does not use the reap functionality, we need not update reap_hp * with the next_hp pointer. Need to make sure a separate function is used * before doing any optimization by removing the code below that updates * reap_hp.
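 * For reference, in the reap pattern (ath11k_hal_srng_src_reap_next()
 * below) reap_hp runs ahead of hp: entries are first reaped and filled,
 * and only later committed by advancing hp via
 * ath11k_hal_srng_src_get_next_reaped(), so rings that never reap pay
 * this extra store for nothing.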
*/ srng->u.src_ring.reap_hp = next_hp; return desc; } u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; u32 next_reap_hp; lockdep_assert_held(&srng->lock); next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % srng->ring_size; if (next_reap_hp == srng->u.src_ring.cached_tp) return NULL; desc = srng->ring_base_vaddr + next_reap_hp; srng->u.src_ring.reap_hp = next_reap_hp; return desc; } u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; lockdep_assert_held(&srng->lock); if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp) return NULL; desc = srng->ring_base_vaddr + srng->u.src_ring.hp; srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; return desc; } u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng) { u32 next_hp; lockdep_assert_held(&srng->lock); next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; if (next_hp != srng->u.src_ring.cached_tp) return srng->ring_base_vaddr + next_hp; return NULL; } u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) == srng->u.src_ring.cached_tp) return NULL; return srng->ring_base_vaddr + srng->u.src_ring.hp; } void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) { u32 hp; lockdep_assert_held(&srng->lock); if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; } else { hp = READ_ONCE(*srng->u.dst_ring.hp_addr); if (hp != srng->u.dst_ring.cached_hp) { srng->u.dst_ring.cached_hp = hp; /* Make sure descriptor is read after the head * pointer. */ dma_rmb(); } /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) ath11k_hal_srng_prefetch_desc(ab, srng); } } /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin() * should have been called before this. */ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { /* For LMAC rings, ring pointer updates are done through FW and * hence written to a shared memory location that is read by FW */ if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; /* Make sure descriptor is written before updating the * head pointer. */ dma_wmb(); WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; /* Make sure descriptor is read before updating the * tail pointer. */ dma_mb(); WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); } } else { if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; /* Assume the implementation uses an MMIO write accessor * which has the required wmb() so that the descriptor * is written before updating the head pointer. */ ath11k_hif_write32(ab, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; /* Make sure descriptor is read before updating the * tail pointer.
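 * For example, were the tail pointer update below to become visible before
 * the descriptor reads have completed, HW could consider the entry free and
 * recycle it while SW is still parsing it; the full barrier that follows
 * rules that out (the same reasoning as the dma_mb() in the LMAC path
 * above).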
*/ mb(); ath11k_hif_write32(ab, (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem, srng->u.dst_ring.tp); } } srng->timestamp = jiffies; } void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, struct hal_wbm_idle_scatter_list *sbuf, u32 nsbufs, u32 tot_link_desc, u32 end_offset) { struct ath11k_buffer_addr *link_addr; int i; u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64; link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE; for (i = 1; i < nsbufs; i++) { link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK; link_addr->info1 = FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) | FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG, BASE_ADDR_MATCH_TAG_VAL); link_addr = (void *)sbuf[i].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE; } ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR, FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) | FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR, FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST, reg_scatter_buf_sz * nsbufs)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_MSB, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) | FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG, BASE_ADDR_MATCH_TAG_VAL)); /* Setup head and tail pointers for the idle list */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, ((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1, (end_offset >> 2))); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR, 2 * tot_link_desc); /* Enable the SRNG */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40); } int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, int ring_num, int mac_id, struct hal_srng_params *params) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; struct hal_srng *srng; int ring_id; u32 lmac_idx; int i; u32 reg_base; ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id); if (ring_id < 0) return ring_id; srng = &hal->srng_list[ring_id]; srng->ring_id = ring_id; srng->ring_dir = srng_config->ring_dir; srng->ring_base_paddr = params->ring_base_paddr; srng->ring_base_vaddr = params->ring_base_vaddr; srng->entry_size = 
srng_config->entry_size; srng->num_entries = params->num_entries; srng->ring_size = srng->entry_size * srng->num_entries; srng->intr_batch_cntr_thres_entries = params->intr_batch_cntr_thres_entries; srng->intr_timer_thres_us = params->intr_timer_thres_us; srng->flags = params->flags; srng->msi_addr = params->msi_addr; srng->msi_data = params->msi_data; srng->initialized = 1; spin_lock_init(&srng->lock); lockdep_set_class(&srng->lock, hal->srng_key + ring_id); for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { srng->hwreg_base[i] = srng_config->reg_start[i] + (ring_num * srng_config->reg_size[i]); } memset(srng->ring_base_vaddr, 0, (srng->entry_size * srng->num_entries) << 2); /* TODO: Add comments on these swap configurations */ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP | HAL_SRNG_FLAGS_RING_PTR_SWAP; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.hp = 0; srng->u.src_ring.cached_tp = 0; srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size; srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id); srng->u.src_ring.low_threshold = params->low_threshold * srng->entry_size; if (srng_config->lmac_ring) { lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START; srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr + lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { if (!ab->hw_params.supports_shadow_regs) srng->u.src_ring.hp_addr = (u32 *)((unsigned long)ab->mem + reg_base); else ath11k_dbg(ab, ATH11K_DBG_HAL, "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n", type, ring_num, reg_base, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem); } } else { /* During initialization, the loop count in all the descriptors * will be set to zero; HW sets it to 1 on completing the * descriptor update in the first loop and increments it by 1 on * subsequent loops (the loop count wraps around after reaching * 0xffff). The 'loop_cnt' in the SW ring state is the expected * loop count in descriptors updated by HW (to be processed * by SW).
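 * A worked example of this convention: descriptors start at 0, so while
 * loop_cnt == 1 a descriptor still carrying 0 has not been written yet.
 * After the first wrap HW stamps 2, so an entry that still reads 1 is a
 * leftover from the previous pass and is not a new completion.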
*/ srng->u.dst_ring.loop_cnt = 1; srng->u.dst_ring.tp = 0; srng->u.dst_ring.cached_hp = 0; srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id); if (srng_config->lmac_ring) { /* For LMAC rings, tail pointer updates will be done * through FW by writing to a shared memory location */ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START; srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr + lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { if (!ab->hw_params.supports_shadow_regs) srng->u.dst_ring.tp_addr = (u32 *)((unsigned long)ab->mem + reg_base + (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))); else ath11k_dbg(ab, ATH11K_DBG_HAL, "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n", type, ring_num, reg_base + (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)), (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem); } } if (srng_config->lmac_ring) return ring_id; ath11k_hal_srng_hw_init(ab, srng); if (type == HAL_CE_DST) { srng->u.dst_ring.max_buffer_length = params->max_buffer_len; ath11k_hal_ce_dst_setup(ab, srng, ring_num); } return ring_id; } static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab, int shadow_cfg_idx, enum hal_ring_type ring_type, int ring_num) { struct hal_srng *srng; struct ath11k_hal *hal = &ab->hal; int ring_id; struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0); if (ring_id < 0) return; srng = &hal->srng_list[ring_id]; if (srng_config->ring_dir == HAL_SRNG_DIR_DST) srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); else srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); } int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, enum hal_ring_type ring_type, int ring_num) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; int shadow_cfg_idx = hal->num_shadow_reg_configured; u32 target_reg; if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS) return -EINVAL; hal->num_shadow_reg_configured++; target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START]; target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] * ring_num; /* For destination ring, shadow the TP */ if (srng_config->ring_dir == HAL_SRNG_DIR_DST) target_reg += HAL_OFFSET_FROM_HP_TO_TP; hal->shadow_reg_addr[shadow_cfg_idx] = target_reg; /* update hp/tp addr in the hal structure */ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type, ring_num); ath11k_dbg(ab, ATH11K_DBG_HAL, "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d", target_reg, HAL_SHADOW_REG(ab, shadow_cfg_idx), shadow_cfg_idx, ring_type, ring_num); return 0; } void ath11k_hal_srng_shadow_config(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; int ring_type, ring_num; /* update all the non-CE srngs.
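 * LMAC rings are skipped in the loop below because their pointer updates
 * already go through the FW shared memory (wrp/rdp) rather than MMIO, so a
 * shadow register would buy them nothing; CE rings are excluded here and
 * presumably configured through their own path. Each configured ring takes
 * one of the HAL_SHADOW_NUM_REGS slots, where slot x decodes to
 * HAL_SHADOW_REG(ab, x), i.e. hal_shadow_base_addr + 4 * x (see hal.h).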
*/ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) { struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; if (ring_type == HAL_CE_SRC || ring_type == HAL_CE_DST || ring_type == HAL_CE_DST_STATUS) continue; if (srng_config->lmac_ring) continue; for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++) ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num); } } void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len) { struct ath11k_hal *hal = &ab->hal; *len = hal->num_shadow_reg_configured; *cfg = hal->shadow_reg_addr; } void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); /* check whether the ring is empty. Update the shadow * HP only when the ring isn't empty. */ if (srng->ring_dir == HAL_SRNG_DIR_SRC && *srng->u.src_ring.tp_addr != srng->u.src_ring.hp) ath11k_hal_srng_access_end(ab, srng); } static int ath11k_hal_srng_create_config(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *s; hal->srng_config = kmemdup(hw_srng_config_template, sizeof(hw_srng_config_template), GFP_KERNEL); if (!hal->srng_config) return -ENOMEM; s = &hal->srng_config[HAL_REO_DST]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab); s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab); s = &hal->srng_config[HAL_REO_EXCEPTION]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab); s = &hal->srng_config[HAL_REO_REINJECT]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab); s = &hal->srng_config[HAL_REO_CMD]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab); s = &hal->srng_config[HAL_REO_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab); s = &hal->srng_config[HAL_TCL_DATA]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; s = &hal->srng_config[HAL_TCL_CMD]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; s = &hal->srng_config[HAL_TCL_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; s = &hal->srng_config[HAL_CE_SRC]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s = &hal->srng_config[HAL_CE_DST]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_CE_DST_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_WBM_IDLE_LINK]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; s = &hal->srng_config[HAL_SW2WBM_RELEASE]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP; s = &hal->srng_config[HAL_WBM2SW_RELEASE]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - HAL_WBM0_RELEASE_RING_BASE_LSB(ab); s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; return 0; } static void ath11k_hal_register_srng_key(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; u32 ring_id; for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) lockdep_register_key(hal->srng_key + ring_id); } static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; u32 ring_id; for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) lockdep_unregister_key(hal->srng_key + ring_id); } int ath11k_hal_srng_init(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; int ret; memset(hal, 0, sizeof(*hal)); ret = ath11k_hal_srng_create_config(ab); if (ret) goto err_hal; ret = ath11k_hal_alloc_cont_rdp(ab); if (ret) goto err_hal; ret = ath11k_hal_alloc_cont_wrp(ab); if (ret) goto err_free_cont_rdp; ath11k_hal_register_srng_key(ab); return 0; err_free_cont_rdp: ath11k_hal_free_cont_rdp(ab); err_hal: return ret; } EXPORT_SYMBOL(ath11k_hal_srng_init); void ath11k_hal_srng_deinit(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; int i; for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) ab->hal.srng_list[i].initialized = 0; ath11k_hal_unregister_srng_key(ab); ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); hal->srng_config = NULL; } EXPORT_SYMBOL(ath11k_hal_srng_deinit); +void ath11k_hal_srng_clear(struct ath11k_base *ab) +{ + /* No need to memset rdp and wrp memory since each individual + * segment would get cleared in ath11k_hal_srng_src_hw_init() + * and ath11k_hal_srng_dst_hw_init(). 
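+	 * (Presumably this reset is meant for recovery-style
+	 * reinitialization, where every ring is set up again via
+	 * ath11k_hal_srng_setup() before next use.)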
+ */ + memset(ab->hal.srng_list, 0, + sizeof(ab->hal.srng_list)); + memset(ab->hal.shadow_reg_addr, 0, + sizeof(ab->hal.shadow_reg_addr)); + ab->hal.avail_blk_resource = 0; + ab->hal.current_blk_index = 0; + ab->hal.num_shadow_reg_configured = 0; +} +EXPORT_SYMBOL(ath11k_hal_srng_clear); + void ath11k_hal_dump_srng_stats(struct ath11k_base *ab) { struct hal_srng *srng; struct ath11k_ext_irq_grp *irq_grp; struct ath11k_ce_pipe *ce_pipe; int i; ath11k_err(ab, "Last interrupt received for each CE:\n"); for (i = 0; i < ab->hw_params.ce_count; i++) { ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n", i, ce_pipe->pipe_num, jiffies_to_msecs(jiffies - ce_pipe->timestamp)); } ath11k_err(ab, "\nLast interrupt received for each group:\n"); for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { irq_grp = &ab->ext_irq_grp[i]; ath11k_err(ab, "group_id %d %ums before\n", irq_grp->grp_id, jiffies_to_msecs(jiffies - irq_grp->timestamp)); } for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) { srng = &ab->hal.srng_list[i]; if (!srng->initialized) continue; if (srng->ring_dir == HAL_SRNG_DIR_SRC) ath11k_err(ab, "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n", srng->ring_id, srng->u.src_ring.hp, srng->u.src_ring.reap_hp, *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp, srng->u.src_ring.last_tp, jiffies_to_msecs(jiffies - srng->timestamp)); else if (srng->ring_dir == HAL_SRNG_DIR_DST) ath11k_err(ab, "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n", srng->ring_id, srng->u.dst_ring.tp, *srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp, srng->u.dst_ring.last_hp, jiffies_to_msecs(jiffies - srng->timestamp)); } } diff --git a/hal.h b/hal.h index 601542410c75..839095af9267 100644 --- a/hal.h +++ b/hal.h @@ -1,977 +1,978 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_HAL_H #define ATH11K_HAL_H #include "hal_desc.h" #include "rx_desc.h" struct ath11k_base; #define HAL_LINK_DESC_SIZE (32 << 2) #define HAL_LINK_DESC_ALIGN 128 #define HAL_NUM_MPDUS_PER_LINK_DESC 6 #define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7 #define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6 #define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12 #define HAL_MAX_AVAIL_BLK_RES 3 #define HAL_RING_BASE_ALIGN 8 #define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704 /* TODO: Check with hw team on the supported scatter buf size */ #define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8 #define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE) #define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48 #define HAL_DSCP_TID_TBL_SIZE 24 /* calculate the register address from bar0 of shadow register x */ #define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr #define HAL_SHADOW_NUM_REGS 36 #define HAL_HP_OFFSET_IN_REG_START 1 #define HAL_OFFSET_FROM_HP_TO_TP 4 #define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x))) /* WCSS Relative address */ #define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000 #define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000 #define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000 #define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \ (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg) #define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \ (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg) #define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \ (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg) #define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \ (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg) #define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000 #define HAL_CE_WFSS_CE_REG_BASE 0x01b80000 #define HAL_WLAON_REG_BASE 0x01f80000 /* SW2TCL(x) R0 ring configuration address */ #define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014 #define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c #define HAL_TCL1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_lsb #define HAL_TCL1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_msb #define HAL_TCL1_RING_ID(ab) ab->hw_params.regs->hal_tcl1_ring_id #define HAL_TCL1_RING_MISC(ab) ab->hw_params.regs->hal_tcl1_ring_misc #define HAL_TCL1_RING_TP_ADDR_LSB(ab) \ ab->hw_params.regs->hal_tcl1_ring_tp_addr_lsb #define HAL_TCL1_RING_TP_ADDR_MSB(ab) \ ab->hw_params.regs->hal_tcl1_ring_tp_addr_msb #define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0 #define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1 #define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \ ab->hw_params.regs->hal_tcl1_ring_msi1_base_lsb #define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \ ab->hw_params.regs->hal_tcl1_ring_msi1_base_msb #define HAL_TCL1_RING_MSI1_DATA(ab) \ ab->hw_params.regs->hal_tcl1_ring_msi1_data #define HAL_TCL2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl2_ring_base_lsb #define HAL_TCL_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl_ring_base_lsb #define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) \ (HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_ID_OFFSET(ab) \ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \ 
(HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab)) #define HAL_TCL1_RING_MISC_OFFSET(ab) \ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab)) /* SW2TCL(x) R2 ring pointers (head/tail) address */ #define HAL_TCL1_RING_HP 0x00002000 #define HAL_TCL1_RING_TP 0x00002004 #define HAL_TCL2_RING_HP 0x00002008 #define HAL_TCL_RING_HP 0x00002018 #define HAL_TCL1_RING_TP_OFFSET \ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP) /* TCL STATUS ring address */ #define HAL_TCL_STATUS_RING_BASE_LSB(ab) \ ab->hw_params.regs->hal_tcl_status_ring_base_lsb #define HAL_TCL_STATUS_RING_HP 0x00002030 /* REO2SW(x) R0 ring configuration address */ #define HAL_REO1_GEN_ENABLE 0x00000000 #define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004 #define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008 #define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c #define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010 #define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl #define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb #define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb #define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id #define HAL_REO1_RING_MISC(ab) ab->hw_params.regs->hal_reo1_ring_misc #define HAL_REO1_RING_HP_ADDR_LSB(ab) \ ab->hw_params.regs->hal_reo1_ring_hp_addr_lsb #define HAL_REO1_RING_HP_ADDR_MSB(ab) \ ab->hw_params.regs->hal_reo1_ring_hp_addr_msb #define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \ ab->hw_params.regs->hal_reo1_ring_producer_int_setup #define HAL_REO1_RING_MSI1_BASE_LSB(ab) \ ab->hw_params.regs->hal_reo1_ring_msi1_base_lsb #define HAL_REO1_RING_MSI1_BASE_MSB(ab) \ ab->hw_params.regs->hal_reo1_ring_msi1_base_msb #define HAL_REO1_RING_MSI1_DATA(ab) \ ab->hw_params.regs->hal_reo1_ring_msi1_data #define HAL_REO2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo2_ring_base_lsb #define HAL_REO1_AGING_THRESH_IX_0(ab) \ ab->hw_params.regs->hal_reo1_aging_thresh_ix_0 #define HAL_REO1_AGING_THRESH_IX_1(ab) \ ab->hw_params.regs->hal_reo1_aging_thresh_ix_1 #define HAL_REO1_AGING_THRESH_IX_2(ab) \ ab->hw_params.regs->hal_reo1_aging_thresh_ix_2 #define HAL_REO1_AGING_THRESH_IX_3(ab) \ ab->hw_params.regs->hal_reo1_aging_thresh_ix_3 #define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab) \ (HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab) \ (HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_MSI1_DATA_OFFSET(ab) \ (HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_BASE_MSB_OFFSET(ab) \ (HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab) \ (HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab) \ (HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab) \ (HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab)) #define HAL_REO1_RING_MISC_OFFSET(ab) \ (HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab)) /* REO2SW(x) R2 ring pointers (head/tail) address */ #define 
HAL_REO1_RING_HP(ab) ab->hw_params.regs->hal_reo1_ring_hp #define HAL_REO1_RING_TP(ab) ab->hw_params.regs->hal_reo1_ring_tp #define HAL_REO2_RING_HP(ab) ab->hw_params.regs->hal_reo2_ring_hp #define HAL_REO1_RING_TP_OFFSET(ab) (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)) /* REO2TCL R0 ring configuration address */ #define HAL_REO_TCL_RING_BASE_LSB(ab) \ ab->hw_params.regs->hal_reo_tcl_ring_base_lsb /* REO2TCL R2 ring pointer (head/tail) address */ #define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp /* REO CMD R0 address */ #define HAL_REO_CMD_RING_BASE_LSB(ab) \ ab->hw_params.regs->hal_reo_cmd_ring_base_lsb /* REO CMD R2 address */ #define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp /* SW2REO R0 address */ #define HAL_SW2REO_RING_BASE_LSB(ab) \ ab->hw_params.regs->hal_sw2reo_ring_base_lsb /* SW2REO R2 address */ #define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp /* CE ring R0 address */ #define HAL_CE_DST_RING_BASE_LSB 0x00000000 #define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058 #define HAL_CE_DST_RING_CTRL 0x000000b0 /* CE ring R2 address */ #define HAL_CE_DST_RING_HP 0x00000400 #define HAL_CE_DST_STATUS_RING_HP 0x00000408 /* REO status address */ #define HAL_REO_STATUS_RING_BASE_LSB(ab) \ ab->hw_params.regs->hal_reo_status_ring_base_lsb #define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp /* WBM Idle R0 address */ #define HAL_WBM_IDLE_LINK_RING_BASE_LSB(x) \ (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb) #define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(x) \ (ab->hw_params.regs->hal_wbm_idle_link_ring_misc) #define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048 #define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c #define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058 #define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c #define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068 #define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c #define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078 #define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c #define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084 /* WBM Idle R2 address */ #define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0 /* SW2WBM R0 release address */ #define HAL_WBM_RELEASE_RING_BASE_LSB(x) \ (ab->hw_params.regs->hal_wbm_release_ring_base_lsb) /* SW2WBM R2 release address */ #define HAL_WBM_RELEASE_RING_HP 0x00003018 /* WBM2SW R0 release address */ #define HAL_WBM0_RELEASE_RING_BASE_LSB(x) \ (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb) #define HAL_WBM1_RELEASE_RING_BASE_LSB(x) \ (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb) /* WBM2SW R2 release address */ #define HAL_WBM0_RELEASE_RING_HP 0x000030c0 #define HAL_WBM1_RELEASE_RING_HP 0x000030c8 /* TCL ring field mask and offset */ #define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8) #define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0) #define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0) #define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1) #define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3) #define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4) #define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5) #define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6) #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16) #define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0) #define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0) #define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8) #define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0) #define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17) 
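/* Illustrative sketch, not part of this patch: each DSCP_TID_MAPn field
 * below holds one 3-bit TID, eight DSCP code points per 32-bit word.
 * Assuming hypothetical TID values, a register image could be composed as
 *
 *	u32 map = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0, 0) |
 *		  FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1, 1);
 *
 * and programmed through HAL_TCL1_RING_DSCP_TID_MAP while the
 * DSCP_TID_MAP_PROG_EN bit above is set.
 */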
#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18) #define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21) /* REO ring field mask and offset */ #define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8) #define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0) #define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8) #define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0) #define HAL_REO1_RING_MISC_MSI_SWAP BIT(3) #define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4) #define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5) #define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6) #define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16) #define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0) #define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8) #define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0) #define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23) #define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2) #define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3) #define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING GENMASK(20, 17) /* CE ring bit field mask and shift */ #define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0) #define HAL_ADDR_LSB_REG_MASK 0xffffffff #define HAL_ADDR_MSB_REG_SHIFT 32 /* WBM ring bit field mask and shift */ #define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1) #define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2) #define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16) #define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0) #define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8) #define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8) #define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8) #define BASE_ADDR_MATCH_TAG_VAL 0x5 #define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff #define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff #define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff /* IPQ5018 ce registers */ #define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000 #define HAL_IPQ5018_CE_SIZE 0x200000 /* Add any other errors here and return them in * ath11k_hal_rx_desc_get_err(). 
*/ enum hal_srng_ring_id { HAL_SRNG_RING_ID_REO2SW1 = 0, HAL_SRNG_RING_ID_REO2SW2, HAL_SRNG_RING_ID_REO2SW3, HAL_SRNG_RING_ID_REO2SW4, HAL_SRNG_RING_ID_REO2TCL, HAL_SRNG_RING_ID_SW2REO, HAL_SRNG_RING_ID_REO_CMD = 8, HAL_SRNG_RING_ID_REO_STATUS, HAL_SRNG_RING_ID_SW2TCL1 = 16, HAL_SRNG_RING_ID_SW2TCL2, HAL_SRNG_RING_ID_SW2TCL3, HAL_SRNG_RING_ID_SW2TCL4, HAL_SRNG_RING_ID_SW2TCL_CMD = 24, HAL_SRNG_RING_ID_TCL_STATUS, HAL_SRNG_RING_ID_CE0_SRC = 32, HAL_SRNG_RING_ID_CE1_SRC, HAL_SRNG_RING_ID_CE2_SRC, HAL_SRNG_RING_ID_CE3_SRC, HAL_SRNG_RING_ID_CE4_SRC, HAL_SRNG_RING_ID_CE5_SRC, HAL_SRNG_RING_ID_CE6_SRC, HAL_SRNG_RING_ID_CE7_SRC, HAL_SRNG_RING_ID_CE8_SRC, HAL_SRNG_RING_ID_CE9_SRC, HAL_SRNG_RING_ID_CE10_SRC, HAL_SRNG_RING_ID_CE11_SRC, HAL_SRNG_RING_ID_CE0_DST = 56, HAL_SRNG_RING_ID_CE1_DST, HAL_SRNG_RING_ID_CE2_DST, HAL_SRNG_RING_ID_CE3_DST, HAL_SRNG_RING_ID_CE4_DST, HAL_SRNG_RING_ID_CE5_DST, HAL_SRNG_RING_ID_CE6_DST, HAL_SRNG_RING_ID_CE7_DST, HAL_SRNG_RING_ID_CE8_DST, HAL_SRNG_RING_ID_CE9_DST, HAL_SRNG_RING_ID_CE10_DST, HAL_SRNG_RING_ID_CE11_DST, HAL_SRNG_RING_ID_CE0_DST_STATUS = 80, HAL_SRNG_RING_ID_CE1_DST_STATUS, HAL_SRNG_RING_ID_CE2_DST_STATUS, HAL_SRNG_RING_ID_CE3_DST_STATUS, HAL_SRNG_RING_ID_CE4_DST_STATUS, HAL_SRNG_RING_ID_CE5_DST_STATUS, HAL_SRNG_RING_ID_CE6_DST_STATUS, HAL_SRNG_RING_ID_CE7_DST_STATUS, HAL_SRNG_RING_ID_CE8_DST_STATUS, HAL_SRNG_RING_ID_CE9_DST_STATUS, HAL_SRNG_RING_ID_CE10_DST_STATUS, HAL_SRNG_RING_ID_CE11_DST_STATUS, HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104, HAL_SRNG_RING_ID_WBM_SW_RELEASE, HAL_SRNG_RING_ID_WBM2SW0_RELEASE, HAL_SRNG_RING_ID_WBM2SW1_RELEASE, HAL_SRNG_RING_ID_WBM2SW2_RELEASE, HAL_SRNG_RING_ID_WBM2SW3_RELEASE, HAL_SRNG_RING_ID_WBM2SW4_RELEASE, HAL_SRNG_RING_ID_UMAC_ID_END = 127, HAL_SRNG_RING_ID_LMAC1_ID_START, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, HAL_SRNG_RING_ID_RXDMA_DIR_BUF, HAL_SRNG_RING_ID_LMAC1_ID_END = 143 }; /* SRNG registers are split into two groups R0 and R2 */ #define HAL_SRNG_REG_GRP_R0 0 #define HAL_SRNG_REG_GRP_R2 1 #define HAL_SRNG_NUM_REG_GRP 2 #define HAL_SRNG_NUM_LMACS 3 #define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1 #define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \ HAL_SRNG_RING_ID_LMAC1_ID_START) #define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC) #define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \ HAL_SRNG_NUM_LMAC_RINGS) enum hal_ring_type { HAL_REO_DST, HAL_REO_EXCEPTION, HAL_REO_REINJECT, HAL_REO_CMD, HAL_REO_STATUS, HAL_TCL_DATA, HAL_TCL_CMD, HAL_TCL_STATUS, HAL_CE_SRC, HAL_CE_DST, HAL_CE_DST_STATUS, HAL_WBM_IDLE_LINK, HAL_SW2WBM_RELEASE, HAL_WBM2SW_RELEASE, HAL_RXDMA_BUF, HAL_RXDMA_DST, HAL_RXDMA_MONITOR_BUF, HAL_RXDMA_MONITOR_STATUS, HAL_RXDMA_MONITOR_DST, HAL_RXDMA_MONITOR_DESC, HAL_RXDMA_DIR_BUF, HAL_MAX_RING_TYPES, }; #define HAL_RX_MAX_BA_WINDOW 256 #define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000) /** * enum hal_reo_cmd_type: Enum for REO command type * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked * earlier with a 'REO_FLUSH_CACHE' command * 
@HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings */ enum hal_reo_cmd_type { HAL_REO_CMD_GET_QUEUE_STATS = 0, HAL_REO_CMD_FLUSH_QUEUE = 1, HAL_REO_CMD_FLUSH_CACHE = 2, HAL_REO_CMD_UNBLOCK_CACHE = 3, HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4, HAL_REO_CMD_UPDATE_RX_QUEUE = 5, }; /** * enum hal_reo_cmd_status: Enum for execution status of REO command * @HAL_REO_CMD_SUCCESS: Command has successfully executed * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue * or cache was blocked * @HAL_REO_CMD_FAILED: Command execution failed, could be due to * invalid queue desc * @HAL_REO_CMD_RESOURCE_BLOCKED: * @HAL_REO_CMD_DRAIN: */ enum hal_reo_cmd_status { HAL_REO_CMD_SUCCESS = 0, HAL_REO_CMD_BLOCKED = 1, HAL_REO_CMD_FAILED = 2, HAL_REO_CMD_RESOURCE_BLOCKED = 3, HAL_REO_CMD_DRAIN = 0xff, }; struct hal_wbm_idle_scatter_list { dma_addr_t paddr; struct hal_wbm_link_desc *vaddr; }; struct hal_srng_params { dma_addr_t ring_base_paddr; u32 *ring_base_vaddr; int num_entries; u32 intr_batch_cntr_thres_entries; u32 intr_timer_thres_us; u32 flags; u32 max_buffer_len; u32 low_threshold; dma_addr_t msi_addr; u32 msi_data; /* Add more params as needed */ }; enum hal_srng_dir { HAL_SRNG_DIR_SRC, HAL_SRNG_DIR_DST }; /* srng flags */ #define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008 #define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010 #define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020 #define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000 #define HAL_SRNG_FLAGS_MSI_INTR 0x00020000 #define HAL_SRNG_FLAGS_CACHED 0x20000000 #define HAL_SRNG_FLAGS_LMAC_RING 0x80000000 #define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000 #define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1) #define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10) /* Common SRNG ring structure for source and destination rings */ struct hal_srng { /* Unique SRNG ring ID */ u8 ring_id; /* Ring initialization done */ u8 initialized; /* Interrupt/MSI value assigned to this ring */ int irq; /* Physical base address of the ring */ dma_addr_t ring_base_paddr; /* Virtual base address of the ring */ u32 *ring_base_vaddr; /* Number of entries in ring */ u32 num_entries; /* Ring size */ u32 ring_size; /* Ring size mask */ u32 ring_size_mask; /* Size of ring entry */ u32 entry_size; /* Interrupt timer threshold - in micro seconds */ u32 intr_timer_thres_us; /* Interrupt batch counter threshold - in number of ring entries */ u32 intr_batch_cntr_thres_entries; /* MSI Address */ dma_addr_t msi_addr; /* MSI data */ u32 msi_data; /* Misc flags */ u32 flags; /* Lock for serializing ring index updates */ spinlock_t lock; /* Start offset of SRNG register groups for this ring * TBD: See if this is required - register address can be derived * from ring ID */ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP]; u64 timestamp; /* Source or Destination ring */ enum hal_srng_dir ring_dir; union { struct { /* SW tail pointer */ u32 tp; /* Shadow head pointer location to be updated by HW */ volatile u32 *hp_addr; /* Cached head pointer */ u32 cached_hp; /* Tail pointer location to be updated by SW - This * will be a register address and need not be * accessed through SW structure */ u32 *tp_addr; /* Current SW loop cnt */ u32 loop_cnt; /* max transfer size */ u16 max_buffer_length; /* head pointer at access end */ u32 last_hp; } dst_ring; struct { /* SW head pointer */ u32 hp; /* SW reap head pointer */ u32 reap_hp; /* Shadow tail pointer location to be updated by HW */ u32 *tp_addr; /* Cached tail pointer */ u32 cached_tp; /* Head pointer location to 
be updated by SW - This * will be a register address and need not be accessed * through SW structure */ u32 *hp_addr; /* Low threshold - in number of ring entries */ u32 low_threshold; /* tail pointer at access end */ u32 last_tp; } src_ring; } u; }; /* Interrupt mitigation - Batch threshold in terms of number of frames */ #define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256 #define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128 #define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1 /* Interrupt mitigation - timer threshold in us */ #define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000 #define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500 #define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256 /* HW SRNG configuration table */ struct hal_srng_config { int start_ring_id; u16 max_rings; u16 entry_size; u32 reg_start[HAL_SRNG_NUM_REG_GRP]; u16 reg_size[HAL_SRNG_NUM_REG_GRP]; u8 lmac_ring; enum hal_srng_dir ring_dir; u32 max_size; }; /** * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers * * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle * descriptor list. * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host */ enum hal_rx_buf_return_buf_manager { HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST, HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST, HAL_RX_BUF_RBM_FW_BM, HAL_RX_BUF_RBM_SW0_BM, HAL_RX_BUF_RBM_SW1_BM, HAL_RX_BUF_RBM_SW2_BM, HAL_RX_BUF_RBM_SW3_BM, HAL_RX_BUF_RBM_SW4_BM, }; #define HAL_SRNG_DESC_LOOP_CNT 0xf0000000 #define HAL_REO_CMD_FLG_NEED_STATUS BIT(0) #define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1) #define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2) #define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3) #define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4) #define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5) #define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6) #define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7) #define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8) /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */ #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8) #define HAL_REO_CMD_UPD0_VLD BIT(9) #define HAL_REO_CMD_UPD0_ALDC BIT(10) #define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11) #define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12) #define HAL_REO_CMD_UPD0_AC BIT(13) #define HAL_REO_CMD_UPD0_BAR BIT(14) #define HAL_REO_CMD_UPD0_RETRY BIT(15) #define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16) #define HAL_REO_CMD_UPD0_OOR_MODE BIT(17) #define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18) #define HAL_REO_CMD_UPD0_PN_CHECK BIT(19) #define HAL_REO_CMD_UPD0_EVEN_PN BIT(20) #define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21) #define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22) #define HAL_REO_CMD_UPD0_PN_SIZE BIT(23) #define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24) #define HAL_REO_CMD_UPD0_SVLD BIT(25) #define HAL_REO_CMD_UPD0_SSN BIT(26) #define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27) #define HAL_REO_CMD_UPD0_PN_ERR BIT(28) #define HAL_REO_CMD_UPD0_PN_VALID BIT(29) #define HAL_REO_CMD_UPD0_PN BIT(30) /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */ #define HAL_REO_CMD_UPD1_VLD BIT(16) #define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17) #define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19) #define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20) #define HAL_REO_CMD_UPD1_AC GENMASK(22, 21) #define 
HAL_REO_CMD_UPD1_BAR BIT(23) #define HAL_REO_CMD_UPD1_RETRY BIT(24) #define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25) #define HAL_REO_CMD_UPD1_OOR_MODE BIT(26) #define HAL_REO_CMD_UPD1_PN_CHECK BIT(27) #define HAL_REO_CMD_UPD1_EVEN_PN BIT(28) #define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29) #define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30) #define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31) /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */ #define HAL_REO_CMD_UPD2_SVLD BIT(10) #define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11) #define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23) #define HAL_REO_CMD_UPD2_PN_ERR BIT(24) #define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8) struct ath11k_hal_reo_cmd { u32 addr_lo; u32 flag; u32 upd0; u32 upd1; u32 upd2; u32 pn[4]; u16 rx_queue_num; u16 min_rel; u16 min_fwd; u8 addr_hi; u8 ac_list; u8 blocking_idx; u16 ba_window_size; u8 pn_size; }; enum hal_pn_type { HAL_PN_TYPE_NONE, HAL_PN_TYPE_WPA, HAL_PN_TYPE_WAPI_EVEN, HAL_PN_TYPE_WAPI_UNEVEN, }; enum hal_ce_desc { HAL_CE_DESC_SRC, HAL_CE_DESC_DST, HAL_CE_DESC_DST_STATUS, }; #define HAL_HASH_ROUTING_RING_TCL 0 #define HAL_HASH_ROUTING_RING_SW1 1 #define HAL_HASH_ROUTING_RING_SW2 2 #define HAL_HASH_ROUTING_RING_SW3 3 #define HAL_HASH_ROUTING_RING_SW4 4 #define HAL_HASH_ROUTING_RING_REL 5 #define HAL_HASH_ROUTING_RING_FW 6 struct hal_reo_status_header { u16 cmd_num; enum hal_reo_cmd_status cmd_status; u16 cmd_exe_time; u32 timestamp; }; struct hal_reo_status_queue_stats { u16 ssn; u16 curr_idx; u32 pn[4]; u32 last_rx_queue_ts; u32 last_rx_dequeue_ts; u32 rx_bitmap[8]; /* Bitmap from 0-255 */ u32 curr_mpdu_cnt; u32 curr_msdu_cnt; u16 fwd_due_to_bar_cnt; u16 dup_cnt; u32 frames_in_order_cnt; u32 num_mpdu_processed_cnt; u32 num_msdu_processed_cnt; u32 total_num_processed_byte_cnt; u32 late_rx_mpdu_cnt; u32 reorder_hole_cnt; u8 timeout_cnt; u8 bar_rx_cnt; u8 num_window_2k_jump_cnt; }; struct hal_reo_status_flush_queue { bool err_detected; }; enum hal_reo_status_flush_cache_err_code { HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS, HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE, HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND, }; struct hal_reo_status_flush_cache { bool err_detected; enum hal_reo_status_flush_cache_err_code err_code; bool cache_controller_flush_status_hit; u8 cache_controller_flush_status_desc_type; u8 cache_controller_flush_status_client_id; u8 cache_controller_flush_status_err; u8 cache_controller_flush_status_cnt; }; enum hal_reo_status_unblock_cache_type { HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE, HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE, }; struct hal_reo_status_unblock_cache { bool err_detected; enum hal_reo_status_unblock_cache_type unblock_type; }; struct hal_reo_status_flush_timeout_list { bool err_detected; bool list_empty; u16 release_desc_cnt; u16 fwd_buf_cnt; }; enum hal_reo_threshold_idx { HAL_REO_THRESHOLD_IDX_DESC_COUNTER0, HAL_REO_THRESHOLD_IDX_DESC_COUNTER1, HAL_REO_THRESHOLD_IDX_DESC_COUNTER2, HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM, }; struct hal_reo_status_desc_thresh_reached { enum hal_reo_threshold_idx threshold_idx; u32 link_desc_counter0; u32 link_desc_counter1; u32 link_desc_counter2; u32 link_desc_counter_sum; }; struct hal_reo_status { struct hal_reo_status_header uniform_hdr; u8 loop_cnt; union { struct hal_reo_status_queue_stats queue_stats; struct hal_reo_status_flush_queue flush_queue; struct hal_reo_status_flush_cache flush_cache; struct hal_reo_status_unblock_cache unblock_cache; struct hal_reo_status_flush_timeout_list timeout_list; struct 
hal_reo_status_desc_thresh_reached desc_thresh_reached; } u; }; /* HAL context to be used to access SRNG APIs (currently used by data path * and transport (CE) modules) */ struct ath11k_hal { /* HAL internal state for all SRNG rings. */ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX]; /* SRNG configuration table */ struct hal_srng_config *srng_config; /* Remote pointer memory for HW/FW updates */ struct { u32 *vaddr; dma_addr_t paddr; } rdp; /* Shared memory for ring pointer updates from host to FW */ struct { u32 *vaddr; dma_addr_t paddr; } wrp; /* Available REO blocking resources bitmap */ u8 avail_blk_resource; u8 current_blk_index; /* shadow register configuration */ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX]; }; u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, u32 start_seq, enum hal_pn_type type); void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab, struct hal_srng *srng); void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, struct hal_wbm_idle_scatter_list *sbuf, u32 nsbufs, u32 tot_link_desc, u32 end_offset); dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab, struct hal_srng *srng); dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab, struct hal_srng *srng); void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, dma_addr_t paddr); u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type); void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data); void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr); u32 ath11k_hal_ce_dst_status_get_length(void *buf); int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type); int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type); void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, struct hal_srng_params *params); u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng); int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr); u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng); int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr); void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng); void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng); int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, int ring_num, int mac_id, struct hal_srng_params *params); int ath11k_hal_srng_init(struct ath11k_base *ath11k); void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); +void ath11k_hal_srng_clear(struct ath11k_base *ab); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab); void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len); int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, enum hal_ring_type ring_type, int ring_num); void ath11k_hal_srng_shadow_config(struct 
ath11k_base *ab); void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab, struct hal_srng *srng); #endif diff --git a/mac.c b/mac.c index 106e2530b64e..0e41b5a91d66 100644 --- a/mac.c +++ b/mac.c @@ -1,10696 +1,10696 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include #include #include #include #include #include #include #include "mac.h" #include "core.h" #include "debug.h" #include "wmi.h" #include "hw.h" #include "dp_tx.h" #include "dp_rx.h" #include "testmode.h" #include "peer.h" #include "debugfs_sta.h" #include "hif.h" #include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _freq, _flags) { \ .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN6G(_channel, _freq, _flags) { \ .band = NL80211_BAND_6GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static const struct ieee80211_channel ath11k_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static const struct ieee80211_channel ath11k_5ghz_channels[] = { CHAN5G(36, 5180, 0), CHAN5G(40, 5200, 0), CHAN5G(44, 5220, 0), CHAN5G(48, 5240, 0), CHAN5G(52, 5260, 0), CHAN5G(56, 5280, 0), CHAN5G(60, 5300, 0), CHAN5G(64, 5320, 0), CHAN5G(100, 5500, 0), CHAN5G(104, 5520, 0), CHAN5G(108, 5540, 0), CHAN5G(112, 5560, 0), CHAN5G(116, 5580, 0), CHAN5G(120, 5600, 0), CHAN5G(124, 5620, 0), CHAN5G(128, 5640, 0), CHAN5G(132, 5660, 0), CHAN5G(136, 5680, 0), CHAN5G(140, 5700, 0), CHAN5G(144, 5720, 0), CHAN5G(149, 5745, 0), CHAN5G(153, 5765, 0), CHAN5G(157, 5785, 0), CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), CHAN5G(169, 5845, 0), CHAN5G(173, 5865, 0), CHAN5G(177, 5885, 0), }; static const struct ieee80211_channel ath11k_6ghz_channels[] = { CHAN6G(1, 5955, 0), CHAN6G(5, 5975, 0), CHAN6G(9, 5995, 0), CHAN6G(13, 6015, 0), CHAN6G(17, 6035, 0), CHAN6G(21, 6055, 0), CHAN6G(25, 6075, 0), CHAN6G(29, 6095, 0), CHAN6G(33, 6115, 0), CHAN6G(37, 6135, 0), CHAN6G(41, 6155, 0), CHAN6G(45, 6175, 0), CHAN6G(49, 6195, 0), CHAN6G(53, 6215, 0), CHAN6G(57, 6235, 0), CHAN6G(61, 6255, 0), CHAN6G(65, 6275, 0), CHAN6G(69, 6295, 0), CHAN6G(73, 6315, 0), CHAN6G(77, 6335, 0), CHAN6G(81, 6355, 0), CHAN6G(85, 6375, 0), CHAN6G(89, 6395, 0), CHAN6G(93, 6415, 0), CHAN6G(97, 6435, 0), CHAN6G(101, 6455, 0), CHAN6G(105, 6475, 0), CHAN6G(109, 6495, 0), CHAN6G(113, 6515, 0), CHAN6G(117, 6535, 0), CHAN6G(121, 6555, 0), CHAN6G(125, 6575, 0), CHAN6G(129, 6595, 0), CHAN6G(133, 6615, 0), CHAN6G(137, 6635, 0), CHAN6G(141, 6655, 0), CHAN6G(145, 6675, 0), CHAN6G(149, 6695, 0), CHAN6G(153, 6715, 0), CHAN6G(157, 6735, 0), CHAN6G(161, 6755, 0), CHAN6G(165, 6775, 0), CHAN6G(169, 6795, 0), CHAN6G(173, 6815, 0), CHAN6G(177, 6835, 0), CHAN6G(181, 6855, 0), CHAN6G(185, 6875, 0), CHAN6G(189, 6895, 0), CHAN6G(193, 6915, 0), CHAN6G(197, 6935, 0), CHAN6G(201, 6955, 0), 
CHAN6G(205, 6975, 0), CHAN6G(209, 6995, 0), CHAN6G(213, 7015, 0), CHAN6G(217, 7035, 0), CHAN6G(221, 7055, 0), CHAN6G(225, 7075, 0), CHAN6G(229, 7095, 0), CHAN6G(233, 7115, 0), /* new addition in IEEE Std 802.11ax-2021 */ CHAN6G(2, 5935, 0), }; static struct ieee80211_rate ath11k_legacy_rates[] = { { .bitrate = 10, .hw_value = ATH11K_HW_RATE_CCK_LP_1M }, { .bitrate = 20, .hw_value = ATH11K_HW_RATE_CCK_LP_2M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = ATH11K_HW_RATE_CCK_LP_11M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M }, { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M }, { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M }, { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M }, { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M }, { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M }, { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M }, { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M }, }; static const int ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = { [NL80211_BAND_2GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G, [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN, }, [NL80211_BAND_5GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, }, [NL80211_BAND_6GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, }, }; const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = { .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START | HTT_RX_FILTER_TLV_FLAGS_PPDU_END | HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE, .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0, .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1, .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2, .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 | HTT_RX_FP_CTRL_FILTER_FLASG3 }; #define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4 #define ath11k_g_rates ath11k_legacy_rates #define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates)) #define ath11k_a_rates (ath11k_legacy_rates + 4) #define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4) #define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */ /* Overhead due to the processing of channel switch events from FW */ #define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */ static const u32 ath11k_smps_map[] = { [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, }; enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy) { enum nl80211_he_ru_alloc ret; switch (ru_phy) { case RU_26: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; case RU_52: ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; break; case RU_106: ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; break; case RU_242: ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; break; case RU_484: ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; break; case RU_996: ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; default: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; } return ret; } enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) { enum nl80211_he_ru_alloc ret; switch (ru_tones) { case 26: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; case 52: ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; break; case 106: ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; break; case 242: ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; break; case 484: ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; break; case 996: ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; case (996 * 2): ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; default: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; } return ret; } enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi) { enum nl80211_he_gi ret; switch (sgi) { case RX_MSDU_START_SGI_0_8_US: ret = NL80211_RATE_INFO_HE_GI_0_8; break; case RX_MSDU_START_SGI_1_6_US: ret = NL80211_RATE_INFO_HE_GI_1_6; break; case RX_MSDU_START_SGI_3_2_US: ret = NL80211_RATE_INFO_HE_GI_3_2; break; default: ret = NL80211_RATE_INFO_HE_GI_0_8; break; } return ret; } u8 ath11k_mac_bw_to_mac80211_bw(u8 bw) { u8 ret = 0; switch (bw) { case ATH11K_BW_20: ret = RATE_INFO_BW_20; break; case ATH11K_BW_40: ret = RATE_INFO_BW_40; break; case ATH11K_BW_80: ret = RATE_INFO_BW_80; break; case ATH11K_BW_160: ret = RATE_INFO_BW_160; break; } return ret; } enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw) { switch (bw) { case RATE_INFO_BW_20: return ATH11K_BW_20; case RATE_INFO_BW_40: return ATH11K_BW_40; case RATE_INFO_BW_80: return ATH11K_BW_80; case RATE_INFO_BW_160: return ATH11K_BW_160; default: return ATH11K_BW_20; } } int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, u16 *rate) { /* As default, it is OFDM rates */ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX; int max_rates_idx = ath11k_g_rates_size; if (preamble == WMI_RATE_PREAMBLE_CCK) { hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK; i = 0; max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX; } while (i < max_rates_idx) { if (hw_rc == ath11k_legacy_rates[i].hw_value) { *rateidx = i; *rate = ath11k_legacy_rates[i].bitrate; return 0; } i++; } return -EINVAL; } static int get_num_chains(u32 mask) { int num_chains = 0; while (mask) { if (mask & BIT(0)) num_chains++; mask >>= 1; } return num_chains; } u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, u32 bitrate) { int i; for (i = 0; i < sband->n_bitrates; i++) if (sband->bitrates[i].bitrate == bitrate) return i; return 0; } static u32 ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask) { int nss; for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) if (ht_mcs_mask[nss]) return nss + 1; return 1; } static u32 ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask) { int nss; for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) if (vht_mcs_mask[nss]) return nss + 1; return 1; } static u32 ath11k_mac_max_he_nss(const u16 *he_mcs_mask) { int nss; for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--) 
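/* [Editor's note] get_num_chains() above is a plain population count over
 * the chainmask (the kernel's hweight32() would give the same result), and
 * the ath11k_mac_max_{ht,vht,he}_nss() helpers scan the user-configured MCS
 * mask from the highest spatial stream downwards. A minimal sketch with
 * illustrative values only:
 *
 *	u32 chainmask = 0x5;                      // chains 0 and 2 set
 *	u16 he_mcs_mask[8] = { 0x3ff, 0x3ff, 0 }; // NSS 1 and 2 populated
 *
 * get_num_chains(0x5) == 2, and the highest non-zero mask entry is index
 * 1, so ath11k_mac_max_he_nss() returns 2 here.
 */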
if (he_mcs_mask[nss]) return nss + 1; return 1; } static u8 ath11k_parse_mpdudensity(u8 mpdudensity) { /* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": * 0 for no restriction * 1 for 1/4 us * 2 for 1/2 us * 3 for 1 us * 4 for 2 us * 5 for 4 us * 6 for 8 us * 7 for 16 us */ switch (mpdudensity) { case 0: return 0; case 1: case 2: case 3: /* Our lower layer calculations limit our precision to * 1 microsecond */ return 1; case 4: return 2; case 5: return 4; case 6: return 8; case 7: return 16; default: return 0; } } static int ath11k_mac_vif_chan(struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { struct ieee80211_chanctx_conf *conf; rcu_read_lock(); conf = rcu_dereference(vif->bss_conf.chanctx_conf); if (!conf) { rcu_read_unlock(); return -ENOENT; } *def = conf->def; rcu_read_unlock(); return 0; } static bool ath11k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { case 10: case 20: case 55: case 110: return true; } return false; } u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, u8 hw_rate, bool cck) { const struct ieee80211_rate *rate; int i; for (i = 0; i < sband->n_bitrates; i++) { rate = &sband->bitrates[i]; if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck) continue; if (rate->hw_value == hw_rate) return i; else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && rate->hw_value_short == hw_rate) return i; } return 0; } static u8 ath11k_mac_bitrate_to_rate(int bitrate) { return DIV_ROUND_UP(bitrate, 5) | (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); } static void ath11k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_vif_iter *arvif_iter = data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; } struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id) { struct ath11k_vif_iter arvif_iter; u32 flags; memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter)); arvif_iter.vdev_id = vdev_id; flags = IEEE80211_IFACE_ITER_RESUME_ALL; ieee80211_iterate_active_interfaces_atomic(ar->hw, flags, ath11k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id); return NULL; } return arvif_iter.arvif; } struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, u32 vdev_id) { int i; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar && (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) { arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id); if (arvif) return arvif; } } return NULL; } struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id) { int i; struct ath11k_pdev *pdev; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) { if (pdev->ar->allocated_vdev_map & (1LL << vdev_id)) return pdev->ar; } } return NULL; } struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id) { int i; struct ath11k_pdev *pdev; if (ab->hw_params.single_pdev_only) { pdev = rcu_dereference(ab->pdevs_active[0]); return pdev ? pdev->ar : NULL; } if (WARN_ON(pdev_id > ab->num_radios)) return NULL; for (i = 0; i < ab->num_radios; i++) { if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM) pdev = &ab->pdevs[i]; else pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->pdev_id == pdev_id) return (pdev->ar ? 
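/* [Editor's note] ath11k_mac_bitrate_to_rate() above encodes a legacy rate
 * for the firmware: bitrates are stored in units of 100 kbps, so dividing
 * by 5 yields the 802.11 "supported rate" unit of 500 kbps, and BIT(7)
 * tags CCK rates. A standalone sketch (helper name is illustrative):
 *
 *	static u8 encode_legacy_rate(int bitrate, bool cck)
 *	{
 *		return DIV_ROUND_UP(bitrate, 5) | (cck ? BIT(7) : 0);
 *	}
 *
 * e.g. 5.5 Mbps CCK: 55 / 5 = 11, tagged -> 0x8b; 54 Mbps OFDM:
 * 540 / 5 = 108 (0x6c).
 */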
pdev->ar : NULL); } return NULL; } struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up) return arvif; } } return NULL; } static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) { return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) || (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) && (band2 & WMI_HOST_WLAN_5G_CAP))); } u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ieee80211_vif *vif = arvif->vif; struct cfg80211_chan_def def; enum nl80211_band band; u8 pdev_id = ab->target_pdev_ids[0].pdev_id; int i; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return pdev_id; band = def.chan->band; for (i = 0; i < ab->target_pdev_count; i++) { if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands)) return ab->target_pdev_ids[i].pdev_id; } return pdev_id; } u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar) { struct ath11k_vif *arvif; arvif = ath11k_mac_get_vif_up(ar->ab); if (arvif) return ath11k_mac_get_target_pdev_id_from_vif(arvif); else return ar->ab->target_pdev_ids[0].pdev_id; } static void ath11k_pdev_caps_update(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; ar->max_tx_power = ab->target_caps.hw_max_tx_power; /* FIXME Set min_tx_power to ab->target_caps.hw_min_tx_power. * But since the received value in svcrdy is same as hw_max_tx_power, * we can set ar->min_tx_power to 0 currently until * this is fixed in firmware */ ar->min_tx_power = 0; ar->txpower_limit_2g = ar->max_tx_power; ar->txpower_limit_5g = ar->max_tx_power; ar->txpower_scale = WMI_HOST_TP_SCALE_MAX; } static int ath11k_mac_txpower_recalc(struct ath11k *ar) { struct ath11k_pdev *pdev = ar->pdev; struct ath11k_vif *arvif; int ret, txpower = -1; u32 param; lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->txpower <= 0) continue; if (txpower == -1) txpower = arvif->txpower; else txpower = min(txpower, arvif->txpower); } if (txpower == -1) return 0; /* txpwr is set as 2 units per dBm in FW*/ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower), ar->max_tx_power) * 2; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n", txpower / 2); if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) && ar->txpower_limit_2g != txpower) { param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G; ret = ath11k_wmi_pdev_set_param(ar, param, txpower, ar->pdev->pdev_id); if (ret) goto fail; ar->txpower_limit_2g = txpower; } if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) && ar->txpower_limit_5g != txpower) { param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G; ret = ath11k_wmi_pdev_set_param(ar, param, txpower, ar->pdev->pdev_id); if (ret) goto fail; ar->txpower_limit_5g = txpower; } return 0; fail: ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n", txpower / 2, param, ret); return ret; } static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 vdev_param, rts_cts = 0; int ret; lockdep_assert_held(&ar->conf_mutex); vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS; /* Enable RTS/CTS protection for sw retries (when legacy stations * are in BSS) or by default only for second rate series. 
* TODO: Check if we need to enable CTS 2 Self in any case */ rts_cts = WMI_USE_RTS_CTS; if (arvif->num_legacy_stations > 0) rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4; else rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4; /* Need not send duplicate param value to firmware */ if (arvif->rtscts_prot_mode == rts_cts) return 0; arvif->rtscts_prot_mode = rts_cts; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d recalc rts/cts prot %d\n", arvif->vdev_id, rts_cts); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rts_cts); if (ret) ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } static int ath11k_mac_set_kickout(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 param; int ret; ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH, ATH11K_KICKOUT_THRESHOLD, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MIN_IDLE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MAX_IDLE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MAX_UNRESPONSIVE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } void ath11k_mac_peer_cleanup_all(struct ath11k *ar) { struct ath11k_peer *peer, *tmp; struct ath11k_base *ab = ar->ab; lockdep_assert_held(&ar->conf_mutex); mutex_lock(&ab->tbl_mtx_lock); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { ath11k_peer_rx_tid_cleanup(ar, peer); ath11k_peer_rhash_delete(ab, peer); list_del(&peer->list); kfree(peer); } spin_unlock_bh(&ab->base_lock); mutex_unlock(&ab->tbl_mtx_lock); ar->num_peers = 0; ar->num_stations = 0; } static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar) { lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; if (!wait_for_completion_timeout(&ar->vdev_setup_done, ATH11K_VDEV_SETUP_TIMEOUT_HZ)) return -ETIMEDOUT; return ar->last_wmi_vdev_start_status ? 
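/* [Editor's note] ath11k_recalc_rtscts_prot() above packs two fields into
 * one WMI_VDEV_PARAM_ENABLE_RTSCTS value: the low bits carry the enable
 * mode and the protection profile is shifted up by four bits. Restated as
 * a sketch (the exact field widths are firmware-defined):
 *
 *	u32 rts_cts = WMI_USE_RTS_CTS;
 *	rts_cts |= (num_legacy_stations ? WMI_RTSCTS_ACROSS_SW_RETRIES
 *					: WMI_RTSCTS_FOR_SECOND_RATESERIES) << 4;
 *
 * The cached arvif->rtscts_prot_mode is compared first so an unchanged
 * value is never re-sent to the firmware.
 */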
-EINVAL : 0; } static void ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, void *data) { struct cfg80211_chan_def **def = data; *def = &conf->def; } static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id, struct cfg80211_chan_def *chandef) { struct ieee80211_channel *channel; struct wmi_vdev_start_req_arg arg = {}; int ret; lockdep_assert_held(&ar->conf_mutex); channel = chandef->chan; arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); arg.channel.min_power = 0; arg.channel.max_power = channel->max_power; arg.channel.max_reg_power = channel->max_reg_power; arg.channel.max_antenna_gain = channel->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); reinit_completion(&ar->vdev_setup_done); reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_start(ar, &arg, false); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr, NULL, 0, 0); if (ret) { ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); goto vdev_stop; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i started\n", vdev_id); return 0; vdev_stop: reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n", vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n", vdev_id, ret); return ret; } return -EIO; } static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); return ret; } ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i stopped\n", ar->monitor_vdev_id); return 0; } static int ath11k_mac_monitor_vdev_create(struct ath11k *ar) { struct ath11k_pdev *pdev = ar->pdev; struct vdev_create_params param = {}; int bit, ret; u8 tmp_addr[6] = {}; u16 nss; lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) return 0; if (ar->ab->free_vdev_map == 0) { ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n"); return -ENOMEM; } bit = __ffs64(ar->ab->free_vdev_map); ar->monitor_vdev_id = bit; param.if_id = ar->monitor_vdev_id; param.type = WMI_VDEV_TYPE_MONITOR; param.subtype = WMI_VDEV_SUBTYPE_NONE; param.pdev_id = pdev->pdev_id; if (pdev->cap.supported_bands & 
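/* [Editor's note] The monitor-vdev start path above shows the WMI
 * handshake used throughout this file: re-arm a struct completion, issue
 * the WMI command, then block in ath11k_mac_vdev_setup_sync() until the
 * firmware event fires or ATH11K_VDEV_SETUP_TIMEOUT_HZ expires, finally
 * checking last_wmi_vdev_start_status. A generic sketch of the pattern
 * (names illustrative):
 *
 *	reinit_completion(&ar->vdev_setup_done);
 *	ret = send_wmi_cmd(ar);                    // e.g. ath11k_wmi_vdev_start()
 *	if (!ret)
 *		ret = ath11k_mac_vdev_setup_sync(ar);  // waits for the event
 *
 * The same reinit-then-wait shape guards vdev stop and delete as well.
 */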
WMI_HOST_WLAN_2G_CAP) { param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; } ret = ath11k_wmi_vdev_create(ar, tmp_addr, ¶m); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n", ar->monitor_vdev_id, ret); ar->monitor_vdev_id = -1; return ret; } nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, WMI_VDEV_PARAM_NSS, nss); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); goto err_vdev_del; } ret = ath11k_mac_txpower_recalc(ar); if (ret) { ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n", ar->monitor_vdev_id, ret); goto err_vdev_del; } ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->num_created_vdevs++; set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d created\n", ar->monitor_vdev_id); return 0; err_vdev_del: ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); ar->monitor_vdev_id = -1; return ret; } static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) return 0; reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n", ar->monitor_vdev_id, ret); return ret; } time_left = wait_for_completion_timeout(&ar->vdev_delete_done, ATH11K_VDEV_DELETE_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d deleted\n", ar->monitor_vdev_id); ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id); ar->num_created_vdevs--; ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); } return ret; } static int ath11k_mac_monitor_start(struct ath11k *ar) { struct cfg80211_chan_def *chandef = NULL; int ret; lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) return 0; ieee80211_iter_chan_contexts_atomic(ar->hw, ath11k_mac_get_any_chandef_iter, &chandef); if (!chandef) return 0; ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef); if (ret) { ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret); ath11k_mac_monitor_vdev_delete(ar); return ret; } set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); ar->num_started_vdevs++; ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false); if (ret) { ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d", ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor started\n"); return 0; } static int ath11k_mac_monitor_stop(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) return 0; ret = ath11k_mac_monitor_vdev_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret); return ret; } clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); 
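/* [Editor's note] Monitor vdev ids above are allocated from the shared
 * free_vdev_map bitmap: __ffs64() picks the lowest free bit, which is then
 * moved from the free map to the per-radio allocated map. A sketch of the
 * bookkeeping mirroring the create path above:
 *
 *	int bit = __ffs64(ab->free_vdev_map);   // lowest set bit = free id
 *	ab->free_vdev_map &= ~(1LL << bit);     // claim it
 *	ar->allocated_vdev_map |= 1LL << bit;
 *
 * e.g. free_vdev_map == 0b1100 gives bit == 2. Delete reverses both steps
 * once the firmware's delete event (vdev_delete_done) arrives.
 */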
ar->num_started_vdevs--; ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true); if (ret) { ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d", ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor stopped ret %d\n", ret); return 0; } static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; struct ieee80211_vif *vif = arvif->vif; struct ieee80211_conf *conf = &ar->hw->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; int ret; int timeout; bool enable_ps; lockdep_assert_held(&arvif->ar->conf_mutex); if (arvif->vif->type != NL80211_IFTYPE_STATION) return 0; enable_ps = arvif->ps; if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; timeout = conf->dynamic_ps_timeout; if (timeout == 0) { /* firmware doesn't like 0 */ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; } ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, timeout); if (ret) { ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", arvif->vdev_id, ret); return ret; } } else { psmode = WMI_STA_PS_MODE_DISABLED; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d psmode %s\n", arvif->vdev_id, psmode ? "enable" : "disable"); ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); if (ret) { ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", psmode, arvif->vdev_id, ret); return ret; } return 0; } static int ath11k_mac_config_ps(struct ath11k *ar) { struct ath11k_vif *arvif; int ret = 0; lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath11k_mac_vif_setup_ps(arvif); if (ret) { ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret); break; } } return ret; } static int ath11k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed) { struct ath11k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) { set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) goto out; ret = ath11k_mac_monitor_vdev_create(ar); if (ret) { ath11k_warn(ar->ab, "failed to create monitor vdev: %d", ret); goto out; } ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor: %d", ret); goto err_mon_del; } } else { clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) goto out; ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor: %d", ret); goto out; } ret = ath11k_mac_monitor_vdev_delete(ar); if (ret) { ath11k_warn(ar->ab, "failed to delete monitor vdev: %d", ret); goto out; } } } out: mutex_unlock(&ar->conf_mutex); return ret; err_mon_del: ath11k_mac_monitor_vdev_delete(ar); mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_setup_nontx_vif_rsnie(struct ath11k_vif *arvif, bool tx_arvif_rsnie_present, const u8 *profile, u8 profile_len) { if (cfg80211_find_ie(WLAN_EID_RSN, profile, profile_len)) { arvif->rsnie_present = true; } else if (tx_arvif_rsnie_present) { int i; u8 nie_len; const u8 *nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE, profile, profile_len); if (!nie) return; nie_len = nie[1]; nie += 2; for (i = 0; i < nie_len; i++) { if (nie[i] == WLAN_EID_RSN) { arvif->rsnie_present = false; break; } } } } static bool 
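/* [Editor's note] The function declared here walks the Multiple BSSID
 * element to find the profile of a nontransmitting BSS. Simplified layout
 * of what it parses (per the IEEE 802.11 Multiple BSSID element; length
 * octets abbreviated):
 *
 *	[EID 71][len][MaxBSSID indicator]
 *	    [subelement 0: nontx profile]
 *	        [Nontransmitted BSSID Capability element]
 *	        [SSID element]
 *	        [Multiple-BSSID Index element: EID, len, bssid_index, ...]
 *	    [subelement 0: next profile] ...
 *
 * The code skips the capability and SSID elements, matches bssid_index
 * against vif->bss_conf.bssid_index, and then inspects the remaining
 * profile IEs (RSN / Non-Inheritance) via
 * ath11k_mac_setup_nontx_vif_rsnie().
 */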
ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif, struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ieee80211_mgmt *mgmt; const u8 *ies, *profile, *next_profile; int ies_len; ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); mgmt = (struct ieee80211_mgmt *)bcn->data; ies += sizeof(mgmt->u.beacon); ies_len = skb_tail_pointer(bcn) - ies; ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len); arvif->rsnie_present = tx_arvif->rsnie_present; while (ies) { u8 mbssid_len; ies_len -= (2 + ies[1]); mbssid_len = ies[1] - 1; profile = &ies[3]; while (mbssid_len) { u8 profile_len; profile_len = profile[1]; next_profile = profile + (2 + profile_len); mbssid_len -= (2 + profile_len); profile += 2; profile_len -= (2 + profile[1]); profile += (2 + profile[1]); /* nontx capabilities */ profile_len -= (2 + profile[1]); profile += (2 + profile[1]); /* SSID */ if (profile[2] == arvif->vif->bss_conf.bssid_index) { profile_len -= 5; profile = profile + 5; ath11k_mac_setup_nontx_vif_rsnie(arvif, tx_arvif->rsnie_present, profile, profile_len); return true; } profile = next_profile; } ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile, ies_len); } return false; } static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ath11k *ar = arvif->ar; struct ieee80211_mgmt *mgmt; const u8 *p2p_ie; int ret; mgmt = (void *)bcn->data; p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, mgmt->u.beacon.variable, bcn->len - (mgmt->u.beacon.variable - bcn->data)); if (!p2p_ie) return -ENOENT; ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); if (ret) { ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return ret; } static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, u8 oui_type, size_t ie_offset) { size_t len; const u8 *next, *end; u8 *ie; if (WARN_ON(skb->len < ie_offset)) return -EINVAL; ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, skb->data + ie_offset, skb->len - ie_offset); if (!ie) return -ENOENT; len = ie[1] + 2; end = skb->data + skb->len; next = ie + len; if (WARN_ON(next > end)) return -EINVAL; memmove(ie, next, end - next); skb_trim(skb, skb->len - len); return 0; } static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ath11k_base *ab = arvif->ar->ab; struct ieee80211_mgmt *mgmt; int ret = 0; u8 *ies; ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); mgmt = (struct ieee80211_mgmt *)bcn->data; ies += sizeof(mgmt->u.beacon); if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; else arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; else arvif->wpaie_present = false; if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) return ret; ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn); if (ret) { ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n", ret); return ret; } /* P2P IE is inserted by firmware automatically (as * configured above) so remove it from the base beacon * template to avoid duplicate P2P IEs in beacon frames. 
*/ ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, offsetof(struct ieee80211_mgmt, u.beacon.variable)); if (ret) { ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n", ret); return ret; } return ret; } static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif) { struct ieee80211_bss_conf *link_conf, *tx_bss_conf; lockdep_assert_wiphy(arvif->ar->hw->wiphy); link_conf = &arvif->vif->bss_conf; tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf); if (tx_bss_conf) return ath11k_vif_to_arvif(tx_bss_conf->vif); return NULL; } static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif, struct ath11k_vif *tx_arvif) { struct ieee80211_ema_beacons *beacons; int ret = 0; bool nontx_vif_params_set = false; u32 params = 0; u8 i = 0; beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, tx_arvif->vif, 0); if (!beacons || !beacons->cnt) { ath11k_warn(arvif->ar->ab, "failed to get ema beacon templates from mac80211\n"); return -EPERM; } if (tx_arvif == arvif) { if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb)) return -EINVAL; } else { arvif->wpaie_present = tx_arvif->wpaie_present; } for (i = 0; i < beacons->cnt; i++) { if (tx_arvif != arvif && !nontx_vif_params_set) nontx_vif_params_set = ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, beacons->bcn[i].skb); params = beacons->cnt; params |= (i << WMI_EMA_TMPL_IDX_SHIFT); params |= ((!i ? 1 : 0) << WMI_EMA_FIRST_TMPL_SHIFT); params |= ((i + 1 == beacons->cnt ? 1 : 0) << WMI_EMA_LAST_TMPL_SHIFT); ret = ath11k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id, &beacons->bcn[i].offs, beacons->bcn[i].skb, params); if (ret) { ath11k_warn(tx_arvif->ar->ab, "failed to set ema beacon template id %i error %d\n", i, ret); break; } } ieee80211_beacon_free_ema_list(beacons); if (tx_arvif != arvif && !nontx_vif_params_set) return -EINVAL; /* Profile not found in the beacons */ return ret; } static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif, struct ath11k_vif *tx_arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ieee80211_hw *hw = ar->hw; struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn; int ret; if (tx_arvif != arvif) { ar = tx_arvif->ar; ab = ar->ab; hw = ar->hw; vif = tx_arvif->vif; } bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); if (!bcn) { ath11k_warn(ab, "failed to get beacon template from mac80211\n"); return -EPERM; } if (tx_arvif == arvif) { if (ath11k_mac_set_vif_params(tx_arvif, bcn)) return -EINVAL; } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) { return -EINVAL; } ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0); kfree_skb(bcn); if (ret) ath11k_warn(ab, "failed to submit beacon template command: %d\n", ret); return ret; } static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) { struct ieee80211_vif *vif = arvif->vif; struct ath11k_vif *tx_arvif; if (arvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; /* Target does not expect beacon templates for the already up * non-transmitting interfaces, and results in a crash if sent. 
*/ tx_arvif = ath11k_mac_get_tx_arvif(arvif); if (tx_arvif) { if (arvif != tx_arvif && arvif->is_up) return 0; if (vif->bss_conf.ema_ap) return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif); } else { tx_arvif = arvif; } return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif); } void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) { struct ieee80211_vif *vif = arvif->vif; if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent) return; if (vif->bss_conf.color_change_active && ieee80211_beacon_cntdwn_is_complete(vif, 0)) { arvif->bcca_zero_sent = true; ieee80211_color_change_finish(vif, 0); return; } arvif->bcca_zero_sent = false; if (vif->bss_conf.color_change_active) ieee80211_beacon_update_cntdwn(vif, 0); ath11k_mac_setup_bcn_tmpl(arvif); } static void ath11k_control_beaconing(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { struct ath11k *ar = arvif->ar; struct ath11k_vif *tx_arvif; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); if (!info->enable_beacon) { ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n", arvif->vdev_id, ret); arvif->is_up = false; return; } /* Install the beacon template to the FW */ ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) { ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n", ret); return; } arvif->aid = 0; ether_addr_copy(arvif->bssid, info->bssid); tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? tx_arvif->bssid : NULL, info->bssid_index, 1 << info->bssid_indicator); if (ret) { ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); return; } arvif->is_up = true; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up\n", arvif->vdev_id); } static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (vif->type != NL80211_IFTYPE_STATION) return; if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) return; cancel_delayed_work(&arvif->connection_loss_work); } void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb) { ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_handle_beacon_iter, skb); } static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; if (arvif->vdev_id != *vdev_id) return; if (!arvif->is_up) return; ieee80211_beacon_loss(vif); /* Firmware doesn't report beacon loss events repeatedly. If AP probe * (done by mac80211) succeeds but beacons do not resume then it * doesn't make sense to continue operation. Queue connection loss work * which can be cancelled when beacon is received. 
*/ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, ATH11K_CONNECTION_LOSS_HZ); } void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id) { ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_handle_beacon_miss_iter, &vdev_id); } static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work) { struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, connection_loss_work.work); struct ieee80211_vif *vif = arvif->vif; if (!arvif->is_up) return; ieee80211_connection_loss(vif); } static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); if (vif->type == NL80211_IFTYPE_STATION) aid = vif->cfg.aid; else aid = sta->aid; ether_addr_copy(arg->peer_mac, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_associd = aid; arg->auth_flag = true; /* TODO: STA WAR in ath10k for listen interval required? */ arg->peer_listen_intval = ar->hw->conf.listen_interval; arg->peer_nss = 1; arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); if (arvif->rsnie_present || arvif->wpaie_present) { arg->need_ptk_4_way = true; if (arvif->wpaie_present) arg->need_gtk_2_way = true; } else if (bss) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); ies = rcu_dereference(bss->ies); wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies->data, ies->len); rcu_read_unlock(); cfg80211_put_bss(ar->hw->wiphy, bss); } /* FIXME: base on RSN IE/WPA IE is a correct idea? */ if (rsnie || wpaie) { ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "%s: rsn ie found\n", __func__); arg->need_ptk_4_way = true; } if (wpaie) { ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "%s: wpa ie found\n", __func__); arg->need_gtk_2_way = true; } if (sta->mfp) { /* TODO: Need to check if FW supports PMF? */ arg->is_pmf_enabled = true; } /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? 
*/ } static void ath11k_peer_assoc_h_rates(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; enum nl80211_band band; u32 ratemask; u8 rate; int i; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; sband = ar->hw->wiphy->bands[band]; ratemask = sta->deflink.supp_rates[band]; ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; rateset->num_rates = 0; for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { if (!(ratemask & 1)) continue; rate = ath11k_mac_bitrate_to_rate(rates->bitrate); rateset->rates[rateset->num_rates] = rate; rateset->num_rates++; } } static bool ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask) { int nss; for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) if (ht_mcs_mask[nss]) return false; return true; } static bool ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask) { int nss; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) if (vht_mcs_mask[nss]) return false; return true; } static void ath11k_peer_assoc_h_ht(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; int i, n; u8 max_nss; u32 stbc; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!ht_cap->ht_supported) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) return; arg->ht_flag = true; arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ht_cap->ampdu_factor)) - 1; arg->peer_mpdu_density = ath11k_parse_mpdudensity(ht_cap->ampdu_density); arg->peer_ht_caps = ht_cap->cap; arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG; if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) arg->ldpc_flag = true; if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { arg->bw_40 = true; arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; } /* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20 * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, we reset * both flags if the guard interval is the default GI */ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI) arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40); if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG; } if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG; arg->stbc_flag = true; } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S; arg->peer_rate_caps |= stbc; arg->stbc_flag = true; } if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG; else if (ht_cap->mcs.rx_mask[1]) arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG; for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && (ht_mcs_mask[i / 8] &
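/* [Editor's note] In the loop here, i indexes the HT MCS space across the
 * rx_mask byte array: i / 8 selects the byte (one byte per spatial stream
 * for MCS 0-31) and BIT(i % 8) the MCS within it; an MCS survives only if
 * both the peer's rx_mask and the user's ht_mcs_mask advertise it. Worked
 * example (illustrative values):
 *
 *	rx_mask[0] = 0xff;  ht_mcs_mask[0] = 0xff;  // MCS 0-7  kept, NSS 1
 *	rx_mask[1] = 0xff;  ht_mcs_mask[1] = 0x00;  // MCS 8-15 filtered out
 *
 * which leaves n == 8 rates and max_nss == 1.
 */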
BIT(i % 8))) { max_nss = (i / 8) + 1; arg->peer_ht_rates.rates[n++] = i; } /* This is a workaround for HT-enabled STAs which break the spec * and have no HT capabilities RX mask (no HT RX MCS map). * * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. * * Firmware asserts if such situation occurs. */ if (n == 0) { arg->peer_ht_rates.num_rates = 8; for (i = 0; i < arg->peer_ht_rates.num_rates; i++) arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; arg->peer_nss = min(sta->deflink.rx_nss, max_nss); } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ht peer %pM mcs cnt %d nss %d\n", arg->peer_mac, arg->peer_ht_rates.num_rates, arg->peer_nss); } static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) { switch ((mcs_map >> (2 * nss)) & 0x3) { case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; } return 0; } static u16 ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set, const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) { int idx_limit; int nss; u16 mcs_map; u16 mcs; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & vht_mcs_limit[nss]; if (mcs_map) idx_limit = fls(mcs_map) - 1; else idx_limit = -1; switch (idx_limit) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; break; case 8: mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; break; case 9: mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; break; default: WARN_ON(1); fallthrough; case -1: mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; break; } tx_mcs_set &= ~(0x3 << (nss * 2)); tx_mcs_set |= mcs << (nss * 2); } return tx_mcs_set; } static u8 ath11k_get_nss_160mhz(struct ath11k *ar, u8 max_nss) { u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info; u8 max_sup_nss = 0; switch (nss_ratio_info) { case WMI_NSS_RATIO_1BY2_NSS: max_sup_nss = max_nss >> 1; break; case WMI_NSS_RATIO_3BY4_NSS: ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n"); break; case WMI_NSS_RATIO_1_NSS: max_sup_nss = max_nss; break; case WMI_NSS_RATIO_2_NSS: ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n"); break; default: ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n", nss_ratio_info); break; } return max_sup_nss; } static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; u16 *vht_mcs_mask; u8 ampdu_factor; u8 max_nss, vht_mcs; int i, vht_nss, nss_idx; bool user_rate_valid = true; u32 rx_nss, tx_nss, nss_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!vht_cap->vht_supported) return; band = def.chan->band; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; arg->vht_flag = true; /* TODO: similar flags required? */ arg->vht_capable = true; if (def.chan->band == NL80211_BAND_2GHZ) arg->vht_ng_flag = true; arg->peer_vht_caps = vht_cap->cap; ampdu_factor = (vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to * zero in VHT IE. Using it would result in degraded throughput. 
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep * it if VHT max_mpdu is smaller. */ arg->peer_max_mpdu = max(arg->peer_max_mpdu, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); if (vht_nss > sta->deflink.rx_nss) { user_rate_valid = false; for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (vht_mcs_mask[nss_idx]) { user_rate_valid = true; break; } } } if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting vht range mcs value to peer supported nss %d for peer %pM\n", sta->deflink.rx_nss, sta->addr); vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1]; } /* Calculate peer NSS capability from VHT capabilities if STA * supports VHT. */ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) { vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> (2 * i) & 3; if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED && vht_mcs_mask[i]) max_nss = i + 1; } arg->peer_nss = min(sta->deflink.rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit( __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); /* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default. * VHT mcs rate 10 and 11 is not supported in 11ac standard. * so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode. */ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK; arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11; if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) == IEEE80211_VHT_MCS_NOT_SUPPORTED) arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; /* TODO: Check */ arg->tx_max_mcs_nss = 0xFF; if (arg->peer_phymode == MODE_11AC_VHT160 || arg->peer_phymode == MODE_11AC_VHT80_80) { tx_nss = ath11k_get_nss_160mhz(ar, max_nss); rx_nss = min(arg->peer_nss, tx_nss); arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; if (!rx_nss) { ath11k_warn(ar->ab, "invalid max_nss\n"); return; } if (arg->peer_phymode == MODE_11AC_VHT160) nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); else nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); arg->peer_bw_rxnss_override |= nss_160; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n", sta->addr, arg->peer_max_mpdu, arg->peer_flags, arg->peer_bw_rxnss_override); } static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss) { switch ((mcs_map >> (2 * nss)) & 0x3) { case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1; case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1; case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1; } return 0; } static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set, const u16 he_mcs_limit[NL80211_HE_NSS_MAX]) { int idx_limit; int nss; u16 mcs_map; u16 mcs; for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) & he_mcs_limit[nss]; if (mcs_map) idx_limit = fls(mcs_map) - 1; else idx_limit = -1; switch (idx_limit) { case 0 ... 
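/* [Editor's note] The switch here folds a highest-usable-MCS index back
 * into the 2-bit per-NSS code used by the HE MCS maps (0 = MCS 0-7,
 * 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported). Worked example: a peer
 * code of 1 (MCS 0-9) expands to the bitmap BIT(10) - 1 = 0x3ff via
 * ath11k_mac_get_max_he_mcs_map(); masked with a user limit of 0xff that
 * leaves fls(0xff) - 1 = 7, so idx_limit 7 maps back to
 * IEEE80211_HE_MCS_SUPPORT_0_7 and is written into the two bits for that
 * NSS:
 *
 *	tx_mcs_set &= ~(0x3 << (nss * 2));
 *	tx_mcs_set |= mcs << (nss * 2);
 */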
7: mcs = IEEE80211_HE_MCS_SUPPORT_0_7; break; case 8: case 9: mcs = IEEE80211_HE_MCS_SUPPORT_0_9; break; case 10: case 11: mcs = IEEE80211_HE_MCS_SUPPORT_0_11; break; default: WARN_ON(1); fallthrough; case -1: mcs = IEEE80211_HE_MCS_NOT_SUPPORTED; break; } tx_mcs_set &= ~(0x3 << (nss * 2)); tx_mcs_set |= mcs << (nss * 2); } return tx_mcs_set; } static bool ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask) { int nss; for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) if (he_mcs_mask[nss]) return false; return true; } static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; u16 he_mcs_mask[NL80211_HE_NSS_MAX]; u8 max_nss, he_mcs; u16 he_tx_mcs = 0, v = 0; int i, he_nss, nss_idx; bool user_rate_valid = true; u32 rx_nss, tx_nss, nss_160; u8 ampdu_factor, rx_mcs_80, rx_mcs_160; u16 mcs_160_map, mcs_80_map; bool support_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!he_cap->has_he) return; band = def.chan->band; memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs, sizeof(he_mcs_mask)); if (ath11k_peer_assoc_h_he_masked(he_mcs_mask)) return; arg->he_flag = true; support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G); /* The supported HE-MCS and NSS set of the peer he_cap is the intersection with the self he_cap */ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); /* Initialize rx_mcs_160 to 9 which is an invalid value */ rx_mcs_160 = 9; if (support_160) { for (i = 7; i >= 0; i--) { u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { rx_mcs_160 = i + 1; break; } } } /* Initialize rx_mcs_80 to 9 which is an invalid value */ rx_mcs_80 = 9; for (i = 7; i >= 0; i--) { u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { rx_mcs_80 = i + 1; break; } } if (support_160) max_nss = min(rx_mcs_80, rx_mcs_160); else max_nss = rx_mcs_80; arg->peer_nss = min(sta->deflink.rx_nss, max_nss); memcpy_and_pad(&arg->peer_he_cap_macinfo, sizeof(arg->peer_he_cap_macinfo), he_cap->he_cap_elem.mac_cap_info, sizeof(he_cap->he_cap_elem.mac_cap_info), 0); memcpy_and_pad(&arg->peer_he_cap_phyinfo, sizeof(arg->peer_he_cap_phyinfo), he_cap->he_cap_elem.phy_cap_info, sizeof(he_cap->he_cap_elem.phy_cap_info), 0); arg->peer_he_ops = vif->bss_conf.he_oper.params; /* the topmost byte is used to indicate BSS color info */ arg->peer_he_ops &= 0xffffff; /* As per section 26.6.1 of 11ax Draft 5.0, if the Max AMPDU Exponent Extension * in HE cap is zero, use the arg->peer_max_mpdu as calculated while parsing * VHT caps (if VHT caps are present) or HT caps (if VHT caps are not present). * * For a non-zero value of the Max AMPDU Exponent Extension in HE MAC caps, * if an HE STA sends VHT cap and HE cap IE in the assoc request then use * MAX_AMPDU_LEN_FACTOR as 20 to calculate the max_ampdu length. * If an HE STA does not send VHT cap, but sends HE and HT cap in the assoc * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate the max_ampdu * length.
*/ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { if (sta->deflink.vht_cap.vht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; else if (sta->deflink.ht_cap.ht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } if (he_cap->he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { int bit = 7; int nss, ru; arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK; arg->peer_ppet.ru_bit_mask = (he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) { for (ru = 0; ru < 4; ru++) { u32 val = 0; int i; if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0) continue; for (i = 0; i < 6; i++) { val >>= 1; val |= ((he_cap->ppe_thres[bit / 8] >> (bit % 8)) & 0x1) << 5; bit++; } arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |= val << (ru * 6); } } } if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES) arg->twt_responder = true; if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) arg->twt_requester = true; he_nss = ath11k_mac_max_he_nss(he_mcs_mask); if (he_nss > sta->deflink.rx_nss) { user_rate_valid = false; for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (he_mcs_mask[nss_idx]) { user_rate_valid = true; break; } } } if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting he range mcs value to peer supported nss %d for peer %pM\n", sta->deflink.rx_nss, sta->addr); he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1]; } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; arg->peer_he_mcs_count++; he_tx_mcs = v; } v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; arg->peer_he_mcs_count++; if (!he_tx_mcs) he_tx_mcs = v; fallthrough; default: v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; arg->peer_he_mcs_count++; if (!he_tx_mcs) he_tx_mcs = v; break; } /* Calculate peer NSS capability from HE capabilities if STA * supports HE. */ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) { he_mcs = he_tx_mcs >> (2 * i) & 3; /* In case of fixed rates, MCS Range in he_tx_mcs might have * unsupported range, with he_mcs_mask set, so check either of them * to find nss. 
*/ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED || he_mcs_mask[i]) max_nss = i + 1; } arg->peer_nss = min(sta->deflink.rx_nss, max_nss); if (arg->peer_phymode == MODE_11AX_HE160 || arg->peer_phymode == MODE_11AX_HE80_80) { tx_nss = ath11k_get_nss_160mhz(ar, max_nss); rx_nss = min(arg->peer_nss, tx_nss); arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; if (!rx_nss) { ath11k_warn(ar->ab, "invalid max_nss\n"); return; } if (arg->peer_phymode == MODE_11AX_HE160) nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); else nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); arg->peer_bw_rxnss_override |= nss_160; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "he peer %pM nss %d mcs cnt %d nss_override 0x%x\n", sta->addr, arg->peer_nss, arg->peer_he_mcs_count, arg->peer_bw_rxnss_override); } static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; struct cfg80211_chan_def def; enum nl80211_band band; u8 ampdu_factor; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa) return; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) arg->bw_40 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa); arg->peer_mpdu_density = ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START, arg->peer_he_caps_6ghz)); /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz * Band Capabilities element in the 6 GHz band. * * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability. 
*/ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK, he_cap->he_cap_elem.mac_cap_info[3]) + FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, arg->peer_he_caps_6ghz); arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa) return; if (ht_cap->ht_supported) { smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; } else { smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_SM_PS); } switch (smps) { case WLAN_HT_CAP_SM_PS_STATIC: arg->static_mimops_flag = true; break; case WLAN_HT_CAP_SM_PS_DYNAMIC: arg->dynamic_mimops_flag = true; break; case WLAN_HT_CAP_SM_PS_DISABLED: arg->spatial_mux_flag = true; break; default: break; } } static void ath11k_peer_assoc_h_qos(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) { /* TODO: Check WME vs QoS */ arg->is_wme_set = true; arg->qos_flag = true; } if (sta->wme && sta->uapsd_queues) { /* TODO: Check WME vs QoS */ arg->is_wme_set = true; arg->apsd_flag = true; arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG; } break; case WMI_VDEV_TYPE_STA: if (sta->wme) { arg->is_wme_set = true; arg->qos_flag = true; } break; default: break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM qos %d\n", sta->addr, arg->qos_flag); } static int ath11k_peer_assoc_qos_ap(struct ath11k *ar, struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ap_ps_params params; u32 max_sp; u32 uapsd; int ret; lockdep_assert_held(&ar->conf_mutex); params.vdev_id = arvif->vdev_id; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); uapsd = 0; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; max_sp = 0; if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) max_sp = sta->max_sp; params.param = WMI_AP_PS_PEER_PARAM_UAPSD; params.value = uapsd; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); if (ret) goto err; params.param = WMI_AP_PS_PEER_PARAM_MAX_SP; params.value = max_sp; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); if (ret) goto err; /* TODO revisit during testing */ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; params.value = DISABLE_SIFS_RESPONSE_TRIGGER; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); if (ret) goto err; params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; params.value = DISABLE_SIFS_RESPONSE_TRIGGER; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); if (ret) goto err; return 0; err: ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n", params.param, arvif->vdev_id, ret); return ret; } static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta 
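/* [Editor's note] For 6 GHz peers the two exponents above are summed and
 * applied on top of the base factor, i.e.
 *
 *	max_mpdu = 2^(IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + ampdu_factor) - 1
 *
 * Assuming the base factor of 13 used by mainline mac80211 (worth
 * re-checking against include/linux/ieee80211.h), an extension of 2 from
 * the HE MAC caps plus 1 from the 6 GHz capability gives
 * 2^(13 + 3) - 1 = 65535 bytes.
 */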
*sta) { return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> ATH11K_MAC_FIRST_OFDM_RATE_IDX; } static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, struct ieee80211_sta *sta) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: return MODE_11AC_VHT80_80; default: /* not sure if this is a valid case? */ return MODE_11AC_VHT160; } } if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AC_VHT40; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AC_VHT20; return MODE_UNKNOWN; } static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar, struct ieee80211_sta *sta) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return MODE_11AX_HE160; else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return MODE_11AX_HE80_80; /* not sure if this is a valid case? */ return MODE_11AX_HE160; } if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AX_HE80; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AX_HE40; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AX_HE20; return MODE_UNKNOWN; } static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; switch (band) { case NL80211_BAND_2GHZ: if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AX_HE40_2G; else phymode = MODE_11AX_HE20_2G; } else if (sta->deflink.vht_cap.vht_supported && !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AC_VHT40; else phymode = MODE_11AC_VHT20; } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; } else if (ath11k_mac_sta_has_ofdm_only(sta)) { phymode = MODE_11G; } else { phymode = MODE_11B; } break; case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: /* Check HE first */ if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { phymode = ath11k_mac_get_phymode_he(ar, sta); } else if (sta->deflink.vht_cap.vht_supported && !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { phymode = ath11k_mac_get_phymode_vht(ar, sta); } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else 
phymode = MODE_11NA_HT20; } else { phymode = MODE_11A; } break; default: break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM phymode %s\n", sta->addr, ath11k_wmi_phymode_str(phymode)); arg->peer_phymode = phymode; WARN_ON(phymode == MODE_UNKNOWN); } static void ath11k_peer_assoc_prepare(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg, bool reassoc) { struct ath11k_sta *arsta; lockdep_assert_held(&ar->conf_mutex); arsta = ath11k_sta_to_arsta(sta); memset(arg, 0, sizeof(*arg)); reinit_completion(&ar->peer_assoc_done); arg->peer_new_assoc = !reassoc; ath11k_peer_assoc_h_basic(ar, vif, sta, arg); ath11k_peer_assoc_h_crypto(ar, vif, sta, arg); ath11k_peer_assoc_h_rates(ar, vif, sta, arg); ath11k_peer_assoc_h_phymode(ar, vif, sta, arg); ath11k_peer_assoc_h_ht(ar, vif, sta, arg); ath11k_peer_assoc_h_vht(ar, vif, sta, arg); ath11k_peer_assoc_h_he(ar, vif, sta, arg); ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg); ath11k_peer_assoc_h_qos(ar, vif, sta, arg); ath11k_peer_assoc_h_smps(sta, arg); arsta->peer_nss = arg->peer_nss; /* TODO: amsdu_disable req? */ } static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif, const u8 *addr, const struct ieee80211_sta_ht_cap *ht_cap, u16 he_6ghz_capa) { int smps; if (!ht_cap->ht_supported && !he_6ghz_capa) return 0; if (ht_cap->ht_supported) { smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; } else { smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa); } if (smps >= ARRAY_SIZE(ath11k_smps_map)) return -EINVAL; return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, ath11k_smps_map[smps]); } static bool ath11k_mac_set_he_txbf_conf(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 param, value; int ret; if (!arvif->vif->bss_conf.he_support) return true; param = WMI_VDEV_PARAM_SET_HEMU_MODE; value = 0; if (arvif->vif->bss_conf.he_su_beamformer) { value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE); if (arvif->vif->bss_conf.he_mu_beamformer && arvif->vdev_type == WMI_VDEV_TYPE_AP) value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE); } if (arvif->vif->type != NL80211_IFTYPE_MESH_POINT) { value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) | FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE); if (arvif->vif->bss_conf.he_full_ul_mumimo) value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE); if (arvif->vif->bss_conf.he_su_beamformee) value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); } ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n", arvif->vdev_id, ret); return false; } param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE; value = FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) | FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE, HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n", arvif->vdev_id, ret); return false; } return true; } static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_he_cap *he_cap) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_he_cap_elem he_cap_elem = {}; struct ieee80211_sta_he_cap *cap_band = NULL; struct cfg80211_chan_def def; u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE; u32 hemode = 0; int ret; if (!vif->bss_conf.he_support) return true; if 
(vif->type != NL80211_IFTYPE_STATION) return false; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return false; if (def.chan->band == NL80211_BAND_2GHZ) cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap; else cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap; memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem)); if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) { if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE); } if (vif->type != NL80211_IFTYPE_MESH_POINT) { hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) | FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE); if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info)) if (HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE); if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE); } ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode); if (ret) { ath11k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n", hemode, ret); return false; } return true; } static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; struct ath11k_peer *peer; bool is_auth = false; struct ieee80211_sta_he_cap he_cap; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i assoc bssid %pM aid %d\n", arvif->vdev_id, arvif->bssid, arvif->aid); rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!ap_sta) { ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); rcu_read_unlock(); return; } /* he_cap here is updated at assoc success for sta mode only */ he_cap = ap_sta->deflink.he_cap; ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); rcu_read_unlock(); if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) { ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n", arvif->vdev_id, bss_conf->bssid); return; } peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); return; } if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); return; } ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ap_sta->deflink.ht_cap, le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return; } WARN_ON(arvif->is_up); arvif->aid = vif->cfg.aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid, NULL, 0, 0); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); return; } arvif->is_up = true; arvif->rekey_data.enable_offload = false; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up (associated) bssid %pM aid %d\n", 
arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); if (peer && peer->is_authorized) is_auth = true; spin_unlock_bh(&ar->ab->base_lock); if (is_auth) { ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, arvif->vdev_id, WMI_PEER_AUTHORIZE, 1); if (ret) ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); } ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, &bss_conf->he_obss_pd); if (ret) ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", arvif->vdev_id, ret); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_DTIM_POLICY, WMI_DTIM_POLICY_STICK); if (ret) ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n", arvif->vdev_id, ret); ath11k_mac_11d_scan_stop_all(ar->ab); } static void ath11k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i disassoc bssid %pM\n", arvif->vdev_id, arvif->bssid); ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "failed to down vdev %i: %d\n", arvif->vdev_id, ret); arvif->is_up = false; memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); cancel_delayed_work_sync(&arvif->connection_loss_work); } static u32 ath11k_mac_get_rate_hw_value(int bitrate) { u32 preamble; u16 hw_value; int rate; size_t i; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) { if (ath11k_legacy_rates[i].bitrate != bitrate) continue; hw_value = ath11k_legacy_rates[i].hw_value; rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble); return rate; } return -EINVAL; } static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; u32 vdev_param; u16 bitrate; int ret; lockdep_assert_held(&ar->conf_mutex); sband = ar->hw->wiphy->bands[def->chan->band]; basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; bitrate = sband->bitrates[basic_rate_idx].bitrate; hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate); if (hw_rate_code < 0) { ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate); return; } vdev_param = WMI_VDEV_PARAM_MGMT_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, hw_rate_code); if (ret) ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); /* For WCN6855, firmware will clear this param when vdev starts, hence * cache it here so that we can reconfigure it once vdev starts. 
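*
* Note: the replay of the cached rate is assumed to happen from the
* vdev start/restart path by sending WMI_VDEV_PARAM_MGMT_RATE again
* with ar->hw_rate_code; that path is outside this hunk, so this is
* an inference from the comment above rather than verified code.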
*/ ar->hw_rate_code = hw_rate_code; vdev_param = WMI_VDEV_PARAM_BEACON_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, hw_rate_code); if (ret) ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { struct ath11k *ar = arvif->ar; struct sk_buff *tmpl; int ret; u32 interval; bool unsol_bcast_probe_resp_enabled = false; if (info->fils_discovery.max_interval) { interval = info->fils_discovery.max_interval; tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif); if (tmpl) ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, tmpl); } else if (info->unsol_bcast_probe_resp_interval) { unsol_bcast_probe_resp_enabled = 1; interval = info->unsol_bcast_probe_resp_interval; tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw, arvif->vif); if (tmpl) ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, tmpl); } else { /* Disable */ return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false); } if (!tmpl) { ath11k_warn(ar->ab, "mac vdev %i failed to retrieve %s template\n", arvif->vdev_id, (unsol_bcast_probe_resp_enabled ? "unsolicited broadcast probe response" : "FILS discovery")); return -EPERM; } kfree_skb(tmpl); if (!ret) ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval, unsol_bcast_probe_resp_enabled); return ret; } static int ath11k_mac_config_obss_pd(struct ath11k *ar, struct ieee80211_he_obss_pd *he_obss_pd) { u32 bitmap[2], param_id, param_val, pdev_id; int ret; s8 non_srg_th = 0, srg_th = 0; pdev_id = ar->pdev->pdev_id; /* Set and enable SRG/non-SRG OBSS PD Threshold */ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD; if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id); if (ret) ath11k_warn(ar->ab, "failed to set obss_pd_threshold for pdev: %u\n", pdev_id); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n", he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset, he_obss_pd->max_offset); param_val = 0; if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) { non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD; } else { if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->non_srg_max_offset); else non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD; param_val |= ATH11K_OBSS_PD_NON_SRG_EN; } if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) { srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset; param_val |= ATH11K_OBSS_PD_SRG_EN; } if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT, ar->ab->wmi_ab.svc_map)) { param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM; param_val |= FIELD_PREP(GENMASK(15, 8), srg_th); } else { non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR; /* SRG not supported and threshold in dB */ param_val &= ~(ATH11K_OBSS_PD_SRG_EN | ATH11K_OBSS_PD_THRESHOLD_IN_DBM); } param_val |= (non_srg_th & GENMASK(7, 0)); ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set obss_pd_threshold for pdev: %u\n", pdev_id); return ret; } /* Enable OBSS PD for all access category */ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC; param_val = 0xf; ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set obss_pd_per_ac for pdev: %u\n", pdev_id); return ret; } /* Set SR Prohibit */ param_id = 
WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT; param_val = !!(he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED); ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n", pdev_id); return ret; } if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT, ar->ab->wmi_ab.svc_map)) return 0; /* Set SRG BSS Color Bitmap */ memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap)); ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set bss_color_bitmap for pdev: %u\n", pdev_id); return ret; } /* Set SRG Partial BSSID Bitmap */ memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap)); ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set partial_bssid_bitmap for pdev: %u\n", pdev_id); return ret; } memset(bitmap, 0xff, sizeof(bitmap)); /* Enable all BSS Colors for SRG */ ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set srg_color_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all partial BSSID mask for SRG */ ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set srg_bssid_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all BSS Colors for non-SRG */ ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set non_srg_color_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all partial BSSID mask for non-SRG */ ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set non_srg_bssid_en_bitmap pdev: %u\n", pdev_id); return ret; } return 0; } static bool ath11k_mac_supports_station_tpc(struct ath11k *ar, struct ath11k_vif *arvif, const struct cfg80211_chan_def *chandef) { return ath11k_wmi_supports_6ghz_cc_ext(ar) && test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) && arvif->vdev_type == WMI_VDEV_TYPE_STA && arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE && chandef->chan && chandef->chan->band == NL80211_BAND_6GHZ; } static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; u32 param_id, param_value; enum nl80211_band band; u32 vdev_param; int mcast_rate; u32 preamble; u16 hw_value; u16 bitrate; int ret = 0; u8 rateidx; u32 rate, param; u32 ipv4_cnt; mutex_lock(&ar->conf_mutex); if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, arvif->beacon_interval); if (ret) ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Beacon interval: %d set for VDEV: %d\n", arvif->beacon_interval, arvif->vdev_id); } if (changed & BSS_CHANGED_BEACON) { param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; param_value = WMI_BEACON_STAGGERED_MODE; ret = ath11k_wmi_pdev_set_param(ar, param_id, param_value, ar->pdev->pdev_id); if (ret) ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set staggered beacon mode for VDEV: %d\n", arvif->vdev_id); if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) 
{ ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) ath11k_warn(ar->ab, "failed to update bcn template: %d\n", ret); } if (arvif->bcca_zero_sent) arvif->do_not_send_tmpl = true; else arvif->do_not_send_tmpl = false; if (vif->bss_conf.he_support) { ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_BA_MODE, WMI_BA_MODE_BUFFER_SIZE_256); if (ret) ath11k_warn(ar->ab, "failed to set BA BUFFER SIZE 256 for vdev: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set BA BUFFER SIZE 256 for VDEV: %d\n", arvif->vdev_id); } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { arvif->dtim_period = info->dtim_period; param_id = WMI_VDEV_PARAM_DTIM_PERIOD; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, arvif->dtim_period); if (ret) ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n", arvif->vdev_id, ret); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "DTIM period: %d set for VDEV: %d\n", arvif->dtim_period, arvif->vdev_id); } if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { arvif->u.ap.ssid_len = vif->cfg.ssid_len; if (vif->cfg.ssid_len) memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); arvif->u.ap.hidden_ssid = info->hidden_ssid; } if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ether_addr_copy(arvif->bssid, info->bssid); if (changed & BSS_CHANGED_BEACON_ENABLED) { if (info->enable_beacon) ath11k_mac_set_he_txbf_conf(arvif); ath11k_control_beaconing(arvif, info); if (arvif->is_up && vif->bss_conf.he_support && vif->bss_conf.he_oper.params) { param_id = WMI_VDEV_PARAM_HEOPS_0_31; param_value = vif->bss_conf.he_oper.params; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "he oper param: %x set for VDEV: %d\n", param_value, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", param_value, arvif->vdev_id, ret); } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { u32 cts_prot; cts_prot = !!(info->use_cts_prot); param_id = WMI_VDEV_PARAM_PROTECTION_MODE; if (arvif->is_started) { ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, cts_prot); if (ret) ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n", cts_prot, arvif->vdev_id); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n"); } } if (changed & BSS_CHANGED_ERP_SLOT) { u32 slottime; if (info->use_short_slot) slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ param_id = WMI_VDEV_PARAM_SLOT_TIME; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, slottime); if (ret) ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set slottime: %d for VDEV: %d\n", slottime, arvif->vdev_id); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { u32 preamble; if (info->use_short_preamble) preamble = WMI_VDEV_PREAMBLE_SHORT; else preamble = WMI_VDEV_PREAMBLE_LONG; param_id = WMI_VDEV_PARAM_PREAMBLE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, preamble); if (ret) ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set preamble: %d for VDEV: %d\n", preamble, arvif->vdev_id); } if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) ath11k_bss_assoc(hw, vif, info); else 
ath11k_bss_disassoc(hw, vif); } if (changed & BSS_CHANGED_TXPOWER) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n", arvif->vdev_id, info->txpower); arvif->txpower = info->txpower; ath11k_mac_txpower_recalc(ar); } if (changed & BSS_CHANGED_PS && ar->ab->hw_params.supports_sta_ps) { arvif->ps = vif->cfg.ps; ret = ath11k_mac_config_ps(ar); if (ret) ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_MCAST_RATE && !ath11k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; mcast_rate = vif->bss_conf.mcast_rate[band]; if (mcast_rate > 0) rateidx = mcast_rate - 1; else rateidx = ffs(vif->bss_conf.basic_rates) - 1; if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX; bitrate = ath11k_legacy_rates[rateidx].bitrate; hw_value = ath11k_legacy_rates[rateidx].hw_value; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d mcast_rate %x\n", arvif->vdev_id, rate); vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) ath11k_warn(ar->ab, "failed to set mcast rate on vdev %i: %d\n", arvif->vdev_id, ret); vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) ath11k_warn(ar->ab, "failed to set bcast rate on vdev %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BASIC_RATES && !ath11k_mac_vif_chan(arvif->vif, &def)) ath11k_recalculate_mgmt_rate(ar, vif, &def); if (changed & BSS_CHANGED_TWT) { struct wmi_twt_enable_params twt_params = {}; if (info->twt_requester || info->twt_responder) { ath11k_wmi_fill_default_twt_params(&twt_params); ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params); } else { ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); } } if (changed & BSS_CHANGED_HE_OBSS_PD) ath11k_mac_config_obss_pd(ar, &info->he_obss_pd); if (changed & BSS_CHANGED_HE_BSS_COLOR) { if (vif->type == NL80211_IFTYPE_AP) { ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, info->he_bss_color.color, ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS, info->he_bss_color.enabled); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); param_id = WMI_VDEV_PARAM_BSS_COLOR; if (info->he_bss_color.enabled) param_value = info->he_bss_color.color << IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET; else param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) ath11k_warn(ar->ab, "failed to set bss color param on vdev %i: %d\n", arvif->vdev_id, ret); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "bss color param 0x%x set on vdev %i\n", param_value, arvif->vdev_id); } else if (vif->type == NL80211_IFTYPE_STATION) { ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar, arvif->vdev_id, 1); if (ret) ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n", arvif->vdev_id, ret); ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, 0, ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); } } if (changed & BSS_CHANGED_FTM_RESPONDER && arvif->ftm_responder != info->ftm_responder && 
test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map) && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT)) { arvif->ftm_responder = info->ftm_responder; param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, arvif->ftm_responder); if (ret) ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_FILS_DISCOVERY || changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) ath11k_mac_fils_discovery(arvif, info); if (changed & BSS_CHANGED_ARP_FILTER) { ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT); memcpy(arvif->arp_ns_offload.ipv4_addr, vif->cfg.arp_addr_list, ipv4_cnt * sizeof(u32)); memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN); arvif->arp_ns_offload.ipv4_count = ipv4_cnt; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n", vif->cfg.arp_addr_cnt, vif->addr, arvif->arp_ns_offload.ipv4_addr); } mutex_unlock(&ar->conf_mutex); } void __ath11k_mac_scan_finish(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: if (ar->scan.is_roc && ar->scan.roc_notify) ieee80211_remain_on_channel_expired(ar->hw); fallthrough; case ATH11K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { .aborted = ((ar->scan.state == ATH11K_SCAN_ABORTING) || (ar->scan.state == ATH11K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); } ar->scan.state = ATH11K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; cancel_delayed_work(&ar->scan.timeout); complete_all(&ar->scan.completed); break; } } void ath11k_mac_scan_finish(struct ath11k *ar) { spin_lock_bh(&ar->data_lock); __ath11k_mac_scan_finish(ar); spin_unlock_bh(&ar->data_lock); } static int ath11k_scan_stop(struct ath11k *ar) { struct scan_cancel_param arg = { .req_type = WLAN_SCAN_CANCEL_SINGLE, .scan_id = ATH11K_SCAN_ID, }; int ret; lockdep_assert_held(&ar->conf_mutex); /* TODO: Fill other STOP Params */ arg.pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg); if (ret) { ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret); goto out; } ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); if (ret == 0) { ath11k_warn(ar->ab, "failed to receive scan abort comple: timed out\n"); ret = -ETIMEDOUT; } else if (ret > 0) { ret = 0; } out: /* Scan state should be updated upon scan completion but in case * firmware fails to deliver the event (for whatever reason) it is * desired to clean up scan state anyway. Firmware may have just * dropped the scan completion event delivery due to transport pipe * being overflown with data and/or it can recover on its own before * next scan request is submitted. */ spin_lock_bh(&ar->data_lock); if (ar->scan.state != ATH11K_SCAN_IDLE) __ath11k_mac_scan_finish(ar); spin_unlock_bh(&ar->data_lock); return ret; } static void ath11k_scan_abort(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: /* This can happen if timeout worker kicked in and called * abortion while scan completion was being processed. 
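*
* The race is visible within this file: __ath11k_mac_scan_finish()
* moves scan.state to ATH11K_SCAN_IDLE and cancels the timeout work
* under ar->data_lock, but cancel_delayed_work() does not wait for a
* worker that is already running, so ath11k_scan_timeout_work() can
* still call ath11k_scan_abort() and land here; treating IDLE as a
* no-op keeps that race harmless.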
*/ break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n", ar->scan.state); break; case ATH11K_SCAN_RUNNING: ar->scan.state = ATH11K_SCAN_ABORTING; spin_unlock_bh(&ar->data_lock); ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret); spin_lock_bh(&ar->data_lock); break; } spin_unlock_bh(&ar->data_lock); } static void ath11k_scan_timeout_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, scan.timeout.work); mutex_lock(&ar->conf_mutex); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); } static int ath11k_start_scan(struct ath11k *ar, struct scan_req_params *arg) { int ret; unsigned long timeout = 1 * HZ; lockdep_assert_held(&ar->conf_mutex); if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND) ath11k_spectral_reset_buffer(ar); ret = ath11k_wmi_send_scan_start_cmd(ar, arg); if (ret) return ret; if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) { timeout = 5 * HZ; if (ar->supports_6ghz) timeout += 5 * HZ; } ret = wait_for_completion_timeout(&ar->scan.started, timeout); if (ret == 0) { ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret); return -ETIMEDOUT; } /* If we failed to start the scan, return error code at * this point. This is probably due to some issue in the * firmware, but no need to wedge the driver due to that... */ spin_lock_bh(&ar->data_lock); if (ar->scan.state == ATH11K_SCAN_IDLE) { spin_unlock_bh(&ar->data_lock); return -EINVAL; } spin_unlock_bh(&ar->data_lock); return 0; } static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; struct scan_req_params *arg = NULL; int ret = 0; int i; u32 scan_timeout; /* Firmwares advertising the support of triggering 11D algorithm * on the scan results of a regular scan expects driver to send * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID. * With this feature, separate 11D scan can be avoided since * regdomain can be determined with the scan results of the * regular scan. 
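*
* Hence the ordering below: when
* WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN is advertised, the 11D
* scan is started before WMI_START_SCAN_CMDID is issued; otherwise
* it is kicked only after the scan has been set up (see the second
* ath11k_mac_11d_scan_start() call near the end of this function).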
*/ if (ar->state_11d == ATH11K_11D_PREPARING && test_bit(WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN, ar->ab->wmi_ab.svc_map)) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: reinit_completion(&ar->scan.started); reinit_completion(&ar->scan.completed); ar->scan.state = ATH11K_SCAN_STARTING; ar->scan.is_roc = false; ar->scan.vdev_id = arvif->vdev_id; ret = 0; break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ret = -EBUSY; break; } spin_unlock_bh(&ar->data_lock); if (ret) goto exit; arg = kzalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { ret = -ENOMEM; goto exit; } ath11k_wmi_start_scan_init(ar, arg); arg->vdev_id = arvif->vdev_id; arg->scan_id = ATH11K_SCAN_ID; if (ar->ab->hw_params.single_pdev_only) arg->scan_f_filter_prb_req = 1; if (req->ie_len) { arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); if (!arg->extraie.ptr) { ret = -ENOMEM; goto exit; } arg->extraie.len = req->ie_len; } if (req->n_ssids) { arg->num_ssids = req->n_ssids; for (i = 0; i < arg->num_ssids; i++) { arg->ssid[i].length = req->ssids[i].ssid_len; memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid, req->ssids[i].ssid_len); } } else { arg->scan_f_passive = 1; } if (req->n_channels) { arg->num_chan = req->n_channels; arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), GFP_KERNEL); if (!arg->chan_list) { ret = -ENOMEM; goto exit; } for (i = 0; i < arg->num_chan; i++) { if (test_bit(WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL, ar->ab->wmi_ab.svc_map)) { arg->chan_list[i] = u32_encode_bits(req->channels[i]->center_freq, WMI_SCAN_CONFIG_PER_CHANNEL_MASK); /* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan * flags, then scan all PSC channels in 6 GHz band and * those non-PSC channels where RNR IE is found during * the legacy 2.4/5 GHz scan. * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set, * then all channels in 6 GHz will be scanned. 
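*
* Illustrative encoding of one entry, per the code below: a non-PSC
* 6 GHz channel scanned with NL80211_SCAN_FLAG_COLOCATED_6GHZ set
* becomes
* u32_encode_bits(center_freq, WMI_SCAN_CONFIG_PER_CHANNEL_MASK) |
* WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND, i.e. firmware visits the
* channel only if an RNR IE reported it during the 2.4/5 GHz pass.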
*/ if (req->channels[i]->band == NL80211_BAND_6GHZ && req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ && !cfg80211_channel_is_psc(req->channels[i])) arg->chan_list[i] |= WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND; } else { arg->chan_list[i] = req->channels[i]->center_freq; } } } if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { arg->scan_f_add_spoofed_mac_in_probe = 1; ether_addr_copy(arg->mac_addr.addr, req->mac_addr); ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask); } /* if duration is set, default dwell times will be overwritten */ if (req->duration) { arg->dwell_time_active = req->duration; arg->dwell_time_active_2g = req->duration; arg->dwell_time_active_6g = req->duration; arg->dwell_time_passive = req->duration; arg->dwell_time_passive_6g = req->duration; arg->burst_duration = req->duration; scan_timeout = min_t(u32, arg->max_rest_time * (arg->num_chan - 1) + (req->duration + ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * arg->num_chan, arg->max_scan_time); } else { scan_timeout = arg->max_scan_time; } /* Add a margin to account for event/command processing */ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD; ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); ar->scan.state = ATH11K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); } ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, msecs_to_jiffies(scan_timeout)); exit: if (arg) { kfree(arg->chan_list); kfree(arg->extraie.ptr); kfree(arg); } mutex_unlock(&ar->conf_mutex); if (ar->state_11d == ATH11K_11D_PREPARING) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); return ret; } static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); } static int ath11k_install_key(struct ath11k_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, const u8 *macaddr, u32 flags) { int ret; struct ath11k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { .vdev_id = arvif->vdev_id, .key_idx = key->keyidx, .key_len = key->keylen, .key_data = key->key, .key_flags = flags, .macaddr = macaddr, }; lockdep_assert_held(&arvif->ar->conf_mutex); reinit_completion(&ar->install_key_done); if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) return 0; if (cmd == DISABLE_KEY) { arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: arg.key_cipher = WMI_CIPHER_TKIP; arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; } if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags)) key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; install: ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg); if (ret) return ret; if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) return -ETIMEDOUT; return ar->install_key_status ? 
-EINVAL : 0; } static int ath11k_clear_peer_keys(struct ath11k_vif *arvif, const u8 *addr) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; int first_errno = 0; int ret; int i; u32 flags = 0; lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, addr); spin_unlock_bh(&ab->base_lock); if (!peer) return -ENOENT; for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { if (!peer->keys[i]) continue; /* key flags are not required to delete the key */ ret = ath11k_install_key(arvif, peer->keys[i], DISABLE_KEY, addr, flags); if (ret < 0 && first_errno == 0) first_errno = ret; if (ret < 0) ath11k_warn(ab, "failed to remove peer key %d: %d\n", i, ret); spin_lock_bh(&ab->base_lock); peer->keys[i] = NULL; spin_unlock_bh(&ab->base_lock); } return first_errno; } static int ath11k_set_group_keys(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; const u8 *addr = arvif->bssid; int i, ret, first_errno = 0; struct ath11k_peer *peer; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, addr); spin_unlock_bh(&ab->base_lock); if (!peer) return -ENOENT; for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { struct ieee80211_key_conf *key = peer->keys[i]; if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) continue; ret = ath11k_install_key(arvif, key, SET_KEY, addr, WMI_KEY_GROUP); if (ret < 0 && first_errno == 0) first_errno = ret; if (ret < 0) ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n", i, arvif->vdev_id, ret); } return first_errno; } static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; struct ath11k_sta *arsta; bool is_ap_with_no_sta; const u8 *peer_addr; int ret = 0; u32 flags = 0; /* BIP needs to be done in software */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) return 1; if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) return 1; if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; mutex_lock(&ar->conf_mutex); if (sta) peer_addr = sta->addr; else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) peer_addr = vif->bss_conf.bssid; else peer_addr = vif->addr; key->hw_key_idx = key->keyidx; /* the peer should not disappear in mid-way (unless FW goes awry) since * we already hold conf_mutex. we just make sure its there now. */ spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); /* flush the fragments cache during key (re)install to * ensure all frags in the new frag list belong to the same key. 
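*
* Security rationale (hedged summary): reassembling fragments that
* were protected by different keys could let forged fragments be
* spliced into a frame across a rekey, so the cache is purged under
* ab->base_lock together with the peer lookup.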
*/ if (peer && sta && cmd == SET_KEY) ath11k_peer_frags_flush(ar, peer); spin_unlock_bh(&ab->base_lock); if (!peer) { if (cmd == SET_KEY) { ath11k_warn(ab, "cannot install key for non-existent peer %pM\n", peer_addr); ret = -EOPNOTSUPP; goto exit; } else { /* if the peer doesn't exist there is no key to disable * anymore */ goto exit; } } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - flags |= WMI_KEY_PAIRWISE; + flags = WMI_KEY_PAIRWISE; else - flags |= WMI_KEY_GROUP; + flags = WMI_KEY_GROUP; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n", cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id, flags, arvif->vdev_type, arvif->num_stations); /* Allow group key clearing only in AP mode when no stations are * associated. There is a known race condition in firmware where * group addressed packets may be dropped if the key is cleared * and immediately set again during rekey. * * During GTK rekey, mac80211 issues a clear key (if the old key * exists) followed by an install key operation for same key * index. This causes ath11k to send two WMI commands in quick * succession: one to clear the old key and another to install the * new key in the same slot. * * Under certain conditions—especially under high load or time * sensitive scenarios, firmware may process these commands * asynchronously in a way that firmware assumes the key is * cleared whereas hardware has a valid key. This inconsistency * between hardware and firmware leads to group addressed packet * drops after rekey. * Only setting the same key again can restore a valid key in * firmware and allow packets to be transmitted. * * There is a use case where an AP can transition from Secure mode * to open mode without a vdev restart by just deleting all * associated peers and clearing key, Hence allow clear key for * that case alone. Mark arvif->reinstall_group_keys in such cases * and reinstall the same key when the first peer is added, * allowing firmware to recover from the race if it had occurred. 
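*
* Net effect in the code below: pairwise keys and every SET_KEY go
* through unconditionally; a group-key DISABLE_KEY is honoured only
* for an AP with no stations associated, and such a clear sets
* reinstall_group_keys so the cached group keys can be replayed via
* ath11k_set_group_keys() (presumably from the station-add path,
* which is outside this hunk) once the first peer appears.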
*/ is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP && !arvif->num_stations); - if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) { + if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) { ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret); goto exit; } ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key); if (ret) { ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret); goto exit; } - if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta) + if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta) arvif->reinstall_group_keys = true; } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); if (peer && cmd == SET_KEY) { peer->keys[key->keyidx] = key; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { peer->ucast_keyidx = key->keyidx; peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher); } else { peer->mcast_keyidx = key->keyidx; peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher); } } else if (peer && cmd == DISABLE_KEY) { peer->keys[key->keyidx] = NULL; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) peer->ucast_keyidx = 0; else peer->mcast_keyidx = 0; } else if (!peer) /* impossible unless FW goes crazy */ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { arsta = ath11k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (cmd == SET_KEY) arsta->pn_type = HAL_PN_TYPE_WPA; else arsta->pn_type = HAL_PN_TYPE_NONE; break; default: arsta->pn_type = HAL_PN_TYPE_NONE; break; } } spin_unlock_bh(&ab->base_lock); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) num_rates += hweight8(mask->control[band].ht_mcs[i]); return num_rates; } static int ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) num_rates += hweight16(mask->control[band].vht_mcs[i]); return num_rates; } static int ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) num_rates += hweight16(mask->control[band].he_mcs[i]); return num_rates; } static int ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 vht_rate, nss; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); nss = 0; for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { if (hweight16(mask->control[band].vht_mcs[i]) == 1) { nss = i + 1; vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate*/ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Setting Fixed VHT Rate for peer %pM. 
Device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1, WMI_RATE_PREAMBLE_VHT); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update STA %pM Fixed Rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 he_rate, nss; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); nss = 0; for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (hweight16(mask->control[band].he_mcs[i]) == 1) { nss = i + 1; he_rate = ffs(mask->control[band].he_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate */ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting fixed he rate for peer %pM, device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1, WMI_RATE_PREAMBLE_HE); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update sta %pM fixed rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_mac_set_peer_ht_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 ht_rate, nss = 0; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { if (hweight8(mask->control[band].ht_mcs[i]) == 1) { nss = i + 1; ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single HT Fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate*/ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Setting Fixed HT Rate for peer %pM. 
Device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(ht_rate, nss - 1, WMI_RATE_PREAMBLE_HT); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update STA %pM HT Fixed Rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_station_assoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool reassoc) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; int ret = 0; struct cfg80211_chan_def def; enum nl80211_band band; struct cfg80211_bitrate_mask *mask; u8 num_ht_rates, num_vht_rates, num_he_rates; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return -EPERM; band = def.chan->band; mask = &arvif->bitrate_mask; ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", sta->addr, arvif->vdev_id); return -ETIMEDOUT; } num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask); /* If single VHT/HE rate is configured (by set_bitrate_mask()), * peer_assoc will disable VHT/HE. This is now enabled by a peer specific * fixed param. * Note that all other rates and NSS will be disabled for this peer. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) { ret = ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } /* Re-assoc is run only to update supported rates for given station. It * doesn't make much sense to reconfigure the peer completely. 
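*
* Concretely, the early return below skips peer SMPS setup, the
* legacy-station RTS/CTS protection accounting and the U-APSD queue
* configuration, all of which were already applied at initial
* association.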
*/ if (reassoc) return 0; ret = ath11k_setup_peer_smps(ar, arvif, sta->addr, &sta->deflink.ht_cap, le16_to_cpu(sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } if (!sta->wme) { arvif->num_legacy_stations++; ret = ath11k_recalc_rtscts_prot(arvif); if (ret) return ret; } if (sta->wme && sta->uapsd_queues) { ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta); if (ret) { ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } } return 0; } static int ath11k_station_disassoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); if (!sta->wme) { arvif->num_legacy_stations--; ret = ath11k_recalc_rtscts_prot(arvif); if (ret) return ret; } ret = ath11k_clear_peer_keys(arvif, sta->addr); if (ret) { ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } static u32 ath11k_mac_max_nss(const u8 *ht_mcs_mask, const u16 *vht_mcs_mask, const u16 *he_mcs_mask) { return max3(ath11k_mac_max_ht_nss(ht_mcs_mask), ath11k_mac_max_vht_nss(vht_mcs_mask), ath11k_mac_max_he_nss(he_mcs_mask)); } static void ath11k_sta_rc_update_wk(struct work_struct *wk) { struct ath11k *ar; struct ath11k_vif *arvif; struct ath11k_sta *arsta; struct ieee80211_sta *sta; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; u32 changed, bw, nss, smps, bw_prev; int err, num_ht_rates, num_vht_rates, num_he_rates; const struct cfg80211_bitrate_mask *mask; struct peer_assoc_params peer_arg; enum wmi_phy_mode peer_phymode; arsta = container_of(wk, struct ath11k_sta, update_wk); sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); arvif = arsta->arvif; ar = arvif->ar; if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def))) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; spin_lock_bh(&ar->data_lock); changed = arsta->changed; arsta->changed = 0; bw = arsta->bw; bw_prev = arsta->bw_prev; nss = arsta->nss; smps = arsta->smps; spin_unlock_bh(&ar->data_lock); mutex_lock(&ar->conf_mutex); nss = max_t(u32, 1, nss); nss = min(nss, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask)); if (changed & IEEE80211_RC_BW_CHANGED) { /* Get the peer phymode */ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); peer_phymode = peer_arg.peer_phymode; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM peer bw %d phymode %d\n", sta->addr, bw, peer_phymode); if (bw > bw_prev) { /* BW is upgraded. In this case we send WMI_PEER_PHYMODE * followed by WMI_PEER_CHWIDTH */ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW upgrade for sta %pM new BW %d, old BW %d\n", sta->addr, bw, bw_prev); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) { ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n", sta->addr, peer_phymode, err); goto err_rc_bw_changed; } err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); } else { /* BW is downgraded. 
In this case we send WMI_PEER_CHWIDTH * followed by WMI_PEER_PHYMODE */ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW downgrade for sta %pM new BW %d,old BW %d\n", sta->addr, bw, bw_prev); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) { ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); goto err_rc_bw_changed; } err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n", sta->addr, peer_phymode, err); } } if (changed & IEEE80211_RC_NSS_CHANGED) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM nss %d\n", sta->addr, nss); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_NSS, nss); if (err) ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n", sta->addr, nss, err); } if (changed & IEEE80211_RC_SMPS_CHANGED) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM smps %d\n", sta->addr, smps); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, smps); if (err) ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n", sta->addr, smps, err); } if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { mask = &arvif->bitrate_mask; num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask); num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); /* Peer_assoc_prepare will reject vht rates in * bitrate_mask if its not available in range format and * sets vht tx_rateset as unsupported. So multiple VHT MCS * setting(eg. MCS 4,5,6) per peer is not supported here. * But, Single rate in VHT mask can be set as per-peer * fixed rate. But even if any HT rates are configured in * the bitrate mask, device will not switch to those rates * when per-peer Fixed rate is set. * TODO: Check RATEMASK_CMDID to support auto rates selection * across HT/VHT and for multiple VHT MCS support. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) { ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask, band); } else { /* If the peer is non-VHT/HE or no fixed VHT/HE rate * is provided in the new bitrate mask we set the * other rates using peer_assoc command. 
Also clear * the peer fixed rate settings as it has higher priority * than peer assoc */ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, WMI_FIXED_RATE_NONE); if (err) ath11k_warn(ar->ab, "failed to disable peer fixed rate for sta %pM: %d\n", sta->addr, err); ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); peer_arg.is_assoc = false; err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (err) ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, err); if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", sta->addr, arvif->vdev_id); } } err_rc_bw_changed: mutex_unlock(&ar->conf_mutex); } static void ath11k_sta_set_4addr_wk(struct work_struct *wk) { struct ath11k *ar; struct ath11k_vif *arvif; struct ath11k_sta *arsta; struct ieee80211_sta *sta; int ret = 0; arsta = container_of(wk, struct ath11k_sta, set_4addr_wk); sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); arvif = arsta->arvif; ar = arvif->ar; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting USE_4ADDR for peer %pM\n", sta->addr); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n", sta->addr, ret); } static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ath11k *ar = arvif->ar; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return 0; if (ar->num_stations >= ar->max_num_stations) return -ENOBUFS; ar->num_stations++; arvif->num_stations++; return 0; } static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ath11k *ar = arvif->ar; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return; ar->num_stations--; arvif->num_stations--; } static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar, struct ieee80211_sta *sta) { u32 bw = WMI_PEER_CHWIDTH_20MHZ; switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_20: bw = WMI_PEER_CHWIDTH_20MHZ; break; case IEEE80211_STA_RX_BW_40: bw = WMI_PEER_CHWIDTH_40MHZ; break; case IEEE80211_STA_RX_BW_80: bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: bw = WMI_PEER_CHWIDTH_160MHZ; break; default: ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n", sta->deflink.bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; } return bw; } static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; s16 txpwr; if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { txpwr = sta->deflink.txpwr.power; if (!txpwr) return -EINVAL; } if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL) return -EINVAL; mutex_lock(&ar->conf_mutex); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_FIXED_PWR, txpwr); if (ret) { ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n", ret); goto out; } out: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled) { struct ath11k *ar = hw->priv; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); if (enabled &&
!arsta->use_4addr_set) { ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); arsta->use_4addr_set = true; } } static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, u32 changed) { struct ieee80211_sta *sta = link_sta->sta; struct ath11k *ar = hw->priv; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; u32 bw, smps; spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (!peer) { spin_unlock_bh(&ar->ab->base_lock); ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", sta->addr, arvif->vdev_id); return; } spin_unlock_bh(&ar->ab->base_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, sta->deflink.smps_mode); spin_lock_bh(&ar->data_lock); if (changed & IEEE80211_RC_BW_CHANGED) { bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); arsta->bw_prev = arsta->bw; arsta->bw = bw; } if (changed & IEEE80211_RC_NSS_CHANGED) arsta->nss = sta->deflink.rx_nss; if (changed & IEEE80211_RC_SMPS_CHANGED) { smps = WMI_PEER_SMPS_PS_NONE; switch (sta->deflink.smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: smps = WMI_PEER_SMPS_PS_NONE; break; case IEEE80211_SMPS_STATIC: smps = WMI_PEER_SMPS_STATIC; break; case IEEE80211_SMPS_DYNAMIC: smps = WMI_PEER_SMPS_DYNAMIC; break; default: ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", sta->deflink.smps_mode, sta->addr); smps = WMI_PEER_SMPS_PS_NONE; break; } arsta->smps = smps; } arsta->changed |= changed; spin_unlock_bh(&ar->data_lock); ieee80211_queue_work(hw, &arsta->update_wk); } static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif, u16 ac, bool enable) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 value = 0; int ret = 0; if (arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; switch (ac) { case IEEE80211_AC_VO: value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; break; case IEEE80211_AC_VI: value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; break; case IEEE80211_AC_BE: value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; break; case IEEE80211_AC_BK: value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; break; } if (enable) arvif->u.sta.uapsd |= value; else arvif->u.sta.uapsd &= ~value; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_UAPSD, arvif->u.sta.uapsd); if (ret) { ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret); goto exit; } if (arvif->u.sta.uapsd) value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; else value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_RX_WAKE_POLICY, value); if (ret) ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret); exit: return ret; } static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = hw->priv; struct wmi_wmm_params_arg *p; int ret; switch (ac) { case IEEE80211_AC_VO: p = &arvif->muedca_params.ac_vo; break; case IEEE80211_AC_VI: p = &arvif->muedca_params.ac_vi; break; case IEEE80211_AC_BE: p = &arvif->muedca_params.ac_be; break; case IEEE80211_AC_BK: p 
= &arvif->muedca_params.ac_bk; break; default: ath11k_warn(ar->ab, "error ac: %d", ac); return -EINVAL; } p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0)); p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4)); p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0)); p->txop = params->mu_edca_param_rec.mu_edca_timer; ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, &arvif->muedca_params, WMI_WMM_PARAM_TYPE_11AX_MU_EDCA); return ret; } static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; mutex_lock(&ar->conf_mutex); switch (ac) { case IEEE80211_AC_VO: p = &arvif->wmm_params.ac_vo; break; case IEEE80211_AC_VI: p = &arvif->wmm_params.ac_vi; break; case IEEE80211_AC_BE: p = &arvif->wmm_params.ac_be; break; case IEEE80211_AC_BK: p = &arvif->wmm_params.ac_bk; break; } if (WARN_ON(!p)) { ret = -EINVAL; goto exit; } p->cwmin = params->cw_min; p->cwmax = params->cw_max; p->aifs = params->aifs; p->txop = params->txop; ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, &arvif->wmm_params, WMI_WMM_PARAM_TYPE_LEGACY); if (ret) { ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret); goto exit; } if (params->mu_edca) { ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac, params); if (ret) { ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret); goto exit; } } ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret); exit: mutex_unlock(&ar->conf_mutex); return ret; } static struct ieee80211_sta_ht_cap ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) { int i; struct ieee80211_sta_ht_cap ht_cap = {}; u32 ar_vht_cap = ar->pdev->cap.vht_cap; if (!(ar_ht_cap & WMI_HT_CAP_ENABLED)) return ht_cap; ht_cap.ht_supported = 1; ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; if (ar_ht_cap & WMI_HT_CAP_HT20_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; if (ar_ht_cap & WMI_HT_CAP_HT40_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) { u32 smps; smps = WLAN_HT_CAP_SM_PS_DYNAMIC; smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; ht_cap.cap |= smps; } if (ar_ht_cap & WMI_HT_CAP_TX_STBC) ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; if (ar_ht_cap & WMI_HT_CAP_RX_STBC) { u32 stbc; stbc = ar_ht_cap; stbc &= WMI_HT_CAP_RX_STBC; stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc &= IEEE80211_HT_CAP_RX_STBC; ht_cap.cap |= stbc; } if (ar_ht_cap & WMI_HT_CAP_RX_LDPC) ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT) ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; for (i = 0; i < ar->num_rx_chains; i++) { if (rate_cap_rx_chainmask & BIT(i)) ht_cap.mcs.rx_mask[i] = 0xFF; } ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; return ht_cap; } static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) { u32 value = 0; struct ath11k *ar = arvif->ar; int nsts; int sound_dim; u32 vht_cap = 
ar->pdev->cap.vht_cap; u32 vdev_param = WMI_VDEV_PARAM_TXBF; if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); } if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { sound_dim = vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; if (sound_dim > (ar->num_tx_chains - 1)) sound_dim = ar->num_tx_chains - 1; value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); } if (!value) return 0; if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) { value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) && arvif->vdev_type == WMI_VDEV_TYPE_AP) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; } /* TODO: SUBFEE not validated in HK, disable here until validated? */ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) { value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && arvif->vdev_type == WMI_VDEV_TYPE_STA) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; } return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, value); } static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) { bool subfer, subfee; int sound_dim = 0, nsts = 0; subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)); subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); if (ar->num_tx_chains < 2) { *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); subfer = false; } if (ar->num_rx_chains < 2) { *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); subfee = false; } /* If SU Beamformer is not set, then disable MU Beamformer Capability */ if (!subfer) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); /* If SU Beamformee is not set, then disable MU Beamformee Capability */ if (!subfee) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; /* Enable Sounding Dimension Field only if SU BF is enabled */ if (subfer) { if (sound_dim > (ar->num_tx_chains - 1)) sound_dim = ar->num_tx_chains - 1; sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; *vht_cap |= sound_dim; } /* Enable Beamformee STS Field only if SU BF is enabled */ if (subfee) { nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; *vht_cap |= nsts; } } static struct ieee80211_sta_vht_cap ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, u32 rate_cap_rx_chainmask) { struct ieee80211_sta_vht_cap vht_cap = {}; u16 txmcs_map, rxmcs_map; int i; vht_cap.vht_supported = 1; vht_cap.cap = ar->pdev->cap.vht_cap; if (ar->pdev->cap.nss_ratio_enabled) vht_cap.vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); rxmcs_map = 0; txmcs_map = 0; for (i = 0; i < 8; i++) { if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i)) txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); else txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i)) rxmcs_map |=
IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); else rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } if (rate_cap_tx_chainmask <= 1) vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map); vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map); return vht_cap; } static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap, u32 *ht_cap_info) { struct ieee80211_supported_band *band; u32 rate_cap_tx_chainmask; u32 rate_cap_rx_chainmask; u32 ht_cap; rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift; rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { band = &ar->mac.sbands[NL80211_BAND_2GHZ]; ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info; if (ht_cap_info) *ht_cap_info = ht_cap; band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, rate_cap_rx_chainmask); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && (ar->ab->hw_params.single_pdev_only || !ar->supports_6ghz)) { band = &ar->mac.sbands[NL80211_BAND_5GHZ]; ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; if (ht_cap_info) *ht_cap_info = ht_cap; band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, rate_cap_rx_chainmask); band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask, rate_cap_rx_chainmask); } } static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant) { /* TODO: Check the requested chainmask against the supported * chainmask table which is advertised in extended_service_ready event */ return 0; } static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet, u8 *he_ppet) { int nss, ru; u8 bit = 7; he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK; he_ppet[0] |= (fw_ppet->ru_bit_mask << IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { for (ru = 0; ru < 4; ru++) { u8 val; int i; if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) continue; val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & 0x3f; val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); for (i = 5; i >= 0; i--) { he_ppet[bit / 8] |= ((val >> i) & 0x1) << ((bit % 8)); bit++; } } } } static void ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) { u8 m; m = IEEE80211_HE_MAC_CAP0_TWT_RES | IEEE80211_HE_MAC_CAP0_TWT_REQ; he_cap_elem->mac_cap_info[0] &= ~m; m = IEEE80211_HE_MAC_CAP2_TRS | IEEE80211_HE_MAC_CAP2_BCAST_TWT | IEEE80211_HE_MAC_CAP2_MU_CASCADING; he_cap_elem->mac_cap_info[2] &= ~m; m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED | IEEE80211_HE_MAC_CAP2_BCAST_TWT | IEEE80211_HE_MAC_CAP2_MU_CASCADING; he_cap_elem->mac_cap_info[3] &= ~m; m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG | IEEE80211_HE_MAC_CAP4_BQR; he_cap_elem->mac_cap_info[4] &= ~m; m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; he_cap_elem->mac_cap_info[5] &= ~m; m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; he_cap_elem->phy_cap_info[2] &= ~m; m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; he_cap_elem->phy_cap_info[3] &= ~m; m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; he_cap_elem->phy_cap_info[4] &= ~m; m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; he_cap_elem->phy_cap_info[5] &= ~m; m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; he_cap_elem->phy_cap_info[6] &= ~m; m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; he_cap_elem->phy_cap_info[7] &= ~m; m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; he_cap_elem->phy_cap_info[8] &= ~m; m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; he_cap_elem->phy_cap_info[9] &= ~m; } static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap, struct ath11k_band_cap *bcap) { u8 val; bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, WLAN_HT_CAP_SM_PS_DYNAMIC); else bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, WLAN_HT_CAP_SM_PS_DISABLED); val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, pcap->vht_cap); bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val); val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap); bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val); if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; return cpu_to_le16(bcap->he_6ghz_capa); } static void ath11k_mac_set_hemcsmap(struct ath11k *ar, struct ath11k_pdev_cap *cap, struct ieee80211_sta_he_cap *he_cap, int band) { u16 txmcs_map, rxmcs_map; u32 i; rxmcs_map = 0; txmcs_map = 0; for (i = 0; i < 8; i++) { if (i < ar->num_tx_chains && (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i)) txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); else txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); if (i < ar->num_rx_chains && (ar->cfg_rx_chainmask >> cap->tx_chain_mask_shift) & BIT(i)) rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); else rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); } he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(txmcs_map & 0xffff); he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(txmcs_map & 0xffff); he_cap->he_mcs_nss_supp.rx_mcs_80p80 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_80p80 = cpu_to_le16(txmcs_map & 0xffff); } static int ath11k_mac_copy_he_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap, struct ieee80211_sband_iftype_data *data, int band) { int i, idx = 0; for (i = 0; i < NUM_NL80211_IFTYPES; i++) { struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; struct ath11k_band_cap *band_cap = &cap->band[band]; struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; switch (i) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: break; default: 
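/* Only station, AP and mesh point interfaces advertise HE capability data; every other interface type is skipped here. */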
continue; } data[idx].types_mask = BIT(i); he_cap->has_he = true; memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, sizeof(he_cap_elem->mac_cap_info)); memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, sizeof(he_cap_elem->phy_cap_info)); he_cap_elem->mac_cap_info[1] &= IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; he_cap_elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; switch (i) { case NL80211_IFTYPE_AP: he_cap_elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES; he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_MESH_POINT: ath11k_mac_filter_he_cap_mesh(he_cap_elem); break; } ath11k_mac_set_hemcsmap(ar, cap, he_cap, band); memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) ath11k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); if (band == NL80211_BAND_6GHZ) { data[idx].he_6ghz_capa.capa = ath11k_mac_setup_he_6ghz_cap(cap, band_cap); } idx++; } return idx; } static void ath11k_mac_setup_he_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap) { struct ieee80211_supported_band *band; int count; if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_2GHZ], NL80211_BAND_2GHZ); band = &ar->mac.sbands[NL80211_BAND_2GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_2GHZ], count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_5GHZ], NL80211_BAND_5GHZ); band = &ar->mac.sbands[NL80211_BAND_5GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_5GHZ], count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_6GHZ], NL80211_BAND_6GHZ); band = &ar->mac.sbands[NL80211_BAND_6GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_6GHZ], count); } } static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) { int ret; lockdep_assert_held(&ar->conf_mutex); if (ath11k_check_chain_mask(ar, tx_ant, true)) return -EINVAL; if (ath11k_check_chain_mask(ar, rx_ant, false)) return -EINVAL; ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; if (ar->state != ATH11K_STATE_ON && ar->state != ATH11K_STATE_RESTARTED) return 0; ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, tx_ant, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n", ret, tx_ant); return ret; } ar->num_tx_chains = get_num_chains(tx_ant); ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK, rx_ant, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n", ret, rx_ant); return ret; } ar->num_rx_chains = get_num_chains(rx_ant); /* Reload HT/VHT/HE capability */ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); ath11k_mac_setup_he_cap(ar, &ar->pdev->cap); return 0; } static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb) { int num_mgmt; ieee80211_free_txskb(ar->hw, skb); 
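/* Drop accounting: the pending mgmt tx counter is decremented below and any waiter on txmgmt_empty_waitq is woken once it reaches zero. */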
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); if (num_mgmt < 0) WARN_ON_ONCE(1); if (!num_mgmt) wake_up(&ar->txmgmt_empty_waitq); } static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id) { struct sk_buff *msdu; struct ieee80211_tx_info *info; spin_lock_bh(&ar->txmgmt_idr_lock); msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); if (!msdu) return; dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); ath11k_mgmt_over_wmi_tx_drop(ar, msdu); } int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) { struct ath11k *ar = ctx; ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); struct ath11k *ar = skb_cb->ar; if (skb_cb->vif == vif) ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, struct sk_buff *skb) { struct ath11k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ieee80211_tx_info *info; enum hal_encrypt_type enctype; unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; ATH11K_SKB_CB(skb)->ar = ar; spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); spin_unlock_bh(&ar->txmgmt_idr_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "tx mgmt frame, buf id %d\n", buf_id); if (buf_id < 0) return -ENOSPC; info = IEEE80211_SKB_CB(skb); if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET)) ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET"); enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype); skb_put(skb, mic_len); } } paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, paddr)) { ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n"); ret = -EIO; goto err_free_idr; } ATH11K_SKB_CB(skb)->paddr = paddr; ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb); if (ret) { ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret); goto err_unmap_buf; } return 0; err_unmap_buf: dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); err_free_idr: spin_lock_bh(&ar->txmgmt_idr_lock); idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); return ret; } static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) { struct sk_buff *skb; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) ath11k_mgmt_over_wmi_tx_drop(ar, skb); } static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); struct ath11k_skb_cb *skb_cb; struct ath11k_vif *arvif; struct sk_buff *skb; int ret; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { skb_cb = ATH11K_SKB_CB(skb); if (!skb_cb->vif) { ath11k_warn(ar->ab, "no vif found for mgmt frame\n"); ath11k_mgmt_over_wmi_tx_drop(ar, skb); continue; } arvif = ath11k_vif_to_arvif(skb_cb->vif); 
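/* Send under conf_mutex so the vdev cannot be deleted while the frame is handed to firmware; frames for vdevs no longer in allocated_vdev_map are dropped below. */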
mutex_lock(&ar->conf_mutex); if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", arvif->vdev_id, ret); ath11k_mgmt_over_wmi_tx_drop(ar, skb); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "tx mgmt frame, vdev_id %d\n", arvif->vdev_id); } } else { ath11k_warn(ar->ab, "dropping mgmt frame for vdev %d, is_started %d\n", arvif->vdev_id, arvif->is_started); ath11k_mgmt_over_wmi_tx_drop(ar, skb); } mutex_unlock(&ar->conf_mutex); } } static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, bool is_prb_rsp) { struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; /* Drop probe response packets when the pending management tx * count has reached a certain threshold, so as to prioritize * other mgmt packets like auth and assoc to be sent on time * for establishing successful connections. */ if (is_prb_rsp && atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) { ath11k_warn(ar->ab, "dropping probe response as pending queue is almost full\n"); return -ENOSPC; } if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) { ath11k_warn(ar->ab, "mgmt tx queue is full\n"); return -ENOSPC; } skb_queue_tail(q, skb); atomic_inc(&ar->num_pending_mgmt_tx); queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work); return 0; } static void ath11k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ath11k *ar = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; struct ath11k_sta *arsta = NULL; u32 info_flags = info->flags; bool is_prb_rsp; int ret; memset(skb_cb, 0, sizeof(*skb_cb)); skb_cb->vif = vif; if (key) { skb_cb->cipher = key->cipher; skb_cb->flags |= ATH11K_SKB_CIPHER_SET; } if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP; } else if (ieee80211_is_mgmt(hdr->frame_control)) { is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp); if (ret) { ath11k_warn(ar->ab, "failed to queue management frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } return; } if (control->sta) arsta = ath11k_sta_to_arsta(control->sta); ret = ath11k_dp_tx(ar, arvif, arsta, skb); if (unlikely(ret)) { ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } } void ath11k_mac_drain_tx(struct ath11k *ar) { /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); cancel_work_sync(&ar->wmi_mgmt_tx_work); ath11k_mgmt_over_wmi_tx_purge(ar); } static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) { struct htt_rx_ring_tlv_filter tlv_filter = {}; struct ath11k_base *ab = ar->ab; int i, ret = 0; u32 ring_id; if (enable) { tlv_filter = ath11k_mac_mon_status_filter_default; if (ath11k_debugfs_rx_filter(ar)) tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i, HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, 
&tlv_filter); } if (enable && !ar->ab->hw_params.rxdma1_enable) mod_timer(&ar->ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); return ret; } static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab) { int recovery_start_count; if (!ab->is_reset) return; recovery_start_count = atomic_inc_return(&ab->recovery_start_count); ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count); if (recovery_start_count == ab->num_radios) { complete(&ab->recovery_start); ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started successfully\n"); } ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting for reconfigure...\n"); wait_for_completion_timeout(&ab->reconfigure_complete, ATH11K_RECONFIGURE_TIMEOUT_HZ); } static int ath11k_mac_op_start(struct ieee80211_hw *hw) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_pdev *pdev = ar->pdev; int ret; if (ath11k_ftm_mode) { ath11k_warn(ab, "mac operations not supported in factory test mode\n"); return -EOPNOTSUPP; } ath11k_mac_drain_tx(ar); mutex_lock(&ar->conf_mutex); switch (ar->state) { case ATH11K_STATE_OFF: ar->state = ATH11K_STATE_ON; break; case ATH11K_STATE_RESTARTING: ar->state = ATH11K_STATE_RESTARTED; ath11k_mac_wait_reconfigure(ab); break; case ATH11K_STATE_RESTARTED: case ATH11K_STATE_WEDGED: case ATH11K_STATE_ON: case ATH11K_STATE_FTM: WARN_ON(1); ret = -EINVAL; goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable PMF QOS: %d\n", ret); goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret); goto err; } if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr); if (ret) { ath11k_err(ab, "failed to set prob req oui: %i\n", ret); goto err; } } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to set ac override for ARP: %d\n", ret); goto err; } ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to offload radar detection: %d\n", ret); goto err; } ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_DEFAULT); if (ret) { ath11k_err(ab, "failed to req ppdu stats: %d\n", ret); goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: %d\n", ret); goto err; } __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); /* TODO: Do we need to enable ANI? */ ath11k_reg_update_chan_list(ar, false); ar->num_started_vdevs = 0; ar->num_created_vdevs = 0; ar->num_peers = 0; ar->allocated_vdev_map = 0; /* Configure monitor status ring with default rx_filter to get rx status * such as rssi, rx_duration.
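* A debugfs-configured rx_filter, if any, takes precedence over the default filter (see ath11k_mac_config_mon_status_default()).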
*/ ret = ath11k_mac_config_mon_status_default(ar, true); if (ret) { ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", ret); goto err; } /* Configure the hash seed for hash based reo dest ring selection */ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id); /* allow device to enter IMPS */ if (ab->hw_params.idle_ps) { ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG, 1, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to enable idle ps: %d\n", ret); goto err; } } mutex_unlock(&ar->conf_mutex); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], &ab->pdevs[ar->pdev_idx]); return 0; err: ar->state = ATH11K_STATE_OFF; mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ath11k *ar = hw->priv; struct htt_ppdu_stats_info *ppdu_stats, *tmp; struct scan_chan_list_params *params; int ret; ath11k_mac_drain_tx(ar); mutex_lock(&ar->conf_mutex); ret = ath11k_mac_config_mon_status_default(ar, false); if (ret) ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", ret); clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); ar->state = ATH11K_STATE_OFF; mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ar->ab->update_11d_work); if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { list_del(&ppdu_stats->list); kfree(ppdu_stats); } while ((params = list_first_entry_or_null(&ar->channel_update_queue, struct scan_chan_list_params, list))) { list_del(&params->list); kfree(params); } spin_unlock_bh(&ar->data_lock); rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); atomic_set(&ar->num_pending_mgmt_tx, 0); } static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif, u32 *flags, u32 *tx_vdev_id) { struct ath11k *ar = arvif->ar; struct ath11k_vif *tx_arvif; *tx_vdev_id = 0; tx_arvif = ath11k_mac_get_tx_arvif(arvif); if (!tx_arvif) { *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP; return 0; } if (arvif->vif->bss_conf.nontransmitted) { if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy) return -EINVAL; *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP; *tx_vdev_id = tx_arvif->vdev_id; } else if (tx_arvif == arvif) { *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP; } else { return -EINVAL; } if (arvif->vif->bss_conf.ema_ap) *flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE; return 0; } static int ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif, struct vdev_create_params *params) { struct ath11k *ar = arvif->ar; struct ath11k_pdev *pdev = ar->pdev; int ret; params->if_id = arvif->vdev_id; params->type = arvif->vdev_type; params->subtype = arvif->vdev_subtype; params->pdev_id = pdev->pdev_id; params->mbssid_flags = 0; params->mbssid_tx_vdev_id = 0; if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, ar->ab->wmi_ab.svc_map)) { ret = ath11k_mac_setup_vdev_params_mbssid(arvif, &params->mbssid_flags, &params->mbssid_tx_vdev_id); if (ret) return ret; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_5GHZ].rx =
ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) { params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains; } return 0; } static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 param_id, param_value; int ret; param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE; if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET || (vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP)) vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED | IEEE80211_OFFLOAD_DECAP_ENABLED); if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) param_value = ATH11K_HW_TXRX_ETHERNET; else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) param_value = ATH11K_HW_TXRX_RAW; else param_value = ATH11K_HW_TXRX_NATIVE_WIFI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", arvif->vdev_id, ret); vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; } param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE; if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED) param_value = ATH11K_HW_TXRX_ETHERNET; else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) param_value = ATH11K_HW_TXRX_RAW; else param_value = ATH11K_HW_TXRX_NATIVE_WIFI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n", arvif->vdev_id, ret); vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; } } static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP) return true; } } return false; } void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id) { struct wmi_11d_scan_start_params param; int ret; mutex_lock(&ar->ab->vdev_id_11d_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev id for 11d scan %d\n", ar->vdev_id_11d_scan); if (ar->regdom_set_by_user) goto fin; if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) goto fin; if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) goto fin; if (ath11k_mac_vif_ap_active_any(ar->ab)) goto fin; param.vdev_id = vdev_id; param.start_interval_msec = 0; param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "start 11d scan\n"); ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param); if (ret) { ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n", vdev_id, ret); } else { ar->vdev_id_11d_scan = vdev_id; if (ar->state_11d == ATH11K_11D_PREPARING) ar->state_11d = ATH11K_11D_RUNNING; } fin: if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } mutex_unlock(&ar->ab->vdev_id_11d_lock); } void ath11k_mac_11d_scan_stop(struct ath11k *ar) { int ret; u32 vdev_id; if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) return; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d scan\n"); mutex_lock(&ar->ab->vdev_id_11d_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d vdev id %d\n", ar->vdev_id_11d_scan); if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE;
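/* Wake any task still waiting for the now-abandoned 11d scan preparation. */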
complete(&ar->completed_11d_scan); } if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) { vdev_id = ar->vdev_id_11d_scan; ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop 11d scan vdev %d ret: %d\n", vdev_id, ret); } else { ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } } mutex_unlock(&ar->ab->vdev_id_11d_lock); } void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; ath11k_dbg(ab, ATH11K_DBG_MAC, "stop soc 11d scan\n"); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; ath11k_mac_11d_scan_stop(ar); } } static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif) { unsigned long time_left; struct ieee80211_vif *vif = arvif->vif; int ret = 0; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n", arvif->vdev_id, ret); return ret; } time_left = wait_for_completion_timeout(&ar->vdev_delete_done, ATH11K_VDEV_DELETE_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); return -ETIMEDOUT; } ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id); ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ar->num_created_vdevs--; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); return ret; } static void ath11k_mac_bcn_tx_work(struct work_struct *work) { struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, bcn_tx_work); mutex_lock(&arvif->ar->conf_mutex); ath11k_mac_bcn_tx_event(arvif); mutex_unlock(&arvif->ar->conf_mutex); } static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct vdev_create_params vdev_param = {}; struct peer_create_params peer_param; u32 param_id, param_value; u16 nss; int i; int ret, fbret; int bit; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&ar->conf_mutex); if (vif->type == NL80211_IFTYPE_AP && ar->num_peers > (ar->max_num_peers - 1)) { ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); ret = -ENOBUFS; goto err; } if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) { ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n", ar->num_created_vdevs, TARGET_NUM_VDEVS(ab)); ret = -EBUSY; goto err; } memset(arvif, 0, sizeof(*arvif)); arvif->ar = ar; arvif->vif = vif; INIT_LIST_HEAD(&arvif->list); INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work); INIT_DELAYED_WORK(&arvif->connection_loss_work, ath11k_mac_vif_sta_connection_loss_work); for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { arvif->bitrate_mask.control[i].legacy = 0xffffffff; arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI; memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].ht_mcs)); memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].vht_mcs)); memset(arvif->bitrate_mask.control[i].he_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].he_mcs)); } bit = __ffs64(ab->free_vdev_map); arvif->vdev_id = bit; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { case NL80211_IFTYPE_UNSPECIFIED: case
NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; break; case NL80211_IFTYPE_MESH_POINT: arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; fallthrough; case NL80211_IFTYPE_AP: arvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; ar->monitor_vdev_id = bit; break; case NL80211_IFTYPE_P2P_DEVICE: arvif->vdev_type = WMI_VDEV_TYPE_STA; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; break; default: WARN_ON(1); break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "add interface id %d type %d subtype %d map %llx\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, ab->free_vdev_map); vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1); for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1); ret = ath11k_mac_setup_vdev_create_params(arvif, &vdev_param); if (ret) { ath11k_warn(ab, "failed to create vdev parameters %d: %d\n", arvif->vdev_id, ret); goto err; } ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param); if (ret) { ath11k_warn(ab, "failed to create WMI vdev %d: %d\n", arvif->vdev_id, ret); goto err; } ar->num_created_vdevs++; ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n", vif->addr, arvif->vdev_id); ar->allocated_vdev_map |= 1LL << arvif->vdev_id; ab->free_vdev_map &= ~(1LL << arvif->vdev_id); spin_lock_bh(&ar->data_lock); list_add(&arvif->list, &ar->arvifs); spin_unlock_bh(&ar->data_lock); ath11k_mac_op_update_vif_offload(hw, vif); nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_NSS, nss); if (ret) { ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d: %d\n", arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret); goto err_vdev_del; } switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: peer_param.vdev_id = arvif->vdev_id; peer_param.peer_addr = vif->addr; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath11k_peer_create(ar, arvif, NULL, &peer_param); if (ret) { ath11k_warn(ab, "failed to create peer for AP on vdev %d: %d\n", arvif->vdev_id, ret); goto err_vdev_del; } ret = ath11k_mac_set_kickout(arvif); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n", arvif->vdev_id, ret); goto err_peer_del; } ath11k_mac_11d_scan_stop_all(ar->ab); break; case WMI_VDEV_TYPE_STA: param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY; param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n", arvif->vdev_id, ret); goto err_peer_del; } param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n", arvif->vdev_id, ret); goto err_peer_del; } param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT; param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n", arvif->vdev_id, ret); goto err_peer_del; } ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, WMI_STA_PS_MODE_DISABLED); if (ret) { ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", arvif->vdev_id, ret); goto err_peer_del; } if
(test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) { reinit_completion(&ar->completed_11d_scan); ar->state_11d = ATH11K_11D_PREPARING; } break; case WMI_VDEV_TYPE_MONITOR: set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); break; default: break; } arvif->txpower = vif->bss_conf.txpower; ret = ath11k_mac_txpower_recalc(ar); if (ret) goto err_peer_del; param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; param_value = ar->hw->wiphy->rts_threshold; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); } ath11k_dp_vdev_tx_attach(ar, arvif); if (vif->type != NL80211_IFTYPE_MONITOR && test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_vdev_create(ar); if (ret) ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d", ret); } if (ath11k_wmi_supports_6ghz_cc_ext(ar)) { struct cur_regulatory_info *reg_info; reg_info = &ab->reg_info_store[ar->pdev_idx]; ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n"); ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP); } mutex_unlock(&ar->conf_mutex); return 0; err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (fbret) { ath11k_warn(ar->ab, "fallback failed to delete peer addr %pM vdev_id %d ret %d\n", vif->addr, arvif->vdev_id, fbret); goto err; } } err_vdev_del: ath11k_mac_vdev_delete(ar, arvif); spin_lock_bh(&ar->data_lock); list_del(&arvif->list); spin_unlock_bh(&ar->data_lock); err: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); if (skb_cb->vif == vif) skb_cb->vif = NULL; return 0; } static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_base *ab = ar->ab; int ret; int i; cancel_delayed_work_sync(&arvif->connection_loss_work); cancel_work_sync(&arvif->bcn_tx_work); mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "remove interface (vdev %d)\n", arvif->vdev_id); ret = ath11k_spectral_vif_stop(arvif); if (ret) ath11k_warn(ab, "failed to stop spectral for vdev %i: %d\n", arvif->vdev_id, ret); if (arvif->vdev_type == WMI_VDEV_TYPE_STA) ath11k_mac_11d_scan_stop(ar); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (ret) ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", arvif->vdev_id, ret); } ret = ath11k_mac_vdev_delete(ar, arvif); if (ret) { ath11k_warn(ab, "failed to delete vdev %d: %d\n", arvif->vdev_id, ret); goto err_vdev_del; } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ar->monitor_vdev_id = -1; } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) && !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_vdev_delete(ar); if (ret) /* continue even if there's an error */ ath11k_warn(ar->ab, "failed to delete monitor vdev during remove interface: %d", ret); } err_vdev_del: spin_lock_bh(&ar->data_lock); list_del(&arvif->list); spin_unlock_bh(&ar->data_lock); ath11k_peer_cleanup(ar, arvif->vdev_id); idr_for_each(&ar->txmgmt_idr, ath11k_mac_vif_txmgmt_idr_remove, vif); for (i
= 0; i < ab->hw_params.max_tx_ring; i++) { spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock); idr_for_each(&ab->dp.tx_ring[i].txbuf_idr, ath11k_mac_vif_unref, vif); spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock); } /* Recalc txpower for remaining vdev */ ath11k_mac_txpower_recalc(ar); /* TODO: recalc traffic pause state based on the available vdevs */ mutex_unlock(&ar->conf_mutex); } /* FIXME: Has to be verified. */ #define SUPPORTED_FILTERS \ (FIF_ALLMULTI | \ FIF_CONTROL | \ FIF_PSPOLL | \ FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC | \ FIF_PROBE_REQ | \ FIF_FCSFAIL) static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant, u32 *rx_ant) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); *tx_ant = ar->cfg_tx_chainmask; *rx_ant = ar->cfg_rx_chainmask; mutex_unlock(&ar->conf_mutex); return 0; } static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, u32 tx_ant, u32 rx_ant) { struct ath11k *ar = hw->priv; int ret; mutex_lock(&ar->conf_mutex); ret = __ath11k_set_antenna(ar, tx_ant, rx_ant); mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct ath11k *ar = hw->priv; int ret = -EINVAL; mutex_lock(&ar->conf_mutex); switch (params->action) { case IEEE80211_AMPDU_RX_START: ret = ath11k_dp_rx_ampdu_start(ar, params); break; case IEEE80211_AMPDU_RX_STOP: ret = ath11k_dp_rx_ampdu_stop(ar, params); break; case IEEE80211_AMPDU_TX_START: case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: case IEEE80211_AMPDU_TX_OPERATIONAL: /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211 * Tx aggregation requests. */ ret = -EOPNOTSUPP; break; } mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx add freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); /* TODO: In case of multiple channel context, populate rx_channel from * Rx PPDU desc information. */ ar->rx_channel = ctx->def.chan; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); return 0; } static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx remove freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); /* TODO: In case of there is one more channel context left, populate * rx_channel with the channel of that remaining channel context. 
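* Until then the single channel context assumption is kept and rx_channel is simply cleared below.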
*/ ar->rx_channel = NULL; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx, bool restart) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; const struct cfg80211_chan_def *chandef = &ctx->def; int ret = 0; unsigned int dfs_cac_time; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); arg.vdev_id = arvif->vdev_id; arg.dtim_period = arvif->dtim_period; arg.bcn_intval = arvif->beacon_interval; arg.channel.freq = chandef->chan->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.min_power = 0; arg.channel.max_power = chandef->chan->max_power; arg.channel.max_reg_power = chandef->chan->max_reg_power; arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; arg.mbssid_flags = 0; arg.mbssid_tx_vdev_id = 0; if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, ar->ab->wmi_ab.svc_map)) { ret = ath11k_mac_setup_vdev_params_mbssid(arvif, &arg.mbssid_flags, &arg.mbssid_tx_vdev_id); if (ret) return ret; } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { arg.ssid = arvif->u.ap.ssid; arg.ssid_len = arvif->u.ap.ssid_len; arg.hidden_ssid = arvif->u.ap.hidden_ssid; /* For now allow DFS for AP mode */ arg.channel.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); arg.channel.freq2_radar = ctx->radar_enabled; arg.channel.passive = arg.channel.chan_radar; spin_lock_bh(&ab->base_lock); arg.regdomain = ar->ab->dfs_region; spin_unlock_bh(&ab->base_lock); } arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %d start center_freq %d phymode %s\n", arg.vdev_id, arg.channel.freq, ath11k_wmi_phymode_str(arg.channel.mode)); ret = ath11k_wmi_vdev_start(ar, &arg, restart); if (ret) { ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n", restart ? "restart" : "start", arg.vdev_id); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n", arg.vdev_id, restart ? "restart" : "start", ret); return ret; } /* TODO: For now we only set TPC power here. However when * channel changes, say CSA, it should be updated again. */ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) { ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx); ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, &arvif->reg_tpc_info); } if (!restart) ar->num_started_vdevs++; ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); /* Enable the CAC flag in the driver by checking that all sub-channels' DFS * state is NL80211_DFS_USABLE, which indicates CAC needs to be * done before channel usage. This flag is used to drop rx packets * during CAC.
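* The flag is cleared again in ath11k_mac_vdev_stop() once the vdev goes down.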
*/ /* TODO: Set the flag for other interface types as required */ if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) { set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy, chandef); ath11k_dbg(ab, ATH11K_DBG_MAC, "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", dfs_cac_time, arg.channel.freq, chandef->center_freq1, arg.vdev_id); } ret = ath11k_mac_set_txbf_conf(arvif); if (ret) ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n", arvif->vdev_id, ret); return 0; } static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; int ret; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", arvif->vdev_id, ret); goto err; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", arvif->vdev_id, ret); goto err; } WARN_ON(ar->num_started_vdevs == 0); ar->num_started_vdevs--; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC stopped for vdev %d\n", arvif->vdev_id); } return 0; err: return ret; } static int ath11k_mac_vdev_start(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath11k_mac_vdev_start_restart(arvif, ctx, false); } static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath11k_mac_vdev_start_restart(arvif, ctx, true); } struct ath11k_mac_change_chanctx_arg { struct ieee80211_chanctx_conf *ctx; struct ieee80211_vif_chanctx_switch *vifs; int n_vifs; int next_vif; }; static void ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_mac_change_chanctx_arg *arg = data; if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) return; arg->n_vifs++; } static void ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_mac_change_chanctx_arg *arg = data; struct ieee80211_chanctx_conf *ctx; ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); if (ctx != arg->ctx) return; if (WARN_ON(arg->next_vif == arg->n_vifs)) return; arg->vifs[arg->next_vif].vif = vif; arg->vifs[arg->next_vif].old_ctx = ctx; arg->vifs[arg->next_vif].new_ctx = ctx; arg->next_vif++; } static void ath11k_mac_update_vif_chan(struct ath11k *ar, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif, *tx_arvif; int ret; int i; bool monitor_vif = false; lockdep_assert_held(&ar->conf_mutex); /* Associated channel resources of all relevant vdevs * should be available for the channel switch now. */ /* TODO: Update ar->rx_channel */ for (i = 0; i < n_vifs; i++) { arvif = ath11k_vif_to_arvif(vifs[i].vif); if (WARN_ON(!arvif->is_started)) continue; /* change_chanctx can be called even before vdev_up from * ieee80211_start_ap->ieee80211_vif_use_channel-> * ieee80211_recalc_radar_chanctx. * * Firmware expects vdev_restart only if the vdev is up. * If the vdev is down then it expects vdev_stop->vdev_start.
*/ if (arvif->is_up) { ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx); if (ret) { ath11k_warn(ab, "failed to restart vdev %d: %d\n", arvif->vdev_id, ret); continue; } } else { ret = ath11k_mac_vdev_stop(arvif); if (ret) { ath11k_warn(ab, "failed to stop vdev %d: %d\n", arvif->vdev_id, ret); continue; } ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx); if (ret) ath11k_warn(ab, "failed to start vdev %d: %d\n", arvif->vdev_id, ret); continue; } ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n", ret); tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? tx_arvif->bssid : NULL, arvif->vif->bss_conf.bssid_index, 1 << arvif->vif->bss_conf.bssid_indicator); if (ret) { ath11k_warn(ab, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); continue; } } /* Restart the internal monitor vdev on new channel */ if (!monitor_vif && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d", ret); return; } ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d", ret); return; } } } static void ath11k_mac_update_active_vif_chan(struct ath11k *ar, struct ieee80211_chanctx_conf *ctx) { struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx }; lockdep_assert_held(&ar->conf_mutex); ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_change_chanctx_cnt_iter, &arg); if (arg.n_vifs == 0) return; arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL); if (!arg.vifs) return; ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_change_chanctx_fill_iter, &arg); ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); kfree(arg.vifs); } static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx change freq %u width %d ptr %p changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); /* This shouldn't really happen because channel switching should use * switch_vif_chanctx(). */ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) goto unlock; if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || changed & IEEE80211_CHANCTX_CHANGE_RADAR) ath11k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ unlock: mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) return -EBUSY; ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, arvif->chanctx.def.chan->center_freq, ret); return ret; } /* Reconfigure hardware rate code since it is cleared by firmware. 
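* A non-zero ar->hw_rate_code means a fixed management rate was programmed earlier, so it is re-applied below via WMI_VDEV_PARAM_MGMT_RATE after the delayed vdev start.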
*/ if (ar->hw_rate_code > 0) { u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, ar->hw_rate_code); if (ret) { ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); return ret; } } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr, NULL, 0, 0); if (ret) { ath11k_warn(ab, "failed put monitor up: %d\n", ret); return ret; } } arvif->is_started = true; /* TODO: Setup ps and cts/rts protection */ return 0; } static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(!arvif->is_started)) return -EBUSY; ret = ath11k_mac_vdev_stop(arvif); if (ret) { ath11k_warn(ab, "failed to stop vdev %i: %d\n", arvif->vdev_id, ret); return ret; } arvif->is_started = false; /* TODO: Setup ps and cts/rts protection */ return 0; } static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def) { if (chan_def->chan->flags & IEEE80211_CHAN_PSD) { switch (chan_def->width) { case NL80211_CHAN_WIDTH_20: return 1; case NL80211_CHAN_WIDTH_40: return 2; case NL80211_CHAN_WIDTH_80: return 4; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 8; default: return 1; } } else { switch (chan_def->width) { case NL80211_CHAN_WIDTH_20: return 1; case NL80211_CHAN_WIDTH_40: return 2; case NL80211_CHAN_WIDTH_80: return 3; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 4; default: return 1; } } } static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def) { u16 diff_seq; /* It is to get the lowest channel number's center frequency of the chan. * For example, * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1 * with center frequency 5955, its diff is 5965 - 5955 = 10. * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1 * with center frequency 5955, its diff is 5985 - 5955 = 30. * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1 * with center frequency 5955, its diff is 6025 - 5955 = 70. */ switch (chan_def->width) { case NL80211_CHAN_WIDTH_160: diff_seq = 70; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: diff_seq = 30; break; case NL80211_CHAN_WIDTH_40: diff_seq = 10; break; default: diff_seq = 0; } return chan_def->center_freq1 - diff_seq; } static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def, u16 start_seq, u8 seq) { u16 seg_seq; /* It is to get the center frequency of the specific bandwidth. * start_seq means the lowest channel number's center frequency. * seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz&80P80. * For example, * lowest channel is 1, its center frequency 5955, * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0. * lowest channel is 1, its center frequency 5955, * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10. * lowest channel is 1, its center frequency 5955, * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30. * lowest channel is 1, its center frequency 5955, * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70. 
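* In general the diff is (bandwidth - 20) / 2 MHz: the distance from the segment center to the center of its lowest 20 MHz channel.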
*/ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3) return chan_def->center_freq2; seg_seq = 10 * (BIT(seq) - 1); return seg_seq + start_seq; } static void ath11k_mac_get_psd_channel(struct ath11k *ar, u16 step_freq, u16 *start_freq, u16 *center_freq, u8 i, struct ieee80211_channel **temp_chan, s8 *tx_power) { /* It is to get the center frequency for each 20 MHz. * For example, if the chan is 160 MHz and center frequency is 6025, * then it include 8 channels, they are 1/5/9/13/17/21/25/29, * channel number 1's center frequency is 5955, it is parameter start_freq. * parameter i is the step of the 8 channels. i is 0~7 for the 8 channels. * the channel 1/5/9/13/17/21/25/29 maps i=0/1/2/3/4/5/6/7, * and maps its center frequency is 5955/5975/5995/6015/6035/6055/6075/6095, * the gap is 20 for each channel, parameter step_freq means the gap. * after get the center frequency of each channel, it is easy to find the * struct ieee80211_channel of it and get the max_reg_power. */ *center_freq = *start_freq + i * step_freq; *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); *tx_power = (*temp_chan)->max_reg_power; } static void ath11k_mac_get_eirp_power(struct ath11k *ar, u16 *start_freq, u16 *center_freq, u8 i, struct ieee80211_channel **temp_chan, struct cfg80211_chan_def *def, s8 *tx_power) { /* It is to get the center frequency for 20 MHz/40 MHz/80 MHz/ * 160 MHz&80P80 bandwidth, and then plus 10 to the center frequency, * it is the center frequency of a channel number. * For example, when configured channel number is 1. * center frequency is 5965 when bandwidth=40 MHz, after plus 10, it is 5975, * then it is channel number 5. * center frequency is 5985 when bandwidth=80 MHz, after plus 10, it is 5995, * then it is channel number 9. * center frequency is 6025 when bandwidth=160 MHz, after plus 10, it is 6035, * then it is channel number 17. * after get the center frequency of each channel, it is easy to find the * struct ieee80211_channel of it and get the max_reg_power. 
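* ath11k_mac_get_seg_freq() computes that offset as 10 * (2^seq - 1) MHz, i.e. 0/10/30/70 MHz for seq 0/1/2/3.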
*/ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i); /* For the 20 MHz, its center frequency is same with same channel */ if (i != 0) *center_freq += 10; *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); *tx_power = (*temp_chan)->max_reg_power; } void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info; struct ieee80211_channel *chan, *temp_chan; u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction; bool is_psd_power = false, is_tpe_present = false; s8 max_tx_power[ATH11K_NUM_PWR_LEVELS], psd_power, tx_power; s8 eirp_power = 0; u16 start_freq, center_freq; chan = ctx->def.chan; start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def); pwr_reduction = bss_conf->pwr_reduction; if (arvif->reg_tpc_info.num_pwr_levels) { is_tpe_present = true; num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels; } else { num_pwr_levels = ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper); } for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) { /* STA received TPE IE*/ if (is_tpe_present) { /* local power is PSD power*/ if (chan->flags & IEEE80211_CHAN_PSD) { /* Connecting AP is psd power */ if (reg_tpc_info->is_psd_power) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &tx_power); psd_power = temp_chan->psd; eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = min_t(s8, psd_power, reg_tpc_info->tpe[pwr_lvl_idx]); /* Connecting AP is not psd power */ } else { ath11k_mac_get_eirp_power(ar, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); psd_power = temp_chan->psd; /* convert psd power to EIRP power based * on channel width */ tx_power = min_t(s8, tx_power, psd_power + 13 + pwr_lvl_idx * 3); max_tx_power[pwr_lvl_idx] = min_t(s8, tx_power, reg_tpc_info->tpe[pwr_lvl_idx]); } /* local power is not PSD power */ } else { /* Connecting AP is psd power */ if (reg_tpc_info->is_psd_power) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &tx_power); eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = reg_tpc_info->tpe[pwr_lvl_idx]; /* Connecting AP is not psd power */ } else { ath11k_mac_get_eirp_power(ar, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); max_tx_power[pwr_lvl_idx] = min_t(s8, tx_power, reg_tpc_info->tpe[pwr_lvl_idx]); } } /* STA not received TPE IE */ } else { /* local power is PSD power*/ if (chan->flags & IEEE80211_CHAN_PSD) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &tx_power); psd_power = temp_chan->psd; eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = psd_power; } else { ath11k_mac_get_eirp_power(ar, &start_freq, ¢er_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); max_tx_power[pwr_lvl_idx] = tx_power; } } if (is_psd_power) { /* If AP local power constraint is present */ if (pwr_reduction) eirp_power = eirp_power - pwr_reduction; /* If firmware updated max tx power is non zero, then take * the min of firmware updated ap tx power * and max power derived from above mentioned parameters. 
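* For example, an EIRP of 30 dBm derived above combined with a firmware reported max_allowed_tx_power of 24 dBm results in min(30, 24) = 24 dBm when idle power save is supported.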
*/ ath11k_dbg(ab, ATH11K_DBG_MAC, "eirp power : %d firmware report power : %d\n", eirp_power, ar->max_allowed_tx_power); /* Firmware reports lower max_allowed_tx_power during vdev * start response. In case of 6 GHz, firmware is not aware * of EIRP power unless driver sets EIRP power through WMI * TPC command. So radio which does not support idle power * save can set maximum calculated EIRP power directly to * firmware through TPC command without min comparison with * vdev start response's max_allowed_tx_power. */ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) eirp_power = min_t(s8, eirp_power, ar->max_allowed_tx_power); } else { /* If AP local power constraint is present */ if (pwr_reduction) max_tx_power[pwr_lvl_idx] = max_tx_power[pwr_lvl_idx] - pwr_reduction; /* If firmware updated max tx power is non zero, then take * the min of firmware updated ap tx power * and max power derived from above mentioned parameters. */ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) max_tx_power[pwr_lvl_idx] = min_t(s8, max_tx_power[pwr_lvl_idx], ar->max_allowed_tx_power); } reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq; reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power = max_tx_power[pwr_lvl_idx]; } reg_tpc_info->num_pwr_levels = num_pwr_levels; reg_tpc_info->is_psd_power = is_psd_power; reg_tpc_info->eirp_power = eirp_power; reg_tpc_info->ap_power_type = ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type); } static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ieee80211_parsed_tpe_eirp *non_psd = NULL; struct ieee80211_parsed_tpe_psd *psd = NULL; enum wmi_reg_6ghz_client_type client_type; struct cur_regulatory_info *reg_info; u8 local_tpe_count, reg_tpe_count; bool use_local_tpe; int i; reg_info = &ab->reg_info_store[ar->pdev_idx]; client_type = reg_info->client_type; local_tpe_count = bss_conf->tpe.max_local[client_type].valid + bss_conf->tpe.psd_local[client_type].valid; reg_tpe_count = bss_conf->tpe.max_reg_client[client_type].valid + bss_conf->tpe.psd_reg_client[client_type].valid; if (!reg_tpe_count && !local_tpe_count) { ath11k_warn(ab, "no transmit power envelope match client power type %d\n", client_type); return; } else if (!reg_tpe_count) { use_local_tpe = true; } else { use_local_tpe = false; } if (use_local_tpe) { psd = &bss_conf->tpe.psd_local[client_type]; if (!psd->valid) psd = NULL; non_psd = &bss_conf->tpe.max_local[client_type]; if (!non_psd->valid) non_psd = NULL; } else { psd = &bss_conf->tpe.psd_reg_client[client_type]; if (!psd->valid) psd = NULL; non_psd = &bss_conf->tpe.max_reg_client[client_type]; if (!non_psd->valid) non_psd = NULL; } if (non_psd && !psd) { arvif->reg_tpc_info.is_psd_power = false; arvif->reg_tpc_info.eirp_power = 0; arvif->reg_tpc_info.num_pwr_levels = non_psd->count; for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, "non PSD power[%d] : %d\n", i, non_psd->power[i]); arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2; } } if (psd) { arvif->reg_tpc_info.is_psd_power = true; arvif->reg_tpc_info.num_pwr_levels = psd->count; for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, "TPE PSD power[%d] : %d\n", i, psd->power[i]); arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2; } } } static int ath11k_mac_op_assign_vif_chanctx(struct 
ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); if (ath11k_wmi_supports_6ghz_cc_ext(ar) && ctx->def.chan->band == NL80211_BAND_6GHZ && arvif->vdev_type == WMI_VDEV_TYPE_STA) { arvif->chanctx = *ctx; ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); } /* for QCA6390 bss peer must be created before vdev_start */ if (ab->hw_params.vdev_start_delay && arvif->vdev_type != WMI_VDEV_TYPE_AP && arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); ret = 0; goto out; } if (WARN_ON(arvif->is_started)) { ret = -EBUSY; goto out; } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", ret); goto out; } arvif->is_started = true; goto out; } if (!arvif->is_started) { ret = ath11k_mac_vdev_start(arvif, ctx); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, ctx->def.chan->center_freq, ret); goto out; } arvif->is_started = true; } if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", ret); goto out; } } /* TODO: Setup ps and cts/rts protection */ ret = 0; out: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; int ret; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx unassign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, ar->mac_addr); spin_unlock_bh(&ab->base_lock); if (peer) ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", ret); mutex_unlock(&ar->conf_mutex); return; } arvif->is_started = false; mutex_unlock(&ar->conf_mutex); return; } if (arvif->is_started) { ret = ath11k_mac_vdev_stop(arvif); if (ret) ath11k_warn(ab, "failed to stop vdev %i: %d\n", arvif->vdev_id, ret); arvif->is_started = false; } if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->num_started_vdevs == 1 && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_stop(ar); if (ret) /* continue even if there's an error */ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", ret); } if (arvif->vdev_type == WMI_VDEV_TYPE_STA) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); mutex_unlock(&ar->conf_mutex); } static int 
ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "chanctx switch n_vifs %d mode %d\n", n_vifs, mode); ath11k_mac_update_vif_chan(ar, vifs, n_vifs); mutex_unlock(&ar->conf_mutex); return 0; } static int ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value) { struct ath11k_vif *arvif; int ret = 0; mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n", arvif->vdev_id, param, value); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n", param, arvif->vdev_id, ret); break; } } mutex_unlock(&ar->conf_mutex); return ret; } /* mac80211 stores a device specific RTS/fragmentation threshold value, * which the ath11k driver pushes to firmware per interface */ static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, u32 value) { struct ath11k *ar = hw->priv; int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value); } static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, int radio_idx, u32 value) { /* Even though there's a WMI vdev param for fragmentation threshold, no * known firmware actually implements it. Moreover it is not possible to * leave frame fragmentation to mac80211 because firmware clears the * "more fragments" bit in frame control making it impossible for remote * devices to reassemble frames. * * Hence implement a dummy callback just to say fragmentation isn't * supported. This effectively prevents mac80211 from doing frame * fragmentation in software.
*/ return -EOPNOTSUPP; } static int ath11k_mac_flush_tx_complete(struct ath11k *ar) { long time_left; int ret = 0; time_left = wait_event_timeout(ar->dp.tx_empty_waitq, (atomic_read(&ar->dp.num_tx_pending) == 0), ATH11K_FLUSH_TIMEOUT); if (time_left == 0) { ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n", atomic_read(&ar->dp.num_tx_pending)); ret = -ETIMEDOUT; } time_left = wait_event_timeout(ar->txmgmt_empty_waitq, (atomic_read(&ar->num_pending_mgmt_tx) == 0), ATH11K_FLUSH_TIMEOUT); if (time_left == 0) { ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", atomic_read(&ar->num_pending_mgmt_tx)); ret = -ETIMEDOUT; } return ret; } int ath11k_mac_wait_tx_complete(struct ath11k *ar) { ath11k_mac_drain_tx(ar); return ath11k_mac_flush_tx_complete(ar); } static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct ath11k *ar = hw->priv; if (drop) return; ath11k_mac_flush_tx_complete(ar); } static bool ath11k_mac_has_single_legacy_rate(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; num_rates = hweight32(mask->control[band].legacy); if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) return false; if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) return false; if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask)) return false; return num_rates == 1; } static __le16 ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap) { if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return he_cap->he_mcs_nss_supp.tx_mcs_80p80; if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return he_cap->he_mcs_nss_supp.tx_mcs_160; return he_cap->he_mcs_nss_supp.tx_mcs_80; } static bool ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, struct ath11k_vif *arvif, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); const struct ieee80211_sta_he_cap *he_cap; u16 he_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; u8 he_nss_mask = 0; int i; /* No need to consider legacy here. 
Basic rates are always present * in bitrate mask */ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { if (mask->control[band].ht_mcs[i] == 0) continue; else if (mask->control[band].ht_mcs[i] == sband->ht_cap.mcs.rx_mask[i]) ht_nss_mask |= BIT(i); else return false; } for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { if (mask->control[band].vht_mcs[i] == 0) continue; else if (mask->control[band].vht_mcs[i] == ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) vht_nss_mask |= BIT(i); else return false; } he_cap = ieee80211_get_he_iftype_cap_vif(sband, arvif->vif); if (!he_cap) return false; he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap)); for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (mask->control[band].he_mcs[i] == 0) continue; if (mask->control[band].he_mcs[i] == ath11k_mac_get_max_he_mcs_map(he_mcs_map, i)) he_nss_mask |= BIT(i); else return false; } if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask) return false; if (ht_nss_mask == 0) return false; if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) return false; *nss = fls(ht_nss_mask); return true; } static int ath11k_mac_get_single_legacy_rate(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, u32 *rate, u8 *nss) { int rate_idx; u16 bitrate; u8 preamble; u8 hw_rate; if (hweight32(mask->control[band].legacy) != 1) return -EINVAL; rate_idx = ffs(mask->control[band].legacy) - 1; if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX; hw_rate = ath11k_legacy_rates[rate_idx].hw_value; bitrate = ath11k_legacy_rates[rate_idx].bitrate; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; *nss = 1; *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble); return 0; } static int ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf) { struct ath11k *ar = arvif->ar; int ret; /* 0.8 = 0, 1.6 = 2 and 3.2 = 3. 
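* (WMI encoding; mac80211 reports HE GI as 0.8 = 0, 1.6 = 1 and 3.2 = 2, hence the non-zero values are bumped by one below.)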
*/ if (he_gi && he_gi != 0xFF) he_gi += 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_SGI, he_gi); if (ret) { ath11k_warn(ar->ab, "failed to set he gi %d: %d\n", he_gi, ret); return ret; } /* start from 1 */ if (he_ltf != 0xFF) he_ltf += 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_HE_LTF, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n", he_ltf, ret); return ret; } return 0; } static int ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf) { struct ath11k *ar = arvif->ar; int ret; u32 he_ar_gi_ltf; if (he_gi != 0xFF) { switch (he_gi) { case NL80211_RATE_INFO_HE_GI_0_8: he_gi = WMI_AUTORATE_800NS_GI; break; case NL80211_RATE_INFO_HE_GI_1_6: he_gi = WMI_AUTORATE_1600NS_GI; break; case NL80211_RATE_INFO_HE_GI_3_2: he_gi = WMI_AUTORATE_3200NS_GI; break; default: ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi); return -EINVAL; } } if (he_ltf != 0xFF) { switch (he_ltf) { case NL80211_RATE_INFO_HE_1XLTF: he_ltf = WMI_HE_AUTORATE_LTF_1X; break; case NL80211_RATE_INFO_HE_2XLTF: he_ltf = WMI_HE_AUTORATE_LTF_2X; break; case NL80211_RATE_INFO_HE_4XLTF: he_ltf = WMI_HE_AUTORATE_LTF_4X; break; default: ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf); return -EINVAL; } } he_ar_gi_ltf = he_gi | he_ltf; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_AUTORATE_MISC_CFG, he_ar_gi_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set he autorate gi %u ltf %u: %d\n", he_gi, he_ltf, ret); return ret; } return 0; } static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif, u32 rate, u8 nss, u8 sgi, u8 ldpc, u8 he_gi, u8 he_ltf, bool he_fixed_rate) { struct ath11k *ar = arvif->ar; u32 vdev_param; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", arvif->vdev_id, rate, nss, sgi, ldpc, he_gi, he_ltf, he_fixed_rate); if (!arvif->vif->bss_conf.he_support) { vdev_param = WMI_VDEV_PARAM_FIXED_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) { ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", rate, ret); return ret; } } vdev_param = WMI_VDEV_PARAM_NSS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, nss); if (ret) { ath11k_warn(ar->ab, "failed to set nss param %d: %d\n", nss, ret); return ret; } vdev_param = WMI_VDEV_PARAM_LDPC; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, ldpc); if (ret) { ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n", ldpc, ret); return ret; } if (arvif->vif->bss_conf.he_support) { if (he_fixed_rate) { ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n", ret); return ret; } } else { ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n", ret); return ret; } } } else { vdev_param = WMI_VDEV_PARAM_SGI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, sgi); if (ret) { ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n", sgi, ret); return ret; } } return 0; } static bool ath11k_mac_vht_mcs_range_present(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; u16 vht_mcs; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { vht_mcs = mask->control[band].vht_mcs[i]; switch (vht_mcs) { case 0: case BIT(8) - 1: 
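/* a valid per-NSS VHT mask is either 0 (unused) or a contiguous MCS 0-7, 0-8 or 0-9 range */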
case BIT(9) - 1: case BIT(10) - 1: break; default: return false; } } return true; } static bool ath11k_mac_he_mcs_range_present(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; u16 he_mcs; for (i = 0; i < NL80211_HE_NSS_MAX; i++) { he_mcs = mask->control[band].he_mcs[i]; switch (he_mcs) { case 0: case BIT(8) - 1: case BIT(10) - 1: case BIT(12) - 1: break; default: return false; } } return true; } static void ath11k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; spin_unlock_bh(&ar->data_lock); ieee80211_queue_work(ar->hw, &arsta->update_wk); } static void ath11k_mac_disable_peer_fixed_rate(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; struct ath11k *ar = arvif->ar; int ret; ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, WMI_FIXED_RATE_NONE); if (ret) ath11k_warn(ar->ab, "failed to disable peer fixed rate for STA %pM ret %d\n", sta->addr, ret); } static bool ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { bool he_fixed_rate = false, vht_fixed_rate = false; struct ath11k_peer *peer; const u16 *vht_mcs_mask, *he_mcs_mask; struct ieee80211_link_sta *deflink; u8 vht_nss, he_nss; bool ret = true; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1) vht_fixed_rate = true; if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1) he_fixed_rate = true; if (!vht_fixed_rate && !he_fixed_rate) return true; vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); he_nss = ath11k_mac_max_he_nss(he_mcs_mask); rcu_read_lock(); spin_lock_bh(&ar->ab->base_lock); list_for_each_entry(peer, &ar->ab->peers, list) { if (peer->sta) { deflink = &peer->sta->deflink; if (vht_fixed_rate && (!deflink->vht_cap.vht_supported || deflink->rx_nss < vht_nss)) { ret = false; goto out; } if (he_fixed_rate && (!deflink->he_cap.has_he || deflink->rx_nss < he_nss)) { ret = false; goto out; } } } out: spin_unlock_bh(&ar->ab->base_lock); rcu_read_unlock(); return ret; } static int ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath11k_pdev_cap *cap; struct ath11k *ar = arvif->ar; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; u8 he_ltf = 0; u8 he_gi = 0; u32 rate; u8 nss; u8 sgi; u8 ldpc; int single_nss; int ret; int num_rates; bool he_fixed_rate = false; if (ath11k_mac_vif_chan(vif, &def)) return -EPERM; band = def.chan->band; cap = &ar->pdev->cap; ht_mcs_mask = mask->control[band].ht_mcs; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC); sgi = mask->control[band].gi; if (sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; he_gi = mask->control[band].he_gi; he_ltf = mask->control[band].he_ltf; /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it * requires passing at least one of used basic rates along with them. 
* Fixed rate setting across different preambles(legacy, HT, VHT) is * not supported by the FW. Hence use of FIXED_RATE vdev param is not * suitable for setting single HT/VHT rates. * But, there could be a single basic rate passed from userspace which * can be done through the FIXED_RATE param. */ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) { ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate, &nss); if (ret) { ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } ieee80211_iterate_stations_mtx(ar->hw, ath11k_mac_disable_peer_fixed_rate, arvif); } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; nss = single_nss; mutex_lock(&ar->conf_mutex); arvif->bitrate_mask = *mask; ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_set_bitrate_mask_iter, arvif); mutex_unlock(&ar->conf_mutex); } else { rate = WMI_FIXED_RATE_NONE; if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask)) ath11k_warn(ar->ab, "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n"); nss = min_t(u32, ar->num_tx_chains, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask)); /* If multiple rates across different preambles are given * we can reconfigure this info with all peers using PEER_ASSOC * command with the below exception cases. * - Single VHT Rate : peer_assoc command accommodates only MCS * range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211 * mandates passing basic rates along with HT/VHT rates, FW * doesn't allow switching from VHT to Legacy. Hence instead of * setting legacy and VHT rates using RATEMASK_CMD vdev cmd, * we could set this VHT rate as peer fixed rate param, which * will override FIXED rate and FW rate control algorithm. * If single VHT rate is passed along with HT rates, we select * the VHT rate as fixed rate for vht peers. * - Multiple VHT Rates : When Multiple VHT rates are given,this * can be set using RATEMASK CMD which uses FW rate-ctl alg. * TODO: Setting multiple VHT MCS and replacing peer_assoc with * RATEMASK_CMDID can cover all use cases of setting rates * across multiple preambles and rates within same type. * But requires more validation of the command at this point. 
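* In short: a single legacy rate is programmed through the FIXED_RATE vdev param, while HT/VHT/HE masks keep WMI_FIXED_RATE_NONE and are applied per peer via the station iterators below.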
*/ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) && num_rates > 1) { /* TODO: Handle multiple VHT MCS values setting using * RATEMASK CMD */ ath11k_warn(ar->ab, "setting %d mcs values in bitrate mask not supported\n", num_rates); return -EINVAL; } num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); if (num_rates == 1) he_fixed_rate = true; if (!ath11k_mac_he_mcs_range_present(ar, band, mask) && num_rates > 1) { ath11k_warn(ar->ab, "Setting more than one HE MCS Value in bitrate mask not supported\n"); return -EINVAL; } mutex_lock(&ar->conf_mutex); ieee80211_iterate_stations_mtx(ar->hw, ath11k_mac_disable_peer_fixed_rate, arvif); arvif->bitrate_mask = *mask; ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_set_bitrate_mask_iter, arvif); mutex_unlock(&ar->conf_mutex); } mutex_lock(&ar->conf_mutex); ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi, he_ltf, he_fixed_rate); if (ret) { ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n", arvif->vdev_id, ret); } mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; int recovery_count; struct ath11k_vif *arvif; if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; mutex_lock(&ar->conf_mutex); if (ar->state == ATH11K_STATE_RESTARTED) { ath11k_warn(ar->ab, "pdev %d successfully recovered\n", ar->pdev->pdev_id); ar->state = ATH11K_STATE_ON; ieee80211_wake_queues(ar->hw); if (ar->ab->hw_params.current_cc_support && ar->alpha2[0] != 0 && ar->alpha2[1] != 0) ath11k_reg_set_cc(ar); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); ath11k_dbg(ab, ATH11K_DBG_BOOT, "recovery count %d\n", recovery_count); /* When there are multiple radios in an SOC, * the recovery has to be done for each radio */ if (recovery_count == ab->num_radios) { atomic_dec(&ab->reset_count); complete(&ab->reset_complete); ab->is_reset = false; atomic_set(&ab->fail_cont_count, 0); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n"); } } if (ar->ab->hw_params.support_fw_mac_sequence) { list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA) ieee80211_hw_restart_disconnect(arvif->vif); } } } mutex_unlock(&ar->conf_mutex); } static void ath11k_mac_update_bss_chan_survey(struct ath11k *ar, struct ieee80211_channel *channel) { int ret; enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || ar->rx_channel != channel) return; if (ar->scan.state != ATH11K_SCAN_IDLE) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ignoring bss chan info req while scanning..\n"); return; } reinit_completion(&ar->bss_survey_done); ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type); if (ret) { ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n"); return; } ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); if (ret == 0) ath11k_warn(ar->ab, "bss channel survey timed out\n"); } static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct ath11k *ar = hw->priv; struct ieee80211_supported_band *sband; struct survey_info *ar_survey; int ret = 0; if (idx >= ATH11K_NUM_CHANS) return -ENOENT; ar_survey = &ar->survey[idx]; mutex_lock(&ar->conf_mutex); sband = 
hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) sband = hw->wiphy->bands[NL80211_BAND_6GHZ]; if (!sband || idx >= sband->n_channels) { ret = -ENOENT; goto exit; } ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); spin_lock_bh(&ar->data_lock); memcpy(survey, ar_survey, sizeof(*survey)); spin_unlock_bh(&ar->data_lock); survey->channel = &sband->channels[idx]; if (ar->rx_channel == survey->channel) survey->filled |= SURVEY_INFO_IN_USE; exit: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_put_chain_rssi(struct station_info *sinfo, struct ath11k_sta *arsta, char *pre, bool clear) { struct ath11k *ar = arsta->arvif->ar; int i; s8 rssi; for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { sinfo->chains &= ~BIT(i); rssi = arsta->chain_signal[i]; if (clear) arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta statistics %s rssi[%d] %d\n", pre, i, rssi); if (rssi != ATH11K_DEFAULT_NOISE_FLOOR && rssi != ATH11K_INVALID_RSSI_FULL && rssi != ATH11K_INVALID_RSSI_EMPTY && rssi != 0) { sinfo->chain_signal[i] = rssi; sinfo->chains |= BIT(i); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); } } } static void ath11k_mac_fw_stats_reset(struct ath11k *ar) { spin_lock_bh(&ar->data_lock); ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); ar->fw_stats.num_vdev_recvd = 0; ar->fw_stats.num_bcn_recvd = 0; spin_unlock_bh(&ar->data_lock); } int ath11k_mac_fw_stats_request(struct ath11k *ar, struct stats_request_params *req_param) { struct ath11k_base *ab = ar->ab; unsigned long time_left; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_mac_fw_stats_reset(ar); reinit_completion(&ar->fw_stats_complete); reinit_completion(&ar->fw_stats_done); ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); if (ret) { ath11k_warn(ab, "could not request fw stats (%d)\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); if (!time_left) return -ETIMEDOUT; /* FW stats can get split when exceeding the stats data buffer limit. 
* In that case, since there is no end marking for the back-to-back * received 'update stats' event, we keep a 3 seconds timeout in case, * fw_stats_done is not marked yet */ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); if (!time_left) return -ETIMEDOUT; return 0; } static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id, u32 vdev_id, u32 stats_id) { struct ath11k_base *ab = ar->ab; struct stats_request_params req_param; int ret; lockdep_assert_held(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) return -ENETDOWN; req_param.pdev_id = pdev_id; req_param.vdev_id = vdev_id; req_param.stats_id = stats_id; ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) ath11k_warn(ab, "failed to request fw stats: %d\n", ret); ath11k_dbg(ab, ATH11K_DBG_WMI, "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", pdev_id, vdev_id, stats_id); return ret; } static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; s8 signal; bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ar->ab->wmi_ab.svc_map); sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); sinfo->tx_duration = arsta->tx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); if (arsta->txrate.legacy || arsta->txrate.nss) { if (arsta->txrate.legacy) { sinfo->txrate.legacy = arsta->txrate.legacy; } else { sinfo->txrate.mcs = arsta->txrate.mcs; sinfo->txrate.nss = arsta->txrate.nss; sinfo->txrate.bw = arsta->txrate.bw; sinfo->txrate.he_gi = arsta->txrate.he_gi; sinfo->txrate.he_dcm = arsta->txrate.he_dcm; sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; } sinfo->txrate.flags = arsta->txrate.flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false); mutex_lock(&ar->conf_mutex); if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_RSSI_PER_CHAIN_STAT)) { ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true); } signal = arsta->rssi_comb; if (!signal && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_VDEV_STAT))) signal = arsta->rssi_beacon; mutex_unlock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta statistics db2dbm %u rssi comb %d rssi beacon %d\n", db2dbm, arsta->rssi_comb, arsta->rssi_beacon); if (signal) { sinfo->signal = db2dbm ? 
signal : signal + ATH11K_DEFAULT_NOISE_FLOOR; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi); if (!db2dbm) sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } #if IS_ENABLED(CONFIG_IPV6) static void ath11k_generate_ns_mc_addr(struct ath11k *ar, struct ath11k_arp_ns_offload *offload) { int i; for (i = 0; i < offload->ipv6_count; i++) { offload->self_ipv6_addr[i][0] = 0xff; offload->self_ipv6_addr[i][1] = 0x02; offload->self_ipv6_addr[i][11] = 0x01; offload->self_ipv6_addr[i][12] = 0xff; offload->self_ipv6_addr[i][13] = offload->ipv6_addr[i][13]; offload->self_ipv6_addr[i][14] = offload->ipv6_addr[i][14]; offload->self_ipv6_addr[i][15] = offload->ipv6_addr[i][15]; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n", offload->self_ipv6_addr[i]); } } static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { struct ath11k *ar = hw->priv; struct ath11k_arp_ns_offload *offload; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct inet6_ifaddr *ifa6; struct ifacaddr6 *ifaca6; u32 count, scope; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n"); offload = &arvif->arp_ns_offload; count = 0; /* The _ipv6_changed() is called with the RCU lock already held in * atomic_notifier_call_chain(), so we don't need to call * rcu_read_lock() again here. But note that with CONFIG_PREEMPT_RT * enabled, read_lock_bh() also calls rcu_read_lock(). This is OK * because RCU read-side critical sections are allowed to nest. */ read_lock_bh(&idev->lock); memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr)); memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr)); memcpy(offload->mac_addr, vif->addr, ETH_ALEN); /* get unicast address */ list_for_each_entry(ifa6, &idev->addr_list, if_list) { if (count >= ATH11K_IPV6_MAX_COUNT) goto generate; if (ifa6->flags & IFA_F_DADFAILED) continue; scope = ipv6_addr_src_scope(&ifa6->addr); if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || scope == IPV6_ADDR_SCOPE_GLOBAL) { memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, sizeof(ifa6->addr.s6_addr)); offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 uc %pI6 scope %d\n", count, offload->ipv6_addr[count], scope); count++; } else { ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope); } } /* get anycast address */ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6; ifaca6 = rcu_dereference(ifaca6->aca_next)) { if (count >= ATH11K_IPV6_MAX_COUNT) goto generate; scope = ipv6_addr_src_scope(&ifaca6->aca_addr); if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || scope == IPV6_ADDR_SCOPE_GLOBAL) { memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr, sizeof(ifaca6->aca_addr)); offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 ac %pI6 scope %d\n", count, offload->ipv6_addr[count], scope); count++; } else { ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope); } } generate: offload->ipv6_count = count; read_unlock_bh(&idev->lock); /* generate ns multicast address */ ath11k_generate_ns_mc_addr(ar, offload); } #endif static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rekey data vdev
%d\n", arvif->vdev_id); mutex_lock(&ar->conf_mutex); memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); /* The supplicant works on big-endian, the firmware expects it on * little endian. */ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); arvif->rekey_data.enable_offload = true; ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL, rekey_data->kck, NL80211_KCK_LEN); ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL, rekey_data->kck, NL80211_KEK_LEN); ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL, &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { struct ath11k *ar = hw->priv; const struct cfg80211_sar_sub_specs *sspec; int ret, index; u8 *sar_tbl; u32 i; if (!sar || sar->type != NL80211_SAR_TYPE_POWER || sar->num_sub_specs == 0) return -EINVAL; mutex_lock(&ar->conf_mutex); if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) || !ar->ab->hw_params.bios_sar_capa) { ret = -EOPNOTSUPP; goto exit; } ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar); if (ret) { ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret); goto exit; } sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL); if (!sar_tbl) { ret = -ENOMEM; goto exit; } sspec = sar->sub_specs; for (i = 0; i < sar->num_sub_specs; i++) { if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) { ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n", sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1); continue; } /* chain0 and chain1 share same power setting */ sar_tbl[sspec->freq_range_index] = sspec->power; index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1); sar_tbl[index] = sspec->power; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n", sspec->freq_range_index, sar_tbl[sspec->freq_range_index]); sspec++; } ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl); if (ret) ath11k_warn(ar->ab, "failed to set sar power: %d", ret); kfree(sar_tbl); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); ar->scan.roc_notify = false; spin_unlock_bh(&ar->data_lock); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); return 0; } static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct scan_req_params *arg; int ret; u32 scan_time_msec; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: reinit_completion(&ar->scan.started); reinit_completion(&ar->scan.completed); reinit_completion(&ar->scan.on_channel); ar->scan.state = ATH11K_SCAN_STARTING; ar->scan.is_roc = true; ar->scan.vdev_id = arvif->vdev_id; ar->scan.roc_freq = chan->center_freq; ar->scan.roc_notify = true; ret = 0; break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ret = -EBUSY; break; } spin_unlock_bh(&ar->data_lock); if (ret) goto exit; scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; arg = kzalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { ret = -ENOMEM; goto exit; } 
ath11k_wmi_start_scan_init(ar, arg); arg->num_chan = 1; arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), GFP_KERNEL); if (!arg->chan_list) { ret = -ENOMEM; goto free_arg; } arg->vdev_id = arvif->vdev_id; arg->scan_id = ATH11K_SCAN_ID; arg->chan_list[0] = chan->center_freq; arg->dwell_time_active = scan_time_msec; arg->dwell_time_passive = scan_time_msec; arg->max_scan_time = scan_time_msec; arg->scan_f_passive = 1; arg->burst_duration = duration; if (!ar->ab->hw_params.single_pdev_only) arg->scan_f_filter_prb_req = 1; ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); ar->scan.state = ATH11K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); goto free_chan_list; } ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); if (ret == 0) { ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n"); ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret); ret = -ETIMEDOUT; goto free_chan_list; } ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, msecs_to_jiffies(duration)); ret = 0; free_chan_list: kfree(arg->chan_list); free_arg: kfree(arg); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, int *dbm) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_fw_stats_pdev *pdev; int ret; /* Final Tx power is minimum of Target Power, CTL power, Regulatory * Power, PSD EIRP Power. We just know the Regulatory power from the * regulatory rules obtained. FW knows all these power and sets the min * of these. Hence, we request the FW pdev stats in which FW reports * the minimum of all vdev's channel Tx power. */ mutex_lock(&ar->conf_mutex); /* Firmware doesn't provide Tx power during CAC hence no need to fetch * the stats. */ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { mutex_unlock(&ar->conf_mutex); return -EAGAIN; } ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_PDEV_STAT); if (ret) { ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); goto err_fallback; } spin_lock_bh(&ar->data_lock); pdev = list_first_entry_or_null(&ar->fw_stats.pdevs, struct ath11k_fw_stats_pdev, list); if (!pdev) { spin_unlock_bh(&ar->data_lock); goto err_fallback; } /* tx power is set as 2 units per dBm in FW. */ *dbm = pdev->chan_tx_power / 2; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n", pdev->chan_tx_power, *dbm); return 0; err_fallback: mutex_unlock(&ar->conf_mutex); /* We didn't get txpower from FW. 
Hence, fall back to vif->bss_conf.txpower. */ *dbm = vif->bss_conf.txpower; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n", *dbm); return 0; } static int ath11k_mac_station_add(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct peer_create_params peer_param; int ret; lockdep_assert_held(&ar->conf_mutex); ret = ath11k_mac_inc_num_stations(arvif, sta); if (ret) { ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n", ar->max_num_stations); goto exit; } /* The driver allows the DEL KEY followed by SET KEY sequence for * group keys only when no clients are associated. If firmware has * entered the race during that window, reinstalling the same key * when the first sta connects allows firmware to recover from the * race. */ if (arvif->num_stations == 1 && arvif->reinstall_group_keys) { ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n", arvif->vdev_id); ret = ath11k_set_group_keys(arvif); if (ret) goto dec_num_station; arvif->reinstall_group_keys = false; } arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); if (!arsta->rx_stats) { ret = -ENOMEM; goto dec_num_station; } peer_param.vdev_id = arvif->vdev_id; peer_param.peer_addr = sta->addr; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath11k_peer_create(ar, arvif, sta, &peer_param); if (ret) { ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); goto free_rx_stats; } ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL); if (!arsta->tx_stats) { ret = -ENOMEM; goto free_peer; } } if (ieee80211_vif_is_mesh(vif)) { ath11k_dbg(ab, ATH11K_DBG_MAC, "setting USE_4ADDR for mesh STA %pM\n", sta->addr); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) { ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", sta->addr, ret); goto free_tx_stats; } } ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); if (ret) { ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", sta->addr, arvif->vdev_id, ret); goto free_tx_stats; } if (ab->hw_params.vdev_start_delay && !arvif->is_started && arvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath11k_mac_start_vdev_delay(ar->hw, vif); if (ret) { ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); goto free_tx_stats; } } ewma_avg_rssi_init(&arsta->avg_rssi); return 0; free_tx_stats: kfree(arsta->tx_stats); arsta->tx_stats = NULL; free_peer: ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); free_rx_stats: kfree(arsta->rx_stats); arsta->rx_stats = NULL; dec_num_station: ath11k_mac_dec_num_stations(arvif, sta); exit: return ret; } static int ath11k_mac_station_remove(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); int ret; if (ab->hw_params.vdev_start_delay && arvif->is_started && arvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath11k_mac_stop_vdev_early(ar->hw, vif); if (ret) { ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret); return ret; } } ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
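/* Delete the firmware peer entry; the per-station stats buffers below are freed whether or not the delete succeeds. */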
ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); else ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); ath11k_mac_dec_num_stations(arvif, sta); kfree(arsta->tx_stats); arsta->tx_stats = NULL; kfree(arsta->rx_stats); arsta->rx_stats = NULL; return ret; } static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); enum ieee80211_ap_reg_power power_type; struct cur_regulatory_info *reg_info; struct ath11k_peer *peer; int ret = 0; /* cancel must be done outside the mutex to avoid deadlock */ if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { cancel_work_sync(&arsta->update_wk); cancel_work_sync(&arsta->set_4addr_wk); } mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); ret = ath11k_mac_station_add(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { ret = ath11k_mac_station_remove(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); mutex_lock(&ar->ab->tbl_mtx_lock); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer && peer->sta == sta) { ath11k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n", sta->addr, arvif->vdev_id); ath11k_peer_rhash_delete(ar->ab, peer); peer->sta = NULL; list_del(&peer->list); kfree(peer); ar->num_peers--; } spin_unlock_bh(&ar->ab->base_lock); mutex_unlock(&ar->ab->tbl_mtx_lock); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath11k_station_assoc(ar, vif, sta, false); if (ret) ath11k_warn(ar->ab, "Failed to associate station: %pM\n", sta->addr); spin_lock_bh(&ar->data_lock); /* Set arsta bw and prev bw */ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); arsta->bw_prev = arsta->bw; spin_unlock_bh(&ar->data_lock); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer) peer->is_authorized = true; spin_unlock_bh(&ar->ab->base_lock); if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_AUTHORIZE, 1); if (ret) ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", sta->addr, arvif->vdev_id, ret); } if (!ret && ath11k_wmi_supports_6ghz_cc_ext(ar) && arvif->vdev_type == WMI_VDEV_TYPE_STA && arvif->chanctx.def.chan && arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) { reg_info = &ar->ab->reg_info_store[ar->pdev_idx]; power_type = vif->bss_conf.power_type; if (power_type ==
IEEE80211_REG_UNSET_AP) { ath11k_warn(ar->ab, "invalid power type %d\n", power_type); ret = -EINVAL; } else { ret = ath11k_reg_handle_chan_list(ar->ab, reg_info, power_type); if (ret) ath11k_warn(ar->ab, "failed to handle chan list with power type %d\n", power_type); } } } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer) peer->is_authorized = false; spin_unlock_bh(&ar->ab->base_lock); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath11k_station_disassoc(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", sta->addr); } mutex_unlock(&ar->conf_mutex); return ret; } static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = ath11k_mac_op_start, .stop = ath11k_mac_op_stop, .reconfig_complete = ath11k_mac_op_reconfig_complete, .add_interface = ath11k_mac_op_add_interface, .remove_interface = ath11k_mac_op_remove_interface, .update_vif_offload = ath11k_mac_op_update_vif_offload, .config = ath11k_mac_op_config, .bss_info_changed = ath11k_mac_op_bss_info_changed, .configure_filter = ath11k_mac_op_configure_filter, .hw_scan = ath11k_mac_op_hw_scan, .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, .set_key = ath11k_mac_op_set_key, .set_rekey_data = ath11k_mac_op_set_rekey_data, .sta_state = ath11k_mac_op_sta_state, .sta_set_4addr = ath11k_mac_op_sta_set_4addr, .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, .link_sta_rc_update = ath11k_mac_op_sta_rc_update, .conf_tx = ath11k_mac_op_conf_tx, .set_antenna = ath11k_mac_op_set_antenna, .get_antenna = ath11k_mac_op_get_antenna, .ampdu_action = ath11k_mac_op_ampdu_action, .add_chanctx = ath11k_mac_op_add_chanctx, .remove_chanctx = ath11k_mac_op_remove_chanctx, .change_chanctx = ath11k_mac_op_change_chanctx, .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx, .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx, .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx, .set_rts_threshold = ath11k_mac_op_set_rts_threshold, .set_frag_threshold = ath11k_mac_op_set_frag_threshold, .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask, .get_survey = ath11k_mac_op_get_survey, .flush = ath11k_mac_op_flush, .sta_statistics = ath11k_mac_op_sta_statistics, CFG80211_TESTMODE_CMD(ath11k_tm_cmd) #ifdef CONFIG_PM .suspend = ath11k_wow_op_suspend, .resume = ath11k_wow_op_resume, .set_wakeup = ath11k_wow_op_set_wakeup, #endif #ifdef CONFIG_ATH11K_DEBUGFS .vif_add_debugfs = ath11k_debugfs_op_vif_add, .sta_add_debugfs = ath11k_debugfs_sta_op_add, #endif #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = ath11k_mac_op_ipv6_changed, #endif .get_txpower = ath11k_mac_op_get_txpower, .set_sar_specs = ath11k_mac_op_set_bios_sar_specs, .remain_on_channel = ath11k_mac_op_remain_on_channel, .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel, }; static void ath11k_mac_update_ch_list(struct ath11k *ar, struct ieee80211_supported_band *band, u32 freq_low, u32 freq_high) { int i; if (!(freq_low && freq_high)) return; for (i = 0; i < band->n_channels; i++) { if (band->channels[i].center_freq < freq_low || band->channels[i].center_freq > freq_high) band->channels[i].flags |= IEEE80211_CHAN_DISABLED; } } static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band) { struct 
ath11k_pdev *pdev = ar->pdev; struct ath11k_pdev_cap *pdev_cap = &pdev->cap; if (band == WMI_HOST_WLAN_2G_CAP) return pdev_cap->band[NL80211_BAND_2GHZ].phy_id; if (band == WMI_HOST_WLAN_5G_CAP) return pdev_cap->band[NL80211_BAND_5GHZ].phy_id; ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band); return 0; } static int ath11k_mac_setup_channels_rates(struct ath11k *ar, u32 supported_bands) { struct ieee80211_supported_band *band; struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap; void *channels; u32 phy_id; BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) + ARRAY_SIZE(ath11k_5ghz_channels) + ARRAY_SIZE(ath11k_6ghz_channels)) != ATH11K_NUM_CHANS); reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; temp_reg_cap = reg_cap; if (supported_bands & WMI_HOST_WLAN_2G_CAP) { channels = kmemdup(ath11k_2ghz_channels, sizeof(ath11k_2ghz_channels), GFP_KERNEL); if (!channels) return -ENOMEM; band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->band = NL80211_BAND_2GHZ; band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels); band->channels = channels; band->n_bitrates = ath11k_g_rates_size; band->bitrates = ath11k_g_rates; ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_2ghz_chan, temp_reg_cap->high_2ghz_chan); } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) { channels = kmemdup(ath11k_6ghz_channels, sizeof(ath11k_6ghz_channels), GFP_KERNEL); if (!channels) { kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); return -ENOMEM; } ar->supports_6ghz = true; band = &ar->mac.sbands[NL80211_BAND_6GHZ]; band->band = NL80211_BAND_6GHZ; band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels); band->channels = channels; band->n_bitrates = ath11k_a_rates_size; band->bitrates = ath11k_a_rates; ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_5ghz_chan, temp_reg_cap->high_5ghz_chan); } if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) { channels = kmemdup(ath11k_5ghz_channels, sizeof(ath11k_5ghz_channels), GFP_KERNEL); if (!channels) { kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); return -ENOMEM; } band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->band = NL80211_BAND_5GHZ; band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels); band->channels = channels; band->n_bitrates = ath11k_a_rates_size; band->bitrates = ath11k_a_rates; ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_5ghz_chan, temp_reg_cap->high_5ghz_chan); } } return 0; } static void ath11k_mac_setup_mac_address_list(struct ath11k *ar) { struct mac_address *addresses; u16 n_addresses; int i; if (!ar->ab->hw_params.support_dual_stations) return; n_addresses = ar->ab->hw_params.num_vdevs; addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL); if (!addresses) return; memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN); for (i = 1; i < n_addresses; i++) { memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN); /* set Local Administered Address bit */ addresses[i].addr[0] |= 0x2; 
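/* Encode the index in the upper nibble of the first octet so every
 * derived address stays unique, e.g. a base address of
 * 00:11:22:33:44:55 yields 02:11:22:33:44:55, 12:11:22:33:44:55,
 * 22:11:22:33:44:55, ... for i = 1, 2, 3, ...
 */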
addresses[i].addr[0] += (i - 1) << 4; } ar->hw->wiphy->addresses = addresses; ar->hw->wiphy->n_addresses = n_addresses; } static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct ieee80211_iface_combination *combinations; struct ieee80211_iface_limit *limits; int n_limits, n_combos; bool p2p; p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); if (ab->hw_params.support_dual_stations) n_combos = 2; else n_combos = 1; combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL); if (!combinations) return -ENOMEM; if (p2p) n_limits = 3; else n_limits = 2; limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); if (!limits) { kfree(combinations); return -ENOMEM; } limits[0].max = 1; limits[0].types |= BIT(NL80211_IFTYPE_STATION); limits[1].max = 16; limits[1].types |= BIT(NL80211_IFTYPE_AP); if (IS_ENABLED(CONFIG_MAC80211_MESH) && ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); combinations[0].limits = limits; combinations[0].n_limits = n_limits; combinations[0].beacon_int_infra_match = true; combinations[0].beacon_int_min_gcd = 100; combinations[0].max_interfaces = 16; combinations[0].num_different_channels = 1; combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_80P80) | BIT(NL80211_CHAN_WIDTH_160); if (ab->hw_params.support_dual_stations) { limits[0].max = 2; combinations[1].limits = limits; combinations[1].n_limits = n_limits; combinations[1].beacon_int_infra_match = true; combinations[1].beacon_int_min_gcd = 100; combinations[1].max_interfaces = ab->hw_params.num_vdevs; combinations[1].num_different_channels = 2; } if (p2p) { limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); limits[2].max = 1; limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); } ar->hw->wiphy->iface_combinations = combinations; ar->hw->wiphy->n_iface_combinations = n_combos; return 0; } static const u8 ath11k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const u8 ath11k_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const u8 ath11k_if_types_ext_capa_ap[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, }; static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = { { .extended_capabilities = ath11k_if_types_ext_capa, .extended_capabilities_mask = ath11k_if_types_ext_capa, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa), }, { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = ath11k_if_types_ext_capa_sta, .extended_capabilities_mask = ath11k_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa_sta), }, { .iftype = NL80211_IFTYPE_AP, .extended_capabilities = ath11k_if_types_ext_capa_ap, .extended_capabilities_mask = ath11k_if_types_ext_capa_ap, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa_ap), }, }; static void __ath11k_mac_unregister(struct ath11k *ar) { cancel_work_sync(&ar->channel_update_work); 
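/* A regd update may still be queued at this point; make sure it
 * cannot run against hw that is about to be unregistered.
 */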
cancel_work_sync(&ar->regd_update_work); ieee80211_unregister_hw(ar->hw); idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); kfree(ar->hw->wiphy->iface_combinations[0].limits); kfree(ar->hw->wiphy->iface_combinations); kfree(ar->hw->wiphy->addresses); SET_IEEE80211_DEV(ar->hw, NULL); } void ath11k_mac_unregister(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; __ath11k_mac_unregister(ar); } ath11k_peer_rhash_tbl_destroy(ab); } static int __ath11k_mac_register(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct ath11k_pdev_cap *cap = &ar->pdev->cap; static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_CCMP_256, }; int ret; u32 ht_cap = 0; ath11k_pdev_caps_update(ar); SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); ath11k_mac_setup_mac_address_list(ar); SET_IEEE80211_DEV(ar->hw, ab->dev); ret = ath11k_mac_setup_channels_rates(ar, cap->supported_bands); if (ret) goto err; wiphy_read_of_freq_limits(ar->hw->wiphy); ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); ath11k_mac_setup_he_cap(ar, cap); ret = ath11k_mac_setup_iface_combinations(ar); if (ret) { ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); goto err_free_channels; } ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes; if (ab->hw_params.single_pdev_only && ar->supports_6ghz) ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS); if (ab->hw_params.supports_multi_bssid) { ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(ar->hw, MFP_CAPABLE); ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); ieee80211_hw_set(ar->hw, AP_LINK_PS); ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); ieee80211_hw_set(ar->hw, QUEUE_CONTROL); ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) { ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD); } if (cap->nss_ratio_enabled) ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW); if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) { ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER); ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(ar->hw, USES_RSS); } ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; /* TODO: Check if HT capability advertised from firmware is different * for each band for a dual band capable radio. 
It will be tricky to * handle it when the HT capability is different for each band. */ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz)) ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; ar->max_num_stations = TARGET_NUM_STATIONS(ab); ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab); ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { ar->hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; } if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; ar->hw->wiphy->max_sched_scan_plan_interval = WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; ar->hw->wiphy->max_sched_scan_plan_iterations = WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; } ret = ath11k_wow_init(ar); if (ret) { ath11k_warn(ar->ab, "failed to init wow: %d\n", ret); goto err_free_if_combs; } if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, ar->ab->wmi_ab.svc_map)) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; ar->hw->vif_data_size = sizeof(struct ath11k_vif); ar->hw->sta_data_size = sizeof(struct ath11k_sta); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map)) { wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_BSS_COLOR); ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION); } ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa; ar->hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath11k_iftypes_ext_capa); if (ar->supports_6ghz) { wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); } wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); if (test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map)) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); ar->hw->wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS(ab); ar->hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; ath11k_reg_init(ar); if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) { ar->hw->netdev_features = NETIF_F_HW_CSUM; ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); }
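/* Advertise the BIOS-provided SAR table to cfg80211 only when both
 * the firmware service flag and hw_params indicate support for it.
 */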
if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) && ab->hw_params.bios_sar_capa) ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa; ret = ieee80211_register_hw(ar->hw); if (ret) { ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); goto err_free_if_combs; } if (!ab->hw_params.supports_monitor) /* There's a race between calling ieee80211_register_hw() * and here where the monitor mode is enabled for a little * while. But that time is so short that in practice it makes * no difference in real life. */ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); /* Apply the regd received during initialization */ ret = ath11k_regd_update(ar); if (ret) { ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret); goto err_unregister_hw; } if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) { memcpy(&ar->alpha2, ab->new_alpha2, 2); ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed to set cc code for mac register: %d\n", ret); } ret = ath11k_debugfs_register(ar); if (ret) { ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret); goto err_unregister_hw; } return 0; err_unregister_hw: ieee80211_unregister_hw(ar->hw); err_free_if_combs: kfree(ar->hw->wiphy->iface_combinations[0].limits); kfree(ar->hw->wiphy->iface_combinations); err_free_channels: kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); err: SET_IEEE80211_DEV(ar->hw, NULL); return ret; } int ath11k_mac_register(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; int ret; u8 mac_addr[ETH_ALEN] = {}; if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; /* Initialize channel counters frequency value in hertz */ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; ret = ath11k_peer_rhash_tbl_init(ab); if (ret) return ret; device_get_mac_address(ab->dev, mac_addr); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (ab->pdevs_macaddr_valid) { ether_addr_copy(ar->mac_addr, pdev->mac_addr); } else { if (is_zero_ether_addr(mac_addr)) ether_addr_copy(ar->mac_addr, ab->mac_addr); else ether_addr_copy(ar->mac_addr, mac_addr); ar->mac_addr[4] += i; } idr_init(&ar->txmgmt_idr); spin_lock_init(&ar->txmgmt_idr_lock); ret = __ath11k_mac_register(ar); if (ret) goto err_cleanup; init_waitqueue_head(&ar->txmgmt_empty_waitq); } return 0; err_cleanup: for (i = i - 1; i >= 0; i--) { pdev = &ab->pdevs[i]; ar = pdev->ar; __ath11k_mac_unregister(ar); } ath11k_peer_rhash_tbl_destroy(ab); return ret; } int ath11k_mac_allocate(struct ath11k_base *ab) { struct ieee80211_hw *hw; struct ath11k *ar; struct ath11k_pdev *pdev; int ret; int i; if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops); if (!hw) { ath11k_warn(ab, "failed to allocate mac80211 hw device\n"); ret = -ENOMEM; goto err_free_mac; } ar = hw->priv; ar->hw = hw; ar->ab = ab; ar->pdev = pdev; ar->pdev_idx = i; ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i); ar->wmi = &ab->wmi_ab.wmi[i]; /* FIXME wmi[0] is already initialized during attach, * Should we do this again?
*/ ath11k_wmi_pdev_attach(ab, i); ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask); ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask); pdev->ar = ar; spin_lock_init(&ar->data_lock); INIT_LIST_HEAD(&ar->arvifs); INIT_LIST_HEAD(&ar->ppdu_stats_info); mutex_init(&ar->conf_mutex); init_completion(&ar->vdev_setup_done); init_completion(&ar->vdev_delete_done); init_completion(&ar->peer_assoc_done); init_completion(&ar->peer_delete_done); init_completion(&ar->install_key_done); init_completion(&ar->bss_survey_done); init_completion(&ar->scan.started); init_completion(&ar->scan.completed); init_completion(&ar->scan.on_channel); init_completion(&ar->thermal.wmi_sync); INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work); INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work); INIT_LIST_HEAD(&ar->channel_update_queue); INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work); INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work); skb_queue_head_init(&ar->wmi_mgmt_tx_queue); clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; init_completion(&ar->completed_11d_scan); ath11k_fw_stats_init(ar); } return 0; err_free_mac: ath11k_mac_destroy(ab); return ret; } void ath11k_mac_destroy(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; ath11k_fw_stats_free(&ar->fw_stats); ieee80211_free_hw(ar->hw); pdev->ar = NULL; } } int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, enum wmi_sta_keepalive_method method, u32 interval) { struct ath11k *ar = arvif->ar; struct wmi_sta_keepalive_arg arg = {}; int ret; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) return 0; arg.vdev_id = arvif->vdev_id; arg.enabled = 1; arg.method = method; arg.interval = interval; ret = ath11k_wmi_sta_keepalive(ar, &arg); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } diff --git a/qmi.c b/qmi.c index 378ac96b861b..aea56c38bf8f 100644 --- a/qmi.c +++ b/qmi.c @@ -1,3375 +1,3366 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include #include #include "qmi.h" #include "core.h" #include "debug.h" #include "hif.h" #include -#include +#include #include #include #include #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 #define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 #define PLATFORM_CAP_PCIE_PME_D3COLD 0x10 #define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING=" bool ath11k_cold_boot_cal = 1; EXPORT_SYMBOL(ath11k_cold_boot_cal); module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644); MODULE_PARM_DESC(cold_boot_cal, "Decrease the channel switch time but increase the driver load time (Default: true)"); static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, num_clients_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, num_clients), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, wake_msi_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, wake_msi), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios_len), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, .elem_size = sizeof(u32), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, nm_modem_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, nm_modem), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct 
qmi_wlanfw_host_cap_req_msg_v01, m3_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_filesys_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_filesys_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_done_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_done), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_bucket_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_bucket), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1C, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_cfg_mode_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1C, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_cfg_mode), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, 
initiate_cal_download_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_download_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_update_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_update_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, msa_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, msa_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, pin_connect_result_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, pin_connect_result_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, client_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, client_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, request_mem_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, request_mem_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_mem_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_mem_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_init_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_init_done_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, rejuvenate_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, rejuvenate_enable), }, { .data_type = 
QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, xo_cal_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, xo_cal_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, cal_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, cal_done_enable), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, fw_status_valid), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, fw_status), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, size), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg), .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, 
.tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, mem_seg_len), }, { .data_type = QMI_STRUCT, .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, mem_seg), .ei_array = qmi_wlanfw_mem_seg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, mem_seg_len), }, { .data_type = QMI_STRUCT, .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, mem_seg), .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_addr_valid), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_addr), }, { .data_type = QMI_OPT_FLAG, 
.elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_size_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_size), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, chip_id), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, chip_family), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01, board_id), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, fw_version), }, { .data_type = QMI_STRING, .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1, .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, fw_build_timestamp), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, chip_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, chip_info), .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, board_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, board_info), .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = 
NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, soc_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, soc_info), .ei_array = qmi_wlanfw_soc_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_version_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_version_info), .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_build_id_valid), }, { .data_type = QMI_STRING, .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1, .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_build_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, num_macs_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, num_macs), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, voltage_mv_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, voltage_mv), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, time_freq_hz_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, time_freq_hz), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, otp_version_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, otp_version), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, eeprom_read_timeout_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, eeprom_read_timeout), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, valid), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = 
sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, file_id_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, file_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, total_size_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, total_size), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, seg_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, seg_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data_len), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01, .elem_size = sizeof(u8), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, end_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, end), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, bdf_type_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, bdf_type), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, 
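/* As in all of the element-info arrays here, the QMI_EOTI entry is the
 * sentinel that stops the QMI encoder/decoder when it walks the table.
 */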
}; static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, pipe_num), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, pipe_dir), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, nentries), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, nbytes_max), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, flags), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, service_id), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, pipe_dir), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, pipe_num), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, offset), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01, addr), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, mode), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type
= 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, hw_debug_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, hw_debug), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, host_version_valid), }, { .data_type = QMI_STRING, .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1, .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, host_version), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_CE_V01, .elem_size = sizeof( struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg), .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01, .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg), .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01, .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg), .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = 
sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01, .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2), .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, enablefwlog_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, enablefwlog), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; /* clang stack usage explodes if this is inlined */ static noinline_for_stack int ath11k_qmi_host_cap_send(struct ath11k_base *ab) { struct qmi_wlanfw_host_cap_req_msg_v01 req; struct qmi_wlanfw_host_cap_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.num_clients_valid = 1; req.num_clients = 1; req.mem_cfg_mode = ab->qmi.target_mem_mode; req.mem_cfg_mode_valid = 1; req.bdf_support_valid = 1; req.bdf_support = 1; if (ab->hw_params.m3_fw_support) { req.m3_support_valid = 1; req.m3_support = 1; req.m3_cache_support_valid = 1; req.m3_cache_support = 1; } else { req.m3_support_valid = 0; req.m3_support = 0; req.m3_cache_support_valid = 0; req.m3_cache_support = 0; } req.cal_done_valid = 1; req.cal_done = ab->qmi.cal_done; if (ab->hw_params.internal_sleep_clock) { req.nm_modem_valid = 1; /* Notify firmware that this is non-qualcomm platform. */ req.nm_modem |= HOST_CSTATE_BIT; /* Notify firmware about the sleep clock selection, * nm_modem_bit[1] is used for this purpose. 
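* To illustrate the layout (inferred from this comment, not stated elsewhere in the patch): SLEEP_CLOCK_SELECT_INTERNAL_BIT is expected to be bit 1 of nm_modem as noted above, while HOST_CSTATE_BIT, OR'ed in just before, marks the non-Qualcomm host in a separate nm_modem bit.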
Host driver on * non-qualcomm platforms should select internal sleep * clock. */ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; } if (ab->hw_params.global_reset) req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD; ath11k_dbg(ab, ATH11K_DBG_QMI, "host cap request\n"); ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_HOST_CAP_REQ_V01, QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) goto out; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "host capability request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: return ret; } static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) { struct qmi_wlanfw_ind_register_req_msg_v01 *req; struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; struct qmi_handle *handle = &ab->qmi.handle; struct qmi_txn txn; int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) { ret = -ENOMEM; goto resp_out; } req->client_id_valid = 1; req->client_id = QMI_WLANFW_CLIENT_ID; req->fw_ready_enable_valid = 1; req->fw_ready_enable = 1; req->cal_done_enable_valid = 1; req->cal_done_enable = 1; req->fw_init_done_enable_valid = 1; req->fw_init_done_enable = 1; req->pin_connect_result_enable_valid = 0; req->pin_connect_result_enable = 0; /* WCN6750 doesn't request for DDR memory via QMI, * instead it uses a fixed 12MB reserved memory * region in DDR. */ if (!ab->hw_params.fixed_fw_mem) { req->request_mem_enable_valid = 1; req->request_mem_enable = 1; req->fw_mem_ready_enable_valid = 1; req->fw_mem_ready_enable = 1; } ret = qmi_txn_init(handle, &txn, qmi_wlanfw_ind_register_resp_msg_v01_ei, resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "indication register request\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_IND_REGISTER_REQ_V01, QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to register fw indication: %d\n", ret); goto out; } if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "firmware indication register request failed: %d %d\n", resp->resp.result, resp->resp.error); ret = -EINVAL; goto out; } out: kfree(resp); resp_out: kfree(req); return ret; } static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) { struct qmi_wlanfw_respond_mem_req_msg_v01 *req; struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0, i; bool delayed; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); /* For QCA6390 by default FW requests a block of ~4M contiguous * DMA memory, it's hard to allocate from OS. So host returns * failure to FW and FW will then request multiple blocks of small * chunk size memory. 
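* Illustrative flow of the exchange handled below: FW asks for one large segment, the host answers with mem_seg_len = 0 (the memset(req, 0, ...) branch), FW then re-sends the request split into several smaller segments, and each of those is backed by dma_alloc_coherent().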
*/ if (!(ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && ab->qmi.target_mem_delayed) { delayed = true; ath11k_dbg(ab, ATH11K_DBG_QMI, "delays mem_request %d\n", ab->qmi.mem_seg_count); memset(req, 0, sizeof(*req)); } else { delayed = false; req->mem_seg_len = ab->qmi.mem_seg_count; for (i = 0; i < req->mem_seg_len ; i++) { req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr; req->mem_seg[i].size = ab->qmi.target_mem[i].size; req->mem_seg[i].type = ab->qmi.target_mem[i].type; ath11k_dbg(ab, ATH11K_DBG_QMI, "req mem_seg[%d] %pad %u %u\n", i, &ab->qmi.target_mem[i].paddr, ab->qmi.target_mem[i].size, ab->qmi.target_mem[i].type); } } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "respond memory request delayed %i\n", delayed); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_RESPOND_MEM_REQ_V01, QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to respond qmi memory request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { /* the error response is expected when * target_mem_delayed is true. */ if (delayed && resp.resp.error == 0) goto out; ath11k_warn(ab, "qmi respond memory request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: kfree(req); return ret; } static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab) { int i; for (i = 0; i < ab->qmi.mem_seg_count; i++) { if (!ab->qmi.target_mem[i].anyaddr) continue; if (ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { iounmap(ab->qmi.target_mem[i].iaddr); ab->qmi.target_mem[i].iaddr = NULL; continue; } dma_free_coherent(ab->dev, ab->qmi.target_mem[i].prev_size, ab->qmi.target_mem[i].vaddr, ab->qmi.target_mem[i].paddr); ab->qmi.target_mem[i].vaddr = NULL; } } static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) { int i; struct target_mem_chunk *chunk; ab->qmi.target_mem_delayed = false; for (i = 0; i < ab->qmi.mem_seg_count; i++) { chunk = &ab->qmi.target_mem[i]; /* Firmware reloads in coldboot/firmware recovery. * in such case, no need to allocate memory for FW again. 
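* A chunk is reused only when both its prev_type and prev_size still match the new request; otherwise it is freed and reallocated below (or, when the target uses only a few large segments, the response is delayed so FW retries with smaller sizes).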
*/ if (chunk->vaddr) { if (chunk->prev_type == chunk->type && chunk->prev_size == chunk->size) continue; if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n", chunk->size, chunk->type, chunk->prev_size, chunk->prev_type); ab->qmi.target_mem_delayed = true; return 0; } /* cannot reuse the existing chunk */ dma_free_coherent(ab->dev, chunk->prev_size, chunk->vaddr, chunk->paddr); chunk->vaddr = NULL; } chunk->vaddr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, GFP_KERNEL | __GFP_NOWARN); if (!chunk->vaddr) { if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "dma allocation failed (%d B type %u), will try later with small size\n", chunk->size, chunk->type); ath11k_qmi_free_target_mem_chunk(ab); ab->qmi.target_mem_delayed = true; return 0; } ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n", chunk->size, chunk->type); return -EINVAL; } chunk->prev_type = chunk->type; chunk->prev_size = chunk->size; } return 0; } static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) { struct device *dev = ab->dev; - struct device_node *hremote_node = NULL; - struct resource res; + struct resource res = {}; u32 host_ddr_sz; int i, idx, ret; for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) { switch (ab->qmi.target_mem[i].type) { case HOST_DDR_REGION_TYPE: - hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0); - if (!hremote_node) { - ath11k_dbg(ab, ATH11K_DBG_QMI, - "fail to get hremote_node\n"); - return -ENODEV; - } - - ret = of_address_to_resource(hremote_node, 0, &res); - of_node_put(hremote_node); + ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res); if (ret) { ath11k_dbg(ab, ATH11K_DBG_QMI, "fail to get reg from hremote\n"); return ret; } if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) { ath11k_dbg(ab, ATH11K_DBG_QMI, "fail to assign memory of sz\n"); return -EINVAL; } ab->qmi.target_mem[idx].paddr = res.start; ab->qmi.target_mem[idx].iaddr = ioremap(ab->qmi.target_mem[idx].paddr, ab->qmi.target_mem[i].size); if (!ab->qmi.target_mem[idx].iaddr) return -EIO; ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; host_ddr_sz = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; break; case BDF_MEM_REGION_TYPE: ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr; ab->qmi.target_mem[idx].iaddr = NULL; ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; break; case CALDB_MEM_REGION_TYPE: if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) { ath11k_warn(ab, "qmi mem size is low to load caldata\n"); return -EINVAL; } if (ath11k_core_coldboot_cal_support(ab)) { - if (hremote_node) { + if (resource_size(&res)) { ab->qmi.target_mem[idx].paddr = res.start + host_ddr_sz; ab->qmi.target_mem[idx].iaddr = ioremap(ab->qmi.target_mem[idx].paddr, ab->qmi.target_mem[i].size); if (!ab->qmi.target_mem[idx].iaddr) return -EIO; } else { ab->qmi.target_mem[idx].paddr = ATH11K_QMI_CALDB_ADDRESS; ab->qmi.target_mem[idx].iaddr = NULL; } } else { ab->qmi.target_mem[idx].paddr = 0; ab->qmi.target_mem[idx].iaddr = NULL; } ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; break; default: ath11k_warn(ab, "qmi ignore invalid mem req type %d\n", ab->qmi.target_mem[i].type); break; } } 
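/* idx only counted the segment types handled above; requests with an unknown type were skipped, so trim mem_seg_count to match. */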
ab->qmi.mem_seg_count = idx; return 0; } static int ath11k_qmi_request_device_info(struct ath11k_base *ab) { struct qmi_wlanfw_device_info_req_msg_v01 req = {}; struct qmi_wlanfw_device_info_resp_msg_v01 resp = {}; struct qmi_txn txn; void __iomem *bar_addr_va; int ret; /* device info message req is only sent for hybrid bus devices */ if (!ab->hw_params.hybrid_bus_type) return 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlfw_device_info_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_DEVICE_INFO_REQ_V01, QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_device_info_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi target device info request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait qmi target device info request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi device info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (!resp.bar_addr_valid || !resp.bar_size_valid) { ath11k_warn(ab, "qmi device info response invalid: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (!resp.bar_addr || resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) { ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n", resp.bar_addr, resp.bar_size); ret = -EINVAL; goto out; } bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size); if (!bar_addr_va) { ath11k_warn(ab, "qmi device info ioremap failed\n"); ab->mem_len = 0; ret = -EIO; goto out; } ab->mem = bar_addr_va; ab->mem_len = resp.bar_size; if (!ab->hw_params.ce_remap) ab->mem_ce = ab->mem; return 0; out: return ret; } static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) { struct qmi_wlanfw_cap_req_msg_v01 req; struct qmi_wlanfw_cap_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; int r; char *fw_build_id; int fw_build_id_mask_len; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "target cap request\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_CAP_REQ_V01, QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi cap request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (resp.chip_info_valid) { ab->qmi.target.chip_id = resp.chip_info.chip_id; ab->qmi.target.chip_family = resp.chip_info.chip_family; } if (resp.board_info_valid) ab->qmi.target.board_id = resp.board_info.board_id; else ab->qmi.target.board_id = 0xFF; if (resp.soc_info_valid) ab->qmi.target.soc_id = resp.soc_info.soc_id; if (resp.fw_version_info_valid) { ab->qmi.target.fw_version = resp.fw_version_info.fw_version; strscpy(ab->qmi.target.fw_build_timestamp, resp.fw_version_info.fw_build_timestamp, sizeof(ab->qmi.target.fw_build_timestamp)); } if (resp.fw_build_id_valid) strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id, sizeof(ab->qmi.target.fw_build_id)); if 
(resp.eeprom_read_timeout_valid) { ab->qmi.target.eeprom_caldata = resp.eeprom_read_timeout; ath11k_dbg(ab, ATH11K_DBG_QMI, "cal data supported from eeprom\n"); } fw_build_id = ab->qmi.target.fw_build_id; fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK); if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len)) fw_build_id = fw_build_id + fw_build_id_mask_len; ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", ab->qmi.target.chip_id, ab->qmi.target.chip_family, ab->qmi.target.board_id, ab->qmi.target.soc_id); ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s", ab->qmi.target.fw_version, ab->qmi.target.fw_build_timestamp, fw_build_id); r = ath11k_core_check_smbios(ab); if (r) ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); r = ath11k_core_check_dt(ab); if (r) ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n"); out: return ret; } static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, const u8 *data, u32 len, u8 type) { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; struct qmi_txn txn; const u8 *temp = data; void __iomem *bdf_addr = NULL; int ret = 0; u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); if (ab->hw_params.fixed_bdf_addr) { bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size); if (!bdf_addr) { ath11k_warn(ab, "qmi ioremap error for bdf_addr\n"); ret = -EIO; goto err_free_req; } } while (remaining) { req->valid = 1; req->file_id_valid = 1; req->file_id = ab->qmi.target.board_id; req->total_size_valid = 1; req->total_size = remaining; req->seg_id_valid = 1; req->data_valid = 1; req->bdf_type = type; req->bdf_type_valid = 1; req->end_valid = 1; req->end = 0; if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) { req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01; } else { req->data_len = remaining; req->end = 1; } if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { req->data_valid = 0; req->end = 1; req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; } else { memcpy(req->data, temp, req->data_len); } if (ab->hw_params.fixed_bdf_addr) { if (type == ATH11K_QMI_FILE_TYPE_CALDATA) bdf_addr += ab->hw_params.fw.cal_offset; memcpy_toio(bdf_addr, temp, len); } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_bdf_download_resp_msg_v01_ei, &resp); if (ret < 0) goto err_iounmap; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download req fixed addr type %d\n", type); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_bdf_download_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); goto err_iounmap; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait board file download request: %d\n", ret); goto err_iounmap; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "board file download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto err_iounmap; } if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { remaining = 0; } else { remaining -= req->data_len; temp += req->data_len; req->seg_id++; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download request remaining %i\n", remaining); } } err_iounmap: if (ab->hw_params.fixed_bdf_addr) iounmap(bdf_addr); err_free_req: kfree(req); return ret; } static int ath11k_qmi_load_bdf_qmi(struct 
ath11k_base *ab, bool regdb) { struct device *dev = ab->dev; char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE]; const struct firmware *fw_entry; struct ath11k_board_data bd; u32 fw_size, file_type; int ret = 0, bdf_type; const u8 *tmp; memset(&bd, 0, sizeof(bd)); if (regdb) { ret = ath11k_core_fetch_regdb(ab, &bd); } else { ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret); } if (ret) goto out; if (regdb) bdf_type = ATH11K_QMI_BDF_TYPE_REGDB; else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) bdf_type = ATH11K_QMI_BDF_TYPE_ELF; else bdf_type = ATH11K_QMI_BDF_TYPE_BIN; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf_type %d\n", bdf_type); fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len); ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type); if (ret < 0) { ath11k_warn(ab, "qmi failed to load bdf file\n"); goto out; } /* QCA6390/WCN6855 does not support cal data, skip it */ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB) goto out; if (ab->qmi.target.eeprom_caldata) { file_type = ATH11K_QMI_FILE_TYPE_EEPROM; tmp = filename; fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; } else { file_type = ATH11K_QMI_FILE_TYPE_CALDATA; /* cal-<bus>-<id>.bin */ snprintf(filename, sizeof(filename), "cal-%s-%s.bin", ath11k_bus_str(ab->hif.bus), dev_name(dev)); fw_entry = ath11k_core_firmware_request(ab, filename); if (!IS_ERR(fw_entry)) goto success; fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE); if (IS_ERR(fw_entry)) { /* Caldata may not be present during first time calibration in * factory hence allow to boot without loading caldata in ftm mode */ if (ath11k_ftm_mode) { ath11k_info(ab, "Booting without cal data file in factory test mode\n"); return 0; } ret = PTR_ERR(fw_entry); ath11k_warn(ab, "qmi failed to load CAL data file:%s\n", filename); goto out; } success: fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size); tmp = fw_entry->data; } ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type); if (ret < 0) { ath11k_warn(ab, "qmi failed to load caldata\n"); goto out_qmi_cal; } ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type); out_qmi_cal: if (!ab->qmi.target.eeprom_caldata) release_firmware(fw_entry); out: ath11k_core_free_bdf(ab, &bd); ath11k_dbg(ab, ATH11K_DBG_QMI, "BDF download sequence completed\n"); return ret; } static int ath11k_qmi_m3_load(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; const struct firmware *fw = NULL; const void *m3_data; char path[100]; size_t m3_len; int ret; if (m3_mem->vaddr) /* m3 firmware buffer is already available in the DMA buffer */ return 0; if (ab->fw.m3_data && ab->fw.m3_len > 0) { /* firmware-N.bin had a m3 firmware file so use that */ m3_data = ab->fw.m3_data; m3_len = ab->fw.m3_len; } else { /* No m3 file in firmware-N.bin so try to request old * separate m3.bin.
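* The resulting lookup order: an m3 image embedded in firmware-N.bin wins, and the standalone m3.bin is requested only as a fallback.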
*/ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); if (IS_ERR(fw)) { ret = PTR_ERR(fw); ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, path, sizeof(path)); ath11k_err(ab, "failed to load %s: %d\n", path, ret); return ret; } m3_data = fw->data; m3_len = fw->size; } m3_mem->vaddr = dma_alloc_coherent(ab->dev, m3_len, &m3_mem->paddr, GFP_KERNEL); if (!m3_mem->vaddr) { ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n", - fw->size); + m3_len); ret = -ENOMEM; goto out; } memcpy(m3_mem->vaddr, m3_data, m3_len); m3_mem->size = m3_len; ret = 0; out: release_firmware(fw); return ret; } static void ath11k_qmi_m3_free(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr) return; dma_free_coherent(ab->dev, m3_mem->size, m3_mem->vaddr, m3_mem->paddr); m3_mem->vaddr = NULL; m3_mem->size = 0; } /* clang stack usage explodes if this is inlined */ static noinline_for_stack int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; struct qmi_wlanfw_m3_info_req_msg_v01 req; struct qmi_wlanfw_m3_info_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); if (ab->hw_params.m3_fw_support) { ret = ath11k_qmi_m3_load(ab); if (ret) { ath11k_err(ab, "failed to load m3 firmware: %d", ret); return ret; } req.addr = m3_mem->paddr; req.size = m3_mem->size; } else { req.addr = 0; req.size = 0; } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "m3 info req\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_M3_INFO_REQ_V01, QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "m3 info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: return ret; } static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, u32 mode) { struct qmi_wlanfw_wlan_mode_req_msg_v01 req; struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.mode = mode; req.hw_debug_valid = 1; req.hw_debug = 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan mode req mode %d\n", mode); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_MODE_REQ_V01, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) { ath11k_warn(ab, "WLFW service is dis-connected\n"); return 0; } ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n", mode, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n", mode, resp.resp.result, resp.resp.error); ret = 
-EINVAL; goto out; } out: return ret; } static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) { struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; struct ce_pipe_config *ce_cfg; struct service_to_pipe *svc_cfg; struct qmi_txn txn; int ret = 0, pipe_num; ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); req->host_version_valid = 1; strscpy(req->host_version, ATH11K_HOST_VERSION_STRING, sizeof(req->host_version)); req->tgt_cfg_valid = 1; /* This is number of CE configs */ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len; for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) { req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum; req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir; req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries; req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max; req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags; } req->svc_cfg_valid = 1; /* This is number of Service/CE configs */ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len; for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) { req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id; req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir; req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum; } req->shadow_reg_valid = 0; /* set shadow v2 configuration */ if (ab->hw_params.supports_shadow_regs) { req->shadow_reg_v2_valid = 1; req->shadow_reg_v2_len = min_t(u32, ab->qmi.ce_cfg.shadow_reg_v2_len, QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01); memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2, sizeof(u32) * req->shadow_reg_v2_len); } else { req->shadow_reg_v2_valid = 0; } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan cfg req\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_CFG_REQ_V01, QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "wlan config request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: kfree(req); return ret; } static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable) { int ret; struct qmi_txn txn; struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {}; req.enablefwlog_valid = true; req.enablefwlog = enable ? 
1 : 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_INI_REQ_V01, QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n", ret); qmi_txn_cancel(&txn); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; } out: return ret; } void ath11k_qmi_firmware_stop(struct ath11k_base *ab) { int ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware stop\n"); ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret); return; } } int ath11k_qmi_firmware_start(struct ath11k_base *ab, u32 mode) { int ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware start\n"); if (ab->hw_params.fw_wmi_diag_event) { ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret); return ret; } } ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_mode_send(ab, mode); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } return 0; } int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) { long time_left; if (!ath11k_core_coldboot_cal_support(ab) || ab->hw_params.cbcal_restart_fw == 0) return 0; ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n"); time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (time_left <= 0) { ath11k_warn(ab, "Coldboot Calibration timed out\n"); return -ETIMEDOUT; } /* reset the firmware */ ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n"); return 0; } EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot); static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) { long time_left; int ret; ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n"); time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (time_left <= 0) { ath11k_warn(ab, "coldboot calibration timed out\n"); return 0; } ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n"); return 0; } static int ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, enum ath11k_qmi_event_type type, void *data) { struct ath11k_qmi_driver_event *event; event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return -ENOMEM; event->type = type; event->data = data; spin_lock(&qmi->event_lock); list_add_tail(&event->list, &qmi->event_list); spin_unlock(&qmi->event_lock); queue_work(qmi->event_wq, &qmi->event_work); return 0; } static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_respond_fw_mem_request(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret); 
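/* On failure here the event worker (ath11k_qmi_driver_event_work() below) sets ATH11K_FLAG_QMI_FAIL for this event. */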
return ret; } return ret; } static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_request_target_cap(ab); if (ret < 0) { ath11k_warn(ab, "failed to request qmi target capabilities: %d\n", ret); return ret; } ret = ath11k_qmi_request_device_info(ab); if (ret < 0) { ath11k_warn(ab, "failed to request qmi device info: %d\n", ret); return ret; } if (ab->hw_params.supports_regdb) ath11k_qmi_load_bdf_qmi(ab, true); ret = ath11k_qmi_load_bdf_qmi(ab, false); if (ret < 0) { ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; } return 0; } static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_fw_ind_register_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", ret); return ret; } ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); return ret; } if (!ab->hw_params.fixed_fw_mem) return ret; ret = ath11k_qmi_event_load_bdf(qmi); if (ret < 0) { ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret); return ret; } return ret; } static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *data) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data; int i, ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware request memory request\n"); if (msg->mem_seg_len == 0 || msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) ath11k_warn(ab, "invalid memory segment length: %u\n", msg->mem_seg_len); ab->qmi.mem_seg_count = msg->mem_seg_len; for (i = 0; i < qmi->mem_seg_count ; i++) { ab->qmi.target_mem[i].type = msg->mem_seg[i].type; ab->qmi.target_mem[i].size = msg->mem_seg[i].size; ath11k_dbg(ab, ATH11K_DBG_QMI, "mem seg type %d size %d\n", msg->mem_seg[i].type, msg->mem_seg[i].size); } if (ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { ath11k_warn(ab, "failed to assign qmi target memory: %d\n", ret); return; } } else { ret = ath11k_qmi_alloc_target_mem_chunk(ab); if (ret) { ath11k_warn(ab, "failed to allocate qmi target memory: %d\n", ret); return; } } ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL); } static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware memory ready indication\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL); } static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware ready\n"); if (!ab->qmi.cal_done) { ab->qmi.cal_done = 1; wake_up(&ab->qmi.cold_boot_waitq); } ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL); } static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct 
ath11k_base *ab = qmi->ab; ab->qmi.cal_done = 1; wake_up(&ab->qmi.cold_boot_waitq); ath11k_dbg(ab, ATH11K_DBG_QMI, "cold boot calibration done\n"); } static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL); ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n"); } static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = { { .type = QMI_INDICATION, .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01, .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01), .fn = ath11k_qmi_msg_mem_request_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01, .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01), .fn = ath11k_qmi_msg_mem_ready_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_READY_IND_V01, .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), .fn = ath11k_qmi_msg_fw_ready_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01, .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01), .fn = ath11k_qmi_msg_cold_boot_cal_done_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01, .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01), .fn = ath11k_qmi_msg_fw_init_done_cb, }, /* end of list */ {}, }; static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, struct qmi_service *service) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; struct sockaddr_qrtr *sq = &qmi->sq; int ret; sq->sq_family = AF_QIPCRTR; sq->sq_node = service->node; sq->sq_port = service->port; ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, sizeof(*sq), 0); if (ret) { ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw qmi service connected\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL); return ret; } static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl, struct qmi_service *service) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw del server\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL); } static const struct qmi_ops ath11k_qmi_ops = { .new_server = ath11k_qmi_ops_new_server, .del_server = ath11k_qmi_ops_del_server, }; static void ath11k_qmi_driver_event_work(struct work_struct *work) { struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi, event_work); struct ath11k_qmi_driver_event *event; struct ath11k_base *ab = qmi->ab; int ret; spin_lock(&qmi->event_lock); while (!list_empty(&qmi->event_list)) { event = list_first_entry(&qmi->event_list, struct ath11k_qmi_driver_event, list); list_del(&event->list); spin_unlock(&qmi->event_lock); if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) { kfree(event); return; } switch (event->type) { case ATH11K_QMI_EVENT_SERVER_ARRIVE: ret = ath11k_qmi_event_server_arrive(qmi); if (ret < 0) set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case 
ATH11K_QMI_EVENT_SERVER_EXIT: set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); if (!ab->is_reset) ath11k_core_pre_reconfigure_recovery(ab); break; case ATH11K_QMI_EVENT_REQUEST_MEM: ret = ath11k_qmi_event_mem_request(qmi); if (ret < 0) set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case ATH11K_QMI_EVENT_FW_MEM_READY: ret = ath11k_qmi_event_load_bdf(qmi); if (ret < 0) { set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; } ret = ath11k_qmi_wlanfw_m3_info_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret); set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); } break; case ATH11K_QMI_EVENT_FW_INIT_DONE: clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) { if (ab->is_reset) ath11k_hal_dump_srng_stats(ab); queue_work(ab->workqueue, &ab->restart_work); break; } if (ab->qmi.cal_done == 0 && ath11k_core_coldboot_cal_support(ab)) { ath11k_qmi_process_coldboot_calibration(ab); } else { clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); if (ret) { set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; } set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); } break; case ATH11K_QMI_EVENT_FW_READY: /* For targets requiring a FW restart upon cold * boot completion, there is no need to process * FW ready; such targets will receive FW init * done message after FW restart. */ if (ab->hw_params.cbcal_restart_fw) break; clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); ath11k_core_qmi_firmware_ready(ab); set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); break; case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE: break; default: ath11k_warn(ab, "invalid qmi event type: %d", event->type); break; } kfree(event); spin_lock(&qmi->event_lock); } spin_unlock(&qmi->event_lock); } int ath11k_qmi_init_service(struct ath11k_base *ab) { int ret; memset(&ab->qmi.target, 0, sizeof(struct target_info)); memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk)); ab->qmi.ab = ab; ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode; ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX, &ath11k_qmi_ops, ath11k_qmi_msg_handlers); if (ret < 0) { ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret); return ret; } ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0); if (!ab->qmi.event_wq) { ath11k_err(ab, "failed to allocate workqueue\n"); return -EFAULT; } INIT_LIST_HEAD(&ab->qmi.event_list); spin_lock_init(&ab->qmi.event_lock); INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work); ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01, ATH11K_QMI_WLFW_SERVICE_VERS_V01, ab->qmi.service_ins_id); if (ret < 0) { ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret); destroy_workqueue(ab->qmi.event_wq); return ret; } return ret; } void ath11k_qmi_deinit_service(struct ath11k_base *ab) { qmi_handle_release(&ab->qmi.handle); cancel_work_sync(&ab->qmi.event_work); destroy_workqueue(ab->qmi.event_wq); ath11k_qmi_m3_free(ab); ath11k_qmi_free_target_mem_chunk(ab); } EXPORT_SYMBOL(ath11k_qmi_deinit_service); void ath11k_qmi_free_resource(struct ath11k_base *ab) { ath11k_qmi_free_target_mem_chunk(ab); ath11k_qmi_m3_free(ab); } diff --git a/wmi.c b/wmi.c index 0491e3fd6b5e..e3b444333dee 100644 --- a/wmi.c +++ b/wmi.c @@ -1,9900 +1,9903 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The 
Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "debug.h" #include "mac.h" #include "hw.h" #include "peer.h" #include "testmode.h" #include "p2p.h" struct wmi_tlv_policy { size_t min_len; }; struct wmi_tlv_svc_ready_parse { bool wmi_svc_bitmap_done; }; struct wmi_tlv_dma_ring_caps_parse { struct wmi_dma_ring_capabilities *dma_ring_caps; u32 n_dma_ring_caps; }; struct wmi_tlv_svc_rdy_ext_parse { struct ath11k_service_ext_param param; struct wmi_soc_mac_phy_hw_mode_caps *hw_caps; struct wmi_hw_mode_capabilities *hw_mode_caps; u32 n_hw_mode_caps; u32 tot_phy_id; struct wmi_hw_mode_capabilities pref_hw_mode_caps; struct wmi_mac_phy_capabilities *mac_phy_caps; u32 n_mac_phy_caps; struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps; struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps; u32 n_ext_hal_reg_caps; struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; bool hw_mode_done; bool mac_phy_done; bool ext_hal_reg_done; bool mac_phy_chainmask_combo_done; bool mac_phy_chainmask_cap_done; bool oem_dma_ring_cap_done; bool dma_ring_cap_done; }; struct wmi_tlv_svc_rdy_ext2_parse { struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; bool dma_ring_cap_done; }; struct wmi_tlv_rdy_parse { u32 num_extra_mac_addr; }; struct wmi_tlv_dma_buf_release_parse { struct ath11k_wmi_dma_buf_release_fixed_param fixed; struct wmi_dma_buf_release_entry *buf_entry; struct wmi_dma_buf_release_meta_data *meta_data; u32 num_buf_entry; u32 num_meta; bool buf_entry_done; bool meta_data_done; }; struct wmi_tlv_fw_stats_parse { const struct wmi_stats_event *ev; const struct wmi_per_chain_rssi_stats *rssi; struct ath11k_fw_stats *stats; int rssi_num; bool chain_rssi_done; }; struct wmi_tlv_mgmt_rx_parse { const struct wmi_mgmt_rx_hdr *fixed; const u8 *frame_buf; bool frame_buf_done; }; static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 }, [WMI_TAG_SERVICE_READY_EVENT] = { .min_len = sizeof(struct wmi_service_ready_event) }, [WMI_TAG_SERVICE_READY_EXT_EVENT] = { .min_len = sizeof(struct wmi_service_ready_ext_event) }, [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) }, [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) }, [WMI_TAG_VDEV_START_RESPONSE_EVENT] = { .min_len = sizeof(struct wmi_vdev_start_resp_event) }, [WMI_TAG_PEER_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_peer_delete_resp_event) }, [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = { .min_len = sizeof(struct wmi_bcn_tx_status_event) }, [WMI_TAG_VDEV_STOPPED_EVENT] = { .min_len = sizeof(struct wmi_vdev_stopped_event) }, [WMI_TAG_REG_CHAN_LIST_CC_EVENT] = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) }, [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) }, [WMI_TAG_MGMT_RX_HDR] = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) }, [WMI_TAG_MGMT_TX_COMPL_EVENT] = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) }, [WMI_TAG_SCAN_EVENT] = { .min_len = sizeof(struct wmi_scan_event) }, [WMI_TAG_PEER_STA_KICKOUT_EVENT] = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, [WMI_TAG_ROAM_EVENT] = { .min_len = sizeof(struct wmi_roam_event) }, [WMI_TAG_CHAN_INFO_EVENT] = { .min_len = sizeof(struct wmi_chan_info_event) }, 
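/* Note: these min_len values are enforced in ath11k_wmi_tlv_iter() below; a TLV whose payload is shorter than its tag's min_len aborts the parse with -EINVAL. */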
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) }, [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) }, [WMI_TAG_READY_EVENT] = { .min_len = sizeof(struct wmi_ready_event_min) }, [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {.min_len = sizeof(struct wmi_service_available_event) }, [WMI_TAG_PEER_ASSOC_CONF_EVENT] = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, [WMI_TAG_STATS_EVENT] = { .min_len = sizeof(struct wmi_stats_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { .min_len = sizeof(struct wmi_fils_discovery_event) }, [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = { .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { .min_len = sizeof(struct wmi_obss_color_collision_event) }, [WMI_TAG_11D_NEW_COUNTRY_EVENT] = { .min_len = sizeof(struct wmi_11d_new_cc_ev) }, [WMI_TAG_PER_CHAIN_RSSI_STATS] = { .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = { .min_len = sizeof(struct wmi_twt_add_dialog_event) }, [WMI_TAG_P2P_NOA_INFO] = { .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) }, [WMI_TAG_P2P_NOA_EVENT] = { .min_len = sizeof(struct wmi_p2p_noa_event) }, }; #define PRIMAP(_hw_mode_) \ [_hw_mode_] = _hw_mode_##_PRI static const int ath11k_hw_mode_pri_map[] = { PRIMAP(WMI_HOST_HW_MODE_SINGLE), PRIMAP(WMI_HOST_HW_MODE_DBS), PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE), PRIMAP(WMI_HOST_HW_MODE_SBS), PRIMAP(WMI_HOST_HW_MODE_DBS_SBS), PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS), /* keep last */ PRIMAP(WMI_HOST_HW_MODE_MAX), }; static int ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data), void *data) { const void *begin = ptr; const struct wmi_tlv *tlv; u16 tlv_tag, tlv_len; int ret; while (len > 0) { if (len < sizeof(*tlv)) { ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", ptr - begin, len, sizeof(*tlv)); return -EINVAL; } tlv = ptr; tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header); tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header); ptr += sizeof(*tlv); len -= sizeof(*tlv); if (tlv_len > len) { ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", tlv_tag, ptr - begin, len, tlv_len); return -EINVAL; } if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && wmi_tlv_policies[tlv_tag].min_len && wmi_tlv_policies[tlv_tag].min_len > tlv_len) { ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", tlv_tag, ptr - begin, tlv_len, wmi_tlv_policies[tlv_tag].min_len); return -EINVAL; } ret = iter(ab, tlv_tag, tlv_len, ptr, data); if (ret) return ret; ptr += tlv_len; len -= tlv_len; } return 0; } static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { const void **tb = data; if (tag < WMI_TAG_MAX) tb[tag] = ptr; return 0; } static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb, const void *ptr, size_t len) { return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse, (void *)tb); } const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, struct sk_buff *skb, gfp_t gfp) { const void **tb; int ret; tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp); if (!tb) return 
ERR_PTR(-ENOMEM); ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len); if (ret) { kfree(tb); return ERR_PTR(ret); } return tb; } static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_cmd_hdr *cmd_hdr; int ret; u32 cmd = 0; if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) return -ENOMEM; cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id); cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = cmd; trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len); memset(skb_cb, 0, sizeof(*skb_cb)); ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); if (ret) goto err_pull; return 0; err_pull: skb_pull(skb, sizeof(struct wmi_cmd_hdr)); return ret; } int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; struct ath11k_base *ab = wmi_ab->ab; might_sleep(); if (ab->hw_params.credit_flow) { wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); }), WMI_SEND_TIMEOUT_HZ); } else { wait_event_timeout(wmi->tx_ce_desc_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -ENOBUFS); }), WMI_SEND_TIMEOUT_HZ); } if (ret == -EAGAIN) ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); if (ret == -ENOBUFS) ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", cmd_id); return ret; } static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, const void *ptr, struct ath11k_service_ext_param *param) { const struct wmi_service_ready_ext_event *ev = ptr; if (!ev) return -EINVAL; /* Move this to host based bitmap */ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits; param->default_fw_config_bits = ev->default_fw_config_bits; param->he_cap_info = ev->he_cap_info; param->mpdu_density = ev->mpdu_density; param->max_bssid_rx_filters = ev->max_bssid_rx_filters; memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet)); return 0; } static int ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, struct wmi_soc_mac_phy_hw_mode_caps *hw_caps, struct wmi_hw_mode_capabilities *wmi_hw_mode_caps, struct wmi_soc_hal_reg_capabilities *hal_reg_caps, struct wmi_mac_phy_capabilities *wmi_mac_phy_caps, u8 hw_mode_id, u8 phy_id, struct ath11k_pdev *pdev) { struct wmi_mac_phy_capabilities *mac_phy_caps; struct ath11k_base *ab = wmi_handle->wmi_ab->ab; struct ath11k_band_cap *cap_band; struct ath11k_pdev_cap *pdev_cap = &pdev->cap; u32 phy_map; u32 hw_idx, phy_idx = 0; if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps) return -EINVAL; for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) { if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id) break; phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map; while (phy_map) { phy_map >>= 1; phy_idx++; } } if (hw_idx == hw_caps->num_hw_modes) return -EINVAL; phy_idx += phy_id; if (phy_id >= hal_reg_caps->num_phy) return -EINVAL; mac_phy_caps = wmi_mac_phy_caps + phy_idx; pdev->pdev_id = mac_phy_caps->pdev_id; pdev_cap->supported_bands |= mac_phy_caps->supported_bands; pdev_cap->ampdu_density = mac_phy_caps->ampdu_density; ab->target_pdev_ids[ab->target_pdev_count].supported_bands = mac_phy_caps->supported_bands;
ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; ab->target_pdev_count++; if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) && !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP)) return -EINVAL; /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be * handled. */ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; } if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g; pdev_cap->nss_ratio_enabled = WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); pdev_cap->nss_ratio_info = WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); } /* tx/rx chainmask reported from fw depends on the actual hw chains used, * For example, for 4x4 capable macphys, first 4 chains can be used for first * mac and the remaining 4 chains can be used for the second mac or vice-versa. * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 * will be advertised for second mac or vice-versa. Compute the shift value * for tx/rx chainmask which will be used to advertise supported ht/vht rates to * mac80211. */ pdev_cap->tx_chain_mask_shift = find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); pdev_cap->rx_chain_mask_shift = find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; cap_band->phy_id = mac_phy_caps->phy_id; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g, sizeof(struct ath11k_ppe_threshold)); } if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; cap_band->phy_id = mac_phy_caps->phy_id; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); } return 0; } static int 
ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle, struct wmi_soc_hal_reg_capabilities *reg_caps, struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap, u8 phy_idx, struct ath11k_hal_reg_capabilities_ext *param) { struct wmi_hal_reg_capabilities_ext *ext_reg_cap; if (!reg_caps || !wmi_ext_reg_cap) return -EINVAL; if (phy_idx >= reg_caps->num_phy) return -EINVAL; ext_reg_cap = &wmi_ext_reg_cap[phy_idx]; param->phy_id = ext_reg_cap->phy_id; param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain; param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext; param->regcap1 = ext_reg_cap->regcap1; param->regcap2 = ext_reg_cap->regcap2; /* check if param->wireless_mode is needed */ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan; param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan; param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan; param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan; return 0; } static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, const void *evt_buf, struct ath11k_targ_cap *cap) { const struct wmi_service_ready_event *ev = evt_buf; if (!ev) { ath11k_err(ab, "%s: failed by NULL param\n", __func__); return -EINVAL; } cap->phy_capability = ev->phy_capability; cap->max_frag_entry = ev->max_frag_entry; cap->num_rf_chains = ev->num_rf_chains; cap->ht_cap_info = ev->ht_cap_info; cap->vht_cap_info = ev->vht_cap_info; cap->vht_supp_mcs = ev->vht_supp_mcs; cap->hw_min_tx_power = ev->hw_min_tx_power; cap->hw_max_tx_power = ev->hw_max_tx_power; cap->sys_cap_info = ev->sys_cap_info; cap->min_pkt_size_enable = ev->min_pkt_size_enable; cap->max_bcn_ie_size = ev->max_bcn_ie_size; cap->max_num_scan_channels = ev->max_num_scan_channels; cap->max_supported_macs = ev->max_supported_macs; cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; cap->txrx_chainmask = ev->txrx_chainmask; cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; cap->num_msdu_desc = ev->num_msdu_desc; return 0; } /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each * 4-byte word. 
*/ static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi, const u32 *wmi_svc_bm) { int i, j; for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) { do { if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32)) set_bit(j, wmi->wmi_ab->svc_map); } while (++j % WMI_SERVICE_BITS_IN_SIZE32); } } static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_ready_parse *svc_ready = data; struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0]; u16 expect_len; switch (tag) { case WMI_TAG_SERVICE_READY_EVENT: if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) return -EINVAL; break; case WMI_TAG_ARRAY_UINT32: if (!svc_ready->wmi_svc_bitmap_done) { expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32); if (len < expect_len) { ath11k_warn(ab, "invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } ath11k_wmi_service_bitmap_copy(wmi_handle, ptr); svc_ready->wmi_svc_bitmap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_ready_parse svc_ready = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_parse, &svc_ready); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready"); return 0; } struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; struct ath11k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); if (!skb) return NULL; skb_reserve(skb, WMI_SKB_HEADROOM); if (!IS_ALIGNED((unsigned long)skb->data, 4)) ath11k_warn(ab, "unaligned WMI skb data\n"); skb_put(skb, round_len); memset(skb->data, 0, round_len); return skb; } static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar, struct ieee80211_tx_info *info) { struct ath11k_base *ab = ar->ab; u32 freq = 0; if (ab->hw_params.support_off_channel_tx && ar->scan.is_roc && (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; return freq; } int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); struct wmi_mgmt_send_cmd *cmd; struct wmi_tlv *frame_tlv; struct sk_buff *skb; u32 buf_len; int ret, len; buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ? 
frame->len : WMI_MGMT_SEND_DOWNLD_LEN; len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_mgmt_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->desc_id = buf_id; cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info); cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->frame_len = frame->len; cmd->buf_len = buf_len; cmd->tx_params_valid = 0; frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, buf_len); memcpy(frame_tlv->value, frame->data, buf_len); ath11k_ce_byte_swap(frame_tlv->value, buf_len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send"); return ret; } int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, struct vdev_create_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; struct wmi_vdev_txrx_streams *txrx_streams; struct wmi_tlv *tlv; int ret, len; void *ptr; /* It can be optimized by sending tx/rx chain configuration * only for supported bands instead of always sending it for * both the bands. */ len = sizeof(*cmd) + TLV_HDR_SIZE + (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_create_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->if_id; cmd->vdev_type = param->type; cmd->vdev_subtype = param->subtype; cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX; cmd->pdev_id = param->pdev_id; cmd->mbssid_flags = param->mbssid_flags; cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id; ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); ptr = skb->data + sizeof(*cmd); len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; txrx_streams = ptr; len = sizeof(*txrx_streams); txrx_streams->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; txrx_streams->supported_tx_streams = param->chains[NL80211_BAND_2GHZ].tx; txrx_streams->supported_rx_streams = param->chains[NL80211_BAND_2GHZ].rx; txrx_streams++; txrx_streams->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; txrx_streams->supported_tx_streams = param->chains[NL80211_BAND_5GHZ].tx; txrx_streams->supported_rx_streams = param->chains[NL80211_BAND_5GHZ].rx; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_CREATE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n", param->if_id, param->type, param->subtype, macaddr, param->pdev_id); return ret; } int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id) { struct
ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_delete_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_delete_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id); return ret; } int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_stop_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_stop_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id); return ret; } int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_down_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_down_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id); return ret; } static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, struct wmi_vdev_start_req_arg *arg) { u32 center_freq1 = arg->channel.band_center_freq1; memset(chan, 0, sizeof(*chan)); chan->mhz = arg->channel.freq; chan->band_center_freq1 = arg->channel.band_center_freq1; if (arg->channel.mode == MODE_11AX_HE160) { if (arg->channel.freq > arg->channel.band_center_freq1) chan->band_center_freq1 = center_freq1 + 40; else chan->band_center_freq1 = center_freq1 - 40; chan->band_center_freq2 = arg->channel.band_center_freq1; } else if ((arg->channel.mode == MODE_11AC_VHT80_80) || (arg->channel.mode == MODE_11AX_HE80_80)) { chan->band_center_freq2 = arg->channel.band_center_freq2; } else { chan->band_center_freq2 = 0; } chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode); if (arg->channel.passive) chan->info |= WMI_CHAN_INFO_PASSIVE; if (arg->channel.allow_ibss) chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED; if (arg->channel.allow_ht) chan->info |= WMI_CHAN_INFO_ALLOW_HT; if (arg->channel.allow_vht) chan->info |= WMI_CHAN_INFO_ALLOW_VHT; if (arg->channel.allow_he) chan->info |= WMI_CHAN_INFO_ALLOW_HE; if (arg->channel.ht40plus) chan->info |= WMI_CHAN_INFO_HT40_PLUS; if (arg->channel.chan_radar) chan->info |= WMI_CHAN_INFO_DFS; if (arg->channel.freq2_radar) chan->info |= WMI_CHAN_INFO_DFS_FREQ2; chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, arg->channel.max_power) | FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, arg->channel.max_reg_power); chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, arg->channel.max_antenna_gain) | 
FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, arg->channel.max_power); } int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, bool restart) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; struct wmi_channel *chan; struct wmi_tlv *tlv; void *ptr; int ret, len; if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) return -EINVAL; len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_start_request_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_START_REQUEST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arg->vdev_id; cmd->beacon_interval = arg->bcn_intval; cmd->bcn_tx_rate = arg->bcn_tx_rate; cmd->dtim_period = arg->dtim_period; cmd->num_noa_descriptors = arg->num_noa_descriptors; cmd->preferred_rx_streams = arg->pref_rx_streams; cmd->preferred_tx_streams = arg->pref_tx_streams; cmd->cac_duration_ms = arg->cac_duration_ms; cmd->regdomain = arg->regdomain; cmd->he_ops = arg->he_ops; cmd->mbssid_flags = arg->mbssid_flags; cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id; if (!restart) { if (arg->ssid) { cmd->ssid.ssid_len = arg->ssid_len; memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); } if (arg->hidden_ssid) cmd->flags |= WMI_VDEV_START_HIDDEN_SSID; if (arg->pmf_enabled) cmd->flags |= WMI_VDEV_START_PMF_ENABLED; } cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED; if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED; ptr = skb->data + sizeof(*cmd); chan = ptr; ath11k_wmi_put_wmi_channel(chan, arg); chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE); ptr += sizeof(*chan); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, 0); /* Note: This is a nested TLV containing: * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv].. */ ptr += sizeof(*tlv); if (restart) ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_RESTART_REQUEST_CMDID); else ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_START_REQUEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n", restart ? "restart" : "start"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n", restart ? 
"restart" : "start", arg->vdev_id, arg->channel.freq, arg->channel.mode); return ret; } int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid, u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_up_cmd *cmd; struct ieee80211_bss_conf *bss_conf; struct ath11k_vif *arvif; struct sk_buff *skb; int ret; arvif = ath11k_mac_get_arvif(ar, vdev_id); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_up_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->vdev_assoc_id = aid; ether_addr_copy(cmd->vdev_bssid.addr, bssid); cmd->nontx_profile_idx = nontx_profile_idx; cmd->nontx_profile_cnt = nontx_profile_cnt; if (tx_bssid) ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid); if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) { bss_conf = &arvif->vif->bss_conf; if (bss_conf->nontransmitted) { ether_addr_copy(cmd->tx_vdev_bssid.addr, bss_conf->transmitter_bssid); cmd->nontx_profile_idx = bss_conf->bssid_index; cmd->nontx_profile_cnt = bss_conf->bssid_indicator; } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); return ret; } int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, struct peer_create_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr); cmd->peer_type = param->peer_type; cmd->vdev_id = param->vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer create vdev_id %d peer_addr %pM\n", param->vdev_id, param->peer_addr); return ret; } int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, const u8 *peer_addr, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_delete_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_delete_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ret; } int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, struct pdev_set_regdomain_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_regdomain_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 
WMI_TAG_PDEV_SET_REGDOMAIN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->reg_domain = param->current_rd_in_use; cmd->reg_domain_2g = param->current_rd_2g; cmd->reg_domain_5g = param->current_rd_5g; cmd->conformance_test_limit_2g = param->ctl_2g; cmd->conformance_test_limit_5g = param->ctl_5g; cmd->dfs_domain = param->dfs_domain; cmd->pdev_id = param->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", param->current_rd_in_use, param->current_rd_2g, param->current_rd_5g, param->dfs_domain, param->pdev_id); return ret; } int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, u32 vdev_id, u32 param_id, u32 param_val) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->vdev_id = vdev_id; cmd->param_id = param_id; cmd->param_value = param_val; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_val); return ret; } int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, u8 peer_addr[ETH_ALEN], struct peer_flush_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_flush_tids_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->peer_tid_bitmap = param->peer_tid_bitmap; cmd->vdev_id = param->vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n", param->vdev_id, peer_addr, param->peer_tid_bitmap); return ret; } int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id, const u8 *addr, dma_addr_t paddr, u8 tid, u8 ba_window_size_valid, u32 ba_window_size) { struct wmi_peer_reorder_queue_setup_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REORDER_QUEUE_SETUP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, addr); cmd->vdev_id = vdev_id; cmd->tid = tid; cmd->queue_ptr_lo = lower_32_bits(paddr); cmd->queue_ptr_hi = upper_32_bits(paddr); cmd->queue_no = tid; cmd->ba_window_size_valid = ba_window_size_valid; cmd->ba_window_size = ba_window_size; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PEER_REORDER_QUEUE_SETUP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send 
WMI_PEER_REORDER_QUEUE_SETUP\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n", addr, vdev_id, tid); return ret; } int ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, struct rx_reorder_queue_remove_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_reorder_queue_remove_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REORDER_QUEUE_REMOVE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr); cmd->vdev_id = param->vdev_id; cmd->tid_mask = param->peer_tid_bitmap; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d", param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); return ret; } int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, u32 param_value, u8 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; cmd->param_id = param_id; cmd->param_value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set param %d pdev id %d value %d\n", param_id, pdev_id, param_value); return ret; } int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, enum wmi_sta_ps_mode psmode) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_ps_mode_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->sta_ps_mode = psmode; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd sta powersave mode psmode %d vdev id %d\n", psmode, vdev_id); return ret; } int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_suspend_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_suspend_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->suspend_opt = suspend_opt; cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev suspend pdev_id %d\n", pdev_id); return ret; } int
ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_resume_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_resume_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev resume pdev id %d\n", pdev_id); return ret; } /* TODO FW Support for the cmd is not available yet. * Can be tested once the command and corresponding * event is implemented in FW */ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, enum wmi_bss_chan_info_req_type type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_bss_chan_info_req_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->req_type = type; cmd->pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev bss chan info request type %d\n", type); return ret; } int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, struct ap_ps_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_ap_ps_peer_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->param = param->param; cmd->value = param->value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n", param->vdev_id, peer_addr, param->param, param->value); return ret; } int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, u32 param, u32 param_value) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_sta_powersave_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->param = param; cmd->value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set powersave param vdev_id %d param %d value %d\n", vdev_id, param, param_value); return ret; } int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct 
wmi_force_fw_hang_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_force_fw_hang_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->type = type; cmd->delay_time_ms = delay_time_ms; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); if (ret) { ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang"); return ret; } int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, u32 param_id, u32 param_value) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->param_id = param_id; cmd->param_value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set param vdev 0x%x param %d value %d\n", vdev_id, param_id, param_value); return ret; } int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, struct stats_request_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_request_stats_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->stats_id = param->stats_id; cmd->vdev_id = param->vdev_id; cmd->pdev_id = param->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd request stats 0x%x vdev id %d pdev id %d\n", param->stats_id, param->vdev_id, param->pdev_id); return ret; } int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_get_pdev_temperature_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); return ret; } int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, u32 vdev_id, u32 bcn_ctrl_op) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_bcn_offload_ctrl_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_OFFLOAD_CTRL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; 
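/* bcn_ctrl_op selects the beacon offload control operation (such as
 * enabling or disabling beacon tx) that firmware applies to this vdev.
 */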
cmd->bcn_ctrl_op = bcn_ctrl_op; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn offload ctrl vdev id %d ctrl_op %d\n", vdev_id, bcn_ctrl_op); return ret; } int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id, const u8 *p2p_ie) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_p2p_go_set_beacon_ie_cmd *cmd; size_t p2p_ie_len, aligned_len; struct wmi_tlv *tlv; struct sk_buff *skb; int ret, len; p2p_ie_len = p2p_ie[1] + 2; aligned_len = roundup(p2p_ie_len, 4); len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->ie_buf_len = p2p_ie_len; tlv = (struct wmi_tlv *)cmd->tlv; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, p2p_ie, p2p_ie_len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); dev_kfree_skb(skb); } return ret; } int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, u32 ema_params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_bcn_tmpl_cmd *cmd; struct wmi_bcn_prb_info *bcn_prb_info; struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); struct ieee80211_vif *vif; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id); if (!arvif) { ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id); return -EINVAL; } vif = arvif->vif; len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->tim_ie_offset = offs->tim_offset; if (vif->bss_conf.csa_active) { cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; } cmd->buf_len = bcn->len; cmd->mbssid_ie_offset = offs->mbssid_off; cmd->ema_params = ema_params; ptr = skb->data + sizeof(*cmd); bcn_prb_info = ptr; len = sizeof(*bcn_prb_info); bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_PRB_INFO) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); bcn_prb_info->caps = 0; bcn_prb_info->erp = 0; ptr += sizeof(*bcn_prb_info); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, bcn->data, bcn->len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl"); return ret; } int ath11k_wmi_vdev_install_key(struct ath11k *ar, struct wmi_vdev_install_key_arg *arg) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_install_key_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; int ret, len; int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t)); len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; skb = 
ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_install_key_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arg->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); cmd->key_idx = arg->key_idx; cmd->key_flags = arg->key_flags; cmd->key_cipher = arg->key_cipher; cmd->key_len = arg->key_len; cmd->key_txmic_len = arg->key_txmic_len; cmd->key_rxmic_len = arg->key_rxmic_len; if (arg->key_rsc_counter) memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter, sizeof(struct wmi_key_seq_counter)); tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); if (arg->key_data) memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); return ret; } static inline void ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, struct peer_assoc_params *param, bool hw_crypto_disabled) { cmd->peer_flags = 0; if (param->is_wme_set) { if (param->qos_flag) cmd->peer_flags |= WMI_PEER_QOS; if (param->apsd_flag) cmd->peer_flags |= WMI_PEER_APSD; if (param->ht_flag) cmd->peer_flags |= WMI_PEER_HT; if (param->bw_40) cmd->peer_flags |= WMI_PEER_40MHZ; if (param->bw_80) cmd->peer_flags |= WMI_PEER_80MHZ; if (param->bw_160) cmd->peer_flags |= WMI_PEER_160MHZ; /* Typically if STBC is enabled for VHT it should be enabled * for HT as well **/ if (param->stbc_flag) cmd->peer_flags |= WMI_PEER_STBC; /* Typically if LDPC is enabled for VHT it should be enabled * for HT as well **/ if (param->ldpc_flag) cmd->peer_flags |= WMI_PEER_LDPC; if (param->static_mimops_flag) cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; if (param->dynamic_mimops_flag) cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; if (param->spatial_mux_flag) cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; if (param->vht_flag) cmd->peer_flags |= WMI_PEER_VHT; if (param->he_flag) cmd->peer_flags |= WMI_PEER_HE; if (param->twt_requester) cmd->peer_flags |= WMI_PEER_TWT_REQ; if (param->twt_responder) cmd->peer_flags |= WMI_PEER_TWT_RESP; } /* Suppress authorization for all AUTH modes that need 4-way handshake * (during re-association). * Authorization will be done for these modes on key installation. */ if (param->auth_flag) cmd->peer_flags |= WMI_PEER_AUTH; if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; if (!hw_crypto_disabled && param->is_assoc) cmd->peer_flags &= ~WMI_PEER_AUTH; } if (param->need_gtk_2_way) cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; /* safe mode bypass the 4-way handshake */ if (param->safe_mode_enabled) cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY | WMI_PEER_NEED_GTK_2_WAY); if (param->is_pmf_enabled) cmd->peer_flags |= WMI_PEER_PMF; /* Disable AMSDU for station transmit, if user configures it */ /* Disable AMSDU for AP transmit to 11n Stations, if user configures * it * if (param->amsdu_disable) Add after FW support **/ /* Target asserts if node is marked HT and all MCS is set to 0. 
* Mark the node as non-HT if all the mcs rates are disabled through * iwpriv **/ if (param->peer_ht_rates.num_rates == 0) cmd->peer_flags &= ~WMI_PEER_HT; } int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, struct peer_assoc_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_assoc_complete_cmd *cmd; struct wmi_vht_rate_set *mcs; struct wmi_he_rate_set *he_mcs; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; u32 peer_legacy_rates_align; u32 peer_ht_rates_align; int i, ret, len; peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates, sizeof(u32)); peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates, sizeof(u32)); len = sizeof(*cmd) + TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + sizeof(*mcs) + TLV_HDR_SIZE + (sizeof(*he_mcs) * param->peer_he_mcs_count); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_ASSOC_COMPLETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->peer_new_assoc = param->peer_new_assoc; cmd->peer_associd = param->peer_associd; ath11k_wmi_copy_peer_flags(cmd, param, test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac); cmd->peer_rate_caps = param->peer_rate_caps; cmd->peer_caps = param->peer_caps; cmd->peer_listen_intval = param->peer_listen_intval; cmd->peer_ht_caps = param->peer_ht_caps; cmd->peer_max_mpdu = param->peer_max_mpdu; cmd->peer_mpdu_density = param->peer_mpdu_density; cmd->peer_vht_caps = param->peer_vht_caps; cmd->peer_phymode = param->peer_phymode; /* Update 11ax capabilities */ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0]; cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1]; cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal; cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz; cmd->peer_he_ops = param->peer_he_ops; memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo, sizeof(param->peer_he_cap_phyinfo)); memcpy(&cmd->peer_ppet, &param->peer_ppet, sizeof(param->peer_ppet)); /* Update peer legacy rate information */ ptr += sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align); ptr += TLV_HDR_SIZE; cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; memcpy(ptr, param->peer_legacy_rates.rates, param->peer_legacy_rates.num_rates); /* Update peer HT rate information */ ptr += peer_legacy_rates_align; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align); ptr += TLV_HDR_SIZE; cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; memcpy(ptr, param->peer_ht_rates.rates, param->peer_ht_rates.num_rates); /* VHT Rates */ ptr += peer_ht_rates_align; mcs = ptr; mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) | FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE); cmd->peer_nss = param->peer_nss; /* Update bandwidth-NSS mapping */ cmd->peer_bw_rxnss_override = 0; cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; if (param->vht_capable) { mcs->rx_max_rate = param->rx_max_rate; mcs->rx_mcs_set = param->rx_mcs_set; mcs->tx_max_rate = param->tx_max_rate; mcs->tx_mcs_set = param->tx_mcs_set; } /* HE Rates */ cmd->peer_he_mcs = param->peer_he_mcs_count; cmd->min_data_rate = param->min_data_rate; ptr += sizeof(*mcs); len =
param->peer_he_mcs_count * sizeof(*he_mcs); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; /* Loop through the HE rate set */ for (i = 0; i < param->peer_he_mcs_count; i++) { he_mcs = ptr; he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HE_RATE_SET) | FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE); he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i]; he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i]; ptr += sizeof(*he_mcs); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_ASSOC_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", cmd->vdev_id, cmd->peer_associd, param->peer_mac, cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, cmd->peer_listen_intval, cmd->peer_ht_caps, cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info, cmd->peer_he_ops, cmd->peer_he_cap_info_ext, cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override); return ret; } void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg) { /* setup commonly used values */ arg->scan_req_id = 1; if (ar->state_11d == ATH11K_11D_PREPARING) arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; else arg->scan_priority = WMI_SCAN_PRIORITY_LOW; arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; arg->dwell_time_active_6g = 40; arg->dwell_time_passive_6g = 30; arg->min_rest_time = 50; arg->max_rest_time = 500; arg->repeat_probe_time = 0; arg->probe_spacing_time = 0; arg->idle_time = 0; arg->max_scan_time = 20000; arg->probe_delay = 5; arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED; arg->scan_f_chan_stat_evnt = 1; if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE, ar->ab->wmi_ab.svc_map)) arg->scan_ctrl_flags_ext |= WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE; arg->num_bssid = 1; /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be * ZEROs in probe request */ eth_broadcast_addr(arg->bssid_list[0].addr); } static inline void ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, struct scan_req_params *param) { /* Scan events subscription */ if (param->scan_ev_started) cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; if (param->scan_ev_completed) cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; if (param->scan_ev_bss_chan) cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; if (param->scan_ev_foreign_chan) cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN; if (param->scan_ev_dequeued) cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; if (param->scan_ev_preempted) cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; if (param->scan_ev_start_failed) cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; if (param->scan_ev_restarted) cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; if (param->scan_ev_foreign_chn_exit) cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT; if (param->scan_ev_suspended) cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED; if 
(param->scan_ev_resumed) cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED; /** Set scan control flags */ cmd->scan_ctrl_flags = 0; if (param->scan_f_passive) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; if (param->scan_f_strict_passive_pch) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN; if (param->scan_f_promisc_mode) cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS; if (param->scan_f_capture_phy_err) cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR; if (param->scan_f_half_rate) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT; if (param->scan_f_quarter_rate) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT; if (param->scan_f_cck_rates) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; if (param->scan_f_ofdm_rates) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; if (param->scan_f_chan_stat_evnt) cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; if (param->scan_f_filter_prb_req) cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; if (param->scan_f_bcast_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ; if (param->scan_f_offchan_mgmt_tx) cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX; if (param->scan_f_offchan_data_tx) cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX; if (param->scan_f_force_active_dfs_chn) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS; if (param->scan_f_add_tpc_ie_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ; if (param->scan_f_add_ds_ie_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; if (param->scan_f_add_spoofed_mac_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ; if (param->scan_f_add_rand_seq_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ; if (param->scan_f_en_ie_whitelist_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ; /* for adaptive scan mode using 3 bits (21 - 23 bits) */ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, param->adaptive_dwell_time_mode); cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext; } int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, struct scan_req_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_start_scan_cmd *cmd; struct wmi_ssid *ssid = NULL; struct wmi_mac_addr *bssid; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; int i, ret, len; u32 *tmp_ptr; u16 extraie_len_with_pad = 0; struct hint_short_ssid *s_ssid = NULL; struct hint_bssid *hint_bssid = NULL; len = sizeof(*cmd); len += TLV_HDR_SIZE; if (params->num_chan) len += params->num_chan * sizeof(u32); len += TLV_HDR_SIZE; if (params->num_ssids) len += params->num_ssids * sizeof(*ssid); len += TLV_HDR_SIZE; if (params->num_bssid) len += sizeof(*bssid) * params->num_bssid; len += TLV_HDR_SIZE; if (params->extraie.len && params->extraie.len <= 0xFFFF) extraie_len_with_pad = roundup(params->extraie.len, sizeof(u32)); len += extraie_len_with_pad; if (params->num_hint_bssid) len += TLV_HDR_SIZE + params->num_hint_bssid * sizeof(struct hint_bssid); if (params->num_hint_s_ssid) len += TLV_HDR_SIZE + params->num_hint_s_ssid * sizeof(struct hint_short_ssid); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->scan_id = params->scan_id; cmd->scan_req_id = params->scan_req_id; cmd->vdev_id = params->vdev_id; cmd->scan_priority = params->scan_priority; cmd->notify_scan_events = params->notify_scan_events; 
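/* Translate the host scan flags into WMI scan_ctrl_flags, then copy
 * the dwell/rest timing parameters into the command as-is.
 */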
ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params); cmd->dwell_time_active = params->dwell_time_active; cmd->dwell_time_active_2g = params->dwell_time_active_2g; cmd->dwell_time_passive = params->dwell_time_passive; cmd->dwell_time_active_6g = params->dwell_time_active_6g; cmd->dwell_time_passive_6g = params->dwell_time_passive_6g; cmd->min_rest_time = params->min_rest_time; cmd->max_rest_time = params->max_rest_time; cmd->repeat_probe_time = params->repeat_probe_time; cmd->probe_spacing_time = params->probe_spacing_time; cmd->idle_time = params->idle_time; cmd->max_scan_time = params->max_scan_time; cmd->probe_delay = params->probe_delay; cmd->burst_duration = params->burst_duration; cmd->num_chan = params->num_chan; cmd->num_bssid = params->num_bssid; cmd->num_ssids = params->num_ssids; cmd->ie_len = params->extraie.len; cmd->n_probes = params->n_probes; ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr); ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr); ptr += sizeof(*cmd); len = params->num_chan * sizeof(u32); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; tmp_ptr = ptr; for (i = 0; i < params->num_chan; ++i) tmp_ptr[i] = params->chan_list[i]; ptr += len; len = params->num_ssids * sizeof(*ssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; if (params->num_ssids) { ssid = ptr; for (i = 0; i < params->num_ssids; ++i) { ssid->ssid_len = params->ssid[i].length; memcpy(ssid->ssid, params->ssid[i].ssid, params->ssid[i].length); ssid++; } } ptr += (params->num_ssids * sizeof(*ssid)); len = params->num_bssid * sizeof(*bssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; bssid = ptr; if (params->num_bssid) { for (i = 0; i < params->num_bssid; ++i) { ether_addr_copy(bssid->addr, params->bssid_list[i].addr); bssid++; } } ptr += params->num_bssid * sizeof(*bssid); len = extraie_len_with_pad; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; if (extraie_len_with_pad) memcpy(ptr, params->extraie.ptr, params->extraie.len); ptr += extraie_len_with_pad; if (params->num_hint_s_ssid) { len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; s_ssid = ptr; for (i = 0; i < params->num_hint_s_ssid; ++i) { s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; s_ssid++; } ptr += len; } if (params->num_hint_bssid) { len = params->num_hint_bssid * sizeof(struct hint_bssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; hint_bssid = ptr; for (i = 0; i < params->num_hint_bssid; ++i) { hint_bssid->freq_flags = params->hint_bssid[i].freq_flags; ether_addr_copy(&params->hint_bssid[i].bssid.addr[0], &hint_bssid->bssid.addr[0]); hint_bssid++; } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan"); return ret; } int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, u32 vdev_id, struct ath11k_reg_tpc_power_info *param) { struct ath11k_pdev_wmi *wmi = ar->wmi;
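/* Command layout: fixed wmi_vdev_set_tpc_power_cmd header followed by a
 * WMI_TAG_ARRAY_STRUCT TLV holding one wmi_vdev_ch_power_info entry per
 * power level.
 */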
struct wmi_vdev_set_tpc_power_cmd *cmd; struct wmi_vdev_ch_power_info *ch; struct sk_buff *skb; struct wmi_tlv *tlv; u8 *ptr; int i, ret, len, array_len; array_len = sizeof(*ch) * param->num_pwr_levels; len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->psd_power = param->is_psd_power; cmd->eirp_power = param->eirp_power; cmd->power_type_6ghz = param->ap_power_type; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); ptr += sizeof(*cmd); tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, array_len); ptr += TLV_HDR_SIZE; ch = (struct wmi_vdev_ch_power_info *)ptr; for (i = 0; i < param->num_pwr_levels; i++, ch++) { ch->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CH_POWER_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*ch) - TLV_HDR_SIZE); ch->chan_cfreq = param->chan_power_info[i].chan_cfreq; ch->tx_power = param->chan_power_info[i].tx_power; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n", ch->chan_cfreq, ch->tx_power); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n"); dev_kfree_skb(skb); return ret; } return 0; } int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, struct scan_cancel_param *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_stop_scan_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_stop_scan_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->requestor = param->requester; cmd->scan_id = param->scan_id; cmd->pdev_id = param->pdev_id; /* stop the scan with the corresponding scan_id */ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { /* Cancelling all scans */ cmd->req_type = WMI_SCAN_STOP_ALL; } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { /* Cancelling VAP scans */ cmd->req_type = WMI_SCN_STOP_VAP_ALL; } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { /* Cancelling specific scan */ cmd->req_type = WMI_SCAN_STOP_ONE; } else { ath11k_warn(ar->ab, "invalid scan cancel param %d", param->req_type); dev_kfree_skb(skb); return -EINVAL; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STOP_SCAN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan"); return ret; } int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, struct scan_chan_list_params *chan_list) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_scan_chan_list_cmd *cmd; struct sk_buff *skb; struct wmi_channel *chan_info; struct channel_param *tchan_info; struct wmi_tlv *tlv; void *ptr; int i, ret, len; u16 num_send_chans, num_sends = 0, max_chan_limit = 0; u32 *reg1, *reg2; tchan_info = chan_list->ch_param; while (chan_list->nallchans) { len = sizeof(*cmd) + TLV_HDR_SIZE; max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / sizeof(*chan_info); if (chan_list->nallchans > max_chan_limit) 
num_send_chans = max_chan_limit; else num_send_chans = chan_list->nallchans; chan_list->nallchans -= num_send_chans; len += sizeof(*chan_info) * num_send_chans; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_scan_chan_list_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = chan_list->pdev_id; cmd->num_scan_chans = num_send_chans; if (num_sends) cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", num_send_chans, len, cmd->pdev_id, num_sends); ptr = skb->data + sizeof(*cmd); len = sizeof(*chan_info) * num_send_chans; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); ptr += TLV_HDR_SIZE; for (i = 0; i < num_send_chans; ++i) { chan_info = ptr; memset(chan_info, 0, sizeof(*chan_info)); len = sizeof(*chan_info); chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); reg1 = &chan_info->reg_info_1; reg2 = &chan_info->reg_info_2; chan_info->mhz = tchan_info->mhz; chan_info->band_center_freq1 = tchan_info->cfreq1; chan_info->band_center_freq2 = tchan_info->cfreq2; if (tchan_info->is_chan_passive) chan_info->info |= WMI_CHAN_INFO_PASSIVE; if (tchan_info->allow_he) chan_info->info |= WMI_CHAN_INFO_ALLOW_HE; else if (tchan_info->allow_vht) chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT; else if (tchan_info->allow_ht) chan_info->info |= WMI_CHAN_INFO_ALLOW_HT; if (tchan_info->half_rate) chan_info->info |= WMI_CHAN_INFO_HALF_RATE; if (tchan_info->quarter_rate) chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE; if (tchan_info->psc_channel) chan_info->info |= WMI_CHAN_INFO_PSC; if (tchan_info->dfs_set) chan_info->info |= WMI_CHAN_INFO_DFS; chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, tchan_info->phy_mode); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, tchan_info->minpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, tchan_info->maxpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, tchan_info->maxregpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS, tchan_info->reg_class_id); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, tchan_info->antennamax); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, tchan_info->maxregpower); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "chan scan list chan[%d] = %u, chan_info->info %8x\n", i, chan_info->mhz, chan_info->info); ptr += sizeof(*chan_info); tchan_info++; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d", num_send_chans); num_sends++; } return 0; } int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, struct wmi_wmm_params_all_arg *param, enum wmi_wmm_params_type wmm_param_type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_wmm_params_cmd *cmd; struct wmi_wmm_params *wmm_param; struct wmi_wmm_params_arg *wmi_wmm_arg; struct sk_buff *skb; int ret, ac; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->wmm_param_type = wmm_param_type; 
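/* One wmi_wmm_params TLV is filled per access category below; the switch
 * maps each WME_AC_* index to its argument block (illustrative summary):
 * WME_AC_BE -> param->ac_be, WME_AC_BK -> param->ac_bk,
 * WME_AC_VI -> param->ac_vi, WME_AC_VO -> param->ac_vo.
 */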
for (ac = 0; ac < WME_NUM_AC; ac++) { switch (ac) { case WME_AC_BE: wmi_wmm_arg = &param->ac_be; break; case WME_AC_BK: wmi_wmm_arg = &param->ac_bk; break; case WME_AC_VI: wmi_wmm_arg = &param->ac_vi; break; case WME_AC_VO: wmi_wmm_arg = &param->ac_vo; break; } wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; wmm_param->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*wmm_param) - TLV_HDR_SIZE); wmm_param->aifs = wmi_wmm_arg->aifs; wmm_param->cwmin = wmi_wmm_arg->cwmin; wmm_param->cwmax = wmi_wmm_arg->cwmax; wmm_param->txoplimit = wmi_wmm_arg->txop; wmm_param->acm = wmi_wmm_arg->acm; wmm_param->no_ack = wmi_wmm_arg->no_ack; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin, wmm_param->cwmax, wmm_param->txoplimit, wmm_param->acm, wmm_param->no_ack); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_WMM_PARAMS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params"); return ret; } int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_dfs_phyerr_offload_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id); return ret; } int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 initiator, u32 reason) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_delba_send_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_delba_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->initiator = initiator; cmd->reasoncode = reason; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_DELBA_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", vdev_id, mac, tid, initiator, reason); return ret; } int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 status) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_setresponse_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_setresponse_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->statuscode = status; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); if (ret) {
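/* As in the other senders in this file, ath11k_wmi_cmd_send() consumes
 * the skb only on success, so the error path frees it explicitly.
 */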
ath11k_warn(ar->ab, "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", vdev_id, mac, tid, status); return ret; } int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 buf_size) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_send_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->buffersize = buf_size; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_ADDBA_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", vdev_id, mac, tid, buf_size); return ret; } int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_clear_resp_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n", vdev_id, mac); return ret; } int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_pktlog_filter_cmd *cmd; struct wmi_pdev_pktlog_filter_info *info; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; int ret, len; len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); cmd->num_mac = 1; cmd->enable = enable; ptr = skb->data + sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, sizeof(*info)); ptr += TLV_HDR_SIZE; info = ptr; ether_addr_copy(info->peer_macaddr.addr, addr); info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*info) - TLV_HDR_SIZE); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_FILTER_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter"); return ret; } int ath11k_wmi_send_init_country_cmd(struct ath11k *ar, struct wmi_init_country_params init_cc_params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_init_country_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_init_country_cmd *)skb->data; cmd->tlv_header = 
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_INIT_COUNTRY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; switch (init_cc_params.flags) { case ALPHA_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA; memcpy((u8 *)&cmd->cc_info.alpha2, init_cc_params.cc_info.alpha2, 3); break; case CC_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE; cmd->cc_info.country_code = init_cc_params.cc_info.country_code; break; case REGDMN_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN; cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id; break; default: ath11k_warn(ar->ab, "unknown cc params flags: 0x%x", init_cc_params.flags); ret = -EINVAL; goto err; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_INIT_COUNTRY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", ret); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country"); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, struct wmi_set_current_country_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_set_current_country_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_set_current_country_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(&cmd->new_alpha2, &param->alpha2, 3); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set current country pdev id %d alpha2 %c%c\n", ar->pdev->pdev_id, param->alpha2[0], param->alpha2[1]); return ret; } int ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, struct thermal_mitigation_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_therm_throt_config_request_cmd *cmd; struct wmi_therm_throt_level_config_info *lvl_conf; struct wmi_tlv *tlv; struct sk_buff *skb; int i, ret, len; len = sizeof(*cmd) + TLV_HDR_SIZE + THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; cmd->enable = param->enable; cmd->dc = param->dc; cmd->dc_per_event = param->dc_per_event; cmd->therm_throt_levels = THERMAL_LEVELS; tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, (THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info))); lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data + sizeof(*cmd) + TLV_HDR_SIZE); for (i = 0; i < THERMAL_LEVELS; i++) { lvl_conf->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE); lvl_conf->temp_lwm = param->levelconf[i].tmplwm; lvl_conf->temp_hwm = param->levelconf[i].tmphwm; lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent; lvl_conf->prio = param->levelconf[i].priority; lvl_conf++; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID); if (ret) { ath11k_warn(ar->ab,
"failed to send THERM_THROT_SET_CONF cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n", ar->pdev->pdev_id, param->enable, param->dc, param->dc_per_event, THERMAL_LEVELS); return ret; } int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, struct wmi_11d_scan_start_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_11d_scan_start_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_11d_scan_start_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->scan_period_msec = param->scan_period_msec; cmd->start_interval_msec = param->start_interval_msec; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd 11d scan start vdev id %d period %d ms internal %d ms\n", cmd->vdev_id, cmd->scan_period_msec, cmd->start_interval_msec); return ret; } int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_11d_scan_stop_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd 11d scan stop vdev id %d\n", cmd->vdev_id); return ret; } int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pktlog_enable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pktlog_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); cmd->evlist = pktlog_filter; cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable"); return ret; } int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pktlog_disable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pktlog_disable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_DISABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable"); return ret; } void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params 
*twt_params) { twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS; twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE; twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP; twt_params->congestion_thresh_teardown = ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN; twt_params->congestion_thresh_critical = ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL; twt_params->interference_thresh_teardown = ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN; twt_params->interference_thresh_setup = ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP; twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP; twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN; twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS; twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS; twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT; twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL; twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL; twt_params->remove_sta_slot_interval = ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL; /* TODO add MBSSID support */ twt_params->mbss_support = 0; } int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id, struct wmi_twt_enable_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_enable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_enable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; cmd->default_slot_size = params->default_slot_size; cmd->congestion_thresh_setup = params->congestion_thresh_setup; cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; cmd->congestion_thresh_critical = params->congestion_thresh_critical; cmd->interference_thresh_teardown = params->interference_thresh_teardown; cmd->interference_thresh_setup = params->interference_thresh_setup; cmd->min_no_sta_setup = params->min_no_sta_setup; cmd->min_no_sta_teardown = params->min_no_sta_teardown; cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; cmd->min_no_twt_slots = params->min_no_twt_slots; cmd->max_no_sta_twt = params->max_no_sta_twt; cmd->mode_check_interval = params->mode_check_interval; cmd->add_sta_slot_interval = params->add_sta_slot_interval; cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; cmd->mbss_support = params->mbss_support; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); dev_kfree_skb(skb); return ret; } ar->twt_enabled = 1; ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable"); return 0; } int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_disable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_disable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); 
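/* ar->twt_enabled is deliberately left set on this error path; it is
 * cleared only once the disable command has been sent successfully
 * (below).
 */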
dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable"); ar->twt_enabled = 0; return 0; } int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar, struct wmi_twt_add_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_add_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; cmd->wake_intvl_us = params->wake_intvl_us; cmd->wake_intvl_mantis = params->wake_intvl_mantis; cmd->wake_dura_us = params->wake_dura_us; cmd->sp_offset_us = params->sp_offset_us; cmd->flags = params->twt_cmd; if (params->flag_bcast) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST; if (params->flag_trigger) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER; if (params->flag_flow_type) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE; if (params->flag_protection) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to add twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n", cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us, cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us, cmd->flags); return 0; } int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar, struct wmi_twt_del_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_del_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to delete twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt del dialog vdev %u dialog id %u\n", cmd->vdev_id, cmd->dialog_id); return 0; } int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar, struct wmi_twt_pause_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_pause_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_PAUSE_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to pause twt dialog: %d", ret); 
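/* Illustrative note: the dialog_id used by the pause/resume/delete
 * helpers is assumed to match the one given to
 * ath11k_wmi_send_twt_add_dialog_cmd() when the dialog was created.
 */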
dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt pause dialog vdev %u dialog id %u\n", cmd->vdev_id, cmd->dialog_id); return 0; } int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar, struct wmi_twt_resume_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_resume_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_RESUME_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; cmd->sp_offset_us = params->sp_offset_us; cmd->next_twt_size = params->next_twt_size; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to resume twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n", cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us, cmd->next_twt_size); return 0; } int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, struct ieee80211_he_obss_pd *he_obss_pd) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_obss_spatial_reuse_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = he_obss_pd->enable; cmd->obss_min = he_obss_pd->min_offset; cmd->obss_max = he_obss_pd->max_offset; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse"); return 0; } int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if 
(!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg obss color enable pdev_id %d bss color enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id, u8 bss_color, u32 period, bool enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_obss_color_collision_cfg_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION : ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE; cmd->current_bss_color = bss_color; cmd->detection_period_ms = period; cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS; cmd->free_slot_expiry_time_ms = 0; cmd->flags = 0; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n", cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, cmd->detection_period_ms, cmd->scan_period_ms); return 0; } int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, bool enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_bss_color_change_enable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = enable ? 
1 : 0; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bss color change enable id %d enable %d\n", cmd->vdev_id, cmd->enable); return 0; } int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, struct sk_buff *tmpl) { struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len; struct wmi_fils_discovery_tmpl_cmd *cmd; aligned_len = roundup(tmpl->len, 4); len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set FILS discovery template\n", vdev_id); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FILS_DISCOVERY_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->buf_len = tmpl->len; ptr = skb->data + sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, tmpl->data, tmpl->len); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send FILS discovery template command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl"); return 0; } int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, struct sk_buff *tmpl) { struct wmi_probe_tmpl_cmd *cmd; struct wmi_bcn_prb_info *probe_info; struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len = roundup(tmpl->len, 4); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set probe response template\n", vdev_id); len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_probe_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->buf_len = tmpl->len; ptr = skb->data + sizeof(*cmd); probe_info = ptr; len = sizeof(*probe_info); probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_PRB_INFO) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); probe_info->caps = 0; probe_info->erp = 0; ptr += sizeof(*probe_info); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, tmpl->data, tmpl->len); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send probe response template command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd prb tmpl"); return 0; } int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, bool unsol_bcast_probe_resp_enabled) { struct sk_buff *skb; int ret, len; struct wmi_fils_discovery_cmd *cmd; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set %s interval to %u TU\n", vdev_id, unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" : "FILS discovery", interval); len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_fils_discovery_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->interval = interval; cmd->config = unsol_bcast_probe_resp_enabled; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send FILS discovery enable/disable command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils"); return 0; } static void ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_obss_color_collision_event *ev; struct ath11k_vif *arvif; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision"); rcu_read_lock(); ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; if (!ev) { ath11k_warn(ab, "failed to fetch obss color collision ev"); goto exit; } arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); if (!arvif) { ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n", ev->vdev_id); goto exit; } switch (ev->evt_type) { case WMI_BSS_COLOR_COLLISION_DETECTION: ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, 0); ath11k_dbg(ab, ATH11K_DBG_WMI, "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); break; case WMI_BSS_COLOR_COLLISION_DISABLE: case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: break; default: ath11k_warn(ab, "received unknown obss color collision detection event\n"); } exit: kfree(tb); rcu_read_unlock(); } static void ath11k_fill_band_to_mac_param(struct ath11k_base *soc, struct wmi_host_pdev_band_to_mac *band_to_mac) { u8 i; struct ath11k_hal_reg_capabilities_ext *hal_reg_cap; struct ath11k_pdev *pdev; for (i = 0; i < soc->num_radios; i++) { pdev = &soc->pdevs[i]; hal_reg_cap = &soc->hal_reg_cap[i]; band_to_mac[i].pdev_id = pdev->pdev_id; switch (pdev->cap.supported_bands) { case WMI_HOST_WLAN_2G_5G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; break; case WMI_HOST_WLAN_2G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan; break; case WMI_HOST_WLAN_5G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; break; default: break; } } } static void ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, struct target_resource_config *tg_cfg) { wmi_cfg->num_vdevs = tg_cfg->num_vdevs; wmi_cfg->num_peers = tg_cfg->num_peers; wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers; wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs; wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys; wmi_cfg->num_tids = tg_cfg->num_tids; wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit; wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask; wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask; wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0]; wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1]; wmi_cfg->rx_timeout_pri[2] = 
tg_cfg->rx_timeout_pri[2]; wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3]; wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode; wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req; wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev; wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev; wmi_cfg->roam_offload_max_ap_profiles = tg_cfg->roam_offload_max_ap_profiles; wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups; wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems; wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode; wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size; wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries; wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size; wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim; wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = tg_cfg->rx_skip_defrag_timeout_dup_detection_check; wmi_cfg->vow_config = tg_cfg->vow_config; wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev; wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc; wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries; wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs; wmi_cfg->num_tdls_conn_table_entries = tg_cfg->num_tdls_conn_table_entries; wmi_cfg->beacon_tx_offload_max_vdev = tg_cfg->beacon_tx_offload_max_vdev; wmi_cfg->num_multicast_filter_entries = tg_cfg->num_multicast_filter_entries; wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters; wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern; wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size; wmi_cfg->max_tdls_concurrent_sleep_sta = tg_cfg->max_tdls_concurrent_sleep_sta; wmi_cfg->max_tdls_concurrent_buffer_sta = tg_cfg->max_tdls_concurrent_buffer_sta; wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate; wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs; wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels; wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules; wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size; wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters; wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id; wmi_cfg->flag1 = tg_cfg->flag1; wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support; wmi_cfg->sched_params = tg_cfg->sched_params; wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count; wmi_cfg->host_service_flags &= ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET; wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt; wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period; } static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, struct wmi_init_cmd_param *param) { struct ath11k_base *ab = wmi->wmi_ab->ab; struct sk_buff *skb; struct wmi_init_cmd *cmd; struct wmi_resource_config *cfg; struct wmi_pdev_set_hw_mode_cmd_param *hw_mode; struct wmi_pdev_band_to_mac *band_to_mac; struct wlan_host_mem_chunk *host_mem_chunks; struct wmi_tlv *tlv; size_t ret, len; void *ptr; u32 hw_mode_len = 0; u16 idx; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE + (param->num_band_to_mac * sizeof(*band_to_mac)); len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len + (param->num_mem_chunks ? 
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_init_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ptr = skb->data + sizeof(*cmd); cfg = ptr; ath11k_wmi_copy_resource_config(cfg, param->res_cfg); cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE); ptr += sizeof(*cfg); host_mem_chunks = ptr + TLV_HDR_SIZE; len = sizeof(struct wlan_host_mem_chunk); for (idx = 0; idx < param->num_mem_chunks; ++idx) { host_mem_chunks[idx].tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) | FIELD_PREP(WMI_TLV_LEN, len); host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; host_mem_chunks[idx].size = param->mem_chunks[idx].len; host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; ath11k_dbg(ab, ATH11K_DBG_WMI, "host mem chunk req_id %d paddr 0x%llx len %d\n", param->mem_chunks[idx].req_id, (u64)param->mem_chunks[idx].paddr, param->mem_chunks[idx].len); } cmd->num_host_mem_chunks = param->num_mem_chunks; len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks; /* num_mem_chunks is zero */ tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE + len; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { hw_mode = ptr; hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE); hw_mode->hw_mode_index = param->hw_mode_id; hw_mode->num_band_to_mac = param->num_band_to_mac; ptr += sizeof(*hw_mode); len = param->num_band_to_mac * sizeof(*band_to_mac); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; len = sizeof(*band_to_mac); for (idx = 0; idx < param->num_band_to_mac; idx++) { band_to_mac = ptr; band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BAND_TO_MAC) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id; band_to_mac->start_freq = param->band_to_mac[idx].start_freq; band_to_mac->end_freq = param->band_to_mac[idx].end_freq; ptr += sizeof(*band_to_mac); } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init"); return 0; } int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id) { struct ath11k_wmi_pdev_lro_config_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE); get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send lro cfg req wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd lro config pdev_id 0x%x\n", pdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab) { unsigned long time_left; time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready, 
WMI_SERVICE_READY_TIMEOUT_HZ); if (!time_left) return -ETIMEDOUT; return 0; } int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab) { unsigned long time_left; time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready, WMI_SERVICE_READY_TIMEOUT_HZ); if (!time_left) return -ETIMEDOUT; return 0; } int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, enum wmi_host_hw_mode_config_type mode) { struct wmi_pdev_set_hw_mode_cmd_param *cmd; struct sk_buff *skb; struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; int len; int ret; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = WMI_PDEV_ID_SOC; cmd->hw_mode_index = mode; ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index); return 0; } int ath11k_wmi_cmd_init(struct ath11k_base *ab) { struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; struct wmi_init_cmd_param init_param; struct target_resource_config config; memset(&init_param, 0, sizeof(init_param)); memset(&config, 0, sizeof(config)); ab->hw_params.hw_ops->wmi_init_config(ab, &config); if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, ab->wmi_ab.svc_map)) config.is_reg_cc_ext_event_supported = 1; memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config)); init_param.res_cfg = &wmi_ab->wlan_resource_config; init_param.num_mem_chunks = wmi_ab->num_mem_chunks; init_param.hw_mode_id = wmi_ab->preferred_hw_mode; init_param.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; init_param.num_band_to_mac = ab->num_radios; ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param); } int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, struct ath11k_wmi_vdev_spectral_conf_param *param) { struct ath11k_wmi_vdev_spectral_conf_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); memcpy(&cmd->param, param, sizeof(*param)); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send spectral scan config wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev spectral scan configure vdev_id 0x%x\n", param->vdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id, u32 trigger, u32 enable) { struct ath11k_wmi_vdev_spectral_enable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->trigger_cmd = trigger; cmd->enable_cmd = enable; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, 
"failed to send spectral enable wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev spectral scan enable vdev id 0x%x\n", vdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param) { struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = param->pdev_id; cmd->module_id = param->module_id; cmd->base_paddr_lo = param->base_paddr_lo; cmd->base_paddr_hi = param->base_paddr_hi; cmd->head_idx_paddr_lo = param->head_idx_paddr_lo; cmd->head_idx_paddr_hi = param->head_idx_paddr_hi; cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo; cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi; cmd->num_elems = param->num_elems; cmd->buf_size = param->buf_size; cmd->num_resp_per_event = param->num_resp_per_event; cmd->event_timeout_ms = param->event_timeout_ms; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PDEV_DMA_RING_CFG_REQ_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send dma ring cfg req wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev dma ring cfg req pdev_id 0x%x\n", param->pdev_id); return 0; err: dev_kfree_skb(skb); return ret; } static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY) return -EPROTO; if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry) return -ENOBUFS; parse->num_buf_entry++; return 0; } static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA) return -EPROTO; if (parse->num_meta >= parse->fixed.num_meta_data_entry) return -ENOBUFS; parse->num_meta++; return 0; } static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; int ret; switch (tag) { case WMI_TAG_DMA_BUF_RELEASE: memcpy(&parse->fixed, ptr, sizeof(struct ath11k_wmi_dma_buf_release_fixed_param)); parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id); break; case WMI_TAG_ARRAY_STRUCT: if (!parse->buf_entry_done) { parse->num_buf_entry = 0; parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_buf_entry_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n", ret); return ret; } parse->buf_entry_done = true; } else if (!parse->meta_data_done) { parse->num_meta = 0; parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_buf_meta_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n", ret); return ret; } parse->meta_data_done = true; } break; default: break; } return 0; } static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_dma_buf_release_parse parse = { }; struct ath11k_dbring_buf_release_event param; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_dma_buf_parse, 
&parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release"); param.fixed = parse.fixed; param.buf_entry = parse.buf_entry; param.num_buf_entry = parse.num_buf_entry; param.meta_data = parse.meta_data; param.num_meta = parse.num_meta; ret = ath11k_dbring_buffer_release_event(ab, &param); if (ret) { ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret); return; } } static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct wmi_hw_mode_capabilities *hw_mode_cap; u32 phy_map = 0; if (tag != WMI_TAG_HW_MODE_CAPABILITIES) return -EPROTO; if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes) return -ENOBUFS; hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities, hw_mode_id); svc_rdy_ext->n_hw_mode_caps++; phy_map = hw_mode_cap->phy_id_map; while (phy_map) { svc_rdy_ext->tot_phy_id++; phy_map = phy_map >> 1; } return 0; } static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct wmi_hw_mode_capabilities *hw_mode_caps; enum wmi_host_hw_mode_config_type mode, pref; u32 i; int ret; svc_rdy_ext->n_hw_mode_caps = 0; svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr; ret = ath11k_wmi_tlv_iter(soc, ptr, len, ath11k_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(soc, "failed to parse tlv %d\n", ret); return ret; } i = 0; while (i < svc_rdy_ext->n_hw_mode_caps) { hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i]; mode = hw_mode_caps->hw_mode_id; pref = soc->wmi_ab.preferred_hw_mode; if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) { svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps; soc->wmi_ab.preferred_hw_mode = mode; } i++; } ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n", soc->wmi_ab.preferred_hw_mode); if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX) return -EINVAL; return 0; } static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; if (tag != WMI_TAG_MAC_PHY_CAPABILITIES) return -EPROTO; if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id) return -ENOBUFS; len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities)); if (!svc_rdy_ext->n_mac_phy_caps) { svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id, len, GFP_ATOMIC); if (!svc_rdy_ext->mac_phy_caps) return -ENOMEM; } memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len); svc_rdy_ext->n_mac_phy_caps++; return 0; } static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT) return -EPROTO; if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy) return -ENOBUFS; svc_rdy_ext->n_ext_hal_reg_caps++; return 0; } static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct ath11k_hal_reg_capabilities_ext reg_cap; int ret; u32 i; svc_rdy_ext->n_ext_hal_reg_caps = 0; svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr; ret =
ath11k_wmi_tlv_iter(soc, ptr, len, ath11k_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(soc, "failed to parse tlv %d\n", ret); return ret; } for (i = 0; i < svc_rdy_ext->param.num_phy; i++) { ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle, svc_rdy_ext->soc_hal_reg_caps, svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap); if (ret) { ath11k_warn(soc, "failed to extract reg cap %d\n", i); return ret; } memcpy(&soc->hal_reg_cap[reg_cap.phy_id], &reg_cap, sizeof(reg_cap)); } return 0; } static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id; u32 phy_id_map; int pdev_index = 0; int ret; svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr; svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy; soc->num_radios = 0; soc->target_pdev_count = 0; phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map; while (phy_id_map && soc->num_radios < MAX_RADIOS) { ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle, svc_rdy_ext->hw_caps, svc_rdy_ext->hw_mode_caps, svc_rdy_ext->soc_hal_reg_caps, svc_rdy_ext->mac_phy_caps, hw_mode_id, soc->num_radios, &soc->pdevs[pdev_index]); if (ret) { ath11k_warn(soc, "failed to extract mac caps, idx: %d\n", soc->num_radios); return ret; } soc->num_radios++; /* For QCA6390, save mac_phy capability in the same pdev */ if (soc->hw_params.single_pdev_only) pdev_index = 0; else pdev_index = soc->num_radios; /* TODO: mac_phy_cap prints */ phy_id_map >>= 1; } /* For QCA6390, set num_radios to 1 because host manages * both 2G and 5G radio in one pdev. * Set pdev_id to 0; pdev_id 0 means SoC level. 
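 * (This is also why the loop above pins pdev_index to 0 when
 * single_pdev_only is set: every radio's capabilities are pulled into
 * the one pdev.)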
*/ if (soc->hw_params.single_pdev_only) { soc->num_radios = 1; soc->pdevs[0].pdev_id = 0; } if (!soc->reg_info_store) { soc->reg_info_store = kcalloc(soc->num_radios, sizeof(*soc->reg_info_store), GFP_ATOMIC); if (!soc->reg_info_store) return -ENOMEM; } return 0; } static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_ring_caps_parse *parse = data; if (tag != WMI_TAG_DMA_RING_CAPABILITIES) return -EPROTO; parse->n_dma_ring_caps++; return 0; } static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab, u32 num_cap) { size_t sz; void *ptr; sz = num_cap * sizeof(struct ath11k_dbring_cap); ptr = kzalloc(sz, GFP_ATOMIC); if (!ptr) return -ENOMEM; ab->db_caps = ptr; ab->num_db_cap = num_cap; return 0; } static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab) { kfree(ab->db_caps); ab->db_caps = NULL; ab->num_db_cap = 0; } static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data; struct wmi_dma_ring_capabilities *dma_caps; struct ath11k_dbring_cap *dir_buff_caps; int ret; u32 i; dma_caps_parse->n_dma_ring_caps = 0; dma_caps = (struct wmi_dma_ring_capabilities *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_ring_caps_parse, dma_caps_parse); if (ret) { ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret); return ret; } if (!dma_caps_parse->n_dma_ring_caps) return 0; if (ab->num_db_cap) { ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n"); return 0; } ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); if (ret) return ret; dir_buff_caps = ab->db_caps; for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) { ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id); ret = -EINVAL; goto free_dir_buff; } dir_buff_caps[i].id = dma_caps[i].module_id; dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id); dir_buff_caps[i].min_elem = dma_caps[i].min_elem; dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz; dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align; } return 0; free_dir_buff: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; int ret; switch (tag) { case WMI_TAG_SERVICE_READY_EXT_EVENT: ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr, &svc_rdy_ext->param); if (ret) { ath11k_warn(ab, "unable to extract ext params\n"); return ret; } break; case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS: svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr; svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes; break; case WMI_TAG_SOC_HAL_REG_CAPABILITIES: ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr, svc_rdy_ext); if (ret) return ret; break; case WMI_TAG_ARRAY_STRUCT: if (!svc_rdy_ext->hw_mode_done) { ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr, svc_rdy_ext); if (ret) return ret; svc_rdy_ext->hw_mode_done = true; } else if (!svc_rdy_ext->mac_phy_done) { svc_rdy_ext->n_mac_phy_caps = 0; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return ret; } svc_rdy_ext->mac_phy_done = true; } else if (!svc_rdy_ext->ext_hal_reg_done) { 
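/* SERVICE_READY_EXT carries several WMI_TAG_ARRAY_STRUCT TLVs that all
 * share this one tag, so they are consumed positionally via the *_done
 * flags: hw_mode caps, then mac_phy caps, then ext HAL reg caps
 * (handled here), then the two chainmask arrays and the OEM DMA ring
 * caps (only marked done, not parsed), and finally the DMA ring caps.
 */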
ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); if (ret) return ret; svc_rdy_ext->ext_hal_reg_done = true; } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { svc_rdy_ext->mac_phy_chainmask_combo_done = true; } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { svc_rdy_ext->mac_phy_chainmask_cap_done = true; } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { svc_rdy_ext->oem_dma_ring_cap_done = true; } else if (!svc_rdy_ext->dma_ring_cap_done) { ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, &svc_rdy_ext->dma_caps_parse); if (ret) return ret; svc_rdy_ext->dma_ring_cap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_ext_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); goto err; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext"); if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) complete(&ab->wmi_ab.service_ready); kfree(svc_rdy_ext.mac_phy_caps); return 0; err: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext2_parse *parse = data; int ret; switch (tag) { case WMI_TAG_ARRAY_STRUCT: if (!parse->dma_ring_cap_done) { ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, &parse->dma_caps_parse); if (ret) return ret; parse->dma_ring_cap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_ext2_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2); if (ret) { ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); goto err; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2"); complete(&ab->wmi_ab.service_ready); return 0; err: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_vdev_start_resp_event *vdev_rsp) { const void **tb; const struct wmi_vdev_start_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev start resp ev"); kfree(tb); return -EPROTO; } memset(vdev_rsp, 0, sizeof(*vdev_rsp)); vdev_rsp->vdev_id = ev->vdev_id; vdev_rsp->requestor_id = ev->requestor_id; vdev_rsp->resp_type = ev->resp_type; vdev_rsp->status = ev->status; vdev_rsp->chain_mask = ev->chain_mask; vdev_rsp->smps_mode = ev->smps_mode; vdev_rsp->mac_id = ev->mac_id; vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power; kfree(tb); return 0; } static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band, u32 num_reg_rules, struct cur_reg_rule *reg_rule_ptr) { struct cur_reg_rule *reg_rule = reg_rule_ptr; u32 count; ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n", band, num_reg_rules); for (count = 0; count < num_reg_rules; count++) { ath11k_dbg(ab, ATH11K_DBG_WMI, "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n", count + 1, 
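/* args: (start_freq - end_freq @ max_bw) (ant_gain, reg_power)
 * (FLAGS flags)
 */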
reg_rule->start_freq, reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain, reg_rule->reg_power, reg_rule->flags); reg_rule++; } } static struct cur_reg_rule *create_reg_rules_from_wmi(u32 num_reg_rules, struct wmi_regulatory_rule_struct *wmi_reg_rule) { struct cur_reg_rule *reg_rule_ptr; u32 count; reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC); if (!reg_rule_ptr) return NULL; for (count = 0; count < num_reg_rules; count++) { reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ, wmi_reg_rule[count].freq_info); reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ, wmi_reg_rule[count].freq_info); reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS, wmi_reg_rule[count].flag_info); } return reg_rule_ptr; } static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, struct sk_buff *skb, struct cur_regulatory_info *reg_info) { const void **tb; const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr; struct wmi_regulatory_rule_struct *wmi_reg_rule; u32 num_2ghz_reg_rules, num_5ghz_reg_rules; int ret; ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n"); tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT]; if (!chan_list_event_hdr) { ath11k_warn(ab, "failed to fetch reg chan list update ev\n"); kfree(tb); return -EPROTO; } reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules; reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules; if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) { ath11k_warn(ab, "No regulatory rules available in the event info\n"); kfree(tb); return -EINVAL; } memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN); reg_info->dfs_region = chan_list_event_hdr->dfs_region; reg_info->phybitmap = chan_list_event_hdr->phybitmap; reg_info->num_phy = chan_list_event_hdr->num_phy; reg_info->phy_id = chan_list_event_hdr->phy_id; reg_info->ctry_code = chan_list_event_hdr->country_id; reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code; ath11k_dbg(ab, ATH11K_DBG_WMI, "status_code %s", ath11k_cc_status_to_str(reg_info->status_code)); reg_info->status_code = ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code); reg_info->is_ext_reg_event = false; reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz; reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz; reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz; reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz; num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; ath11k_dbg(ab, ATH11K_DBG_WMI, "cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, reg_info->min_bw_5ghz, reg_info->max_bw_5ghz); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", num_2ghz_reg_rules, num_5ghz_reg_rules); wmi_reg_rule = (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr + sizeof(*chan_list_event_hdr) + sizeof(struct wmi_tlv)); if (num_2ghz_reg_rules) { 
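/* The rules sit back to back after the fixed event header and the
 * array's TLV header:
 *
 *	[ cc_event hdr | tlv hdr | 2 GHz rules | 5 GHz rules ]
 *
 * so the 2 GHz block is converted first and wmi_reg_rule is advanced
 * past it before the 5 GHz block is handled.
 */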
reg_info->reg_rules_2ghz_ptr = create_reg_rules_from_wmi(num_2ghz_reg_rules, wmi_reg_rule); if (!reg_info->reg_rules_2ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "2 GHz", num_2ghz_reg_rules, reg_info->reg_rules_2ghz_ptr); } if (num_5ghz_reg_rules) { wmi_reg_rule += num_2ghz_reg_rules; reg_info->reg_rules_5ghz_ptr = create_reg_rules_from_wmi(num_5ghz_reg_rules, wmi_reg_rule); if (!reg_info->reg_rules_5ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "5 GHz", num_5ghz_reg_rules, reg_info->reg_rules_5ghz_ptr); } ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n"); kfree(tb); return 0; } static struct cur_reg_rule *create_ext_reg_rules_from_wmi(u32 num_reg_rules, struct wmi_regulatory_ext_rule *wmi_reg_rule) { struct cur_reg_rule *reg_rule_ptr; u32 count; reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC); if (!reg_rule_ptr) return NULL; for (count = 0; count < num_reg_rules; count++) { reg_rule_ptr[count].start_freq = u32_get_bits(wmi_reg_rule[count].freq_info, REG_RULE_START_FREQ); reg_rule_ptr[count].end_freq = u32_get_bits(wmi_reg_rule[count].freq_info, REG_RULE_END_FREQ); reg_rule_ptr[count].max_bw = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_MAX_BW); reg_rule_ptr[count].reg_power = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_REG_PWR); reg_rule_ptr[count].ant_gain = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_ANT_GAIN); reg_rule_ptr[count].flags = u32_get_bits(wmi_reg_rule[count].flag_info, REG_RULE_FLAGS); reg_rule_ptr[count].psd_flag = u32_get_bits(wmi_reg_rule[count].psd_power_info, REG_RULE_PSD_INFO); reg_rule_ptr[count].psd_eirp = u32_get_bits(wmi_reg_rule[count].psd_power_info, REG_RULE_PSD_EIRP); } return reg_rule_ptr; } static u8 ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules, const struct wmi_regulatory_ext_rule *rule) { u8 num_invalid_5ghz_rules = 0; u32 count, start_freq; for (count = 0; count < num_reg_rules; count++) { start_freq = u32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); if (start_freq >= ATH11K_MIN_6G_FREQ) num_invalid_5ghz_rules++; } return num_invalid_5ghz_rules; } static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, struct sk_buff *skb, struct cur_regulatory_info *reg_info) { const void **tb; const struct wmi_reg_chan_list_cc_ext_event *ev; struct wmi_regulatory_ext_rule *ext_wmi_reg_rule; u32 num_2ghz_reg_rules, num_5ghz_reg_rules; u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; u32 total_reg_rules = 0; int ret, i, j, num_invalid_5ghz_ext_rules = 0; ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n"); tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n"); kfree(tb); return -EPROTO; } reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules; reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules; reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] = ev->num_6ghz_reg_rules_ap_lpi; reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] = ev->num_6ghz_reg_rules_ap_sp; reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->num_6ghz_reg_rules_ap_vlp; for (i = 0; i < 
WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] = ev->num_6ghz_reg_rules_client_lpi[i]; reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] = ev->num_6ghz_reg_rules_client_sp[i]; reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->num_6ghz_reg_rules_client_vlp[i]; } num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; total_reg_rules += num_2ghz_reg_rules; total_reg_rules += num_5ghz_reg_rules; if ((num_2ghz_reg_rules > MAX_REG_RULES) || (num_5ghz_reg_rules > MAX_REG_RULES)) { ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n", num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES); kfree(tb); return -EINVAL; } for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i]; if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n", i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES); kfree(tb); return -EINVAL; } total_reg_rules += num_6ghz_reg_rules_ap[i]; } for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { num_6ghz_client[WMI_REG_INDOOR_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i]; num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i]; num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]; if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) || (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] > MAX_6GHZ_REG_RULES) || (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] > MAX_6GHZ_REG_RULES)) { ath11k_warn(ab, "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n", i); kfree(tb); return -EINVAL; } } if (!total_reg_rules) { ath11k_warn(ab, "No reg rules available\n"); kfree(tb); return -EINVAL; } memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); reg_info->dfs_region = ev->dfs_region; reg_info->phybitmap = ev->phybitmap; reg_info->num_phy = ev->num_phy; reg_info->phy_id = ev->phy_id; reg_info->ctry_code = ev->country_id; reg_info->reg_dmn_pair = ev->domain_code; ath11k_dbg(ab, ATH11K_DBG_WMI, "status_code %s", ath11k_cc_status_to_str(reg_info->status_code)); reg_info->status_code = ath11k_wmi_cc_setting_code_to_reg(ev->status_code); reg_info->is_ext_reg_event = true; reg_info->min_bw_2ghz = ev->min_bw_2ghz; reg_info->max_bw_2ghz = ev->max_bw_2ghz; reg_info->min_bw_5ghz = ev->min_bw_5ghz; reg_info->max_bw_5ghz = ev->max_bw_5ghz; reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] = ev->min_bw_6ghz_ap_lpi; reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] = ev->max_bw_6ghz_ap_lpi; reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->min_bw_6ghz_ap_sp; reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->max_bw_6ghz_ap_sp; reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->min_bw_6ghz_ap_vlp; reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->max_bw_6ghz_ap_vlp; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP], reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP], reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP], 
reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP], reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP], reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->min_bw_6ghz_client_lpi[i]; reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->max_bw_6ghz_client_lpi[i]; reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->min_bw_6ghz_client_sp[i]; reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->max_bw_6ghz_client_sp[i]; reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->min_bw_6ghz_client_vlp[i]; reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->max_bw_6ghz_client_vlp[i]; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", ath11k_6ghz_client_type_to_str(i), reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i], reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]); } ath11k_dbg(ab, ATH11K_DBG_WMI, "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, reg_info->min_bw_5ghz, reg_info->max_bw_5ghz, reg_info->phybitmap); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", num_2ghz_reg_rules, num_5ghz_reg_rules); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d", num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP], num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP], num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]); j = WMI_REG_DEFAULT_CLIENT; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", num_6ghz_client[WMI_REG_INDOOR_AP][j], num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); j = WMI_REG_SUBORDINATE_CLIENT; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", num_6ghz_client[WMI_REG_INDOOR_AP][j], num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); ext_wmi_reg_rule = (struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) + sizeof(struct wmi_tlv)); if (num_2ghz_reg_rules) { reg_info->reg_rules_2ghz_ptr = create_ext_reg_rules_from_wmi(num_2ghz_reg_rules, ext_wmi_reg_rule); if (!reg_info->reg_rules_2ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "2 GHz", num_2ghz_reg_rules, reg_info->reg_rules_2ghz_ptr); } ext_wmi_reg_rule += num_2ghz_reg_rules; /* Firmware might include 6 GHz reg rule in 5 GHz rule list * for few countries along with separate 6 GHz rule. * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list * causes intersect check to be true, and same rules will be * shown multiple times in iw cmd. 
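 * A rule is treated as a 6 GHz rule here when its start frequency
 * lies at or above ATH11K_MIN_6G_FREQ, see
 * ath11k_invalid_5ghz_reg_ext_rules_from_wmi().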
* Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list. */ num_invalid_5ghz_ext_rules = ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules, ext_wmi_reg_rule); if (num_invalid_5ghz_ext_rules) { ath11k_dbg(ab, ATH11K_DBG_WMI, "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules", reg_info->alpha2, reg_info->num_5ghz_reg_rules, num_invalid_5ghz_ext_rules); num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules; reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules; } if (num_5ghz_reg_rules) { reg_info->reg_rules_5ghz_ptr = create_ext_reg_rules_from_wmi(num_5ghz_reg_rules, ext_wmi_reg_rule); if (!reg_info->reg_rules_5ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "5 GHz", num_5ghz_reg_rules, reg_info->reg_rules_5ghz_ptr); } /* The number of 5 GHz reg rules was adjusted above, but ext_wmi_reg_rule * must still be advanced past all of the rules the firmware sent, the * invalid ones included. * * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases. */ ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules); for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { reg_info->reg_rules_6ghz_ap_ptr[i] = create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i], ext_wmi_reg_rule); if (!reg_info->reg_rules_6ghz_ap_ptr[i]) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i), num_6ghz_reg_rules_ap[i], reg_info->reg_rules_6ghz_ap_ptr[i]); ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i]; } for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j)); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->reg_rules_6ghz_client_ptr[j][i] = create_ext_reg_rules_from_wmi(num_6ghz_client[j][i], ext_wmi_reg_rule); if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, ath11k_6ghz_client_type_to_str(i), num_6ghz_client[j][i], reg_info->reg_rules_6ghz_client_ptr[j][i]); ext_wmi_reg_rule += num_6ghz_client[j][i]; } } reg_info->client_type = ev->client_type; reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] = ev->domain_code_6ghz_ap_lpi; reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->domain_code_6ghz_ap_sp; reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->domain_code_6ghz_ap_vlp; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n", ath11k_6ghz_client_type_to_str(reg_info->client_type), reg_info->rnr_tpe_usable, reg_info->unspecified_ap_usable, ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp)); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->domain_code_6ghz_client_lpi[i]; reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->domain_code_6ghz_client_sp[i]; reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->domain_code_6ghz_client_vlp[i]; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n", ath11k_6ghz_client_type_to_str(i), 
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i]) ); } reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz client_type %s 6 GHz super domain %s", ath11k_6ghz_client_type_to_str(reg_info->client_type), ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id)); ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n"); kfree(tb); return 0; } static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_delete_resp_event *peer_del_resp) { const void **tb; const struct wmi_peer_delete_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer delete resp ev"); kfree(tb); return -EPROTO; } memset(peer_del_resp, 0, sizeof(*peer_del_resp)); peer_del_resp->vdev_id = ev->vdev_id; ether_addr_copy(peer_del_resp->peer_macaddr.addr, ev->peer_macaddr.addr); kfree(tb); return 0; } static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id) { const void **tb; const struct wmi_vdev_delete_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev delete resp ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id, u32 *tx_status) { const void **tb; const struct wmi_bcn_tx_status_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch bcn tx status ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; *tx_status = ev->tx_status; kfree(tb); return 0; } static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id) { const void **tb; const struct wmi_vdev_stopped_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev stop ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_mgmt_rx_parse *parse = data; switch (tag) { case WMI_TAG_MGMT_RX_HDR: parse->fixed = ptr; break; case WMI_TAG_ARRAY_BYTE: if (!parse->frame_buf_done) { parse->frame_buf = ptr; parse->frame_buf_done = true; } break; } return 0; } static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct mgmt_rx_event_params *hdr) { struct wmi_tlv_mgmt_rx_parse parse = { }; const struct wmi_mgmt_rx_hdr *ev; const u8 *frame; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_mgmt_rx_parse, &parse); if (ret) { ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n", 
ret); return ret; } ev = parse.fixed; frame = parse.frame_buf; if (!ev || !frame) { ath11k_warn(ab, "failed to fetch mgmt rx hdr"); return -EPROTO; } hdr->pdev_id = ev->pdev_id; hdr->chan_freq = ev->chan_freq; hdr->channel = ev->channel; hdr->snr = ev->snr; hdr->rate = ev->rate; hdr->phy_mode = ev->phy_mode; hdr->buf_len = ev->buf_len; hdr->status = ev->status; hdr->flags = ev->flags; hdr->rssi = ev->rssi; hdr->tsf_delta = ev->tsf_delta; memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl)); if (skb->len < (frame - skb->data) + hdr->buf_len) { ath11k_warn(ab, "invalid length in mgmt rx hdr ev"); return -EPROTO; } /* shift the sk_buff to point to `frame` */ skb_trim(skb, 0); skb_put(skb, frame - skb->data); skb_pull(skb, frame - skb->data); skb_put(skb, hdr->buf_len); ath11k_ce_byte_swap(skb->data, hdr->buf_len); return 0; } static int wmi_process_mgmt_tx_comp(struct ath11k *ar, struct wmi_mgmt_tx_compl_event *tx_compl_param) { struct sk_buff *msdu; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; int num_mgmt; spin_lock_bh(&ar->txmgmt_idr_lock); msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id); if (!msdu) { ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); return -ENOENT; } idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); skb_cb = ATH11K_SKB_CB(msdu); dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); + memset(&info->status, 0, sizeof(info->status)); + info->status.rates[0].idx = -1; + if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !tx_compl_param->status) { info->flags |= IEEE80211_TX_STAT_ACK; if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, ar->ab->wmi_ab.svc_map)) info->status.ack_signal = tx_compl_param->ack_rssi; } ieee80211_tx_status_irqsafe(ar->hw, msdu); num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); /* WARN when we received this event without doing any mgmt tx */ if (num_mgmt < 0) WARN_ON_ONCE(1); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "mgmt tx comp pending %d desc id %d\n", num_mgmt, tx_compl_param->desc_id); if (!num_mgmt) wake_up(&ar->txmgmt_empty_waitq); return 0; } static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_mgmt_tx_compl_event *param) { const void **tb; const struct wmi_mgmt_tx_compl_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch mgmt tx compl ev"); kfree(tb); return -EPROTO; } param->pdev_id = ev->pdev_id; param->desc_id = ev->desc_id; param->status = ev->status; param->ack_rssi = ev->ack_rssi; kfree(tb); return 0; } static void ath11k_wmi_event_scan_started(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_STARTING: ar->scan.state = ATH11K_SCAN_RUNNING; if (ar->scan.is_roc) ieee80211_ready_on_channel(ar->hw); complete(&ar->scan.started); break; } } static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case 
ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_STARTING: complete(&ar->scan.started); __ath11k_mac_scan_finish(ar); break; } } static void ath11k_wmi_event_scan_completed(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: /* One suspected reason scan can be completed while starting is * if firmware fails to deliver all scan events to the host, * e.g. when transport pipe is full. This has been observed * with spectral scan phyerr events starving wmi transport * pipe. In such case the "scan completed" event should be (and * is) ignored by the host as it may be just firmware's scan * state machine recovering. */ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: __ath11k_mac_scan_finish(ar); break; } } static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = NULL; break; } } static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); if (ar->scan.is_roc && ar->scan.roc_freq == freq) complete(&ar->scan.on_channel); break; } } static const char * ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type, enum wmi_scan_completion_reason reason) { switch (type) { case WMI_SCAN_EVENT_STARTED: return "started"; case WMI_SCAN_EVENT_COMPLETED: switch (reason) { case WMI_SCAN_REASON_COMPLETED: return "completed"; case WMI_SCAN_REASON_CANCELLED: return "completed [cancelled]"; case WMI_SCAN_REASON_PREEMPTED: return "completed [preempted]"; case WMI_SCAN_REASON_TIMEDOUT: return "completed [timedout]"; case WMI_SCAN_REASON_INTERNAL_FAILURE: return "completed [internal err]"; case WMI_SCAN_REASON_MAX: break; } return "completed [unknown]"; case WMI_SCAN_EVENT_BSS_CHANNEL: return "bss channel"; case WMI_SCAN_EVENT_FOREIGN_CHAN: return "foreign channel"; case WMI_SCAN_EVENT_DEQUEUED: return "dequeued"; case WMI_SCAN_EVENT_PREEMPTED: return "preempted"; case WMI_SCAN_EVENT_START_FAILED: return "start failed"; case WMI_SCAN_EVENT_RESTARTED: return "restarted"; case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: return "foreign channel exit"; default: return "unknown"; } } static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_scan_event *scan_evt_param) { const void **tb; const struct wmi_scan_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_SCAN_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch scan ev"); 
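/* As in the other pullers, the tag table allocated by
 * ath11k_wmi_tlv_parse_alloc() must be freed on this error path too.
 */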
kfree(tb); return -EPROTO; } scan_evt_param->event_type = ev->event_type; scan_evt_param->reason = ev->reason; scan_evt_param->channel_freq = ev->channel_freq; scan_evt_param->scan_req_id = ev->scan_req_id; scan_evt_param->scan_id = ev->scan_id; scan_evt_param->vdev_id = ev->vdev_id; scan_evt_param->tsf_timestamp = ev->tsf_timestamp; kfree(tb); return 0; } static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_sta_kickout_arg *arg) { const void **tb; const struct wmi_peer_sta_kickout_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer sta kickout ev"); kfree(tb); return -EPROTO; } arg->mac_addr = ev->peer_macaddr.addr; kfree(tb); return 0; } static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_roam_event *roam_ev) { const void **tb; const struct wmi_roam_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_ROAM_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch roam ev"); kfree(tb); return -EPROTO; } roam_ev->vdev_id = ev->vdev_id; roam_ev->reason = ev->reason; roam_ev->rssi = ev->rssi; kfree(tb); return 0; } static int freq_to_idx(struct ath11k *ar, int freq) { struct ieee80211_supported_band *sband; int band, ch, idx = 0; for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = ar->hw->wiphy->bands[band]; if (!sband) continue; for (ch = 0; ch < sband->n_channels; ch++, idx++) if (sband->channels[ch].center_freq == freq) goto exit; } exit: return idx; } static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_chan_info_event *ch_info_ev) { const void **tb; const struct wmi_chan_info_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_CHAN_INFO_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch chan info ev"); kfree(tb); return -EPROTO; } ch_info_ev->err_code = ev->err_code; ch_info_ev->freq = ev->freq; ch_info_ev->cmd_flags = ev->cmd_flags; ch_info_ev->noise_floor = ev->noise_floor; ch_info_ev->rx_clear_count = ev->rx_clear_count; ch_info_ev->cycle_count = ev->cycle_count; ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; ch_info_ev->rx_frame_count = ev->rx_frame_count; ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; ch_info_ev->vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) { const void **tb; const struct wmi_pdev_bss_chan_info_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev bss chan info ev"); kfree(tb); return -EPROTO; } bss_ch_info_ev->pdev_id = ev->pdev_id; bss_ch_info_ev->freq = ev->freq; bss_ch_info_ev->noise_floor = ev->noise_floor; bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; 
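/* The counters below arrive as low/high 32-bit halves; a consumer is
 * expected to recombine them, e.g. (sketch):
 *
 *	u64 cycle_count = ((u64)cycle_count_high << 32) | cycle_count_low;
 */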
bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; kfree(tb); return 0; } static int ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_vdev_install_key_complete_arg *arg) { const void **tb; const struct wmi_vdev_install_key_compl_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev install key compl ev"); kfree(tb); return -EPROTO; } arg->vdev_id = ev->vdev_id; arg->macaddr = ev->peer_macaddr.addr; arg->key_idx = ev->key_idx; arg->key_flags = ev->key_flags; arg->status = ev->status; kfree(tb); return 0; } static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_assoc_conf_arg *peer_assoc_conf) { const void **tb; const struct wmi_peer_assoc_conf_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer assoc conf ev"); kfree(tb); return -EPROTO; } peer_assoc_conf->vdev_id = ev->vdev_id; peer_assoc_conf->macaddr = ev->peer_macaddr.addr; kfree(tb); return 0; } static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, struct ath11k_fw_stats_pdev *dst) { dst->ch_noise_floor = src->chan_nf; dst->tx_frame_count = src->tx_frame_count; dst->rx_frame_count = src->rx_frame_count; dst->rx_clear_count = src->rx_clear_count; dst->cycle_count = src->cycle_count; dst->phy_err_count = src->phy_err_count; dst->chan_tx_power = src->chan_tx_pwr; } static void ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, struct ath11k_fw_stats_pdev *dst) { dst->comp_queued = src->comp_queued; dst->comp_delivered = src->comp_delivered; dst->msdu_enqued = src->msdu_enqued; dst->mpdu_enqued = src->mpdu_enqued; dst->wmm_drop = src->wmm_drop; dst->local_enqued = src->local_enqued; dst->local_freed = src->local_freed; dst->hw_queued = src->hw_queued; dst->hw_reaped = src->hw_reaped; dst->underrun = src->underrun; dst->hw_paused = src->hw_paused; dst->tx_abort = src->tx_abort; dst->mpdus_requeued = src->mpdus_requeued; dst->tx_ko = src->tx_ko; dst->tx_xretry = src->tx_xretry; dst->data_rc = src->data_rc; dst->self_triggers = src->self_triggers; dst->sw_retry_failure = src->sw_retry_failure; dst->illgl_rate_phy_err = src->illgl_rate_phy_err; dst->pdev_cont_xretry = src->pdev_cont_xretry; dst->pdev_tx_timeout = src->pdev_tx_timeout; dst->pdev_resets = src->pdev_resets; dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure; dst->phy_underrun = src->phy_underrun; dst->txop_ovf = src->txop_ovf; dst->seq_posted = src->seq_posted; dst->seq_failed_queueing = src->seq_failed_queueing; dst->seq_completed = src->seq_completed; dst->seq_restarted = src->seq_restarted; 
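/* Like the other pull helpers, this is a plain field-for-field copy
 * from the firmware layout into ath11k_fw_stats_pdev, so the
 * scnprintf()-based fill helpers further below never touch raw WMI
 * structs.
 */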
dst->mu_seq_posted = src->mu_seq_posted; dst->mpdus_sw_flush = src->mpdus_sw_flush; dst->mpdus_hw_filter = src->mpdus_hw_filter; dst->mpdus_truncated = src->mpdus_truncated; dst->mpdus_ack_failed = src->mpdus_ack_failed; dst->mpdus_expired = src->mpdus_expired; } static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, struct ath11k_fw_stats_pdev *dst) { dst->mid_ppdu_route_change = src->mid_ppdu_route_change; dst->status_rcvd = src->status_rcvd; dst->r0_frags = src->r0_frags; dst->r1_frags = src->r1_frags; dst->r2_frags = src->r2_frags; dst->r3_frags = src->r3_frags; dst->htt_msdus = src->htt_msdus; dst->htt_mpdus = src->htt_mpdus; dst->loc_msdus = src->loc_msdus; dst->loc_mpdus = src->loc_mpdus; dst->oversize_amsdu = src->oversize_amsdu; dst->phy_errs = src->phy_errs; dst->phy_err_drop = src->phy_err_drop; dst->mpdu_errs = src->mpdu_errs; dst->rx_ovfl_errs = src->rx_ovfl_errs; } static void ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src, struct ath11k_fw_stats_vdev *dst) { int i; dst->vdev_id = src->vdev_id; dst->beacon_snr = src->beacon_snr; dst->data_snr = src->data_snr; dst->num_rx_frames = src->num_rx_frames; dst->num_rts_fail = src->num_rts_fail; dst->num_rts_success = src->num_rts_success; dst->num_rx_err = src->num_rx_err; dst->num_rx_discard = src->num_rx_discard; dst->num_tx_not_acked = src->num_tx_not_acked; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) dst->num_tx_frames[i] = src->num_tx_frames[i]; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i]; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i]; for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) dst->tx_rate_history[i] = src->tx_rate_history[i]; for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) dst->beacon_rssi_history[i] = src->beacon_rssi_history[i]; } static void ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src, struct ath11k_fw_stats_bcn *dst) { dst->vdev_id = src->vdev_id; dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt; dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt; } static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_fw_stats_parse *parse = data; const struct wmi_stats_event *ev = parse->ev; struct ath11k_fw_stats *stats = parse->stats; struct ath11k *ar; struct ath11k_vif *arvif; struct ieee80211_sta *sta; struct ath11k_sta *arsta; const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr; int j, ret = 0; if (tag != WMI_TAG_RSSI_STATS) return -EPROTO; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d mac %pM\n", stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr); arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id); if (!arvif) { ath11k_warn(ab, "not found vif for vdev id %d\n", stats_rssi->vdev_id); ret = -EPROTO; goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "stats bssid %pM vif %p\n", arvif->bssid, arvif->vif); sta = ieee80211_find_sta_by_ifaddr(ar->hw, arvif->bssid, NULL); if (!sta) { ath11k_dbg(ab, ATH11K_DBG_WMI, "not found station of bssid %pM for rssi chain\n", arvif->bssid); goto exit; } arsta = ath11k_sta_to_arsta(sta); BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) { arsta->chain_signal[j] 
= stats_rssi->rssi_avg_beacon[j]; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats beacon rssi[%d] %d data rssi[%d] %d\n", j, stats_rssi->rssi_avg_beacon[j], j, stats_rssi->rssi_avg_data[j]); } exit: rcu_read_unlock(); return ret; } static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, struct wmi_tlv_fw_stats_parse *parse, const void *ptr, u16 len) { struct ath11k_fw_stats *stats = parse->stats; const struct wmi_stats_event *ev = parse->ev; struct ath11k *ar; struct ath11k_vif *arvif; struct ieee80211_sta *sta; struct ath11k_sta *arsta; int i, ret = 0; const void *data = ptr; if (!ev) { ath11k_warn(ab, "failed to fetch update stats ev"); return -EPROTO; } stats->stats_id = 0; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); for (i = 0; i < ev->num_pdev_stats; i++) { const struct wmi_pdev_stats *src; struct ath11k_fw_stats_pdev *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_PDEV_STAT; data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_pdev_stats_base(&src->base, dst); ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst); ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst); list_add_tail(&dst->list, &stats->pdevs); } for (i = 0; i < ev->num_vdev_stats; i++) { const struct wmi_vdev_stats *src; struct ath11k_fw_stats_vdev *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_VDEV_STAT; arvif = ath11k_mac_get_arvif(ar, src->vdev_id); if (arvif) { sta = ieee80211_find_sta_by_ifaddr(ar->hw, arvif->bssid, NULL); if (sta) { arsta = ath11k_sta_to_arsta(sta); arsta->rssi_beacon = src->beacon_snr; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d snr %d\n", src->vdev_id, src->beacon_snr); } else { ath11k_dbg(ab, ATH11K_DBG_WMI, "not found station of bssid %pM for vdev stat\n", arvif->bssid); } } data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_vdev_stats(src, dst); list_add_tail(&dst->list, &stats->vdevs); } for (i = 0; i < ev->num_bcn_stats; i++) { const struct wmi_bcn_stats *src; struct ath11k_fw_stats_bcn *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_BCN_STAT; data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_bcn_stats(src, dst); list_add_tail(&dst->list, &stats->bcn); } exit: rcu_read_unlock(); return ret; } static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_fw_stats_parse *parse = data; int ret = 0; switch (tag) { case WMI_TAG_STATS_EVENT: parse->ev = (struct wmi_stats_event *)ptr; parse->stats->pdev_id = parse->ev->pdev_id; break; case WMI_TAG_ARRAY_BYTE: ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); break; case WMI_TAG_PER_CHAIN_RSSI_STATS: parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr; if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT) parse->rssi_num = parse->rssi->num_per_chain_rssi_stats; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats id 0x%x num chain %d\n", parse->ev->stats_id, parse->rssi_num); break; case WMI_TAG_ARRAY_STRUCT: if (parse->rssi_num && !parse->chain_rssi_done) { ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_rssi_chain_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse rssi chain %d\n", ret); return ret; } parse->chain_rssi_done = true; } break; default: break; } return ret; } int 
ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, struct ath11k_fw_stats *stats) { struct wmi_tlv_fw_stats_parse parse = { }; stats->stats_id = 0; parse.stats = stats; return ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_fw_stats_parse, &parse); } static void ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath11k PDEV stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Channel noise floor", pdev->ch_noise_floor); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Channel TX power", pdev->chan_tx_power); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "TX frame count", pdev->tx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "RX frame count", pdev->rx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "RX clear count", pdev->rx_clear_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Cycle count", pdev->cycle_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PHY error count", pdev->phy_err_count); *length = len; } static void ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n%30s\n", "ath11k PDEV TX stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "===================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HTT cookies queued", pdev->comp_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HTT cookies disp.", pdev->comp_delivered); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDU queued", pdev->msdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDU queued", pdev->mpdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs dropped", pdev->wmm_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Local enqued", pdev->local_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Local freed", pdev->local_freed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HW queued", pdev->hw_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PPDUs reaped", pdev->hw_reaped); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Num underruns", pdev->underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Num HW Paused", pdev->hw_paused); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PPDUs cleaned", pdev->tx_abort); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs requeued", pdev->mpdus_requeued); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PPDU OK", pdev->tx_ko); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Excessive retries", pdev->tx_xretry); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Sched self triggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Dropped due to SW retries", pdev->sw_retry_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Illegal rate phy errors", pdev->illgl_rate_phy_err); len += scnprintf(buf + len, buf_len - 
len, "%30s %10u\n", "PDEV continuous xretry", pdev->pdev_cont_xretry); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "TX timeout", pdev->pdev_tx_timeout); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PDEV resets", pdev->pdev_resets); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Stateless TIDs alloc failures", pdev->stateless_tid_alloc_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PHY underrun", pdev->phy_underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "MPDU is more than txop limit", pdev->txop_ovf); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences posted", pdev->seq_posted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num seq failed queueing ", pdev->seq_failed_queueing); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences completed ", pdev->seq_completed); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences restarted ", pdev->seq_restarted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MU sequences posted ", pdev->mu_seq_posted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS truncated ", pdev->mpdus_truncated); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS expired ", pdev->mpdus_expired); *length = len; } static void ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n%30s\n", "ath11k PDEV RX stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "===================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Mid PPDU route change", pdev->mid_ppdu_route_change); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Tot. 
number of statuses", pdev->status_rcvd); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 0", pdev->r0_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 1", pdev->r1_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 2", pdev->r2_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 3", pdev->r3_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs delivered to HTT", pdev->htt_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs delivered to HTT", pdev->htt_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs delivered to stack", pdev->loc_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs delivered to stack", pdev->loc_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Oversized AMSUs", pdev->oversize_amsdu); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PHY errors", pdev->phy_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PHY errors drops", pdev->phy_err_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Overflow errors", pdev->rx_ovfl_errs); *length = len; } static void ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar, const struct ath11k_fw_stats_vdev *vdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id); u8 *vif_macaddr; int i; /* VDEV stats has all the active VDEVs of other PDEVs as well, * ignoring those not part of requested PDEV */ if (!arvif) return; vif_macaddr = arvif->vif->addr; len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "VDEV ID", vdev->vdev_id); len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", "VDEV MAC address", vif_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "beacon snr", vdev->beacon_snr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "data snr", vdev->data_snr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx frames", vdev->num_rx_frames); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rts fail", vdev->num_rts_fail); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rts success", vdev->num_rts_success); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx err", vdev->num_rx_err); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx discard", vdev->num_rx_discard); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num tx not acked", vdev->num_tx_not_acked); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames", i, vdev->num_tx_frames[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames retries", i, vdev->num_tx_frames_retries[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames failures", i, vdev->num_tx_frames_failures[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] 0x%08x\n", "tx rate history", i, vdev->tx_rate_history[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) len += scnprintf(buf + len, buf_len - len, "%25s 
[%02d] %u\n", "beacon rssi history", i, vdev->beacon_rssi_history[i]); len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } static void ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar, const struct ath11k_fw_stats_bcn *bcn, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id); u8 *vdev_macaddr; if (!arvif) { ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats", bcn->vdev_id); return; } vdev_macaddr = arvif->vif->addr; len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "VDEV ID", bcn->vdev_id); len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", "VDEV MAC address", vdev_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================"); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Num of beacon tx success", bcn->tx_bcn_succ_cnt); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } void ath11k_wmi_fw_stats_fill(struct ath11k *ar, struct ath11k_fw_stats *fw_stats, u32 stats_id, char *buf) { u32 len = 0; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; const struct ath11k_fw_stats_pdev *pdev; const struct ath11k_fw_stats_vdev *vdev; const struct ath11k_fw_stats_bcn *bcn; size_t num_bcn; spin_lock_bh(&ar->data_lock); if (stats_id == WMI_REQUEST_PDEV_STAT) { pdev = list_first_entry_or_null(&fw_stats->pdevs, struct ath11k_fw_stats_pdev, list); if (!pdev) { ath11k_warn(ar->ab, "failed to get pdev stats\n"); goto unlock; } ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); } if (stats_id == WMI_REQUEST_VDEV_STAT) { len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath11k VDEV stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); list_for_each_entry(vdev, &fw_stats->vdevs, list) ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len); } if (stats_id == WMI_REQUEST_BCN_STAT) { num_bcn = list_count_nodes(&fw_stats->bcn); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", "ath11k Beacon stats", num_bcn); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "==================="); list_for_each_entry(bcn, &fw_stats->bcn, list) ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len); } unlock: spin_unlock_bh(&ar->data_lock); if (len >= buf_len) buf[len - 1] = 0; else buf[len] = 0; } static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) { /* try to send pending beacons first. 
they take priority */ wake_up(&ab->wmi_ab.tx_credits_wq); } static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) { const struct wmi_11d_new_cc_ev *ev; struct ath11k *ar; struct ath11k_pdev *pdev; const void **tb; int ret, i; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT]; if (!ev) { kfree(tb); ath11k_warn(ab, "failed to fetch 11d new cc ev"); return -EPROTO; } spin_lock_bh(&ab->base_lock); memcpy(&ab->new_alpha2, &ev->new_alpha2, 2); spin_unlock_bh(&ab->base_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n", ab->new_alpha2[0], ab->new_alpha2[1]); kfree(tb); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } queue_work(ab->workqueue, &ab->update_11d_work); return 0; } static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_pdev_wmi *wmi = NULL; u32 i; u8 wmi_ep_count; u8 eid; eid = ATH11K_SKB_CB(skb)->eid; dev_kfree_skb(skb); if (eid >= ATH11K_HTC_EP_COUNT) return; wmi_ep_count = ab->htc.wmi_ep_count; if (wmi_ep_count > ab->hw_params.max_radios) return; for (i = 0; i < ab->htc.wmi_ep_count; i++) { if (ab->wmi_ab.wmi[i].eid == eid) { wmi = &ab->wmi_ab.wmi[i]; break; } } if (wmi) wake_up(&wmi->tx_ce_desc_wq); } static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb, enum wmi_reg_chan_list_cmd_type id) { struct cur_regulatory_info *reg_info; int ret; reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); if (!reg_info) return -ENOMEM; if (id == WMI_REG_CHAN_LIST_CC_ID) ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); else ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); if (ret) { ath11k_warn(ab, "failed to extract regulatory info\n"); goto mem_free; } ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP); if (ret) { ath11k_warn(ab, "failed to process regulatory info %d\n", ret); goto mem_free; } kfree(reg_info); return 0; mem_free: ath11k_reg_reset_info(reg_info); kfree(reg_info); return ret; } static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_rdy_parse *rdy_parse = data; struct wmi_ready_event fixed_param; struct wmi_mac_addr *addr_list; struct ath11k_pdev *pdev; u32 num_mac_addr; int i; switch (tag) { case WMI_TAG_READY_EVENT: memset(&fixed_param, 0, sizeof(fixed_param)); memcpy(&fixed_param, (struct wmi_ready_event *)ptr, min_t(u16, sizeof(fixed_param), len)); rdy_parse->num_extra_mac_addr = fixed_param.ready_event_min.num_extra_mac_addr; ether_addr_copy(ab->mac_addr, fixed_param.ready_event_min.mac_addr.addr); ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; break; case WMI_TAG_ARRAY_FIXED_STRUCT: addr_list = (struct wmi_mac_addr *)ptr; num_mac_addr = rdy_parse->num_extra_mac_addr; if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) break; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ether_addr_copy(pdev->mac_addr, addr_list[i].addr); } ab->pdevs_macaddr_valid = true; break; default: break; } return 0; } static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_rdy_parse rdy_parse = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_rdy_parse, &rdy_parse); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return 
ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready"); complete(&ab->wmi_ab.unified_ready); return 0; } static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_delete_resp_event peer_del_resp; struct ath11k *ar; if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { ath11k_warn(ab, "failed to extract peer delete resp"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d", peer_del_resp.vdev_id); rcu_read_unlock(); return; } complete(&ar->peer_delete_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); } static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; u32 vdev_id = 0; if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { ath11k_warn(ab, "failed to extract vdev delete resp"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d", vdev_id); rcu_read_unlock(); return; } complete(&ar->vdev_delete_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n", vdev_id); } static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status) { switch (vdev_resp_status) { case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: return "invalid vdev id"; case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: return "not supported"; case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: return "dfs violation"; case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: return "invalid regdomain"; default: return "unknown"; } } static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_vdev_start_resp_event vdev_start_resp; struct ath11k *ar; u32 status; if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { ath11k_warn(ab, "failed to extract vdev start resp"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev start resp"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d", vdev_start_resp.vdev_id); rcu_read_unlock(); return; } ar->last_wmi_vdev_start_status = 0; ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; status = vdev_start_resp.status; if (WARN_ON_ONCE(status)) { ath11k_warn(ab, "vdev start resp error status %d (%s)\n", status, ath11k_wmi_vdev_resp_print(status)); ar->last_wmi_vdev_start_status = status; } complete(&ar->vdev_setup_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d", vdev_start_resp.vdev_id); } static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_vif *arvif; u32 vdev_id, tx_status; if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { ath11k_warn(ab, "failed to extract bcn tx status"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status"); rcu_read_lock(); arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); if (!arvif) { ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status", vdev_id); rcu_read_unlock(); return; } queue_work(ab->workqueue, &arvif->bcn_tx_work); rcu_read_unlock(); } static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, struct sk_buff *skb) { const struct 
wmi_peer_sta_ps_state_chg_event *ev; struct ieee80211_sta *sta; struct ath11k_peer *peer; struct ath11k *ar; struct ath11k_sta *arsta; const void **tb; enum ath11k_wmi_peer_ps_state peer_previous_ps_state; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch sta ps change ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n", ev->peer_macaddr.addr, ev->peer_ps_state, ev->ps_supported_bitmap, ev->peer_ps_valid, ev->peer_ps_timestamp); rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr); if (!peer) { spin_unlock_bh(&ab->base_lock); ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr); goto exit; } ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); if (!ar) { spin_unlock_bh(&ab->base_lock); ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d", peer->vdev_id); goto exit; } sta = peer->sta; spin_unlock_bh(&ab->base_lock); if (!sta) { ath11k_warn(ab, "failed to find station entry %pM\n", ev->peer_macaddr.addr); goto exit; } arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); peer_previous_ps_state = arsta->peer_ps_state; arsta->peer_ps_state = ev->peer_ps_state; arsta->peer_current_ps_valid = !!ev->peer_ps_valid; if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, ar->ab->wmi_ab.svc_map)) { if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) || !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) || !ev->peer_ps_valid) goto out; if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) { arsta->ps_start_time = ev->peer_ps_timestamp; arsta->ps_start_jiffies = jiffies; } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF && peer_previous_ps_state == WMI_PEER_PS_STATE_ON) { arsta->ps_total_duration = arsta->ps_total_duration + (ev->peer_ps_timestamp - arsta->ps_start_time); } if (ar->ps_timekeeper_enable) trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr, ev->peer_ps_timestamp, arsta->peer_ps_state); } out: spin_unlock_bh(&ar->data_lock); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; u32 vdev_id = 0; if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { ath11k_warn(ab, "failed to extract vdev stopped event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d", vdev_id); rcu_read_unlock(); return; } complete(&ar->vdev_setup_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); } static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) { struct mgmt_rx_event_params rx_ev = {}; struct ath11k *ar; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; u16 fc; struct ieee80211_supported_band *sband; if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { ath11k_warn(ab, "failed to extract mgmt rx event"); dev_kfree_skb(skb); return; } memset(status, 0, sizeof(*status)); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n", rx_ev.status); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev_id %d in 
mgmt_rx_event\n", rx_ev.pdev_id); dev_kfree_skb(skb); goto exit; } if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) || (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { dev_kfree_skb(skb); goto exit; } if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ && rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to * mac80211 has been changed. */ WARN_ON_ONCE(1); dev_kfree_skb(skb); goto exit; } if (rx_ev.phy_mode == MODE_11B && (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) ath11k_dbg(ab, ATH11K_DBG_WMI, "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); sband = &ar->mac.sbands[status->band]; if (status->band != NL80211_BAND_6GHZ) status->freq = ieee80211_channel_to_frequency(rx_ev.channel, status->band); status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); /* Firmware is guaranteed to report all essential management frames via * WMI while it can deliver some extra via HTT. Since there can be * duplicates split the reporting wrt monitor/sniffing. */ status->flag |= RX_FLAG_SKIP_MONITOR; /* In case of PMF, FW delivers decrypted frames with Protected Bit set. * Don't clear that. Also, FW delivers broadcast management frames * (ex: group privacy action frames in mesh) as encrypted payload. 
*/ if (ieee80211_has_protected(hdr->frame_control) && !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { status->flag |= RX_FLAG_DECRYPTED; if (!ieee80211_is_robust_mgmt_frame(skb)) { status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; hdr->frame_control = __cpu_to_le16(fc & ~IEEE80211_FCTL_PROTECTED); } } if (ieee80211_is_beacon(hdr->frame_control)) ath11k_mac_handle_beacon(ar, skb); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", status->freq, status->band, status->signal, status->rate_idx); ieee80211_rx_ni(ar->hw, skb); exit: rcu_read_unlock(); } static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_mgmt_tx_compl_event tx_compl_param = {}; struct ath11k *ar; if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { ath11k_warn(ab, "failed to extract mgmt tx compl event"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", tx_compl_param.pdev_id); goto exit; } wmi_process_mgmt_tx_comp(ar, &tx_compl_param); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d", tx_compl_param.pdev_id, tx_compl_param.desc_id, tx_compl_param.status, tx_compl_param.ack_rssi); exit: rcu_read_unlock(); } static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab, u32 vdev_id, enum ath11k_scan_state state) { int i; struct ath11k_pdev *pdev; struct ath11k *ar; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) { ar = pdev->ar; spin_lock_bh(&ar->data_lock); if (ar->scan.state == state && ar->scan.vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; } spin_unlock_bh(&ar->data_lock); } } return NULL; } static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; struct wmi_scan_event scan_ev = {}; if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) { ath11k_warn(ab, "failed to extract scan event"); return; } rcu_read_lock(); /* In case the scan was cancelled, ex. during interface teardown, * the interface will not be found in active interfaces. * Rather, in such scenarios, iterate over the active pdev's to * search 'ar' if the corresponding 'ar' scan is ABORTING and the * aborting scan's vdev id matches this event info. 
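* As a fallback, a scan still in RUNNING state with a matching vdev id is considered as well.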
*/ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED && scan_ev.reason == WMI_SCAN_REASON_CANCELLED) { ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, ATH11K_SCAN_ABORTING); if (!ar) ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, ATH11K_SCAN_RUNNING); } else { ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id); } if (!ar) { ath11k_warn(ab, "Received scan event for unknown vdev"); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason), scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq, scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id, ath11k_scan_state_str(ar->scan.state), ar->scan.state); switch (scan_ev.event_type) { case WMI_SCAN_EVENT_STARTED: ath11k_wmi_event_scan_started(ar); break; case WMI_SCAN_EVENT_COMPLETED: ath11k_wmi_event_scan_completed(ar); break; case WMI_SCAN_EVENT_BSS_CHANNEL: ath11k_wmi_event_scan_bss_chan(ar); break; case WMI_SCAN_EVENT_FOREIGN_CHAN: ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq); break; case WMI_SCAN_EVENT_START_FAILED: ath11k_warn(ab, "received scan start failure event\n"); ath11k_wmi_event_scan_start_failed(ar); break; case WMI_SCAN_EVENT_DEQUEUED: __ath11k_mac_scan_finish(ar); break; case WMI_SCAN_EVENT_PREEMPTED: case WMI_SCAN_EVENT_RESTARTED: case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: default: break; } spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); } static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_sta_kickout_arg arg = {}; struct ieee80211_sta *sta; struct ath11k_peer *peer; struct ath11k *ar; u32 vdev_id; if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { ath11k_warn(ab, "failed to extract peer sta kickout event"); return; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, arg.mac_addr); if (!peer) { ath11k_warn(ab, "peer not found %pM\n", arg.mac_addr); spin_unlock_bh(&ab->base_lock); goto exit; } vdev_id = peer->vdev_id; /* peer must not be used after base_lock is dropped */ spin_unlock_bh(&ab->base_lock); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", vdev_id); goto exit; } sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL); if (!sta) { ath11k_warn(ab, "Spurious quick kickout for STA %pM\n", arg.mac_addr); goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM", arg.mac_addr); ieee80211_report_low_ack(sta, 10); exit: rcu_read_unlock(); } static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_roam_event roam_ev = {}; struct ath11k *ar; if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) { ath11k_warn(ab, "failed to extract roam event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event roam vdev %u reason 0x%08x rssi %d\n", roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in roam ev %d", roam_ev.vdev_id); rcu_read_unlock(); return; } if (roam_ev.reason >= WMI_ROAM_REASON_MAX) ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", roam_ev.reason, roam_ev.vdev_id); switch (roam_ev.reason) { case WMI_ROAM_REASON_BEACON_MISS: ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: case WMI_ROAM_REASON_SUITABLE_AP_FOUND: case 
WMI_ROAM_REASON_HO_FAILED: ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", roam_ev.reason, roam_ev.vdev_id); break; } rcu_read_unlock(); } static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_chan_info_event ch_info_ev = {}; struct ath11k *ar; struct survey_info *survey; int idx; /* HW channel counters frequency value in hertz */ u32 cc_freq_hz = ab->cc_freq_hz; if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { ath11k_warn(ab, "failed to extract chan info event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, ch_info_ev.cmd_flags, ch_info_ev.noise_floor, ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, ch_info_ev.mac_clk_mhz); if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) { ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in chan info ev %d", ch_info_ev.vdev_id); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ab, "received chan info event without a scan request, ignoring\n"); goto exit; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: break; } idx = freq_to_idx(ar, ch_info_ev.freq); if (idx >= ARRAY_SIZE(ar->survey)) { ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", ch_info_ev.freq, idx); goto exit; } /* If FW provides the MAC clock frequency in MHz, override the initialized * HW channel counters frequency value */ if (ch_info_ev.mac_clk_mhz) cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000); if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { survey = &ar->survey[idx]; memset(survey, 0, sizeof(*survey)); survey->noise = ch_info_ev.noise_floor; survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz); survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz); } exit: spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); } static void ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; struct survey_info *survey; struct ath11k *ar; u32 cc_freq_hz = ab->cc_freq_hz; u64 busy, total, tx, rx, rx_bss; int idx; if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { ath11k_warn(ab, "failed to extract pdev bss chan info event"); return; } busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 | bss_ch_info_ev.rx_clear_count_low; total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 | bss_ch_info_ev.cycle_count_low; tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 | bss_ch_info_ev.tx_cycle_count_low; rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 | bss_ch_info_ev.rx_cycle_count_low; rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 | bss_ch_info_ev.rx_bss_cycle_count_low; ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, bss_ch_info_ev.noise_floor, busy, total, tx, rx, rx_bss); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d in 
bss_chan_info event\n", bss_ch_info_ev.pdev_id); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); idx = freq_to_idx(ar, bss_ch_info_ev.freq); if (idx >= ARRAY_SIZE(ar->survey)) { ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", bss_ch_info_ev.freq, idx); goto exit; } survey = &ar->survey[idx]; survey->noise = bss_ch_info_ev.noise_floor; survey->time = div_u64(total, cc_freq_hz); survey->time_busy = div_u64(busy, cc_freq_hz); survey->time_rx = div_u64(rx_bss, cc_freq_hz); survey->time_tx = div_u64(tx, cc_freq_hz); survey->filled |= (SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX); exit: spin_unlock_bh(&ar->data_lock); complete(&ar->bss_survey_done); rcu_read_unlock(); } static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_vdev_install_key_complete_arg install_key_compl = {}; struct ath11k *ar; if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { ath11k_warn(ab, "failed to extract install key compl event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n", install_key_compl.key_idx, install_key_compl.key_flags, install_key_compl.macaddr, install_key_compl.status); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in install key compl ev %d", install_key_compl.vdev_id); rcu_read_unlock(); return; } ar->install_key_status = 0; if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { ath11k_warn(ab, "install key failed for %pM status %d\n", install_key_compl.macaddr, install_key_compl.status); ar->install_key_status = install_key_compl.status; } complete(&ar->install_key_done); rcu_read_unlock(); } static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { const struct wmi_service_available_event *ev; u32 *wmi_ext2_service_bitmap; int i, j; switch (tag) { case WMI_TAG_SERVICE_AVAILABLE_EVENT: ev = (struct wmi_service_available_event *)ptr; for (i = 0, j = WMI_MAX_SERVICE; i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; i++) { do { if (ev->wmi_service_segment_bitmap[i] & BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) set_bit(j, ab->wmi_ab.svc_map); } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); break; case WMI_TAG_ARRAY_UINT32: wmi_ext2_service_bitmap = (u32 *)ptr; for (i = 0, j = WMI_MAX_EXT_SERVICE; i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; i++) { do { if (wmi_ext2_service_bitmap[i] & BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) set_bit(j, ab->wmi_ab.svc_map); } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi_ext2_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); break; } return 0; } static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) { int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_services_parser, NULL); if (ret) ath11k_warn(ab, "failed to parse services available tlv %d\n", ret); ath11k_dbg(ab, ATH11K_DBG_WMI, "event 
service available"); } static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; struct ath11k *ar; if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { ath11k_warn(ab, "failed to extract peer assoc conf event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer assoc conf ev vdev id %d macaddr %pM\n", peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d", peer_assoc_conf.vdev_id); rcu_read_unlock(); return; } complete(&ar->peer_assoc_done); rcu_read_unlock(); } static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_fw_stats stats = {}; size_t total_vdevs_started = 0; struct ath11k_pdev *pdev; bool is_end = true; int i; struct ath11k *ar; int ret; INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.bcn); ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); if (ret) { ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); goto free; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); if (!ar) { rcu_read_unlock(); ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n", stats.pdev_id, ret); goto free; } spin_lock_bh(&ar->data_lock); /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via * debugfs fw stats. Therefore, processing it separately. */ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); complete(&ar->fw_stats_done); goto complete; } if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { complete(&ar->fw_stats_done); goto complete; } if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { if (list_empty(&stats.vdevs)) { ath11k_warn(ab, "empty vdev stats"); goto complete; } /* FW sends all the active VDEV stats irrespective of PDEV, * hence limit until the count of all VDEVs started */ for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) total_vdevs_started += ar->num_started_vdevs; } if (total_vdevs_started) is_end = ((++ar->fw_stats.num_vdev_recvd) == total_vdevs_started); list_splice_tail_init(&stats.vdevs, &ar->fw_stats.vdevs); if (is_end) complete(&ar->fw_stats_done); goto complete; } /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats. * Hence, processing it in debugfs context */ ath11k_debugfs_fw_stats_process(ar, &stats); complete: complete(&ar->fw_stats_complete); spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); /* Since the stats's pdev, vdev and beacon list are spliced and reinitialised * at this point, no need to free the individual list. */ return; free: ath11k_fw_stats_free(&stats); } /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned * is not part of BDF CTL(Conformance test limits) table entries. 
*/ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_ctl_failsafe_chk_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev ctl failsafe check status %d\n", ev->ctl_failsafe_status); /* If ctl_failsafe_status is set to 1, FW caps the transmit power at * 10 dBm; otherwise the CTL power entry in the BDF is picked up. */ if (ev->ctl_failsafe_status != 0) ath11k_warn(ab, "pdev ctl failsafe failure status %d", ev->ctl_failsafe_status); kfree(tb); } static void ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab, const struct wmi_pdev_csa_switch_ev *ev, const u32 *vdev_ids) { int i; struct ath11k_vif *arvif; /* Finish CSA once the switch count reaches zero */ if (ev->current_switch_count) return; rcu_read_lock(); for (i = 0; i < ev->num_vdevs; i++) { arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); if (!arvif) { ath11k_warn(ab, "Received csa status for unknown vdev %d", vdev_ids[i]); continue; } if (arvif->is_up && arvif->vif->bss_conf.csa_active) ieee80211_csa_finish(arvif->vif, 0); } rcu_read_unlock(); } static void ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_csa_switch_ev *ev; const u32 *vdev_ids; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; if (!ev || !vdev_ids) { ath11k_warn(ab, "failed to fetch pdev csa switch count ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev csa switch count %d for pdev %d, num_vdevs %d", ev->current_switch_count, ev->pdev_id, ev->num_vdevs); ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); kfree(tb); } static void ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_radar_ev *ev; struct ath11k *ar; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "radar detected in invalid pdev %d\n", ev->pdev_id); goto exit; } ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n", ev->pdev_id); if (ar->dfs_block_radar_events) ath11k_info(ab, "DFS Radar detected, but ignored as requested\n"); else ieee80211_radar_detected(ar->hw, NULL); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, struct sk_buff *skb) { struct 
ath11k *ar; const void **tb; const struct wmi_pdev_temperature_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev temp ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); goto exit; } ath11k_thermal_event_temperature(ar, ev->temp); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_fils_discovery_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_fils_discovery_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse FILS discovery event tlv %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery"); ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch FILS discovery event\n"); kfree(tb); return; } ath11k_warn(ab, "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", ev->vdev_id, ev->fils_tt, ev->tbtt); kfree(tb); } static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_probe_resp_tx_status_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse probe response transmission status event tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status"); ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch probe response transmission status event"); kfree(tb); return; } if (ev->tx_status) ath11k_warn(ab, "Probe response transmission failed for vdev_id %u, status %u\n", ev->vdev_id, ev->tx_status); kfree(tb); } static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_wow_ev_arg *ev = data; const char *wow_pg_fault; int wow_pg_len; switch (tag) { case WMI_TAG_WOW_EVENT_INFO: memcpy(ev, ptr, sizeof(*ev)); ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n", ev->wake_reason, wow_reason(ev->wake_reason)); break; case WMI_TAG_ARRAY_BYTE: if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) { wow_pg_fault = ptr; /* the first 4 bytes are length */ wow_pg_len = *(int *)wow_pg_fault; wow_pg_fault += sizeof(int); ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n", wow_pg_len); ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "wow_event_info_type packet present", "wow_pg_fault ", wow_pg_fault, wow_pg_len); } break; default: break; } return 0; } static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_wow_ev_arg ev = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_wow_wakeup_host_parse, &ev); if (ret) { ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host"); complete(&ab->wow.wakeup_completed); } static void ath11k_wmi_diag_event(struct ath11k_base *ab, struct sk_buff *skb) { ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag"); trace_ath11k_wmi_diag(ab, skb->data, skb->len); } static const char 
*ath11k_wmi_twt_add_dialog_event_status(u32 status) { switch (status) { case WMI_ADD_TWT_STATUS_OK: return "ok"; case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED: return "twt disabled"; case WMI_ADD_TWT_STATUS_USED_DIALOG_ID: return "dialog id in use"; case WMI_ADD_TWT_STATUS_INVALID_PARAM: return "invalid parameters"; case WMI_ADD_TWT_STATUS_NOT_READY: return "not ready"; case WMI_ADD_TWT_STATUS_NO_RESOURCE: return "resource unavailable"; case WMI_ADD_TWT_STATUS_NO_ACK: return "no ack"; case WMI_ADD_TWT_STATUS_NO_RESPONSE: return "no response"; case WMI_ADD_TWT_STATUS_DENIED: return "denied"; case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR: fallthrough; default: return "unknown error"; } } static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_twt_add_dialog_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse wmi twt add dialog status event tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog"); ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n"); goto exit; } if (ev->status) ath11k_warn(ab, "wmi add twt dialog event vdev %d dialog id %d status %s\n", ev->vdev_id, ev->dialog_id, ath11k_wmi_twt_add_dialog_event_status(ev->status)); exit: kfree(tb); } static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_gtk_offload_status_event *ev; struct ath11k_vif *arvif; __be64 replay_ctr_be; u64 replay_ctr; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch gtk offload status ev"); kfree(tb); return; } rcu_read_lock(); arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); if (!arvif) { ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n", ev->vdev_id); goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n", ev->refresh_cnt); ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt", NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES); replay_ctr = ev->replay_ctr.word1; replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0; arvif->rekey_data.replay_ctr = replay_ctr; /* supplicant expects big-endian replay counter */ replay_ctr_be = cpu_to_be64(replay_ctr); ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, (void *)&replay_ctr_be, GFP_ATOMIC); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_p2p_noa_event *ev; const struct ath11k_wmi_p2p_noa_info *noa; struct ath11k *ar; int vdev_id; u8 noa_descriptors; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb)); return; } ev = tb[WMI_TAG_P2P_NOA_EVENT]; noa = tb[WMI_TAG_P2P_NOA_INFO]; if (!ev || !noa) goto out; vdev_id = ev->vdev_id; noa_descriptors = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM); if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) { ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n", noa_descriptors); goto out; } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi tlv p2p noa vdev_id %i descriptors %u\n", vdev_id, noa_descriptors); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, 
"invalid vdev id %d in P2P NoA event\n", vdev_id); goto unlock; } ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); unlock: rcu_read_unlock(); out: kfree(tb); } static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_tlv_event_id id; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id)); trace_ath11k_wmi_event(ab, id, skb->data, skb->len); if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) goto out; switch (id) { /* Process all the WMI events here */ case WMI_SERVICE_READY_EVENTID: ath11k_service_ready_event(ab, skb); break; case WMI_SERVICE_READY_EXT_EVENTID: ath11k_service_ready_ext_event(ab, skb); break; case WMI_SERVICE_READY_EXT2_EVENTID: ath11k_service_ready_ext2_event(ab, skb); break; case WMI_REG_CHAN_LIST_CC_EVENTID: ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID); break; case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID); break; case WMI_READY_EVENTID: ath11k_ready_event(ab, skb); break; case WMI_PEER_DELETE_RESP_EVENTID: ath11k_peer_delete_resp_event(ab, skb); break; case WMI_VDEV_START_RESP_EVENTID: ath11k_vdev_start_resp_event(ab, skb); break; case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: ath11k_bcn_tx_status_event(ab, skb); break; case WMI_VDEV_STOPPED_EVENTID: ath11k_vdev_stopped_event(ab, skb); break; case WMI_MGMT_RX_EVENTID: ath11k_mgmt_rx_event(ab, skb); /* mgmt_rx_event() owns the skb now! */ return; case WMI_MGMT_TX_COMPLETION_EVENTID: ath11k_mgmt_tx_compl_event(ab, skb); break; case WMI_SCAN_EVENTID: ath11k_scan_event(ab, skb); break; case WMI_PEER_STA_KICKOUT_EVENTID: ath11k_peer_sta_kickout_event(ab, skb); break; case WMI_ROAM_EVENTID: ath11k_roam_event(ab, skb); break; case WMI_CHAN_INFO_EVENTID: ath11k_chan_info_event(ab, skb); break; case WMI_PDEV_BSS_CHAN_INFO_EVENTID: ath11k_pdev_bss_chan_info_event(ab, skb); break; case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: ath11k_vdev_install_key_compl_event(ab, skb); break; case WMI_SERVICE_AVAILABLE_EVENTID: ath11k_service_available_event(ab, skb); break; case WMI_PEER_ASSOC_CONF_EVENTID: ath11k_peer_assoc_conf_event(ab, skb); break; case WMI_UPDATE_STATS_EVENTID: ath11k_update_stats_event(ab, skb); break; case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: ath11k_pdev_ctl_failsafe_check_event(ab, skb); break; case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb); break; case WMI_PDEV_UTF_EVENTID: ath11k_tm_wmi_event(ab, id, skb); break; case WMI_PDEV_TEMPERATURE_EVENTID: ath11k_wmi_pdev_temperature_event(ab, skb); break; case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb); break; case WMI_HOST_FILS_DISCOVERY_EVENTID: ath11k_fils_discovery_event(ab, skb); break; case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath11k_probe_resp_tx_status_event(ab, skb); break; case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: ath11k_wmi_obss_color_collision_event(ab, skb); break; case WMI_TWT_ADD_DIALOG_EVENTID: ath11k_wmi_twt_add_dialog_event(ab, skb); break; case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb); break; case WMI_VDEV_DELETE_RESP_EVENTID: ath11k_vdev_delete_resp_event(ab, skb); break; case WMI_WOW_WAKEUP_HOST_EVENTID: ath11k_wmi_event_wow_wakeup_host(ab, skb); break; case WMI_11D_NEW_COUNTRY_EVENTID: ath11k_reg_11d_new_cc_event(ab, skb); break; case WMI_DIAG_EVENTID: ath11k_wmi_diag_event(ab, skb); break; case 
WMI_PEER_STA_PS_STATECHG_EVENTID: ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb); break; case WMI_GTK_OFFLOAD_STATUS_EVENTID: ath11k_wmi_gtk_offload_status_event(ab, skb); break; case WMI_P2P_NOA_EVENTID: ath11k_wmi_p2p_noa_event(ab, skb); break; default: ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id); break; } out: dev_kfree_skb(skb); } static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab, u32 pdev_idx) { int status; u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; struct ath11k_htc_svc_conn_req conn_req; struct ath11k_htc_svc_conn_resp conn_resp; memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); /* these fields are the same for all service endpoints */ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete; conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx; conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits; /* connect to control service */ conn_req.service_id = svc_id[pdev_idx]; status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); if (status) { ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", status); return status; } ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq); return 0; } static int ath11k_wmi_send_unit_test_cmd(struct ath11k *ar, struct wmi_unit_test_cmd ut_cmd, u32 *test_args) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_unit_test_cmd *cmd; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; u32 *ut_cmd_args; int buf_len, arg_len; int ret; int i; arg_len = sizeof(u32) * ut_cmd.num_args; buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len); if (!skb) return -ENOMEM; cmd = (struct wmi_unit_test_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE); cmd->vdev_id = ut_cmd.vdev_id; cmd->module_id = ut_cmd.module_id; cmd->num_args = ut_cmd.num_args; cmd->diag_token = ut_cmd.diag_token; ptr = skb->data + sizeof(ut_cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, arg_len); ptr += TLV_HDR_SIZE; ut_cmd_args = ptr; for (i = 0; i < ut_cmd.num_args; i++) ut_cmd_args[i] = test_args[i]; /* log before sending, cmd points into skb->data which is owned by * HTC (or freed on failure) once ath11k_wmi_cmd_send() is called */ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd unit test module %d vdev %d n_args %d token %d\n", cmd->module_id, cmd->vdev_id, cmd->num_args, cmd->diag_token); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD: %d\n", ret); dev_kfree_skb(skb); } return ret; } int ath11k_wmi_simulate_radar(struct ath11k *ar) { struct ath11k_vif *arvif; u32 dfs_args[DFS_MAX_TEST_ARGS]; struct wmi_unit_test_cmd wmi_ut; bool arvif_found = false; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { arvif_found = true; break; } } if (!arvif_found) return -EINVAL; dfs_args[DFS_TEST_CMDID] = 0; dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; /* Currently we could pass segment_id (b0 - b1), chirp (b2) and * freq offset (b3 - b10) to the unit test. For simulation * purposes this can be set to 0, which is valid. 
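* A value of 0 therefore means: primary segment, no chirp, zero frequency offset.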
*/ dfs_args[DFS_TEST_RADAR_PARAM] = 0; wmi_ut.vdev_id = arvif->vdev_id; wmi_ut.module_id = DFS_UNIT_TEST_MODULE; wmi_ut.num_args = DFS_MAX_TEST_ARGS; wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN; ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n"); return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); } int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, struct ath11k_fw_dbglog *dbglog) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_debug_log_config_cmd_fixed_param *cmd; struct sk_buff *skb; struct wmi_tlv *tlv; int ret, len; len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->dbg_log_param = dbglog->param; tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); switch (dbglog->param) { case WMI_DEBUG_LOG_PARAM_LOG_LEVEL: case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE: case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE: case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP: cmd->value = dbglog->value; break; case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP: case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP: cmd->value = dbglog->value; memcpy(tlv->value, module_id_bitmap, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); /* clear current config to be used for next user config */ memset(module_id_bitmap, 0, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); break; default: dev_kfree_skb(skb); return -EINVAL; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_DBGLOG_CFG_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg"); return ret; } int ath11k_wmi_connect(struct ath11k_base *ab) { u32 i; u8 wmi_ep_count; wmi_ep_count = ab->htc.wmi_ep_count; if (wmi_ep_count > ab->hw_params.max_radios) return -1; for (i = 0; i < wmi_ep_count; i++) ath11k_connect_pdev_htc_service(ab, i); return 0; } static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id) { if (WARN_ON(pdev_id >= MAX_RADIOS)) return; /* TODO: Deinit any pdev specific wmi resource */ } int ath11k_wmi_pdev_attach(struct ath11k_base *ab, u8 pdev_id) { struct ath11k_pdev_wmi *wmi_handle; if (pdev_id >= ab->hw_params.max_radios) return -EINVAL; wmi_handle = &ab->wmi_ab.wmi[pdev_id]; wmi_handle->wmi_ab = &ab->wmi_ab; ab->wmi_ab.ab = ab; /* TODO: Init remaining resource specific to pdev */ return 0; } int ath11k_wmi_attach(struct ath11k_base *ab) { int ret; ret = ath11k_wmi_pdev_attach(ab, 0); if (ret) return ret; ab->wmi_ab.ab = ab; ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; /* It's overwritten when service_ext_ready is handled */ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; /* TODO: Init remaining wmi soc resources required */ init_completion(&ab->wmi_ab.service_ready); init_completion(&ab->wmi_ab.unified_ready); return 0; } void ath11k_wmi_detach(struct ath11k_base *ab) { int i; /* TODO: Deinit wmi resource specific to SOC as required */ for (i = 0; i < ab->htc.wmi_ep_count; i++) ath11k_wmi_pdev_detach(ab, i); ath11k_wmi_free_dbring_caps(ab); } int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, u32 filter_bitmap, bool enable) { 
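/* Build and send WMI_HW_DATA_FILTER_CMDID for the given vdev. Note that on disable all filter mode bits are set rather than cleared (see the bitmap handling below). */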
struct wmi_hw_data_filter_cmd *cmd; struct sk_buff *skb; int len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_hw_data_filter_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = enable; /* Set all modes in case of disable */ if (cmd->enable) cmd->hw_filter_bitmap = filter_bitmap; else cmd->hw_filter_bitmap = ((u32)~0U); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "hw data filter enable %d filter_bitmap 0x%x\n", enable, filter_bitmap); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); } int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) { struct wmi_wow_host_wakeup_ind *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_host_wakeup_ind *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); } int ath11k_wmi_wow_enable(struct ath11k *ar) { struct wmi_wow_enable_cmd *cmd; struct sk_buff *skb; int len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->enable = 1; cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); } int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, const u8 mac_addr[ETH_ALEN]) { struct sk_buff *skb; struct wmi_scan_prob_req_oui_cmd *cmd; u32 prob_req_oui; int len; prob_req_oui = (((u32)mac_addr[0]) << 16) | (((u32)mac_addr[1]) << 8) | mac_addr[2]; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_PROB_REQ_OUI_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->prob_req_oui = prob_req_oui; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n", prob_req_oui); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); } int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, enum wmi_wow_wakeup_event event, u32 enable) { struct wmi_wow_add_del_event_cmd *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->is_add = enable; cmd->event_bitmap = (1 << event); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n", wow_wakeup_event(event), enable, vdev_id); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); } int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, const u8 *pattern, const u8 *mask, int pattern_len, int pattern_offset) { struct wmi_wow_add_pattern_cmd *cmd; struct wmi_wow_bitmap_pattern *bitmap; struct wmi_tlv *tlv; struct 
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	size_t len;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
	bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					WMI_TAG_WOW_BITMAP_PATTERN_T) |
			     FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);

	memcpy(bitmap->patternbuf, pattern, pattern_len);
	ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
	bitmap->pattern_offset = pattern_offset;
	bitmap->pattern_len = pattern_len;
	bitmap->bitmask_len = pattern_len;
	bitmap->pattern_id = pattern_id;

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}

int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_DEL_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
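/* Wire layout produced by ath11k_wmi_wow_add_pattern() above. The
 * firmware interface expects every placeholder TLV to be present even
 * when it carries no payload, which is why the length calculation
 * accounts for five empty/uint32 TLVs after the bitmap:
 *
 *	wmi_wow_add_pattern_cmd		(fixed param)
 *	ARRAY_STRUCT TLV		(len = sizeof(bitmap))
 *	  wmi_wow_bitmap_pattern	(pattern + bitmask, CE byte-swapped)
 *	ARRAY_STRUCT TLV, len 0		(ipv4 sync)
 *	ARRAY_STRUCT TLV, len 0		(ipv6 sync)
 *	ARRAY_STRUCT TLV, len 0		(magic)
 *	ARRAY_UINT32 TLV, len 0		(pattern info timeout)
 *	ARRAY_UINT32 TLV		(u32 ratelimit interval)
 */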
static struct sk_buff *
ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
				   u32 vdev_id,
				   struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 *channel_list;
	size_t len, nlo_list_len, channel_list_len;
	u8 *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = pno->vdev_id;
	cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = pno->active_max_time;
	cmd->passive_dwell_time = pno->passive_max_time;

	if (pno->do_passive_scan)
		cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;

	cmd->fast_scan_period = pno->fast_scan_period;
	cmd->slow_scan_period = pno->slow_scan_period;
	cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
	cmd->delay_start_time = pno->delay_start_time;

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
			      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
		ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
		ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = pno->uc_networks_count;

	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
	ptr += sizeof(*tlv);

	nlo_list = (struct nlo_configured_parameters *)ptr;
	for (i = 0; i < cmd->no_of_ssids; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			      FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));

		nlo_list[i].ssid.valid = true;
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       nlo_list[i].ssid.ssid.ssid_len);
		ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
				    roundup(nlo_list[i].ssid.ssid.ssid_len, 4));

		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = true;
			nlo_list[i].rssi_cond.rssi =
				pno->a_networks[i].rssi_threshold;
		}

		nlo_list[i].bcast_nw_type.valid = true;
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			pno->a_networks[i].bcast_nw_type;
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = pno->a_networks[0].channel_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = (u32 *)ptr;
	for (i = 0; i < cmd->num_of_channels; i++)
		channel_list[i] = pno->a_networks[0].channels[i];

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
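/* The generators above and below only build the NLO config skb;
 * ath11k_wmi_wow_config_pno() further down picks one of them and
 * sends the result. The start variant appends one nlo_list entry per
 * configured SSID and takes the channel list from the first network,
 * while the stop variant carries just the fixed params with
 * WMI_NLO_CONFIG_STOP set.
 */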
static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->flags = WMI_NLO_CONFIG_STOP;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}

static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
				       struct ath11k_arp_ns_offload *offload,
				       u8 **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_tuple *ns;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = (struct wmi_tlv *)buf_ptr;

	if (ext) {
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = (struct wmi_ns_offload_tuple *)buf_ptr;
		ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
				 FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);

		if (enable) {
			if (i < ns_cnt)
				ns->flags |= WMI_NSOL_FLAGS_VALID;

			memcpy(ns->target_ipaddr[0],
			       offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr,
			       offload->self_ipv6_addr[i], 16);
			ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
			ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);

			if (offload->ipv6_type[i])
				ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
			ath11k_ce_byte_swap(ns->target_mac.addr, 8);

			if (ns->target_mac.word0 != 0 ||
			    ns->target_mac.word1 != 0) {
				ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
			}

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}

static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
					struct ath11k_arp_ns_offload *offload,
					u8 **ptr,
					bool enable)
{
	struct wmi_arp_offload_tuple *arp;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = (struct wmi_arp_offload_tuple *)buf_ptr;
		arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = WMI_ARPOL_FLAGS_VALID;
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
			ath11k_ce_byte_swap(arp->target_ipaddr, 4);

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}
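/* ath11k_wmi_arp_ns_offload() below stitches the two fillers above
 * into a single command buffer: the first WMI_MAX_NS_OFFLOADS IPv6
 * tuples go into the base NS TLV, the ARP tuples follow, and only
 * when more IPv6 addresses are configured is
 * ath11k_wmi_fill_ns_offload() called a second time with ext == true
 * to append the overflow tuples.
 */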
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
			      struct ath11k_vif *arvif, bool enable)
{
	struct ath11k_arp_ns_offload *offload;
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	offload = &arvif->arp_ns_offload;
	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
	}

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->flags = 0;
	cmd->vdev_id = arvif->vdev_id;
	cmd->num_ns_ext_tuples = ns_ext_tuples;

	buf_ptr += sizeof(*cmd);

	ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
	ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}

int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
				 struct ath11k_vif *arvif, bool enable)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
	int len;
	struct sk_buff *skb;
	__le64 replay_ctr;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = arvif->vdev_id;

	if (enable) {
		cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
		ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);

		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
		ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
	} else {
		cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}

int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
				 struct ath11k_vif *arvif)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	int len;
	struct sk_buff *skb;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = arvif->vdev_id;
	cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
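/* The BIOS SAR helpers below pad both the table and the reserved
 * chunk to a u32 boundary with roundup() so the TLV payload lengths
 * stay 4-byte aligned; only BIOS_SAR_TABLE_LEN bytes of sar_val are
 * actually copied, and the alignment slack is not explicitly filled
 * here.
 */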
int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar,
					     const u8 *sar_val)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_sar_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	u32 len, sar_len_aligned, rsvd_len_aligned;

	sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
	rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sar_len_aligned +
	      TLV_HDR_SIZE + rsvd_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;
	cmd->sar_len = BIOS_SAR_TABLE_LEN;
	cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
	buf_ptr += TLV_HDR_SIZE;
	memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);

	buf_ptr += sar_len_aligned;
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
}

int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_geo_table_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	u32 len, rsvd_len_aligned;

	rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;
	cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
}

int ath11k_wmi_sta_keepalive(struct ath11k *ar,
			     const struct wmi_sta_keepalive_arg *arg)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_sta_keepalive_cmd *cmd;
	struct wmi_sta_keepalive_arp_resp *arp;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd) + sizeof(*arp);
	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->enabled = arg->enabled;
	cmd->interval = arg->interval;
	cmd->method = arg->method;

	arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
	arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
		arp->src_ip4_addr = arg->src_ip4_addr;
		arp->dest_ip4_addr = arg->dest_ip4_addr;
		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "sta keepalive vdev %d enabled %d method %d interval %d\n",
		   arg->vdev_id, arg->enabled, arg->method, arg->interval);

	return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}

bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
{
	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
}
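/* A caller would typically gate 6 GHz regulatory handling on the
 * helper above; an illustrative sketch only, since the regulatory
 * event handling is not part of this excerpt:
 *
 *	if (ath11k_wmi_supports_6ghz_cc_ext(ar))
 *		// handle the extended country-code event format
 *	else
 *		// fall back to the legacy reg channel list event
 */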