diff --git a/ahb.c b/ahb.c
index fde1ce43c499..50809cc1dad4 100644
--- a/ahb.c
+++ b/ahb.c
@@ -1,1330 +1,1330 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/of_device.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include <linux/of_address.h> #include <linux/iommu.h> #include "ahb.h" #include "debug.h" #include "hif.h" #include "qmi.h" #include <linux/remoteproc.h> #include "pcic.h" #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> static const struct of_device_id ath11k_ahb_of_match[] = { /* TODO: Should we change the compatible string to something similar * to one that ath10k uses? */ { .compatible = "qcom,ipq8074-wifi", .data = (void *)ATH11K_HW_IPQ8074, }, { .compatible = "qcom,ipq6018-wifi", .data = (void *)ATH11K_HW_IPQ6018_HW10, }, { .compatible = "qcom,wcn6750-wifi", .data = (void *)ATH11K_HW_WCN6750_HW10, }, { .compatible = "qcom,ipq5018-wifi", .data = (void *)ATH11K_HW_IPQ5018_HW10, }, { } }; MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match); #define ATH11K_IRQ_CE0_OFFSET 4 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { "misc-pulse1", "misc-latch", "sw-exception", "watchdog", "ce0", "ce1", "ce2", "ce3", "ce4", "ce5", "ce6", "ce7", "ce8", "ce9", "ce10", "ce11", "host2wbm-desc-feed", "host2reo-re-injection", "host2reo-command", "host2rxdma-monitor-ring3", "host2rxdma-monitor-ring2", "host2rxdma-monitor-ring1", "reo2ost-exception", "wbm2host-rx-release", "reo2host-status", "reo2host-destination-ring4", "reo2host-destination-ring3", "reo2host-destination-ring2", "reo2host-destination-ring1", "rxdma2host-monitor-destination-mac3", "rxdma2host-monitor-destination-mac2", "rxdma2host-monitor-destination-mac1", "ppdu-end-interrupts-mac3", "ppdu-end-interrupts-mac2", "ppdu-end-interrupts-mac1", "rxdma2host-monitor-status-ring-mac3", "rxdma2host-monitor-status-ring-mac2", "rxdma2host-monitor-status-ring-mac1", "host2rxdma-host-buf-ring-mac3", "host2rxdma-host-buf-ring-mac2", "host2rxdma-host-buf-ring-mac1", "rxdma2host-destination-ring-mac3", "rxdma2host-destination-ring-mac2", "rxdma2host-destination-ring-mac1", "host2tcl-input-ring4", "host2tcl-input-ring3", "host2tcl-input-ring2", "host2tcl-input-ring1", "wbm2host-tx-completions-ring3", "wbm2host-tx-completions-ring2", "wbm2host-tx-completions-ring1", "tcl2host-status-ring", }; /* enum ext_irq_num - irq numbers that can be used by external modules * like datapath */ enum ext_irq_num { host2wbm_desc_feed = 16, host2reo_re_injection, host2reo_command, host2rxdma_monitor_ring3, host2rxdma_monitor_ring2, host2rxdma_monitor_ring1, reo2host_exception, wbm2host_rx_release, reo2host_status, reo2host_destination_ring4, reo2host_destination_ring3, reo2host_destination_ring2, reo2host_destination_ring1, rxdma2host_monitor_destination_mac3, rxdma2host_monitor_destination_mac2, rxdma2host_monitor_destination_mac1, ppdu_end_interrupts_mac3, ppdu_end_interrupts_mac2, ppdu_end_interrupts_mac1, rxdma2host_monitor_status_ring_mac3, rxdma2host_monitor_status_ring_mac2, rxdma2host_monitor_status_ring_mac1, host2rxdma_host_buf_ring_mac3, host2rxdma_host_buf_ring_mac2, host2rxdma_host_buf_ring_mac1, rxdma2host_destination_ring_mac3, rxdma2host_destination_ring_mac2, rxdma2host_destination_ring_mac1, host2tcl_input_ring4, host2tcl_input_ring3, host2tcl_input_ring2, host2tcl_input_ring1, wbm2host_tx_completions_ring3, wbm2host_tx_completions_ring2, wbm2host_tx_completions_ring1, tcl2host_status_ring, }; static int ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector) {
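/*
 * Illustrative sketch (hypothetical values, not part of this patch):
 * the WCN6750 window selection below relies on the XOR range check
 * (offset ^ base) < mask, which holds when the window base is aligned
 * to the window size and offset falls inside the mask-sized range
 * starting at base.
 *
 *	#define WIN_BASE 0x00400000	// hypothetical aligned base
 *	#define WIN_MASK 0x0007ffff	// hypothetical range mask
 *
 *	static bool in_window(u32 offset)
 *	{
 *		// XOR clears the shared high bits; any bit left above
 *		// the mask width means offset is outside the window.
 *		return (offset ^ WIN_BASE) < WIN_MASK;
 *	}
 */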
return ab->pci.msi.irqs[vector]; } static inline u32 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset) { u32 window_start = 0; /* If offset lies within DP register range, use 1st window */ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) window_start = ATH11K_PCI_WINDOW_START; /* If offset lies within CE register range, use 2nd window */ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) < ATH11K_PCI_WINDOW_RANGE_MASK) window_start = 2 * ATH11K_PCI_WINDOW_START; return window_start; } static void ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value) { u32 window_start; /* WCN6750 uses static window based register access*/ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); iowrite32(value, ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset) { u32 window_start; u32 val; /* WCN6750 uses static window based register access */ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); val = ioread32(ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); return val; } static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = { .wakeup = NULL, .release = NULL, .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750, .window_write32 = ath11k_ahb_window_write32_wcn6750, .window_read32 = ath11k_ahb_window_read32_wcn6750, }; static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) { return ioread32(ab->mem + offset); } static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value) { iowrite32(value, ab->mem + offset); } static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; tasklet_kill(&ce_pipe->intr_tq); } } static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) { int i; for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) { int i; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath11k_ahb_ext_grp_disable(irq_grp); if (irq_grp->napi_enabled) { napi_synchronize(&irq_grp->napi); napi_disable(&irq_grp->napi); irq_grp->napi_enabled = false; } } } static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) { int i; for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset) { u32 val; val = ath11k_ahb_read32(ab, offset); ath11k_ahb_write32(ab, offset, val | BIT(bit)); } static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) { u32 val; val = ath11k_ahb_read32(ab, offset); ath11k_ahb_write32(ab, offset, val & ~BIT(bit)); } static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr); if 
(ce_attr->dest_nentries) { ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, ie3_reg_addr); } } static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { const struct ce_attr *ce_attr; const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); ce_attr = &ab->hw_params.host_ce_config[ce_id]; if (ce_attr->src_nentries) ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr); if (ce_attr->dest_nentries) { ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, ie3_reg_addr); } } static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab) { int i; int irq_idx; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; synchronize_irq(ab->irq_num[irq_idx]); } } static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab) { int i, j; int irq_idx; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) { irq_idx = irq_grp->irqs[j]; synchronize_irq(ab->irq_num[irq_idx]); } } } static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_ahb_ce_irq_enable(ab, i); } } static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_ahb_ce_irq_disable(ab, i); } } static int ath11k_ahb_start(struct ath11k_base *ab) { ath11k_ahb_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); return 0; } static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) { int i; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; if (!irq_grp->napi_enabled) { napi_enable(&irq_grp->napi); irq_grp->napi_enabled = true; } ath11k_ahb_ext_grp_enable(irq_grp); } } static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) { __ath11k_ahb_ext_irq_disable(ab); ath11k_ahb_sync_ext_irqs(ab); } static void ath11k_ahb_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_ahb_ce_irqs_disable(ab); ath11k_ahb_sync_ce_irqs(ab); ath11k_ahb_kill_tasklets(ab); timer_delete_sync(&ab->rx_replenish_retry); ath11k_ce_cleanup_pipes(ab); } static int ath11k_ahb_power_up(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); int ret; ret = rproc_boot(ab_ahb->tgt_rproc); if (ret) ath11k_err(ab, "failed to boot the remote processor Q6\n"); return ret; } static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); rproc_shutdown(ab_ahb->tgt_rproc); } static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; cfg->tgt_ce_len = ab->hw_params.target_ce_count; cfg->tgt_ce = ab->hw_params.target_ce_config; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; } static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) { int i, j; for (i 
= 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); free_netdev(irq_grp->napi_ndev); } } static void ath11k_ahb_free_irq(struct ath11k_base *ab) { int irq_idx; int i; if (ab->hw_params.hybrid_bus_type) return ath11k_pcic_free_irq(ab); for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); } ath11k_ahb_free_ext_irq(ab); } static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t) { struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); } static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; /* last interrupt received for this CE */ ce_pipe->timestamp = jiffies; ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); tasklet_schedule(&ce_pipe->intr_tq); return IRQ_HANDLED; } static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget) { struct ath11k_ext_irq_grp *irq_grp = container_of(napi, struct ath11k_ext_irq_grp, napi); struct ath11k_base *ab = irq_grp->ab; int work_done; work_done = ath11k_dp_service_srng(ab, irq_grp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); ath11k_ahb_ext_grp_enable(irq_grp); } if (work_done > budget) work_done = budget; return work_done; } static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg) { struct ath11k_ext_irq_grp *irq_grp = arg; /* last interrupt received for this group */ irq_grp->timestamp = jiffies; ath11k_ahb_ext_grp_disable(irq_grp); napi_schedule(&irq_grp->napi); return IRQ_HANDLED; } static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab) { struct ath11k_hw_params *hw = &ab->hw_params; int i, j; int irq; int ret; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; irq_grp->napi_ndev = alloc_netdev_dummy(0); if (!irq_grp->napi_ndev) return -ENOMEM; netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_ahb_ext_grp_napi_poll); for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) { if (ab->hw_params.ring_mask->tx[i] & BIT(j)) { irq_grp->irqs[num_irq++] = wbm2host_tx_completions_ring1 - j; } if (ab->hw_params.ring_mask->rx[i] & BIT(j)) { irq_grp->irqs[num_irq++] = reo2host_destination_ring1 - j; } if (ab->hw_params.ring_mask->rx_err[i] & BIT(j)) irq_grp->irqs[num_irq++] = reo2host_exception; if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j)) irq_grp->irqs[num_irq++] = wbm2host_rx_release; if (ab->hw_params.ring_mask->reo_status[i] & BIT(j)) irq_grp->irqs[num_irq++] = reo2host_status; if (j < ab->hw_params.max_radios) { if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) { irq_grp->irqs[num_irq++] = rxdma2host_destination_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) { irq_grp->irqs[num_irq++] = host2rxdma_host_buf_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) { irq_grp->irqs[num_irq++] = ppdu_end_interrupts_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); irq_grp->irqs[num_irq++] = rxdma2host_monitor_status_ring_mac1 - ath11k_hw_get_mac_from_pdev_id(hw, j); } 
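/*
 * Worked example (derived from irq_name[] and ext_irq_num above, not
 * part of this patch): the ring masks count *down* from the ring1
 * enumerator, so group bit j = 0 selects ring1, j = 1 ring2, and so
 * on. For the tx completions mask:
 *
 *	j = 0 -> wbm2host_tx_completions_ring1 -> irq_name[50]
 *	j = 1 -> wbm2host_tx_completions_ring2 -> irq_name[49]
 *	j = 2 -> wbm2host_tx_completions_ring3 -> irq_name[48]
 */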
} } irq_grp->num_irq = num_irq; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); ab->irq_num[irq_idx] = irq; irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler, IRQF_TRIGGER_RISING, irq_name[irq_idx], irq_grp); if (ret) { ath11k_err(ab, "failed request_irq for %d\n", irq); } } } return 0; } static int ath11k_ahb_config_irq(struct ath11k_base *ab) { int irq, irq_idx, i; int ret; if (ab->hw_params.hybrid_bus_type) return ath11k_pcic_config_irq(ab); /* Configure CE irqs */ for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_IRQ_CE0_OFFSET + i; tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet); irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler, IRQF_TRIGGER_RISING, irq_name[irq_idx], ce_pipe); if (ret) return ret; ab->irq_num[irq_idx] = irq; } /* Configure external interrupts */ ret = ath11k_ahb_config_ext_irq(ab); return ret; } static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; int i; for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { entry = &ab->hw_params.svc_to_ce_map[i]; if (__le32_to_cpu(entry->service_id) != service_id) continue; switch (__le32_to_cpu(entry->pipedir)) { case PIPEDIR_NONE: break; case PIPEDIR_IN: WARN_ON(dl_set); *dl_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; break; case PIPEDIR_OUT: WARN_ON(ul_set); *ul_pipe = __le32_to_cpu(entry->pipenum); ul_set = true; break; case PIPEDIR_INOUT: WARN_ON(dl_set); WARN_ON(ul_set); *dl_pipe = __le32_to_cpu(entry->pipenum); *ul_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; ul_set = true; break; } } if (WARN_ON(!ul_set || !dl_set)) return -ENOENT; return 0; } static int ath11k_ahb_hif_suspend(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); u32 wake_irq; u32 value = 0; int ret; if (!device_may_wakeup(ab->dev)) return -EPERM; wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; ret = enable_irq_wake(wake_irq); if (ret) { ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret); return ret; } value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, ATH11K_AHB_SMP2P_SMEM_SEQ_NO); value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER, ATH11K_AHB_SMP2P_SMEM_MSG); ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); if (ret) { ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n"); return ret; } static int ath11k_ahb_hif_resume(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); u32 wake_irq; u32 value = 0; int ret; if (!device_may_wakeup(ab->dev)) return -EPERM; wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; ret = disable_irq_wake(wake_irq); if (ret) { ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret); return ret; } reinit_completion(&ab->wow.wakeup_completed); value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, ATH11K_AHB_SMP2P_SMEM_SEQ_NO); value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT, ATH11K_AHB_SMP2P_SMEM_MSG); ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, 
ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); if (ret) { ath11k_err(ab, "failed to send smp2p power save exit cmd :%d\n", ret); return ret; } ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); if (ret == 0) { ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n"); return -ETIMEDOUT; } ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n"); return 0; } static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = { .start = ath11k_ahb_start, .stop = ath11k_ahb_stop, .read32 = ath11k_ahb_read32, .write32 = ath11k_ahb_write32, .read = NULL, .irq_enable = ath11k_ahb_ext_irq_enable, .irq_disable = ath11k_ahb_ext_irq_disable, .map_service_to_pipe = ath11k_ahb_map_service_to_pipe, .power_down = ath11k_ahb_power_down, .power_up = ath11k_ahb_power_up, }; static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = { .start = ath11k_pcic_start, .stop = ath11k_pcic_stop, .read32 = ath11k_pcic_read32, .write32 = ath11k_pcic_write32, .read = NULL, .irq_enable = ath11k_pcic_ext_irq_enable, .irq_disable = ath11k_pcic_ext_irq_disable, .get_msi_address = ath11k_pcic_get_msi_address, .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, .power_down = ath11k_ahb_power_down, .power_up = ath11k_ahb_power_up, .suspend = ath11k_ahb_hif_suspend, .resume = ath11k_ahb_hif_resume, .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq, .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq, }; static int ath11k_core_get_rproc(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct device *dev = ab->dev; struct rproc *prproc; phandle rproc_phandle; if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { ath11k_err(ab, "failed to get q6_rproc handle\n"); return -ENOENT; } prproc = rproc_get_by_phandle(rproc_phandle); if (!prproc) { ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n"); return -EPROBE_DEFER; } ab_ahb->tgt_rproc = prproc; return 0; } static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab) { struct platform_device *pdev = ab->pdev; phys_addr_t msi_addr_pa; dma_addr_t msi_addr_iova; struct resource *res; int int_prop; int ret; int i; ret = ath11k_pcic_init_msi_config(ab); if (ret) { ath11k_err(ab, "failed to init msi config: %d\n", ret); return ret; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ath11k_err(ab, "failed to fetch msi_addr\n"); return -ENOENT; } msi_addr_pa = res->start; msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE, DMA_FROM_DEVICE, 0); if (dma_mapping_error(ab->dev, msi_addr_iova)) return -ENOMEM; ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova); ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova); ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop); if (ret) return ret; ab->pci.msi.ep_base_data = int_prop + 32; for (i = 0; i < ab->pci.msi.config->total_vectors; i++) { ret = platform_get_irq(pdev, i); if (ret < 0) return ret; ab->pci.msi.irqs[i] = ret; } set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); return 0; } static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); if (!ab->hw_params.smp2p_wow_exit) return 0; ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out", &ab_ahb->smp2p_info.smem_bit); if (IS_ERR(ab_ahb->smp2p_info.smem_state)) { ath11k_err(ab, "failed to fetch smem state: %ld\n", PTR_ERR(ab_ahb->smp2p_info.smem_state)); return
PTR_ERR(ab_ahb->smp2p_info.smem_state); } return 0; } static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); if (!ab->hw_params.smp2p_wow_exit) return; qcom_smem_state_put(ab_ahb->smp2p_info.smem_state); } static int ath11k_ahb_setup_resources(struct ath11k_base *ab) { struct platform_device *pdev = ab->pdev; struct resource *mem_res; void __iomem *mem; if (ab->hw_params.hybrid_bus_type) return ath11k_ahb_setup_msi_resources(ab); mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); if (IS_ERR(mem)) { dev_err(&pdev->dev, "ioremap error\n"); return PTR_ERR(mem); } ab->mem = mem; ab->mem_len = resource_size(mem_res); return 0; } static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct device *dev = ab->dev; struct device_node *node; struct resource r; int ret; node = of_parse_phandle(dev->of_node, "memory-region", 0); if (!node) return -ENOENT; ret = of_address_to_resource(node, 0, &r); of_node_put(node); if (ret) { dev_err(dev, "failed to resolve msa fixed region\n"); return ret; } ab_ahb->fw.msa_paddr = r.start; ab_ahb->fw.msa_size = resource_size(&r); node = of_parse_phandle(dev->of_node, "memory-region", 1); if (!node) return -ENOENT; ret = of_address_to_resource(node, 0, &r); of_node_put(node); if (ret) { dev_err(dev, "failed to resolve ce fixed region\n"); return ret; } ab_ahb->fw.ce_paddr = r.start; ab_ahb->fw.ce_size = resource_size(&r); return 0; } static int ath11k_ahb_ce_remap(struct ath11k_base *ab) { const struct ce_remap *ce_remap = ab->hw_params.ce_remap; struct platform_device *pdev = ab->pdev; if (!ce_remap) { /* no separate CE register space */ ab->mem_ce = ab->mem; return 0; } /* ce register space is moved out of wcss unlike ipq8074 or ipq6018 * and the space is not contiguous, hence remapping the CE registers * to a new space for accessing them. */ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); if (!ab->mem_ce) { dev_err(&pdev->dev, "ce ioremap error\n"); return -ENOMEM; } return 0; } static void ath11k_ahb_ce_unmap(struct ath11k_base *ab) { if (ab->hw_params.ce_remap) iounmap(ab->mem_ce); } static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct device *host_dev = ab->dev; - struct platform_device_info info = {0}; + struct platform_device_info info = {}; struct iommu_domain *iommu_dom; struct platform_device *pdev; struct device_node *node; int ret; /* Chipsets not requiring MSA need not initialize * MSA resources, return success in such cases. 
*/ if (!ab->hw_params.fixed_fw_mem) return 0; node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); if (!node) { ab_ahb->fw.use_tz = true; return 0; } ret = ath11k_ahb_setup_msa_resources(ab); if (ret) { ath11k_err(ab, "failed to setup msa resources\n"); return ret; } info.fwnode = &node->fwnode; info.parent = host_dev; info.name = node->name; info.dma_mask = DMA_BIT_MASK(32); pdev = platform_device_register_full(&info); if (IS_ERR(pdev)) { of_node_put(node); return PTR_ERR(pdev); } ret = of_dma_configure(&pdev->dev, node, true); if (ret) { ath11k_err(ab, "dma configure fail: %d\n", ret); goto err_unregister; } ab_ahb->fw.dev = &pdev->dev; iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev); if (IS_ERR(iommu_dom)) { ath11k_err(ab, "failed to allocate iommu domain\n"); ret = PTR_ERR(iommu_dom); goto err_unregister; } ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev); if (ret) { ath11k_err(ab, "could not attach device: %d\n", ret); goto err_iommu_free; } ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); if (ret) { ath11k_err(ab, "failed to map firmware region: %d\n", ret); goto err_iommu_detach; } ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); if (ret) { ath11k_err(ab, "failed to map firmware CE region: %d\n", ret); goto err_iommu_unmap; } ab_ahb->fw.use_tz = false; ab_ahb->fw.iommu_domain = iommu_dom; of_node_put(node); return 0; err_iommu_unmap: iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); err_iommu_detach: iommu_detach_device(iommu_dom, ab_ahb->fw.dev); err_iommu_free: iommu_domain_free(iommu_dom); err_unregister: platform_device_unregister(pdev); of_node_put(node); return ret; } static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); struct iommu_domain *iommu; size_t unmapped_size; /* Chipsets not requiring MSA would have not initialized * MSA resources, return success in such cases. 
*/ if (!ab->hw_params.fixed_fw_mem) return 0; if (ab_ahb->fw.use_tz) return 0; iommu = ab_ahb->fw.iommu_domain; unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); if (unmapped_size != ab_ahb->fw.msa_size) ath11k_err(ab, "failed to unmap firmware: %zu\n", unmapped_size); unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size); if (unmapped_size != ab_ahb->fw.ce_size) ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n", unmapped_size); iommu_detach_device(iommu, ab_ahb->fw.dev); iommu_domain_free(iommu); platform_device_unregister(to_platform_device(ab_ahb->fw.dev)); return 0; } static int ath11k_ahb_probe(struct platform_device *pdev) { struct ath11k_base *ab; const struct ath11k_hif_ops *hif_ops; const struct ath11k_pci_ops *pci_ops; enum ath11k_hw_rev hw_rev; int ret; hw_rev = (uintptr_t)device_get_match_data(&pdev->dev); switch (hw_rev) { case ATH11K_HW_IPQ8074: case ATH11K_HW_IPQ6018_HW10: case ATH11K_HW_IPQ5018_HW10: hif_ops = &ath11k_ahb_hif_ops_ipq8074; pci_ops = NULL; break; case ATH11K_HW_WCN6750_HW10: hif_ops = &ath11k_ahb_hif_ops_wcn6750; pci_ops = &ath11k_ahb_pci_ops_wcn6750; break; default: dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev); return -EOPNOTSUPP; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n"); return ret; } ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb), ATH11K_BUS_AHB); if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; } ab->hif.ops = hif_ops; ab->pdev = pdev; ab->hw_rev = hw_rev; ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL; platform_set_drvdata(pdev, ab); ret = ath11k_pcic_register_pci_ops(ab, pci_ops); if (ret) { ath11k_err(ab, "failed to register PCI ops: %d\n", ret); goto err_core_free; } ret = ath11k_core_pre_init(ab); if (ret) goto err_core_free; ret = ath11k_ahb_setup_resources(ab); if (ret) goto err_core_free; ret = ath11k_ahb_ce_remap(ab); if (ret) goto err_core_free; ret = ath11k_ahb_fw_resources_init(ab); if (ret) goto err_ce_unmap; ret = ath11k_ahb_setup_smp2p_handle(ab); if (ret) goto err_fw_deinit; ret = ath11k_hal_srng_init(ab); if (ret) goto err_release_smp2p_handle; ret = ath11k_ce_alloc_pipes(ab); if (ret) { ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); goto err_hal_srng_deinit; } ath11k_ahb_init_qmi_ce_config(ab); ret = ath11k_core_get_rproc(ab); if (ret) { ath11k_err(ab, "failed to get rproc: %d\n", ret); goto err_ce_free; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); goto err_ce_free; } ret = ath11k_ahb_config_irq(ab); if (ret) { ath11k_err(ab, "failed to configure irq: %d\n", ret); goto err_ce_free; } ath11k_qmi_fwreset_from_cold_boot(ab); return 0; err_ce_free: ath11k_ce_free_pipes(ab); err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); err_release_smp2p_handle: ath11k_ahb_release_smp2p_handle(ab); err_fw_deinit: ath11k_ahb_fw_resource_deinit(ab); err_ce_unmap: ath11k_ahb_ce_unmap(ab); err_core_free: ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); return ret; } static void ath11k_ahb_remove_prepare(struct ath11k_base *ab) { unsigned long left; if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) { left = wait_for_completion_timeout(&ab->driver_recovery, ATH11K_AHB_RECOVERY_TIMEOUT); if (!left) ath11k_warn(ab, "failed to receive recovery response completion\n"); } set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); cancel_work_sync(&ab->restart_work); 
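/*
 * Illustrative sketch (hypothetical dev/paddr/size, not part of this
 * patch): ath11k_ahb_fw_resources_init() earlier in this file
 * identity-maps the MSA and CE regions with the generic IOMMU API;
 * the core pattern is:
 *
 *	struct iommu_domain *dom = iommu_paging_domain_alloc(dev);
 *
 *	if (IS_ERR(dom))
 *		return PTR_ERR(dom);
 *	ret = iommu_attach_device(dom, dev);
 *	if (ret)
 *		goto free_domain;
 *	// identity mapping: IOVA equals the region's physical address
 *	ret = iommu_map(dom, paddr, paddr, size,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 */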
cancel_work_sync(&ab->qmi.event_work); } static void ath11k_ahb_free_resources(struct ath11k_base *ab) { struct platform_device *pdev = ab->pdev; ath11k_ahb_free_irq(ab); ath11k_hal_srng_deinit(ab); ath11k_ahb_release_smp2p_handle(ab); ath11k_ahb_fw_resource_deinit(ab); ath11k_ce_free_pipes(ab); ath11k_ahb_ce_unmap(ab); ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); } static void ath11k_ahb_remove(struct platform_device *pdev) { struct ath11k_base *ab = platform_get_drvdata(pdev); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_ahb_power_down(ab, false); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); goto qmi_fail; } ath11k_ahb_remove_prepare(ab); ath11k_core_deinit(ab); qmi_fail: ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } static void ath11k_ahb_shutdown(struct platform_device *pdev) { struct ath11k_base *ab = platform_get_drvdata(pdev); /* platform shutdown() & remove() are mutually exclusive. * remove() is invoked during rmmod & shutdown() during * system reboot/shutdown. */ ath11k_ahb_remove_prepare(ab); if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) goto free_resources; ath11k_core_deinit(ab); free_resources: ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } static struct platform_driver ath11k_ahb_driver = { .driver = { .name = "ath11k", .of_match_table = ath11k_ahb_of_match, }, .probe = ath11k_ahb_probe, .remove = ath11k_ahb_remove, .shutdown = ath11k_ahb_shutdown, }; module_platform_driver(ath11k_ahb_driver); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices"); MODULE_LICENSE("Dual BSD/GPL");
diff --git a/ce.c b/ce.c
index 746038006eb4..c65fc9fb539e 100644
--- a/ce.c
+++ b/ce.c
@@ -1,1080 +1,1079 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/ +#include #include "dp_rx.h" #include "debug.h" #include "hif.h" const struct ce_attr ath11k_host_ce_config_ipq8074[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE3: host->target WMI (mac0) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 2048, .src_sz_max = 256, .dest_nentries = 0, }, /* CE5: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, }, /* CE6: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE7: host->target WMI (mac1) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE9: host->target WMI (mac2) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE10: target->host HTT */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE11: Not used */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, }; const struct ce_attr ath11k_host_ce_config_qca6390[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE3: host->target WMI (mac0) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 2048, .src_sz_max = 256, .dest_nentries = 0, }, /* CE5: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, }, /* CE6: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE7: host->target WMI (mac1) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE8: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, }; const struct ce_attr ath11k_host_ce_config_qcn9074[] = { /* CE0: host->target 
HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 2048, .dest_nentries = 0, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 32, .recv_cb = ath11k_htc_rx_completion_handler, }, /* CE3: host->target WMI (mac0) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath11k_htc_tx_completion_handler, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = 2048, .src_sz_max = 256, .dest_nentries = 0, }, /* CE5: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, }, }; static bool ath11k_ce_need_shadow_fix(int ce_id) { /* only ce4 needs shadow workaround */ if (ce_id == 4) return true; return false; } void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab) { int i; if (!ab->hw_params.supports_shadow_regs) return; for (i = 0; i < ab->hw_params.ce_count; i++) if (ath11k_ce_need_shadow_fix(i)) ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]); } static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe, struct sk_buff *skb, dma_addr_t paddr) { struct ath11k_base *ab = pipe->ab; struct ath11k_ce_ring *ring = pipe->dest_ring; struct hal_srng *srng; unsigned int write_index; unsigned int nentries_mask = ring->nentries_mask; u32 *desc; int ret; lockdep_assert_held(&ab->ce.ce_lock); write_index = ring->write_index; srng = &ab->hal.srng_list[ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) { ret = -ENOSPC; goto exit; } desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) { ret = -ENOSPC; goto exit; } ath11k_hal_ce_dst_set_desc(desc, paddr); ring->skb[write_index] = skb; write_index = CE_RING_IDX_INCR(nentries_mask, write_index); ring->write_index = write_index; pipe->rx_buf_needed--; ret = 0; exit: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return ret; } static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; dma_addr_t paddr; int ret = 0; if (!(pipe->dest_ring || pipe->status_ring)) return 0; spin_lock_bh(&ab->ce.ce_lock); while (pipe->rx_buf_needed) { skb = dev_alloc_skb(pipe->buf_sz); if (!skb) { ret = -ENOMEM; goto exit; } WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4)); paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ab->dev, paddr))) { ath11k_warn(ab, "failed to dma map ce rx buf\n"); dev_kfree_skb_any(skb); ret = -EIO; goto exit; } ATH11K_SKB_RXCB(skb)->paddr = paddr; ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr); if (ret) { ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret); dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); goto exit; } } exit: spin_unlock_bh(&ab->ce.ce_lock); return ret; } static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, struct sk_buff **skb, int *nbytes) { struct ath11k_base *ab = pipe->ab; struct hal_srng *srng; unsigned int sw_index; unsigned int nentries_mask; u32 *desc; int ret = 0; spin_lock_bh(&ab->ce.ce_lock); sw_index = 
pipe->dest_ring->sw_index; nentries_mask = pipe->dest_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); if (!desc) { ret = -EIO; goto err; } - /* Make sure descriptor is read after the head pointer. */ - dma_rmb(); - *nbytes = ath11k_hal_ce_dst_status_get_length(desc); *skb = pipe->dest_ring->skb[sw_index]; pipe->dest_ring->skb[sw_index] = NULL; sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); pipe->dest_ring->sw_index = sw_index; pipe->rx_buf_needed++; err: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return ret; } static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; struct sk_buff_head list; unsigned int nbytes, max_nbytes; int ret; __skb_queue_head_init(&list); while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) { max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); if (unlikely(max_nbytes < nbytes || nbytes == 0)) { ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; } skb_put(skb, nbytes); __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) { ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->recv_cb(ab, skb); } ret = ath11k_ce_rx_post_pipe(pipe); if (ret && ret != -ENOSPC) { ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", pipe->pipe_num, ret); mod_timer(&ab->rx_replenish_retry, jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES); } } static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct hal_srng *srng; unsigned int sw_index; unsigned int nentries_mask; struct sk_buff *skb; u32 *desc; spin_lock_bh(&ab->ce.ce_lock); sw_index = pipe->src_ring->sw_index; nentries_mask = pipe->src_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_src_reap_next(ab, srng); if (!desc) { skb = ERR_PTR(-EIO); goto err_unlock; } skb = pipe->src_ring->skb[sw_index]; pipe->src_ring->skb[sw_index] = NULL; sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); pipe->src_ring->sw_index = sw_index; err_unlock: spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return skb; } static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct sk_buff *skb; struct sk_buff_head list; __skb_queue_head_init(&list); while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) { if (!skb) continue; dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); if ((!pipe->send_cb) || ab->hw_params.credit_flow) { dev_kfree_skb_any(skb); continue; } __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) { ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n", pipe->pipe_num, skb->len); pipe->send_cb(ab, skb); } } static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id, struct hal_srng_params *ring_params) { u32 msi_data_start; u32 msi_data_count, msi_data_idx; u32 msi_irq_start; u32 addr_lo; u32 addr_hi; int ret; ret = ath11k_get_user_msi_vector(ab, "CE", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return; ath11k_get_msi_address(ab, 
&addr_lo, &addr_hi); ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx); ring_params->msi_addr = addr_lo; ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start; ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; } static int ath11k_ce_init_ring(struct ath11k_base *ab, struct ath11k_ce_ring *ce_ring, int ce_id, enum hal_ring_type type) { - struct hal_srng_params params = { 0 }; + struct hal_srng_params params = {}; int ret; params.ring_base_paddr = ce_ring->base_addr_ce_space; params.ring_base_vaddr = ce_ring->base_addr_owner_space; params.num_entries = ce_ring->nentries; if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags)) ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, ¶ms); switch (type) { case HAL_CE_SRC: if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags)) params.intr_batch_cntr_thres_entries = 1; break; case HAL_CE_DST: params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max; if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { params.intr_timer_thres_us = 1024; params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; params.low_threshold = ce_ring->nentries - 3; } break; case HAL_CE_DST_STATUS: if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) { params.intr_batch_cntr_thres_entries = 1; params.intr_timer_thres_us = 0x1000; } break; default: ath11k_warn(ab, "Invalid CE ring type %d\n", type); return -EINVAL; } /* TODO: Init other params needed by HAL to init the ring */ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, ¶ms); if (ret < 0) { ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n", ret, ce_id); return ret; } ce_ring->hal_ring_id = ret; if (ab->hw_params.supports_shadow_regs && ath11k_ce_need_shadow_fix(ce_id)) ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id], ATH11K_SHADOW_CTRL_TIMER_INTERVAL, ce_ring->hal_ring_id); return 0; } static struct ath11k_ce_ring * ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz) { struct ath11k_ce_ring *ce_ring; dma_addr_t base_addr; ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL); if (ce_ring == NULL) return ERR_PTR(-ENOMEM); ce_ring->nentries = nentries; ce_ring->nentries_mask = nentries - 1; /* Legacy platforms that do not support cache * coherent DMA are unsupported */ ce_ring->base_addr_owner_space_unaligned = dma_alloc_coherent(ab->dev, nentries * desc_sz + CE_DESC_RING_ALIGN, &base_addr, GFP_KERNEL); if (!ce_ring->base_addr_owner_space_unaligned) { kfree(ce_ring); return ERR_PTR(-ENOMEM); } ce_ring->base_addr_ce_space_unaligned = base_addr; ce_ring->base_addr_owner_space = PTR_ALIGN( ce_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); ce_ring->base_addr_ce_space = ALIGN( ce_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); return ce_ring; } static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; struct ath11k_ce_ring *ring; int nentries; int desc_sz; pipe->attr_flags = attr->flags; if (attr->src_nentries) { pipe->send_cb = attr->send_cb; nentries = roundup_pow_of_two(attr->src_nentries); desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->src_ring = ring; } if (attr->dest_nentries) { pipe->recv_cb = attr->recv_cb; nentries = roundup_pow_of_two(attr->dest_nentries); desc_sz = 
ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->dest_ring = ring; desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); if (IS_ERR(ring)) return PTR_ERR(ring); pipe->status_ring = ring; } return 0; } void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id]; if (attr->src_nentries) ath11k_ce_tx_process_cb(pipe); if (pipe->recv_cb) ath11k_ce_recv_process_cb(pipe); } void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id]; if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries) ath11k_ce_tx_process_cb(pipe); } EXPORT_SYMBOL(ath11k_ce_per_engine_service); int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id, u16 transfer_id) { struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; struct hal_srng *srng; u32 *desc; unsigned int write_index, sw_index; unsigned int nentries_mask; int ret = 0; u8 byte_swap_data = 0; int num_used; /* Check if some entries could be regained by handling tx completion if * the CE has interrupts disabled and the used entries is more than the * defined usage threshold. */ if (pipe->attr_flags & CE_ATTR_DIS_INTR) { spin_lock_bh(&ab->ce.ce_lock); write_index = pipe->src_ring->write_index; sw_index = pipe->src_ring->sw_index; if (write_index >= sw_index) num_used = write_index - sw_index; else num_used = pipe->src_ring->nentries - sw_index + write_index; spin_unlock_bh(&ab->ce.ce_lock); if (num_used > ATH11K_CE_USAGE_THRESHOLD) ath11k_ce_poll_send_completed(ab, pipe->pipe_num); } if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) return -ESHUTDOWN; spin_lock_bh(&ab->ce.ce_lock); write_index = pipe->src_ring->write_index; nentries_mask = pipe->src_ring->nentries_mask; srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) { ath11k_hal_srng_access_end(ab, srng); ret = -ENOBUFS; goto err_unlock; } desc = ath11k_hal_srng_src_get_next_reaped(ab, srng); if (!desc) { ath11k_hal_srng_access_end(ab, srng); ret = -ENOBUFS; goto err_unlock; } if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA) byte_swap_data = 1; ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr, skb->len, transfer_id, byte_swap_data); pipe->src_ring->skb[write_index] = skb; pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask, write_index); ath11k_hal_srng_access_end(ab, srng); if (ath11k_ce_need_shadow_fix(pipe_id)) ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]); spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return 0; err_unlock: spin_unlock_bh(&srng->lock); spin_unlock_bh(&ab->ce.ce_lock); return ret; } static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe) { struct ath11k_base *ab = pipe->ab; struct ath11k_ce_ring *ring = pipe->dest_ring; struct sk_buff *skb; int i; if (!(ring && pipe->buf_sz)) return; for (i = 0; i < ring->nentries; i++) { skb = ring->skb[i]; if (!skb) continue; ring->skb[i] = NULL; dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } } static void 
ath11k_ce_shadow_config(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ab->hw_params.host_ce_config[i].src_nentries) ath11k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i); if (ab->hw_params.host_ce_config[i].dest_nentries) { ath11k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i); ath11k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i); } } } void ath11k_ce_get_shadow_config(struct ath11k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len) { if (!ab->hw_params.supports_shadow_regs) return; ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); /* shadow is already configured */ if (*shadow_cfg_len) return; /* shadow isn't configured yet, configure now. * non-CE srngs are configured firstly, then * all CE srngs. */ ath11k_hal_srng_shadow_config(ab); ath11k_ce_shadow_config(ab); /* get the shadow configuration */ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len); } EXPORT_SYMBOL(ath11k_ce_get_shadow_config); void ath11k_ce_cleanup_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int pipe_num; ath11k_ce_stop_shadow_timers(ab); for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) { pipe = &ab->ce.ce_pipe[pipe_num]; ath11k_ce_rx_pipe_cleanup(pipe); /* Cleanup any src CE's which have interrupts disabled */ ath11k_ce_poll_send_completed(ab, pipe_num); /* NOTE: Should we also clean up tx buffer in all pipes? */ } } EXPORT_SYMBOL(ath11k_ce_cleanup_pipes); void ath11k_ce_rx_post_buf(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; ret = ath11k_ce_rx_post_pipe(pipe); if (ret) { if (ret == -ENOSPC) continue; ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n", i, ret); mod_timer(&ab->rx_replenish_retry, jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES); return; } } } EXPORT_SYMBOL(ath11k_ce_rx_post_buf); void ath11k_ce_rx_replenish_retry(struct timer_list *t) { struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry); ath11k_ce_rx_post_buf(ab); } int ath11k_ce_init_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; if (pipe->src_ring) { ret = ath11k_ce_init_ring(ab, pipe->src_ring, i, HAL_CE_SRC); if (ret) { ath11k_warn(ab, "failed to init src ring: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->src_ring->write_index = 0; pipe->src_ring->sw_index = 0; } if (pipe->dest_ring) { ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i, HAL_CE_DST); if (ret) { ath11k_warn(ab, "failed to init dest ring: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->rx_buf_needed = pipe->dest_ring->nentries ? 
pipe->dest_ring->nentries - 2 : 0; pipe->dest_ring->write_index = 0; pipe->dest_ring->sw_index = 0; } if (pipe->status_ring) { ret = ath11k_ce_init_ring(ab, pipe->status_ring, i, HAL_CE_DST_STATUS); if (ret) { ath11k_warn(ab, "failed to init dest status ring: %d\n", ret); /* Should we clear any partial init */ return ret; } pipe->status_ring->write_index = 0; pipe->status_ring->sw_index = 0; } } return 0; } void ath11k_ce_free_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; struct ath11k_ce_ring *ce_ring; int desc_sz; int i; for (i = 0; i < ab->hw_params.ce_count; i++) { pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_need_shadow_fix(i)) ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]); if (pipe->src_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC); ce_ring = pipe->src_ring; dma_free_coherent(ab->dev, pipe->src_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->src_ring); pipe->src_ring = NULL; } if (pipe->dest_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST); ce_ring = pipe->dest_ring; dma_free_coherent(ab->dev, pipe->dest_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->dest_ring); pipe->dest_ring = NULL; } if (pipe->status_ring) { desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS); ce_ring = pipe->status_ring; dma_free_coherent(ab->dev, pipe->status_ring->nentries * desc_sz + CE_DESC_RING_ALIGN, ce_ring->base_addr_owner_space_unaligned, ce_ring->base_addr_ce_space_unaligned); kfree(pipe->status_ring); pipe->status_ring = NULL; } } } EXPORT_SYMBOL(ath11k_ce_free_pipes); int ath11k_ce_alloc_pipes(struct ath11k_base *ab) { struct ath11k_ce_pipe *pipe; int i; int ret; const struct ce_attr *attr; spin_lock_init(&ab->ce.ce_lock); for (i = 0; i < ab->hw_params.ce_count; i++) { attr = &ab->hw_params.host_ce_config[i]; pipe = &ab->ce.ce_pipe[i]; pipe->pipe_num = i; pipe->ab = ab; pipe->buf_sz = attr->src_sz_max; ret = ath11k_ce_alloc_pipe(ab, i); if (ret) { /* Free any partial successful allocation */ ath11k_ce_free_pipes(ab); return ret; } } return 0; } EXPORT_SYMBOL(ath11k_ce_alloc_pipes); /* For Big Endian Host, Copy Engine byte_swap is enabled * When Copy Engine does byte_swap, need to byte swap again for the * Host to get/put buffer content in the correct byte order */ void ath11k_ce_byte_swap(void *mem, u32 len) { int i; if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) { if (!mem) return; for (i = 0; i < (len / 4); i++) { *(u32 *)mem = swab32(*(u32 *)mem); mem += 4; } } } int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id) { if (ce_id >= ab->hw_params.ce_count) return -EINVAL; return ab->hw_params.host_ce_config[ce_id].flags; } EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
diff --git a/core.c b/core.c
index 22a101136135..d49353b6b2e7 100644
--- a/core.c
+++ b/core.c
@@ -1,2670 +1,2677 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include
#include <linux/module.h> #include <linux/slab.h> #include <linux/remoteproc.h> #include <linux/firmware.h> #include <linux/of.h> #include "core.h" #include "dp_tx.h" #include "dp_rx.h" #include "debug.h" #include "hif.h" #include "wow.h" #include "fw.h" unsigned int ath11k_debug_mask; EXPORT_SYMBOL(ath11k_debug_mask); module_param_named(debug_mask, ath11k_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); static unsigned int ath11k_crypto_mode; module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644); MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software"); /* frame mode values are mapped as per enum ath11k_hw_txrx_mode */ unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI; module_param_named(frame_mode, ath11k_frame_mode, uint, 0644); MODULE_PARM_DESC(frame_mode, "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)"); bool ath11k_ftm_mode; module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444); MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode"); static const struct ath11k_hw_params ath11k_hw_params[] = { { .hw_rev = ATH11K_HW_IPQ8074, .name = "ipq8074 hw2.0", .fw = { .dir = "IPQ8074/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &ipq8074_ops, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074, .svc_to_ce_map_len = 21, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 2, /* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes.
* so added pad size as 2 bytes to compensate the BIN size */ .fft_pad_sz = 2, .summary_pad_sz = 0, .fft_hdr_len = 16, .max_fft_bins = 512, .fragment_160mhz = true, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_dual_stations = false, .pdev_suspend = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, .name = "ipq6018 hw1.0", .fw = { .dir = "IPQ6018/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 2, .bdf_addr = 0x4ABC0000, .hw_ops = &ipq6018_ops, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .internal_sleep_clock = false, .regs = &ipq8074_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .host_ce_config = ath11k_host_ce_config_ipq8074, .ce_count = 12, .target_ce_config = ath11k_target_ce_config_wlan_ipq8074, .target_ce_count = 11, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018, .svc_to_ce_map_len = 19, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 4, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 16, .max_fft_bins = 512, .fragment_160mhz = true, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = false, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = 
"qca6390 hw2.0", .hw_rev = ATH11K_HW_QCA6390_HW20, .fw = { .dir = "QCA6390/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &qca6390_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &qca6390_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = NULL, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0171ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "qcn9074 hw1.0", .hw_rev = ATH11K_HW_QCN9074_HW10, .fw = { .dir = "QCN9074/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 1, .single_pdev_only = false, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074, .hw_ops = &qcn9074_ops, .ring_mask = &ath11k_hw_ring_mask_qcn9074, .internal_sleep_clock = false, .regs = &qcn9074_regs, .host_ce_config = ath11k_host_ce_config_qcn9074, .ce_count = 6, .target_ce_config = ath11k_target_ce_config_wlan_qcn9074, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074, .svc_to_ce_map_len = 18, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .spectral = { .fft_sz = 2, .fft_pad_sz = 0, .summary_pad_sz = 16, .fft_hdr_len = 24, .max_fft_bins = 1024, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = true, .full_monitor_mode = true, .supports_shadow_regs = false, .idle_ps = false, .supports_sta_ps = false, .coldboot_cal_mm = false, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fw_mem_mode = 2, .num_vdevs = 8, .num_peers = 128, 
.supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .supports_regdb = false, .fix_l1ss = true, .credit_flow = false, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_ipq8074, .supports_dynamic_smps_6ghz = true, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = "wcn6855 hw2.0", .hw_rev = ATH11K_HW_WCN6855_HW20, .fw = { .dir = "WCN6855/hw2.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "wcn6855 hw2.1", .hw_rev = ATH11K_HW_WCN6855_HW21, .fw = { .dir = "WCN6855/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = 
ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, .pdev_suspend = false, }, { .name = "wcn6750 hw1.0", .hw_rev = ATH11K_HW_WCN6750_HW10, .fw = { .dir = "WCN6750/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 1, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6750_ops, .ring_mask = &ath11k_hw_ring_mask_wcn6750, .internal_sleep_clock = false, .regs = &wcn6750_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 3, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX, .hal_params = &ath11k_hw_hal_params_wcn6750, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = false, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support 
= false, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = true, .hybrid_bus_type = true, .fixed_fw_mem = true, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = {}, .tcl_ring_retry = false, .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750, .smp2p_wow_exit = true, .support_fw_mac_sequence = true, .support_dual_stations = false, .pdev_suspend = true, }, { .hw_rev = ATH11K_HW_IPQ5018_HW10, .name = "ipq5018 hw1.0", .fw = { .dir = "IPQ5018/hw1.0", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = MAX_RADIOS_5018, .bdf_addr = 0x4BA00000, /* hal_desc_sz and hw ops are similar to qcn9074 */ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074, .ring_mask = &ath11k_hw_ring_mask_ipq8074, .credit_flow = false, .max_tx_ring = 1, .spectral = { .fft_sz = 2, .fft_pad_sz = 0, .summary_pad_sz = 16, .fft_hdr_len = 24, .max_fft_bins = 1024, }, .internal_sleep_clock = false, .regs = &ipq5018_regs, .hw_ops = &ipq5018_ops, .host_ce_config = ath11k_host_ce_config_qcn9074, .ce_count = CE_CNT_5018, .target_ce_config = ath11k_target_ce_config_wlan_ipq5018, .target_ce_count = TARGET_CE_CNT_5018, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018, .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018, .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018, .ce_remap = &ath11k_ce_remap_ipq5018, .rxdma1_enable = true, .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT), .supports_monitor = false, .supports_sta_ps = false, .supports_shadow_regs = false, .fw_mem_mode = 0, .num_vdevs = 16 + 1, .num_peers = 512, .supports_regdb = false, .idle_ps = false, .supports_suspend = false, .hal_params = &ath11k_hw_hal_params_ipq8074, .single_pdev_only = false, .coldboot_cal_mm = true, .coldboot_cal_ftm = true, .cbcal_restart_fw = true, .fix_l1ss = true, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = true, .supports_rssi_stats = false, .fw_wmi_diag_event = false, .current_cc_support = false, .dbr_debug_support = true, .global_reset = false, .bios_sar_capa = NULL, .m3_fw_support = false, .fixed_bdf_addr = true, .fixed_mem_region = true, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = false, .supports_multi_bssid = false, .sram_dump = {}, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = false, .support_dual_stations = false, .pdev_suspend = false, }, { .name = "qca2066 hw2.1", .hw_rev = ATH11K_HW_QCA2066_HW21, .fw = { .dir = "QCA2066/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, 
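/* all-zero spectral parameters, matching the qca6390/wcn6855 entries above; no spectral scan configuration for this family */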
.max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .full_monitor_mode = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = true, .support_dual_stations = true, }, { .name = "qca6698aq hw2.1", .hw_rev = ATH11K_HW_QCA6698AQ_HW21, .fw = { .dir = "QCA6698AQ/hw2.1", .board_size = 256 * 1024, .cal_offset = 128 * 1024, }, .max_radios = 3, .bdf_addr = 0x4B0C0000, .hw_ops = &wcn6855_ops, .ring_mask = &ath11k_hw_ring_mask_qca6390, .internal_sleep_clock = true, .regs = &wcn6855_regs, .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, .host_ce_config = ath11k_host_ce_config_qca6390, .ce_count = 9, .target_ce_config = ath11k_target_ce_config_wlan_qca6390, .target_ce_count = 9, .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .single_pdev_only = true, .rxdma1_enable = false, .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, .spectral = { .fft_sz = 0, .fft_pad_sz = 0, .summary_pad_sz = 0, .fft_hdr_len = 0, .max_fft_bins = 0, .fragment_160mhz = false, }, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO), .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, .supports_sta_ps = true, .coldboot_cal_mm = false, .coldboot_cal_ftm = false, .cbcal_restart_fw = false, .fw_mem_mode = 0, .num_vdevs = 2 + 1, .num_peers = 512, .supports_suspend = true, .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), .supports_regdb = true, .fix_l1ss = false, .credit_flow = true, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .hal_params = &ath11k_hw_hal_params_qca6390, .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = true, .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, .m3_fw_support = true, .fixed_bdf_addr = false, .fixed_mem_region = false, .static_window_map = false, .hybrid_bus_type = false, .fixed_fw_mem = false, .support_off_channel_tx = true, .supports_multi_bssid = true, .sram_dump = { .start = 0x01400000, .end = 0x0177ffff, }, .tcl_ring_retry = true, .tx_ring_size = DP_TCL_DATA_RING_SIZE, .smp2p_wow_exit = false, .support_fw_mac_sequence = 
true, .support_dual_stations = true, .pdev_suspend = false, }, }; static const struct dmi_system_id ath11k_pm_quirk_table[] = { { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), }, }, {} }; void ath11k_fw_stats_pdevs_free(struct list_head *head) { struct ath11k_fw_stats_pdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_vdevs_free(struct list_head *head) { struct ath11k_fw_stats_vdev *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_bcn_free(struct list_head *head) { struct ath11k_fw_stats_bcn *i, *tmp; list_for_each_entry_safe(i, tmp, head, list) { list_del(&i->list); kfree(i); } } void ath11k_fw_stats_init(struct ath11k *ar) { INIT_LIST_HEAD(&ar->fw_stats.pdevs); INIT_LIST_HEAD(&ar->fw_stats.vdevs); INIT_LIST_HEAD(&ar->fw_stats.bcn); init_completion(&ar->fw_stats_complete); init_completion(&ar->fw_stats_done); } void ath11k_fw_stats_free(struct ath11k_fw_stats *stats) { ath11k_fw_stats_pdevs_free(&stats->pdevs); ath11k_fw_stats_vdevs_free(&stats->vdevs); ath11k_fw_stats_bcn_free(&stats->bcn); } bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab) { if (!ath11k_cold_boot_cal) return false; if (ath11k_ftm_mode) return ab->hw_params.coldboot_cal_ftm; else return ab->hw_params.coldboot_cal_mm; } /* Check if we need to continue with suspend/resume operation. * Return: * a negative value: error happens and don't continue. * 0: no error but don't continue. * positive value: no error and do continue. */ static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab) { struct ath11k *ar; if (!ab->hw_params.supports_suspend) return -EOPNOTSUPP; /* so far single_pdev_only chips have supports_suspend as true * so pass 0 as a dummy pdev_id here. */ ar = ab->pdevs[0].ar; if (!ar || ar->state != ATH11K_STATE_OFF) return 0; return 1; } static int ath11k_core_suspend_wow(struct ath11k_base *ab) { int ret; ret = ath11k_dp_rx_pktlog_stop(ab, true); if (ret) { ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", ret); return ret; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev. 
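* (guaranteed by the supports_suspend check in ath11k_core_continue_suspend_resume() above, which runs before any of these suspend/resume helpers)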
*/ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar); if (ret) { ath11k_warn(ab, "failed to wait tx complete: %d\n", ret); return ret; } ret = ath11k_wow_enable(ab); if (ret) { ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); return ret; } ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", ret); return ret; } ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ret = ath11k_hif_suspend(ab); if (ret) { ath11k_warn(ab, "failed to suspend hif: %d\n", ret); return ret; } return 0; } static int ath11k_core_suspend_default(struct ath11k_base *ab) { int ret; ret = ath11k_dp_rx_pktlog_stop(ab, true); if (ret) { ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", ret); return ret; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev. */ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar); if (ret) { ath11k_warn(ab, "failed to wait tx complete: %d\n", ret); return ret; } ret = ath11k_dp_rx_pktlog_stop(ab, false); if (ret) { ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", ret); return ret; } ath11k_ce_stop_shadow_timers(ab); ath11k_dp_stop_shadow_timers(ab); /* The PM framework skips the suspend_late/resume_early callbacks * if other devices report errors in their suspend callbacks. * However, ath11k_core_resume() would still be called, because we * return success here and the kernel therefore puts us on the * dpm_suspended_list. Since we won't go through a power down/up cycle, * there is no chance to call complete(&ab->restart_completed) in * ath11k_core_restart(), which would make ath11k_core_resume() time out. * So call it here to avoid this issue. This is also safe when no error * happens and suspend_late/resume_early do get called, because the * completion is reinitialized in ath11k_core_resume_early(). */ complete(&ab->restart_completed); return 0; } int ath11k_core_suspend(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return ath11k_core_suspend_wow(ab); return ath11k_core_suspend_default(ab); } EXPORT_SYMBOL(ath11k_core_suspend); int ath11k_core_suspend_late(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return 0; ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ath11k_hif_power_down(ab, true); return 0; } EXPORT_SYMBOL(ath11k_core_suspend_late); int ath11k_core_resume_early(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return 0; reinit_completion(&ab->restart_completed); ret = ath11k_hif_power_up(ab); if (ret) ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret); return ret; } EXPORT_SYMBOL(ath11k_core_resume_early); static int ath11k_core_resume_default(struct ath11k_base *ab) { struct ath11k *ar; long time_left; int ret; time_left = wait_for_completion_timeout(&ab->restart_completed, ATH11K_RESET_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ab, "timeout while waiting for restart complete\n"); return -ETIMEDOUT; } /* So far only single_pdev_only devices can reach here, * so it is valid to handle the first, and the only, pdev.
*/ ar = ab->pdevs[0].ar; if (ab->hw_params.current_cc_support && ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { ret = ath11k_reg_set_cc(ar); if (ret) { ath11k_warn(ab, "failed to set country code during resume: %d\n", ret); return ret; } } ret = ath11k_dp_rx_pktlog_start(ab); if (ret) ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); return ret; } static int ath11k_core_resume_wow(struct ath11k_base *ab) { int ret; ret = ath11k_hif_resume(ab); if (ret) { ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); return ret; } ath11k_hif_ce_irq_enable(ab); ath11k_hif_irq_enable(ab); ret = ath11k_dp_rx_pktlog_start(ab); if (ret) { ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", ret); return ret; } ret = ath11k_wow_wakeup(ab); if (ret) { ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret); return ret; } return 0; } int ath11k_core_resume(struct ath11k_base *ab) { int ret; ret = ath11k_core_continue_suspend_resume(ab); if (ret <= 0) return ret; if (ab->actual_pm_policy == ATH11K_PM_WOW) return ath11k_core_resume_wow(ab); return ath11k_core_resume_default(ab); } EXPORT_SYMBOL(ath11k_core_resume); static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data) { struct ath11k_base *ab = data; const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC; struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr; ssize_t copied; size_t len; int i; if (ab->qmi.target.bdf_ext[0] != '\0') return; if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE) return; if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "wrong smbios bdf ext type length (%d).\n", hdr->length); return; } spin_lock_bh(&ab->base_lock); switch (smbios->country_code_flag) { case ATH11K_SMBIOS_CC_ISO: ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff; ab->new_alpha2[1] = smbios->cc_code & 0xff; ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios cc_code %c%c\n", ab->new_alpha2[0], ab->new_alpha2[1]); break; case ATH11K_SMBIOS_CC_WW: ab->new_alpha2[0] = '0'; ab->new_alpha2[1] = '0'; ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios worldwide regdomain\n"); break; default: ath11k_dbg(ab, ATH11K_DBG_BOOT, "ignore smbios country code setting %d\n", smbios->country_code_flag); break; } spin_unlock_bh(&ab->base_lock); if (!smbios->bdf_enabled) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n"); return; } /* Only one string exists (per spec) */ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant magic does not match.\n"); return; } len = min_t(size_t, strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext)); for (i = 0; i < len; i++) { if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name contains non ascii chars.\n"); return; } } /* Copy extension name without magic prefix */ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic), sizeof(ab->qmi.target.bdf_ext)); if (copied < 0) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate\n"); return; } ath11k_dbg(ab, ATH11K_DBG_BOOT, "found and validated bdf variant smbios_type 0x%x bdf %s\n", ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext); } int ath11k_core_check_smbios(struct ath11k_base *ab) { ab->qmi.target.bdf_ext[0] = '\0'; dmi_walk(ath11k_core_check_cc_code_bdfext, ab); if (ab->qmi.target.bdf_ext[0] == '\0') return -ENODATA; return 0; } int ath11k_core_check_dt(struct ath11k_base *ab) { size_t max_len = 
sizeof(ab->qmi.target.bdf_ext); const char *variant = NULL; struct device_node *node; node = ab->dev->of_node; if (!node) return -ENOENT; of_property_read_string(node, "qcom,calibration-variant", &variant); if (!variant) of_property_read_string(node, "qcom,ath11k-calibration-variant", &variant); if (!variant) return -ENODATA; if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0) ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", variant); return 0; } enum ath11k_bdf_name_type { ATH11K_BDF_NAME_FULL, ATH11K_BDF_NAME_BUS_NAME, ATH11K_BDF_NAME_CHIP_ID, }; static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len, bool with_variant, enum ath11k_bdf_name_type name_type) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ - char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; + char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = {}; if (with_variant && ab->qmi.target.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ab->qmi.target.bdf_ext); switch (ab->id.bdf_search) { case ATH11K_BDF_SEARCH_BUS_AND_BOARD: switch (name_type) { case ATH11K_BDF_NAME_FULL: scnprintf(name, name_len, "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), ab->id.vendor, ab->id.device, ab->id.subsystem_vendor, ab->id.subsystem_device, ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); break; case ATH11K_BDF_NAME_BUS_NAME: scnprintf(name, name_len, "bus=%s", ath11k_bus_str(ab->hif.bus)); break; case ATH11K_BDF_NAME_CHIP_ID: scnprintf(name, name_len, "bus=%s,qmi-chip-id=%d", ath11k_bus_str(ab->hif.bus), ab->qmi.target.chip_id); break; } break; default: scnprintf(name, name_len, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), ab->qmi.target.chip_id, ab->qmi.target.board_id, variant); break; } ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board name '%s'\n", name); return 0; } static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, true, ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_FULL); } static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_BUS_NAME); } static int ath11k_core_create_chip_id_board_name(struct ath11k_base *ab, char *name, size_t name_len) { return __ath11k_core_create_board_name(ab, name, name_len, false, ATH11K_BDF_NAME_CHIP_ID); } const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *file) { const struct firmware *fw; char path[100]; int ret; if (file == NULL) return ERR_PTR(-ENOENT); ath11k_core_create_firmware_path(ab, file, path, sizeof(path)); ret = firmware_request_nowarn(&fw, path, ab->dev); if (ret) return ERR_PTR(ret); ath11k_dbg(ab, ATH11K_DBG_BOOT, "firmware request %s size %zu\n", path, fw->size); return fw; } void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { if (!IS_ERR(bd->fw)) release_firmware(bd->fw); memset(bd, 0, sizeof(*bd)); } static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, struct ath11k_board_data *bd, const void *buf, size_t buf_len, const char *boardname, int ie_id, 
int name_id, int data_id) { const struct ath11k_fw_ie *hdr; bool name_match_found; int ret, board_ie_id; size_t board_ie_len; const void *board_ie_data; name_match_found = false; /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */ while (buf_len > sizeof(struct ath11k_fw_ie)) { hdr = buf; board_ie_id = le32_to_cpu(hdr->id); board_ie_len = le32_to_cpu(hdr->len); board_ie_data = hdr->data; buf_len -= sizeof(*hdr); buf += sizeof(*hdr); if (buf_len < ALIGN(board_ie_len, 4)) { ath11k_err(ab, "invalid %s length: %zu < %zu\n", ath11k_bd_ie_type_str(ie_id), buf_len, ALIGN(board_ie_len, 4)); ret = -EINVAL; goto out; } if (board_ie_id == name_id) { ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "", board_ie_data, board_ie_len); if (board_ie_len != strlen(boardname)) goto next; ret = memcmp(board_ie_data, boardname, strlen(boardname)); if (ret) goto next; name_match_found = true; ath11k_dbg(ab, ATH11K_DBG_BOOT, "found match %s for name '%s'", ath11k_bd_ie_type_str(ie_id), boardname); } else if (board_ie_id == data_id) { if (!name_match_found) /* no match found */ goto next; ath11k_dbg(ab, ATH11K_DBG_BOOT, "found %s for '%s'", ath11k_bd_ie_type_str(ie_id), boardname); bd->data = board_ie_data; bd->len = board_ie_len; ret = 0; goto out; } else { ath11k_warn(ab, "unknown %s id found: %d\n", ath11k_bd_ie_type_str(ie_id), board_ie_id); } next: /* jump over the padding */ board_ie_len = ALIGN(board_ie_len, 4); buf_len -= board_ie_len; buf += board_ie_len; } /* no match found */ ret = -ENOENT; out: return ret; } static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, struct ath11k_board_data *bd, const char *boardname, int ie_id_match, int name_id, int data_id) { size_t len, magic_len; const u8 *data; char *filename, filepath[100]; size_t ie_len; struct ath11k_fw_ie *hdr; int ret, ie_id; filename = ATH11K_BOARD_API2_FILE; if (!bd->fw) bd->fw = ath11k_core_firmware_request(ab, filename); if (IS_ERR(bd->fw)) return PTR_ERR(bd->fw); data = bd->fw->data; len = bd->fw->size; ath11k_core_create_firmware_path(ab, filename, filepath, sizeof(filepath)); /* magic has extra null byte padded */ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1; if (len < magic_len) { ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n", filepath, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) { ath11k_err(ab, "found invalid board magic\n"); ret = -EINVAL; goto err; } /* magic is padded to 4 bytes */ magic_len = ALIGN(magic_len, 4); if (len < magic_len) { ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n", filepath, len); ret = -EINVAL; goto err; } data += magic_len; len -= magic_len; while (len > sizeof(struct ath11k_fw_ie)) { hdr = (struct ath11k_fw_ie *)data; ie_id = le32_to_cpu(hdr->id); ie_len = le32_to_cpu(hdr->len); len -= sizeof(*hdr); data = hdr->data; if (len < ALIGN(ie_len, 4)) { ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n", ie_id, ie_len, len); ret = -EINVAL; goto err; } if (ie_id == ie_id_match) { ret = ath11k_core_parse_bd_ie_board(ab, bd, data, ie_len, boardname, ie_id_match, name_id, data_id); if (ret == -ENOENT) /* no match found, continue */ goto next; else if (ret) /* there was an error, bail out */ goto err; /* either found or error, so stop searching */ goto out; } next: /* jump over the padding */ ie_len = ALIGN(ie_len, 4); len -= ie_len; data += ie_len; } out: if (!bd->data || !bd->len) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s for %s from %s\n", 
ath11k_bd_ie_type_str(ie_id_match), boardname, filepath); ret = -ENODATA; goto err; } return 0; err: ath11k_core_free_bdf(ab, bd); return ret; } int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, struct ath11k_board_data *bd, const char *name) { bd->fw = ath11k_core_firmware_request(ab, name); if (IS_ERR(bd->fw)) return PTR_ERR(bd->fw); bd->data = bd->fw->data; bd->len = bd->fw->size; return 0; } #define BOARD_NAME_SIZE 200 int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) { char *boardname = NULL, *fallback_boardname = NULL, *chip_id_boardname = NULL; char *filename, filepath[100]; int bd_api; int ret = 0; filename = ATH11K_BOARD_API2_FILE; boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create board name: %d", ret); goto exit; } bd_api = 2; ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; fallback_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!fallback_boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create fallback board name: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; chip_id_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL); if (!chip_id_boardname) { ret = -ENOMEM; goto exit; } ret = ath11k_core_create_chip_id_board_name(ab, chip_id_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_err(ab, "failed to create chip id board name: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, chip_id_boardname, ATH11K_BD_IE_BOARD, ATH11K_BD_IE_BOARD_NAME, ATH11K_BD_IE_BOARD_DATA); if (!ret) goto exit; bd_api = 1; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); if (ret) { ath11k_core_create_firmware_path(ab, filename, filepath, sizeof(filepath)); ath11k_err(ab, "failed to fetch board data for %s from %s\n", boardname, filepath); if (memcmp(boardname, fallback_boardname, strlen(boardname))) ath11k_err(ab, "failed to fetch board data for %s from %s\n", fallback_boardname, filepath); ath11k_err(ab, "failed to fetch board data for %s from %s\n", chip_id_boardname, filepath); ath11k_err(ab, "failed to fetch board.bin from %s\n", ab->hw_params.fw.dir); } exit: kfree(boardname); kfree(fallback_boardname); kfree(chip_id_boardname); if (!ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", bd_api); return ret; } int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) { char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE]; int ret; ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); if (ret) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to create board name for regdb: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname, ATH11K_BD_IE_REGDB, ATH11K_BD_IE_REGDB_NAME, ATH11K_BD_IE_REGDB_DATA); if (!ret) goto exit; ret = ath11k_core_create_bus_type_board_name(ab, default_boardname, BOARD_NAME_SIZE); if (ret) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to create default board name for regdb: %d", ret); goto exit; } ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname, ATH11K_BD_IE_REGDB, ATH11K_BD_IE_REGDB_NAME, 
ATH11K_BD_IE_REGDB_DATA); if (!ret) goto exit; ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME); if (ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n", ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir); exit: if (!ret) ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n"); return ret; } static void ath11k_core_stop(struct ath11k_base *ab) { if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) ath11k_qmi_firmware_stop(ab); ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); /* De-Init of components as needed */ } static int ath11k_core_soc_create(struct ath11k_base *ab) { int ret; if (ath11k_ftm_mode) { ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM; ath11k_info(ab, "Booting in factory test mode\n"); } ret = ath11k_qmi_init_service(ab); if (ret) { ath11k_err(ab, "failed to initialize qmi: %d\n", ret); return ret; } ret = ath11k_debugfs_soc_create(ab); if (ret) { ath11k_err(ab, "failed to create ath11k debugfs\n"); goto err_qmi_deinit; } ret = ath11k_hif_power_up(ab); if (ret) { ath11k_err(ab, "failed to power up: %d\n", ret); goto err_debugfs_reg; } return 0; err_debugfs_reg: ath11k_debugfs_soc_destroy(ab); err_qmi_deinit: ath11k_qmi_deinit_service(ab); return ret; } static void ath11k_core_soc_destroy(struct ath11k_base *ab) { ath11k_debugfs_soc_destroy(ab); ath11k_dp_free(ab); ath11k_reg_free(ab); ath11k_qmi_deinit_service(ab); } static int ath11k_core_pdev_create(struct ath11k_base *ab) { int ret; ret = ath11k_debugfs_pdev_create(ab); if (ret) { ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret); return ret; } ret = ath11k_dp_pdev_alloc(ab); if (ret) { ath11k_err(ab, "failed to attach DP pdev: %d\n", ret); goto err_pdev_debug; } ret = ath11k_mac_register(ab); if (ret) { ath11k_err(ab, "failed to register the radio with mac80211: %d\n", ret); goto err_dp_pdev_free; } ret = ath11k_thermal_register(ab); if (ret) { ath11k_err(ab, "could not register thermal device: %d\n", ret); goto err_mac_unregister; } ret = ath11k_spectral_init(ab); if (ret) { ath11k_err(ab, "failed to init spectral: %d\n", ret); goto err_thermal_unregister; } return 0; err_thermal_unregister: ath11k_thermal_unregister(ab); err_mac_unregister: ath11k_mac_unregister(ab); err_dp_pdev_free: ath11k_dp_pdev_free(ab); err_pdev_debug: ath11k_debugfs_pdev_destroy(ab); return ret; } static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; unsigned long time_left; int ret; int i; if (!ab->hw_params.pdev_suspend) return; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; reinit_completion(&ab->htc_suspend); ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR, pdev->pdev_id); if (ret) { ath11k_warn(ab, "could not suspend target: %d\n", ret); /* pointless to try other pdevs */ return; } time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); if (!time_left) { ath11k_warn(ab, "suspend timed out - target pause event never came\n"); /* pointless to try other pdevs */ return; } } } static void ath11k_core_pdev_destroy(struct ath11k_base *ab) { ath11k_spectral_deinit(ab); ath11k_thermal_unregister(ab); ath11k_mac_unregister(ab); ath11k_core_pdev_suspend_target(ab); ath11k_hif_irq_disable(ab); ath11k_dp_pdev_free(ab); ath11k_debugfs_pdev_destroy(ab); } static int ath11k_core_start(struct ath11k_base *ab) { int ret; ret = ath11k_wmi_attach(ab); if (ret) { ath11k_err(ab, "failed to attach wmi: %d\n", ret); return ret; } ret = ath11k_htc_init(ab); if (ret) { ath11k_err(ab,
"failed to init htc: %d\n", ret); goto err_wmi_detach; } ret = ath11k_hif_start(ab); if (ret) { ath11k_err(ab, "failed to start HIF: %d\n", ret); goto err_wmi_detach; } ret = ath11k_htc_wait_target(&ab->htc); if (ret) { ath11k_err(ab, "failed to connect to HTC: %d\n", ret); goto err_hif_stop; } ret = ath11k_dp_htt_connect(&ab->dp); if (ret) { ath11k_err(ab, "failed to connect to HTT: %d\n", ret); goto err_hif_stop; } ret = ath11k_wmi_connect(ab); if (ret) { ath11k_err(ab, "failed to connect wmi: %d\n", ret); goto err_hif_stop; } ret = ath11k_htc_start(&ab->htc); if (ret) { ath11k_err(ab, "failed to start HTC: %d\n", ret); goto err_hif_stop; } ret = ath11k_wmi_wait_for_service_ready(ab); if (ret) { ath11k_err(ab, "failed to receive wmi service ready event: %d\n", ret); goto err_hif_stop; } ret = ath11k_mac_allocate(ab); if (ret) { ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n", ret); goto err_hif_stop; } ath11k_dp_pdev_pre_alloc(ab); ret = ath11k_dp_pdev_reo_setup(ab); if (ret) { ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret); goto err_mac_destroy; } ret = ath11k_wmi_cmd_init(ab); if (ret) { ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret); goto err_reo_cleanup; } ret = ath11k_wmi_wait_for_unified_ready(ab); if (ret) { ath11k_err(ab, "failed to receive wmi unified ready event: %d\n", ret); goto err_reo_cleanup; } /* put hardware to DBS mode */ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) { ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); if (ret) { ath11k_err(ab, "failed to send dbs mode: %d\n", ret); goto err_hif_stop; } } ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab); if (ret) { ath11k_err(ab, "failed to send htt version request message: %d\n", ret); goto err_reo_cleanup; } return 0; err_reo_cleanup: ath11k_dp_pdev_reo_cleanup(ab); err_mac_destroy: ath11k_mac_destroy(ab); err_hif_stop: ath11k_hif_stop(ab); err_wmi_detach: ath11k_wmi_detach(ab); return ret; } static int ath11k_core_start_firmware(struct ath11k_base *ab, enum ath11k_firmware_mode mode) { int ret; ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2, &ab->qmi.ce_cfg.shadow_reg_v2_len); ret = ath11k_qmi_firmware_start(ab, mode); if (ret) { ath11k_err(ab, "failed to send firmware start: %d\n", ret); return ret; } return ret; } int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) { int ret; switch (ath11k_crypto_mode) { case ATH11K_CRYPT_MODE_SW: set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); break; case ATH11K_CRYPT_MODE_HW: clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags); clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); break; default: ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode); return -EINVAL; } ret = ath11k_core_start_firmware(ab, ab->fw_mode); if (ret) { ath11k_err(ab, "failed to start firmware: %d\n", ret); return ret; } ret = ath11k_ce_init_pipes(ab); if (ret) { ath11k_err(ab, "failed to initialize CE: %d\n", ret); goto err_firmware_stop; } ret = ath11k_dp_alloc(ab); if (ret) { ath11k_err(ab, "failed to init DP: %d\n", ret); goto err_firmware_stop; } if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW) set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags); mutex_lock(&ab->core_lock); ret = ath11k_core_start(ab); if (ret) { ath11k_err(ab, "failed to start core: %d\n", ret); goto err_dp_free; } ret = ath11k_core_pdev_create(ab); if (ret) { ath11k_err(ab, "failed to create pdev core: %d\n", ret); goto err_core_stop; } ath11k_hif_irq_enable(ab); 
mutex_unlock(&ab->core_lock); return 0; err_core_stop: ath11k_core_stop(ab); ath11k_mac_destroy(ab); err_dp_free: ath11k_dp_free(ab); mutex_unlock(&ab->core_lock); err_firmware_stop: ath11k_qmi_firmware_stop(ab); return ret; } static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) { int ret; mutex_lock(&ab->core_lock); ath11k_thermal_unregister(ab); ath11k_dp_pdev_free(ab); ath11k_spectral_deinit(ab); ath11k_ce_cleanup_pipes(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); mutex_unlock(&ab->core_lock); ath11k_dp_free(ab); ath11k_hal_srng_deinit(ab); ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; ret = ath11k_hal_srng_init(ab); if (ret) return ret; clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); if (ret) goto err_hal_srng_deinit; clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); return 0; err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); return ret; } void ath11k_core_halt(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct list_head *pos, *n; lockdep_assert_held(&ar->conf_mutex); ar->num_created_vdevs = 0; ar->allocated_vdev_map = 0; ath11k_mac_scan_finish(ar); ath11k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ab->update_11d_work); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); spin_lock_bh(&ar->data_lock); list_for_each_safe(pos, n, &ar->arvifs) list_del_init(pos); spin_unlock_bh(&ar->data_lock); idr_init(&ar->txmgmt_idr); } static void ath11k_update_11d(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work); struct ath11k *ar; struct ath11k_pdev *pdev; int ret, i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; spin_lock_bh(&ab->base_lock); memcpy(&ar->alpha2, &ab->new_alpha2, 2); spin_unlock_bh(&ab->base_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n", ar->alpha2[0], ar->alpha2[1], i); ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "pdev id %d failed set current country code: %d\n", i, ret); } } void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; spin_lock_bh(&ab->base_lock); ab->stats.fw_crash_counter++; spin_unlock_bh(&ab->base_lock); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar || ar->state == ATH11K_STATE_OFF || ar->state == ATH11K_STATE_FTM) continue; ieee80211_stop_queues(ar->hw); ath11k_mac_drain_tx(ar); ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); complete(&ar->scan.started); complete_all(&ar->scan.completed); complete(&ar->scan.on_channel); complete(&ar->peer_assoc_done); complete(&ar->peer_delete_done); complete(&ar->install_key_done); complete(&ar->vdev_setup_done); complete(&ar->vdev_delete_done); complete(&ar->bss_survey_done); complete(&ar->thermal.wmi_sync); wake_up(&ar->dp.tx_empty_waitq); idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); wake_up(&ar->txmgmt_empty_waitq); ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); } wake_up(&ab->wmi_ab.tx_credits_wq); wake_up(&ab->peer_mapping_wq); reinit_completion(&ab->driver_recovery); } static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab) { struct 
ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar || ar->state == ATH11K_STATE_OFF) continue; mutex_lock(&ar->conf_mutex); switch (ar->state) { case ATH11K_STATE_ON: ar->state = ATH11K_STATE_RESTARTING; ath11k_core_halt(ar); ieee80211_restart_hw(ar->hw); break; case ATH11K_STATE_OFF: ath11k_warn(ab, "cannot restart radio %d that hasn't been started\n", i); break; case ATH11K_STATE_RESTARTING: break; case ATH11K_STATE_RESTARTED: ar->state = ATH11K_STATE_WEDGED; fallthrough; case ATH11K_STATE_WEDGED: ath11k_warn(ab, "device is wedged, will not restart radio %d\n", i); break; case ATH11K_STATE_FTM: ath11k_dbg(ab, ATH11K_DBG_TESTMODE, "fw mode reset done radio %d\n", i); break; } mutex_unlock(&ar->conf_mutex); } complete(&ab->driver_recovery); } static void ath11k_core_restart(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); int ret; ret = ath11k_core_reconfigure_on_crash(ab); if (ret) { ath11k_err(ab, "failed to reconfigure driver on crash recovery\n"); return; } if (ab->is_reset) complete_all(&ab->reconfigure_complete); if (!ab->is_reset) ath11k_core_post_reconfigure_recovery(ab); complete(&ab->restart_completed); } static void ath11k_core_reset(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work); int reset_count, fail_cont_count; long time_left; if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) { ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags); return; } /* Sometimes recovery fails and then every subsequent recovery attempt * fails as well; bail out here to avoid an endless recovery loop that * can never succeed. */ fail_cont_count = atomic_read(&ab->fail_cont_count); if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL) return; if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST && time_before(jiffies, ab->reset_fail_timeout)) return; reset_count = atomic_inc_return(&ab->reset_count); if (reset_count > 1) { /* Sometimes another reset worker is scheduled before the previous one * has completed, and the second worker would then destroy the state of * the first; the wait below is there to avoid that.
*/ ath11k_warn(ab, "already resetting count %d\n", reset_count); reinit_completion(&ab->reset_complete); time_left = wait_for_completion_timeout(&ab->reset_complete, ATH11K_RESET_TIMEOUT_HZ); if (time_left) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "previous reset completed, skipping this reset\n"); atomic_dec(&ab->reset_count); return; } ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ; /* Record the continuous recovery fail count when recovery fails */ atomic_inc(&ab->fail_cont_count); } ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; atomic_set(&ab->recovery_count, 0); reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_start_count, 0); ath11k_coredump_collect(ab); ath11k_core_pre_reconfigure_recovery(ab); reinit_completion(&ab->reconfigure_complete); ath11k_core_post_reconfigure_recovery(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n"); time_left = wait_for_completion_timeout(&ab->recovery_start, ATH11K_RECOVER_START_TIMEOUT_HZ); ath11k_hif_irq_disable(ab); ath11k_hif_ce_irq_disable(ab); ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n"); } static int ath11k_init_hw_params(struct ath11k_base *ab) { const struct ath11k_hw_params *hw_params = NULL; int i; for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) { hw_params = &ath11k_hw_params[i]; if (hw_params->hw_rev == ab->hw_rev) break; } if (i == ARRAY_SIZE(ath11k_hw_params)) { ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev); return -EINVAL; } ab->hw_params = *hw_params; ath11k_info(ab, "%s\n", ab->hw_params.name); return 0; } int ath11k_core_pre_init(struct ath11k_base *ab) { int ret; ret = ath11k_init_hw_params(ab); if (ret) { ath11k_err(ab, "failed to get hw params: %d\n", ret); return ret; } ret = ath11k_fw_pre_init(ab); if (ret) { ath11k_err(ab, "failed to pre init firmware: %d\n", ret); return ret; } return 0; } EXPORT_SYMBOL(ath11k_core_pre_init); static int ath11k_core_pm_notify(struct notifier_block *nb, unsigned long action, void *nouse) { struct ath11k_base *ab = container_of(nb, struct ath11k_base, pm_nb); switch (action) { case PM_SUSPEND_PREPARE: ab->actual_pm_policy = ab->pm_policy; break; case PM_HIBERNATION_PREPARE: ab->actual_pm_policy = ATH11K_PM_DEFAULT; break; default: break; } return NOTIFY_OK; } static int ath11k_core_pm_notifier_register(struct ath11k_base *ab) { ab->pm_nb.notifier_call = ath11k_core_pm_notify; return register_pm_notifier(&ab->pm_nb); } void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab) { int ret; ret = unregister_pm_notifier(&ab->pm_nb); if (ret) /* just warn here, there is nothing we can do on failure */ ath11k_warn(ab, "failed to unregister PM notifier: %d\n", ret); } EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister); int ath11k_core_init(struct ath11k_base *ab) { const struct dmi_system_id *dmi_id; int ret; dmi_id = dmi_first_match(ath11k_pm_quirk_table); if (dmi_id) ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data; else ab->pm_policy = ATH11K_PM_DEFAULT; ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy); ret = ath11k_core_pm_notifier_register(ab); if (ret) { ath11k_err(ab, "failed to register PM notifier: %d\n", ret); return ret; } ret = ath11k_core_soc_create(ab); if (ret) { ath11k_err(ab, "failed to create soc core: %d\n", ret); - return ret; + goto err_unregister_pm_notifier; } return 0; + +err_unregister_pm_notifier: + ath11k_core_pm_notifier_unregister(ab); + + return ret; } EXPORT_SYMBOL(ath11k_core_init); void ath11k_core_deinit(struct ath11k_base
*ab) { mutex_lock(&ab->core_lock); ath11k_core_pdev_destroy(ab); ath11k_core_stop(ab); mutex_unlock(&ab->core_lock); ath11k_hif_power_down(ab, false); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); ath11k_core_pm_notifier_unregister(ab); } EXPORT_SYMBOL(ath11k_core_deinit); void ath11k_core_free(struct ath11k_base *ab) { destroy_workqueue(ab->workqueue_aux); destroy_workqueue(ab->workqueue); kfree(ab); } EXPORT_SYMBOL(ath11k_core_free); struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, enum ath11k_bus bus) { struct ath11k_base *ab; ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL); if (!ab) return NULL; init_completion(&ab->driver_recovery); ab->workqueue = create_singlethread_workqueue("ath11k_wq"); if (!ab->workqueue) goto err_sc_free; ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq"); if (!ab->workqueue_aux) goto err_free_wq; mutex_init(&ab->core_lock); mutex_init(&ab->tbl_mtx_lock); spin_lock_init(&ab->base_lock); mutex_init(&ab->vdev_id_11d_lock); init_completion(&ab->reset_complete); init_completion(&ab->reconfigure_complete); init_completion(&ab->recovery_start); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); init_waitqueue_head(&ab->qmi.cold_boot_waitq); INIT_WORK(&ab->restart_work, ath11k_core_restart); INIT_WORK(&ab->update_11d_work, ath11k_update_11d); INIT_WORK(&ab->reset_work, ath11k_core_reset); INIT_WORK(&ab->dump_work, ath11k_coredump_upload); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); init_completion(&ab->restart_completed); ab->dev = dev; ab->hif.bus = bus; return ab; err_free_wq: destroy_workqueue(ab->workqueue); err_sc_free: kfree(ab); return NULL; } EXPORT_SYMBOL(ath11k_core_alloc); MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/core.h b/core.h index 6b2f207975e3..e8780b05ce11 100644 --- a/core.h +++ b/core.h @@ -1,1343 +1,1354 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_CORE_H #define ATH11K_CORE_H #include #include #include #include #include #include #include #include #include #include +#include #include "qmi.h" #include "htc.h" #include "wmi.h" #include "hal.h" #include "dp.h" #include "ce.h" #include "mac.h" #include "hw.h" #include "hal_rx.h" #include "reg.h" #include "thermal.h" #include "dbring.h" #include "spectral.h" #include "wow.h" #include "fw.h" #include "coredump.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) #define ATH11K_TX_MGMT_NUM_PENDING_MAX 512 #define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64 /* Pending management packets threshold for dropping probe responses */ #define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4) #define ATH11K_INVALID_HW_MAC_ID 0xFF #define ATH11K_CONNECTION_LOSS_HZ (3 * HZ) /* SMBIOS type containing Board Data File Name Extension */ #define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8 /* SMBIOS type structure length (excluding strings-set) */ #define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9 /* The magic used by QCA spec */ #define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_" extern unsigned int ath11k_frame_mode; extern bool ath11k_ftm_mode; #define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ) #define ATH11K_MON_TIMER_INTERVAL 10 #define ATH11K_RESET_TIMEOUT_HZ (20 * HZ) #define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3 #define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5 #define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ) #define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) #define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ) enum ath11k_supported_bw { ATH11K_BW_20 = 0, ATH11K_BW_40 = 1, ATH11K_BW_80 = 2, ATH11K_BW_160 = 3, }; enum ath11k_bdf_search { ATH11K_BDF_SEARCH_DEFAULT, ATH11K_BDF_SEARCH_BUS_AND_BOARD, }; enum wme_ac { WME_AC_BE, WME_AC_BK, WME_AC_VI, WME_AC_VO, WME_NUM_AC }; #define ATH11K_HT_MCS_MAX 7 #define ATH11K_VHT_MCS_MAX 9 #define ATH11K_HE_MCS_MAX 11 enum ath11k_crypt_mode { /* Only use hardware crypto engine */ ATH11K_CRYPT_MODE_HW, /* Only use software crypto */ ATH11K_CRYPT_MODE_SW, }; static inline enum wme_ac ath11k_tid_to_ac(u32 tid) { return (((tid == 0) || (tid == 3)) ? WME_AC_BE : ((tid == 1) || (tid == 2)) ? WME_AC_BK : ((tid == 4) || (tid == 5)) ? 
WME_AC_VI : WME_AC_VO); } enum ath11k_skb_flags { ATH11K_SKB_HW_80211_ENCAP = BIT(0), ATH11K_SKB_CIPHER_SET = BIT(1), }; struct ath11k_skb_cb { dma_addr_t paddr; u8 eid; u8 flags; u32 cipher; struct ath11k *ar; struct ieee80211_vif *vif; } __packed; struct ath11k_skb_rxcb { dma_addr_t paddr; bool is_first_msdu; bool is_last_msdu; bool is_continuation; bool is_mcbc; bool is_eapol; struct hal_rx_desc *rx_desc; u8 err_rel_src; u8 err_code; u8 mac_id; u8 unmapped; u8 is_frag; u8 tid; u16 peer_id; u16 seq_no; }; enum ath11k_hw_rev { ATH11K_HW_IPQ8074, ATH11K_HW_QCA6390_HW20, ATH11K_HW_IPQ6018_HW10, ATH11K_HW_QCN9074_HW10, ATH11K_HW_WCN6855_HW20, ATH11K_HW_WCN6855_HW21, ATH11K_HW_WCN6750_HW10, ATH11K_HW_IPQ5018_HW10, ATH11K_HW_QCA2066_HW21, ATH11K_HW_QCA6698AQ_HW21, }; enum ath11k_firmware_mode { /* the default mode, standard 802.11 functionality */ ATH11K_FIRMWARE_MODE_NORMAL, /* factory tests etc */ ATH11K_FIRMWARE_MODE_FTM, /* Cold boot calibration */ ATH11K_FIRMWARE_MODE_COLD_BOOT = 7, }; extern bool ath11k_cold_boot_cal; #define ATH11K_IRQ_NUM_MAX 52 #define ATH11K_EXT_IRQ_NUM_MAX 16 struct ath11k_ext_irq_grp { struct ath11k_base *ab; u32 irqs[ATH11K_EXT_IRQ_NUM_MAX]; u32 num_irq; u32 grp_id; u64 timestamp; bool napi_enabled; struct napi_struct napi; struct net_device *napi_ndev; }; enum ath11k_smbios_cc_type { /* disable country code setting from SMBIOS */ ATH11K_SMBIOS_CC_DISABLE = 0, /* set country code by ANSI country name, based on ISO3166-1 alpha2 */ ATH11K_SMBIOS_CC_ISO = 1, /* worldwide regdomain */ ATH11K_SMBIOS_CC_WW = 2, }; struct ath11k_smbios_bdf { struct dmi_header hdr; u8 features_disabled; /* enum ath11k_smbios_cc_type */ u8 country_code_flag; /* To set specific country, you need to set country code * flag=ATH11K_SMBIOS_CC_ISO first, then if country is United * States, then country code value = 0x5553 ("US",'U' = 0x55, 'S'= * 0x53). To set country to INDONESIA, then country code value = * 0x4944 ("ID", 'I'=0x49, 'D'=0x44). If country code flag = * ATH11K_SMBIOS_CC_WW, then you can use worldwide regulatory * setting.
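 *
 * Packing sketch (illustrative, derived only from the examples above):
 * the two ASCII alpha2 bytes combine as
 *   cc_code = ((u16)alpha2[0] << 8) | alpha2[1];   producing 0x5553 for "US"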
*/ u16 cc_code; u8 bdf_enabled; u8 bdf_ext[]; } __packed; #define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HECAP_PHYINFO_SIZE 9 #define HECAP_MACINFO_SIZE 5 #define HECAP_TXRX_MCS_NSS_SIZE 2 #define HECAP_PPET16_PPET8_MAX_SIZE 25 #define HE_PPET16_PPET8_SIZE 8 /* 802.11ax PPE (PPDU packet Extension) threshold */ struct he_ppe_threshold { u32 numss_m1; u32 ru_mask; u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE]; }; struct ath11k_he { u8 hecap_macinfo[HECAP_MACINFO_SIZE]; u32 hecap_rxmcsnssmap; u32 hecap_txmcsnssmap; u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE]; struct he_ppe_threshold hecap_ppet; u32 heop_param; }; #define MAX_RADIOS 3 /* ipq5018 hw param macros */ #define MAX_RADIOS_5018 1 #define CE_CNT_5018 6 #define TARGET_CE_CNT_5018 9 #define SVC_CE_MAP_LEN_5018 17 #define RXDMA_PER_PDEV_5018 1 enum { WMI_HOST_TP_SCALE_MAX = 0, WMI_HOST_TP_SCALE_50 = 1, WMI_HOST_TP_SCALE_25 = 2, WMI_HOST_TP_SCALE_12 = 3, WMI_HOST_TP_SCALE_MIN = 4, WMI_HOST_TP_SCALE_SIZE = 5, }; enum ath11k_scan_state { ATH11K_SCAN_IDLE, ATH11K_SCAN_STARTING, ATH11K_SCAN_RUNNING, ATH11K_SCAN_ABORTING, }; enum ath11k_11d_state { ATH11K_11D_IDLE, ATH11K_11D_PREPARING, ATH11K_11D_RUNNING, }; enum ath11k_dev_flags { ATH11K_CAC_RUNNING, ATH11K_FLAG_CORE_REGISTERED, ATH11K_FLAG_CRASH_FLUSH, ATH11K_FLAG_RAW_MODE, ATH11K_FLAG_HW_CRYPTO_DISABLED, ATH11K_FLAG_BTCOEX, ATH11K_FLAG_RECOVERY, ATH11K_FLAG_UNREGISTERING, ATH11K_FLAG_REGISTERED, ATH11K_FLAG_QMI_FAIL, ATH11K_FLAG_HTC_SUSPEND_COMPLETE, ATH11K_FLAG_CE_IRQ_ENABLED, ATH11K_FLAG_EXT_IRQ_ENABLED, ATH11K_FLAG_FIXED_MEM_RGN, ATH11K_FLAG_DEVICE_INIT_DONE, ATH11K_FLAG_MULTI_MSI_VECTORS, ATH11K_FLAG_FTM_SEGMENTED, }; enum ath11k_monitor_flags { ATH11K_FLAG_MONITOR_CONF_ENABLED, ATH11K_FLAG_MONITOR_STARTED, ATH11K_FLAG_MONITOR_VDEV_CREATED, }; #define ATH11K_IPV6_UC_TYPE 0 #define ATH11K_IPV6_AC_TYPE 1 #define ATH11K_IPV6_MAX_COUNT 16 #define ATH11K_IPV4_MAX_COUNT 2 struct ath11k_arp_ns_offload { u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4]; u32 ipv4_count; u32 ipv6_count; u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16]; u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16]; u8 ipv6_type[ATH11K_IPV6_MAX_COUNT]; bool ipv6_valid[ATH11K_IPV6_MAX_COUNT]; u8 mac_addr[ETH_ALEN]; }; struct ath11k_rekey_data { u8 kck[NL80211_KCK_LEN]; u8 kek[NL80211_KCK_LEN]; u64 replay_ctr; bool enable_offload; }; /** * struct ath11k_chan_power_info - TPE containing power info per channel chunk * @chan_cfreq: channel center freq (MHz) * e.g. 
* channel 37/20 MHz, it is 6135 * channel 37/40 MHz, it is 6125 * channel 37/80 MHz, it is 6145 * channel 37/160 MHz, it is 6185 * @tx_power: transmit power (dBm) */ struct ath11k_chan_power_info { u16 chan_cfreq; s8 tx_power; }; /* ath11k only deals with 160 MHz, so 8 subchannels */ #define ATH11K_NUM_PWR_LEVELS 8 /** * struct ath11k_reg_tpc_power_info - regulatory TPC power info * @is_psd_power: is PSD power or not * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD * @ap_power_type: type of power (SP/LPI/VLP) * @num_pwr_levels: number of power levels * @reg_max: Array of maximum TX power (dBm) per PSD value * @tpe: TPE values processed from TPE IE * @chan_power_info: power info to send to firmware */ struct ath11k_reg_tpc_power_info { bool is_psd_power; u8 eirp_power; enum wmi_reg_6ghz_ap_type ap_power_type; u8 num_pwr_levels; u8 reg_max[ATH11K_NUM_PWR_LEVELS]; s8 tpe[ATH11K_NUM_PWR_LEVELS]; struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS]; }; struct ath11k_vif { u32 vdev_id; enum wmi_vdev_type vdev_type; enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; u32 dtim_period; u16 ast_hash; u16 ast_idx; u16 tcl_metadata; u8 hal_addr_search_flags; u8 search_type; struct ath11k *ar; struct ieee80211_vif *vif; struct wmi_wmm_params_all_arg wmm_params; struct wmi_wmm_params_all_arg muedca_params; struct list_head list; union { struct { u32 uapsd; } sta; struct { /* 127 stations; wmi limit */ u8 tim_bitmap[16]; u8 tim_len; u32 ssid_len; u8 ssid[IEEE80211_MAX_SSID_LEN]; bool hidden_ssid; /* P2P_IE with NoA attribute for P2P_GO case */ u32 noa_len; u8 *noa_data; } ap; } u; bool is_started; bool is_up; bool ftm_responder; bool spectral_enabled; bool ps; u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; struct delayed_work connection_loss_work; struct work_struct bcn_tx_work; int num_legacy_stations; int rtscts_prot_mode; int txpower; bool rsnie_present; bool wpaie_present; bool bcca_zero_sent; bool do_not_send_tmpl; struct ath11k_arp_ns_offload arp_ns_offload; struct ath11k_rekey_data rekey_data; + u32 num_stations; + bool reinstall_group_keys; struct ath11k_reg_tpc_power_info reg_tpc_info; /* Must be last - ends in a flexible-array member. * * FIXME: Driver should not copy struct ieee80211_chanctx_conf, * especially because it has a flexible array. Find a better way. 
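 * Possible directions (sketch only, nothing here is implemented): copy
 * just the fields the driver actually dereferences, or hold an RCU
 * reference to the mac80211-owned context instead of a by-value copy.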
*/ struct ieee80211_chanctx_conf chanctx; }; struct ath11k_vif_iter { u32 vdev_id; struct ath11k_vif *arvif; }; struct ath11k_rx_peer_stats { u64 num_msdu; u64 num_mpdu_fcs_ok; u64 num_mpdu_fcs_err; u64 tcp_msdu_count; u64 udp_msdu_count; u64 other_msdu_count; u64 ampdu_msdu_count; u64 non_ampdu_msdu_count; u64 stbc_count; u64 beamformed_count; u64 mcs_count[HAL_RX_MAX_MCS + 1]; u64 nss_count[HAL_RX_MAX_NSS]; u64 bw_count[HAL_RX_BW_MAX]; u64 gi_count[HAL_RX_GI_MAX]; u64 coding_count[HAL_RX_SU_MU_CODING_MAX]; u64 tid_count[IEEE80211_NUM_TIDS + 1]; u64 pream_cnt[HAL_RX_PREAMBLE_MAX]; u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX]; u64 rx_duration; u64 dcm_count; u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX]; }; #define ATH11K_HE_MCS_NUM 12 #define ATH11K_VHT_MCS_NUM 10 #define ATH11K_BW_NUM 4 #define ATH11K_NSS_NUM 4 #define ATH11K_LEGACY_NUM 12 #define ATH11K_GI_NUM 4 #define ATH11K_HT_MCS_NUM 32 enum ath11k_pkt_rx_err { ATH11K_PKT_RX_ERR_FCS, ATH11K_PKT_RX_ERR_TKIP, ATH11K_PKT_RX_ERR_CRYPT, ATH11K_PKT_RX_ERR_PEER_IDX_INVAL, ATH11K_PKT_RX_ERR_MAX, }; enum ath11k_ampdu_subfrm_num { ATH11K_AMPDU_SUBFRM_NUM_10, ATH11K_AMPDU_SUBFRM_NUM_20, ATH11K_AMPDU_SUBFRM_NUM_30, ATH11K_AMPDU_SUBFRM_NUM_40, ATH11K_AMPDU_SUBFRM_NUM_50, ATH11K_AMPDU_SUBFRM_NUM_60, ATH11K_AMPDU_SUBFRM_NUM_MORE, ATH11K_AMPDU_SUBFRM_NUM_MAX, }; enum ath11k_amsdu_subfrm_num { ATH11K_AMSDU_SUBFRM_NUM_1, ATH11K_AMSDU_SUBFRM_NUM_2, ATH11K_AMSDU_SUBFRM_NUM_3, ATH11K_AMSDU_SUBFRM_NUM_4, ATH11K_AMSDU_SUBFRM_NUM_MORE, ATH11K_AMSDU_SUBFRM_NUM_MAX, }; enum ath11k_counter_type { ATH11K_COUNTER_TYPE_BYTES, ATH11K_COUNTER_TYPE_PKTS, ATH11K_COUNTER_TYPE_MAX, }; enum ath11k_stats_type { ATH11K_STATS_TYPE_SUCC, ATH11K_STATS_TYPE_FAIL, ATH11K_STATS_TYPE_RETRY, ATH11K_STATS_TYPE_AMPDU, ATH11K_STATS_TYPE_MAX, }; struct ath11k_htt_data_stats { u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM]; u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM]; u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM]; u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM]; u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM]; u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM]; u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM]; }; struct ath11k_htt_tx_stats { struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX]; u64 tx_duration; u64 ba_fails; u64 ack_fails; }; struct ath11k_per_ppdu_tx_stats { u16 succ_pkts; u16 failed_pkts; u16 retry_pkts; u32 succ_bytes; u32 failed_bytes; u32 retry_bytes; }; DECLARE_EWMA(avg_rssi, 10, 8) struct ath11k_sta { struct ath11k_vif *arvif; /* the following are protected by ar->data_lock */ u32 changed; /* IEEE80211_RC_* */ u32 bw; u32 nss; u32 smps; enum hal_pn_type pn_type; struct work_struct update_wk; struct work_struct set_4addr_wk; struct rate_info txrate; u32 peer_nss; struct rate_info last_txrate; u64 rx_duration; u64 tx_duration; u8 rssi_comb; struct ewma_avg_rssi avg_rssi; s8 rssi_beacon; s8 chain_signal[IEEE80211_MAX_CHAINS]; struct ath11k_htt_tx_stats *tx_stats; struct ath11k_rx_peer_stats *rx_stats; #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ bool aggr_mode; #endif bool use_4addr_set; u16 tcl_metadata; /* Protected with ar->data_lock */ enum ath11k_wmi_peer_ps_state peer_ps_state; u64 ps_start_time; u64 ps_start_jiffies; u64 ps_total_duration; bool peer_current_ps_valid; u32 bw_prev; }; #define ATH11K_MIN_5G_FREQ 4150 #define ATH11K_MIN_6G_FREQ 5925 #define ATH11K_MAX_6G_FREQ 7115 #define ATH11K_NUM_CHANS 102 #define ATH11K_MAX_5G_CHAN 177 enum ath11k_state { ATH11K_STATE_OFF, ATH11K_STATE_ON, ATH11K_STATE_RESTARTING, 
ATH11K_STATE_RESTARTED, ATH11K_STATE_WEDGED, ATH11K_STATE_FTM, /* Add other states as required */ }; /* Antenna noise floor */ #define ATH11K_DEFAULT_NOISE_FLOOR -95 #define ATH11K_INVALID_RSSI_FULL -1 #define ATH11K_INVALID_RSSI_EMPTY -128 struct ath11k_fw_stats { struct dentry *debugfs_fwstats; u32 pdev_id; u32 stats_id; struct list_head pdevs; struct list_head vdevs; struct list_head bcn; u32 num_vdev_recvd; u32 num_bcn_recvd; }; struct ath11k_dbg_htt_stats { u8 type; u8 reset; struct debug_htt_stats_req *stats_req; /* protects shared stats req buffer */ spinlock_t lock; }; #define MAX_MODULE_ID_BITMAP_WORDS 16 struct ath11k_debug { struct dentry *debugfs_pdev; struct ath11k_dbg_htt_stats htt_stats; u32 extd_tx_stats; u32 extd_rx_stats; u32 pktlog_filter; u32 pktlog_mode; u32 pktlog_peer_valid; u8 pktlog_peer_addr[ETH_ALEN]; u32 rx_filter; u32 mem_offset; u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS]; struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX]; }; struct ath11k_per_peer_tx_stats { u32 succ_bytes; u32 retry_bytes; u32 failed_bytes; u16 succ_pkts; u16 retry_pkts; u16 failed_pkts; u32 duration; u8 ba_fails; bool is_ampdu; }; #define ATH11K_FLUSH_TIMEOUT (5 * HZ) #define ATH11K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ) struct ath11k { struct ath11k_base *ab; struct ath11k_pdev *pdev; struct ieee80211_hw *hw; struct ath11k_pdev_wmi *wmi; struct ath11k_pdev_dp dp; u8 mac_addr[ETH_ALEN]; struct ath11k_he ar_he; enum ath11k_state state; bool supports_6ghz; struct { struct completion started; struct completion completed; struct completion on_channel; struct delayed_work timeout; enum ath11k_scan_state state; bool is_roc; int vdev_id; int roc_freq; bool roc_notify; } scan; struct { struct ieee80211_supported_band sbands[NUM_NL80211_BANDS]; struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; } mac; unsigned long dev_flags; unsigned int filter_flags; unsigned long monitor_flags; u32 min_tx_power; u32 max_tx_power; u32 txpower_limit_2g; u32 txpower_limit_5g; u32 txpower_scale; u32 power_scale; u32 chan_tx_pwr; u32 num_stations; u32 max_num_stations; /* To synchronize concurrent synchronous mac80211 callback operations, * concurrent debugfs configuration and concurrent FW statistics events. */ struct mutex conf_mutex; /* protects the radio specific data like debug stats, ppdu_stats_info stats, * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info, * channel context data, survey info, test mode data, channel_update_queue. */ spinlock_t data_lock; struct list_head arvifs; /* should never be NULL; needed for regular htt rx */ struct ieee80211_channel *rx_channel; /* valid during scan; needed for mgmt rx during scan */ struct ieee80211_channel *scan_channel; u8 cfg_tx_chainmask; u8 cfg_rx_chainmask; u8 num_rx_chains; u8 num_tx_chains; /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */ u8 pdev_idx; u8 lmac_id; struct completion peer_assoc_done; struct completion peer_delete_done; int install_key_status; struct completion install_key_done; int last_wmi_vdev_start_status; struct completion vdev_setup_done; struct completion vdev_delete_done; int num_peers; int max_num_peers; u32 num_started_vdevs; u32 num_created_vdevs; unsigned long long allocated_vdev_map; struct idr txmgmt_idr; /* protects txmgmt_idr data */ spinlock_t txmgmt_idr_lock; atomic_t num_pending_mgmt_tx; wait_queue_head_t txmgmt_empty_waitq; /* cycle count is reported twice for each visited channel during scan. 
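 * A consumer derives the channel busy fraction from the deltas of the two
 * reports, roughly (illustrative sketch, not literal driver code):
 *   busy = (rx_clear_count - survey_last_rx_clear_count) /
 *          (cycle_count - survey_last_cycle_count)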
* access protected by data_lock */ u32 survey_last_rx_clear_count; u32 survey_last_cycle_count; /* Channel info events are expected to come in pairs without and with * COMPLETE flag set respectively for each channel visit during scan. * * However there are deviations from this rule. This flag is used to * avoid reporting garbage data. */ bool ch_info_can_report_survey; struct survey_info survey[ATH11K_NUM_CHANS]; struct completion bss_survey_done; struct work_struct regd_update_work; struct work_struct channel_update_work; /* protected with data_lock */ struct list_head channel_update_queue; struct work_struct wmi_mgmt_tx_work; struct sk_buff_head wmi_mgmt_tx_queue; struct ath11k_wow wow; struct completion target_suspend; bool target_suspend_ack; struct ath11k_per_peer_tx_stats peer_tx_stats; struct list_head ppdu_stats_info; u32 ppdu_stat_list_depth; struct ath11k_per_peer_tx_stats cached_stats; u32 last_ppdu_id; u32 cached_ppdu_id; int monitor_vdev_id; struct completion fw_mode_reset; u8 ftm_msgref; #ifdef CONFIG_ATH11K_DEBUGFS struct ath11k_debug debug; #endif #ifdef CONFIG_ATH11K_SPECTRAL struct ath11k_spectral spectral; #endif bool dfs_block_radar_events; struct ath11k_thermal thermal; u32 vdev_id_11d_scan; struct completion completed_11d_scan; enum ath11k_11d_state state_11d; bool regdom_set_by_user; int hw_rate_code; u8 twt_enabled; bool nlo_enabled; u8 alpha2[REG_ALPHA2_LEN + 1]; struct ath11k_fw_stats fw_stats; struct completion fw_stats_complete; struct completion fw_stats_done; /* protected by conf_mutex */ bool ps_state_enable; bool ps_timekeeper_enable; s8 max_allowed_tx_power; }; struct ath11k_band_cap { u32 phy_id; u32 max_bw_supported; u32 ht_cap_info; u32 he_cap_info[2]; u32 he_mcs; u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE]; struct ath11k_ppe_threshold he_ppet; u16 he_6ghz_capa; }; struct ath11k_pdev_cap { u32 supported_bands; u32 ampdu_density; u32 vht_cap; u32 vht_mcs; u32 he_mcs; u32 tx_chain_mask; u32 rx_chain_mask; u32 tx_chain_mask_shift; u32 rx_chain_mask_shift; struct ath11k_band_cap band[NUM_NL80211_BANDS]; bool nss_ratio_enabled; u8 nss_ratio_info; }; struct ath11k_pdev { struct ath11k *ar; u32 pdev_id; struct ath11k_pdev_cap cap; u8 mac_addr[ETH_ALEN]; }; struct ath11k_board_data { const struct firmware *fw; const void *data; size_t len; }; struct ath11k_pci_ops { int (*wakeup)(struct ath11k_base *ab); void (*release)(struct ath11k_base *ab); int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector); void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value); u32 (*window_read32)(struct ath11k_base *ab, u32 offset); }; /* IPQ8074 HW channel counters frequency value in hertz */ #define IPQ8074_CC_FREQ_HERTZ 320000 struct ath11k_bp_stats { /* Head Pointer reported by the last HTT Backpressure event for the ring */ u16 hp; /* Tail Pointer reported by the last HTT Backpressure event for the ring */ u16 tp; /* Number of Backpressure events received for the ring */ u32 count; /* Last recorded event timestamp */ unsigned long jiffies; }; struct ath11k_dp_ring_bp_stats { struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX]; struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS]; }; struct ath11k_soc_dp_tx_err_stats { /* TCL Ring Descriptor unavailable */ u32 desc_na[DP_TCL_NUM_RING_MAX]; /* Other failures during dp_tx due to mem allocation failure * idr unavailable etc. 
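 * Failure sites bump this counter with a plain atomic op, e.g.
 *   atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 * (illustrative call-site sketch; see the dp_tx error paths).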
*/ atomic_t misc_fail; }; struct ath11k_soc_dp_stats { u32 err_ring_pkts; u32 invalid_rbm; u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX]; u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; u32 hal_reo_error[DP_REO_DST_RING_MAX]; struct ath11k_soc_dp_tx_err_stats tx_err; struct ath11k_dp_ring_bp_stats bp_stats; }; struct ath11k_msi_user { char *name; int num_vectors; u32 base_vector; }; struct ath11k_msi_config { int total_vectors; int total_users; struct ath11k_msi_user *users; u16 hw_rev; }; enum ath11k_pm_policy { ATH11K_PM_DEFAULT, ATH11K_PM_WOW, }; /* Master structure to hold the hw data which may be used in core module */ struct ath11k_base { enum ath11k_hw_rev hw_rev; enum ath11k_firmware_mode fw_mode; struct platform_device *pdev; struct device *dev; struct ath11k_qmi qmi; struct ath11k_wmi_base wmi_ab; struct completion fw_ready; int num_radios; /* HW channel counters frequency value in hertz common to all MACs */ u32 cc_freq_hz; struct ath11k_dump_file_data *dump_data; size_t ath11k_coredump_len; struct work_struct dump_work; struct ath11k_htc htc; struct ath11k_dp dp; void __iomem *mem; void __iomem *mem_ce; unsigned long mem_len; struct { enum ath11k_bus bus; const struct ath11k_hif_ops *ops; } hif; struct { struct completion wakeup_completed; } wow; struct ath11k_ce ce; struct timer_list rx_replenish_retry; struct ath11k_hal hal; /* To synchronize core_start/core_stop */ struct mutex core_lock; /* Protects data like peers */ spinlock_t base_lock; struct ath11k_pdev pdevs[MAX_RADIOS]; struct { enum WMI_HOST_WLAN_BAND supported_bands; u32 pdev_id; } target_pdev_ids[MAX_RADIOS]; u8 target_pdev_count; struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS]; struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS]; unsigned long long free_vdev_map; /* To synchronize rhash tbl write operation */ struct mutex tbl_mtx_lock; /* The rhashtable containing struct ath11k_peer keyed by mac addr */ struct rhashtable *rhead_peer_addr; struct rhashtable_params rhash_peer_addr_param; /* The rhashtable containing struct ath11k_peer keyed by id */ struct rhashtable *rhead_peer_id; struct rhashtable_params rhash_peer_id_param; struct list_head peers; wait_queue_head_t peer_mapping_wq; u8 mac_addr[ETH_ALEN]; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; struct ath11k_hw_params hw_params; const struct firmware *cal_file; /* Below regd's are protected by ab->data_lock */ /* This is the regd set for every radio * by the firmware during initialization */ struct ieee80211_regdomain *default_regd[MAX_RADIOS]; /* This regd is set during dynamic country setting * This may or may not be used during the runtime */ struct ieee80211_regdomain *new_regd[MAX_RADIOS]; struct cur_regulatory_info *reg_info_store; /* Current DFS Regulatory */ enum ath11k_dfs_region dfs_region; #ifdef CONFIG_ATH11K_DEBUGFS struct dentry *debugfs_soc; #endif struct ath11k_soc_dp_stats soc_stats; unsigned long dev_flags; struct completion driver_recovery; struct workqueue_struct *workqueue; struct work_struct restart_work; struct work_struct update_11d_work; u8 new_alpha2[3]; struct workqueue_struct *workqueue_aux; struct work_struct reset_work; atomic_t reset_count; atomic_t recovery_count; atomic_t recovery_start_count; bool is_reset; struct completion reset_complete; struct completion reconfigure_complete; struct completion recovery_start; /* continuous recovery fail 
count */ atomic_t fail_cont_count; unsigned long reset_fail_timeout; struct { /* protected by data_lock */ u32 fw_crash_counter; } stats; u32 pktlog_defs_checksum; struct ath11k_dbring_cap *db_caps; u32 num_db_cap; /* To synchronize 11d scan vdev id */ struct mutex vdev_id_11d_lock; struct timer_list mon_reap_timer; struct completion htc_suspend; struct { enum ath11k_bdf_search bdf_search; u32 vendor; u32 device; u32 subsystem_vendor; u32 subsystem_device; } id; struct { struct { const struct ath11k_msi_config *config; u32 ep_base_data; u32 irqs[32]; u32 addr_lo; u32 addr_hi; } msi; const struct ath11k_pci_ops *ops; } pci; struct { u32 api_version; const struct firmware *fw; const u8 *amss_data; size_t amss_len; const u8 *m3_data; size_t m3_len; DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT); } fw; struct completion restart_completed; #ifdef CONFIG_NL80211_TESTMODE struct { u32 data_pos; u32 expected_seq; u8 *eventdata; } testmode; #endif enum ath11k_pm_policy pm_policy; enum ath11k_pm_policy actual_pm_policy; struct notifier_block pm_nb; /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; struct ath11k_fw_stats_pdev { struct list_head list; /* PDEV stats */ s32 ch_noise_floor; /* Cycles spent transmitting frames */ u32 tx_frame_count; /* Cycles spent receiving frames */ u32 rx_frame_count; /* Total channel busy time, evidently */ u32 rx_clear_count; /* Total on-channel time */ u32 cycle_count; u32 phy_err_count; u32 chan_tx_power; u32 ack_rx_bad; u32 rts_bad; u32 rts_good; u32 fcs_bad; u32 no_beacons; u32 mib_int_count; /* PDEV TX stats */ /* Num HTT cookies queued to dispatch list */ s32 comp_queued; /* Num HTT cookies dispatched */ s32 comp_delivered; /* Num MSDUs queued to WAL */ s32 msdu_enqued; /* Num MPDUs queued to WAL */ s32 mpdu_enqued; /* Num MSDUs dropped by WMM limit */ s32 wmm_drop; /* Num Local frames queued */ s32 local_enqued; /* Num Local frames done */ s32 local_freed; /* Num queued to HW */ s32 hw_queued; /* Num PPDU reaped from HW */ s32 hw_reaped; /* Num underruns */ s32 underrun; /* Num hw paused */ u32 hw_paused; /* Num PPDUs cleaned up in TX abort */ s32 tx_abort; /* Num MPDUs requeued by SW */ s32 mpdus_requeued; /* excessive retries */ u32 tx_ko; u32 tx_xretry; /* data hw rate code */ u32 data_rc; /* Scheduler self triggers */ u32 self_triggers; /* frames dropped due to excessive sw retries */ u32 sw_retry_failure; /* illegal rate phy errors */ u32 illgl_rate_phy_err; /* wal pdev continuous xretry */ u32 pdev_cont_xretry; /* wal pdev tx timeouts */ u32 pdev_tx_timeout; /* wal pdev resets */ u32 pdev_resets; /* frames dropped due to non-availability of stateless TIDs */ u32 stateless_tid_alloc_failure; /* PHY/BB underrun */ u32 phy_underrun; /* MPDU is more than txop limit */ u32 txop_ovf; /* Num sequences posted */ u32 seq_posted; /* Num sequences failed in queueing */ u32 seq_failed_queueing; /* Num sequences completed */ u32 seq_completed; /* Num sequences restarted */ u32 seq_restarted; /* Num of MU sequences posted */ u32 mu_seq_posted; /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT * (Reset, channel change) */ s32 mpdus_sw_flush; /* Num MPDUs filtered by HW, all filter condition (TTL expired) */ s32 mpdus_hw_filter; /* Num MPDUs truncated by PDG (TXOP, TBTT, * PPDU_duration based on rate, dyn_bw) */ s32 mpdus_truncated; /* Num MPDUs that were tried but didn't receive ACK or BA */ s32 mpdus_ack_failed; /* Num MPDUs that were dropped due to expiry.
*/ s32 mpdus_expired; /* PDEV RX stats */ /* Cnts any change in ring routing mid-ppdu */ s32 mid_ppdu_route_change; /* Total number of statuses processed */ s32 status_rcvd; /* Extra frags on rings 0-3 */ s32 r0_frags; s32 r1_frags; s32 r2_frags; s32 r3_frags; /* MSDUs / MPDUs delivered to HTT */ s32 htt_msdus; s32 htt_mpdus; /* MSDUs / MPDUs delivered to local stack */ s32 loc_msdus; s32 loc_mpdus; /* AMSDUs that have more MSDUs than the status ring size */ s32 oversize_amsdu; /* Number of PHY errors */ s32 phy_errs; /* Number of PHY errors drops */ s32 phy_err_drop; /* Number of mpdu errors - FCS, MIC, ENC etc. */ s32 mpdu_errs; /* Num overflow errors */ s32 rx_ovfl_errs; }; struct ath11k_fw_stats_vdev { struct list_head list; u32 vdev_id; u32 beacon_snr; u32 data_snr; u32 num_tx_frames[WLAN_MAX_AC]; u32 num_rx_frames; u32 num_tx_frames_retries[WLAN_MAX_AC]; u32 num_tx_frames_failures[WLAN_MAX_AC]; u32 num_rts_fail; u32 num_rts_success; u32 num_rx_err; u32 num_rx_discard; u32 num_tx_not_acked; u32 tx_rate_history[MAX_TX_RATE_VALUES]; u32 beacon_rssi_history[MAX_TX_RATE_VALUES]; }; struct ath11k_fw_stats_bcn { struct list_head list; u32 vdev_id; u32 tx_bcn_succ_cnt; u32 tx_bcn_outage_cnt; }; void ath11k_fw_stats_init(struct ath11k *ar); void ath11k_fw_stats_pdevs_free(struct list_head *head); void ath11k_fw_stats_vdevs_free(struct list_head *head); void ath11k_fw_stats_bcn_free(struct list_head *head); void ath11k_fw_stats_free(struct ath11k_fw_stats *stats); extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[]; extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[]; extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[]; extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[]; extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[]; int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); int ath11k_core_pre_init(struct ath11k_base *ab); int ath11k_core_init(struct ath11k_base *ath11k); void ath11k_core_deinit(struct ath11k_base *ath11k); struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, enum ath11k_bus bus); void ath11k_core_free(struct ath11k_base *ath11k); int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, struct ath11k_board_data *bd); int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, struct ath11k_board_data *bd, const char *name); void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); int ath11k_core_check_dt(struct ath11k_base *ath11k); int ath11k_core_check_smbios(struct ath11k_base *ab); void ath11k_core_halt(struct ath11k *ar); int ath11k_core_resume_early(struct ath11k_base *ab); int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab); int ath11k_core_suspend_late(struct ath11k_base *ab); void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab); bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab); const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *filename); static inline const char 
*ath11k_scan_state_str(enum ath11k_scan_state state) { switch (state) { case ATH11K_SCAN_IDLE: return "idle"; case ATH11K_SCAN_STARTING: return "starting"; case ATH11K_SCAN_RUNNING: return "running"; case ATH11K_SCAN_ABORTING: return "aborting"; } return "unknown"; } static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct ath11k_skb_cb) > IEEE80211_TX_INFO_DRIVER_DATA_SIZE); return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; } static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb) { BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb)); return (struct ath11k_skb_rxcb *)skb->cb; } static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif) { return (struct ath11k_vif *)vif->drv_priv; } static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta) { return (struct ath11k_sta *)sta->drv_priv; } static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab, int mac_id) { return ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; } static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab, const char *filename, void *buf, size_t buf_len) { - snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR, - ab->hw_params.fw.dir, filename); + const char *fw_name = NULL; + + of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name); + + if (fw_name && strncmp(filename, "board", 5)) + snprintf(buf, buf_len, "%s/%s/%s/%s", ATH11K_FW_DIR, + ab->hw_params.fw.dir, fw_name, filename); + else + snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR, + ab->hw_params.fw.dir, filename); } static inline const char *ath11k_bus_str(enum ath11k_bus bus) { switch (bus) { case ATH11K_BUS_PCI: return "pci"; case ATH11K_BUS_AHB: return "ahb"; } return "unknown"; } void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab); #endif /* _CORE_H_ */ diff --git a/coredump.c b/coredump.c index b8bad358cebe..1949d57b007a 100644 --- a/coredump.c +++ b/coredump.c @@ -1,52 +1,54 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2020 The Linux Foundation. All rights reserved. * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include +#include #include "hif.h" #include "coredump.h" #include "debug.h" enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type) { enum ath11k_fw_crash_dump_type dump_type; switch (type) { case HOST_DDR_REGION_TYPE: dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA; break; case M3_DUMP_REGION_TYPE: dump_type = FW_CRASH_DUMP_M3_DUMP; break; case PAGEABLE_MEM_REGION_TYPE: dump_type = FW_CRASH_DUMP_PAGEABLE_DATA; break; case BDF_MEM_REGION_TYPE: case CALDB_MEM_REGION_TYPE: dump_type = FW_CRASH_DUMP_NONE; break; default: dump_type = FW_CRASH_DUMP_TYPE_MAX; break; } return dump_type; } EXPORT_SYMBOL(ath11k_coredump_get_dump_type); void ath11k_coredump_upload(struct work_struct *work) { struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work); ath11k_info(ab, "Uploading coredump\n"); /* dev_coredumpv() takes ownership of the buffer */ dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL); ab->dump_data = NULL; } void ath11k_coredump_collect(struct ath11k_base *ab) { ath11k_hif_coredump_download(ab); } diff --git a/dbring.c b/dbring.c index fbb6e8d8a476..520d8b8662a2 100644 --- a/dbring.c +++ b/dbring.c @@ -1,400 +1,401 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include "core.h" #include "debug.h" #define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size) { u32 *temp; int idx; size = size >> 2; for (idx = 0, temp = buffer; idx < size; idx++, temp++) { if (*temp == ATH11K_DB_MAGIC_VALUE) return -EINVAL; } return 0; } static void ath11k_dbring_fill_magic_value(struct ath11k *ar, void *buffer, u32 size) { /* memset32 function fills buffer payload with the ATH11K_DB_MAGIC_VALUE * and the variable size is expected to be the number of u32 values * to be stored, not the number of bytes. 
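 * For example (illustrative numbers only): a 4096-byte payload yields
 * 4096 / sizeof(u32) = 1024 u32 slots, each set to ATH11K_DB_MAGIC_VALUE.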
*/ size = size / sizeof(u32); memset32(buffer, ATH11K_DB_MAGIC_VALUE, size); } static int ath11k_dbring_bufs_replenish(struct ath11k *ar, struct ath11k_dbring *ring, struct ath11k_dbring_element *buff, enum wmi_direct_buffer_module id) { struct ath11k_base *ab = ar->ab; struct hal_srng *srng; dma_addr_t paddr; void *ptr_aligned, *ptr_unaligned, *desc; int ret; int buf_id; u32 cookie; srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; lockdep_assert_held(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); ptr_unaligned = buff->payload; ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz); paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, DMA_FROM_DEVICE); ret = dma_mapping_error(ab->dev, paddr); if (ret) goto err; spin_lock_bh(&ring->idr_lock); buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC); spin_unlock_bh(&ring->idr_lock); if (buf_id < 0) { ret = -ENOBUFS; goto err_dma_unmap; } desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) { ret = -ENOENT; goto err_idr_remove; } buff->paddr = paddr; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0); ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng); ath11k_hal_srng_access_end(ab, srng); return 0; err_idr_remove: spin_lock_bh(&ring->idr_lock); idr_remove(&ring->bufs_idr, buf_id); spin_unlock_bh(&ring->idr_lock); err_dma_unmap: dma_unmap_single(ab->dev, paddr, ring->buf_sz, DMA_FROM_DEVICE); err: ath11k_hal_srng_access_end(ab, srng); return ret; } static int ath11k_dbring_fill_bufs(struct ath11k *ar, struct ath11k_dbring *ring, enum wmi_direct_buffer_module id) { struct ath11k_dbring_element *buff; struct hal_srng *srng; int num_remain, req_entries, num_free; u32 align; int size, ret; srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id]; spin_lock_bh(&srng->lock); num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true); req_entries = min(num_free, ring->bufs_max); num_remain = req_entries; align = ring->buf_align; size = ring->buf_sz + align - 1; while (num_remain > 0) { buff = kzalloc(sizeof(*buff), GFP_ATOMIC); if (!buff) break; buff->payload = kzalloc(size, GFP_ATOMIC); if (!buff->payload) { kfree(buff); break; } ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id); if (ret) { ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n", num_remain, req_entries); kfree(buff->payload); kfree(buff); break; } num_remain--; } spin_unlock_bh(&srng->lock); return num_remain; } int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar, struct ath11k_dbring *ring, enum wmi_direct_buffer_module id) { - struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0}; + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {}; int ret; if (id >= WMI_DIRECT_BUF_MAX) return -EINVAL; param.pdev_id = DP_SW2HW_MACID(ring->pdev_id); param.module_id = id; param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr); param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr); param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr); param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr); param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr); param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr); param.num_elems = ring->bufs_max; param.buf_size = ring->buf_sz; param.num_resp_per_event = ring->num_resp_per_event; param.event_timeout_ms = ring->event_timeout_ms; ret = ath11k_wmi_pdev_dma_ring_cfg(ar, 
&param); if (ret) { ath11k_warn(ar->ab, "failed to setup db ring cfg\n"); return ret; } return 0; } int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring, u32 num_resp_per_event, u32 event_timeout_ms, int (*handler)(struct ath11k *, struct ath11k_dbring_data *)) { if (WARN_ON(!ring)) return -EINVAL; ring->num_resp_per_event = num_resp_per_event; ring->event_timeout_ms = event_timeout_ms; ring->handler = handler; return 0; } int ath11k_dbring_buf_setup(struct ath11k *ar, struct ath11k_dbring *ring, struct ath11k_dbring_cap *db_cap) { struct ath11k_base *ab = ar->ab; struct hal_srng *srng; int ret; srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; ring->bufs_max = ring->refill_srng.size / ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF); ring->buf_sz = db_cap->min_buf_sz; ring->buf_align = db_cap->min_buf_align; ring->pdev_id = db_cap->pdev_id; ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng); ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng); ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id); return ret; } int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring, int ring_num, int num_entries) { int ret; ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF, ring_num, ar->pdev_idx, num_entries); if (ret < 0) { ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n", ret, ring_num); goto err; } return 0; err: ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng); return ret; } int ath11k_dbring_get_cap(struct ath11k_base *ab, u8 pdev_idx, enum wmi_direct_buffer_module id, struct ath11k_dbring_cap *db_cap) { int i; if (!ab->num_db_cap || !ab->db_caps) return -ENOENT; if (id >= WMI_DIRECT_BUF_MAX) return -EINVAL; for (i = 0; i < ab->num_db_cap; i++) { if (pdev_idx == ab->db_caps[i].pdev_id && id == ab->db_caps[i].id) { *db_cap = ab->db_caps[i]; return 0; } } return -ENOENT; } int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, struct ath11k_dbring_buf_release_event *ev) { struct ath11k_dbring *ring; struct hal_srng *srng; struct ath11k *ar; struct ath11k_dbring_element *buff; struct ath11k_dbring_data handler_data; struct ath11k_buffer_addr desc; u8 *vaddr_unalign; u32 num_entry, num_buff_reaped; u8 pdev_idx, rbm, module_id; u32 cookie; int buf_id; int size; dma_addr_t paddr; int ret = 0; pdev_idx = ev->fixed.pdev_id; module_id = ev->fixed.module_id; if (pdev_idx >= ab->num_radios) { ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx); return -EINVAL; } if (ev->fixed.num_buf_release_entry != ev->fixed.num_meta_data_entry) { ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n", ev->fixed.num_buf_release_entry, ev->fixed.num_meta_data_entry); return -EINVAL; } ar = ab->pdevs[pdev_idx].ar; rcu_read_lock(); if (!rcu_dereference(ab->pdevs_active[pdev_idx])) { ret = -EINVAL; goto rcu_unlock; } switch (ev->fixed.module_id) { case WMI_DIRECT_BUF_SPECTRAL: ring = ath11k_spectral_get_dbring(ar); break; default: ring = NULL; ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n", ev->fixed.module_id); break; } if (!ring) { ret = -EINVAL; goto rcu_unlock; } srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; num_entry = ev->fixed.num_buf_release_entry; size = ring->buf_sz + ring->buf_align - 1; num_buff_reaped = 0; spin_lock_bh(&srng->lock); while (num_buff_reaped < num_entry) { desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo; desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi; handler_data.meta = ev->meta_data[num_buff_reaped]; num_buff_reaped++; ath11k_hal_rx_buf_addr_info_get(&desc,
&paddr, &cookie, &rbm); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); spin_lock_bh(&ring->idr_lock); buff = idr_find(&ring->bufs_idr, buf_id); if (!buff) { spin_unlock_bh(&ring->idr_lock); continue; } idr_remove(&ring->bufs_idr, buf_id); spin_unlock_bh(&ring->idr_lock); dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz, DMA_FROM_DEVICE); ath11k_debugfs_add_dbring_entry(ar, module_id, ATH11K_DBG_DBR_EVENT_RX, srng); if (ring->handler) { vaddr_unalign = buff->payload; handler_data.data = PTR_ALIGN(vaddr_unalign, ring->buf_align); handler_data.data_sz = ring->buf_sz; ring->handler(ar, &handler_data); } buff->paddr = 0; memset(buff->payload, 0, size); ath11k_dbring_bufs_replenish(ar, ring, buff, module_id); } spin_unlock_bh(&srng->lock); rcu_unlock: rcu_read_unlock(); return ret; } void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) { ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng); } void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) { struct ath11k_dbring_element *buff; int buf_id; spin_lock_bh(&ring->idr_lock); idr_for_each_entry(&ring->bufs_idr, buff, buf_id) { idr_remove(&ring->bufs_idr, buf_id); dma_unmap_single(ar->ab->dev, buff->paddr, ring->buf_sz, DMA_FROM_DEVICE); kfree(buff->payload); kfree(buff); } idr_destroy(&ring->bufs_idr); spin_unlock_bh(&ring->idr_lock); } diff --git a/debug.c b/debug.c index 2b8544355fc1..37d23a559ba3 100644 --- a/debug.c +++ b/debug.c @@ -1,111 +1,113 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include #include #include "core.h" #include "debug.h" void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_info(ab->dev, "%pV", &vaf); trace_ath11k_log_info(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_info); void ath11k_err(struct ath11k_base *ab, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_err(ab->dev, "%pV", &vaf); trace_ath11k_log_err(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_err); void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, }; va_list args; va_start(args, fmt); vaf.va = &args; dev_warn_ratelimited(ab->dev, "%pV", &vaf); trace_ath11k_log_warn(ab, &vaf); va_end(args); } EXPORT_SYMBOL(ath11k_warn); #ifdef CONFIG_ATH11K_DEBUG void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (ath11k_debug_mask & mask) dev_printk(KERN_DEBUG, ab->dev, "%s %pV", ath11k_dbg_str(mask), &vaf); trace_ath11k_log_dbg(ab, mask, &vaf); va_end(args); } EXPORT_SYMBOL(__ath11k_dbg); void ath11k_dbg_dump(struct ath11k_base *ab, enum ath11k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { char linebuf[256]; size_t linebuflen; const void *ptr; if (ath11k_debug_mask & mask) { if (msg) __ath11k_dbg(ab, mask, "%s\n", msg); for (ptr = buf; (ptr - buf) < len; ptr += 16) { linebuflen = 0; linebuflen += scnprintf(linebuf + linebuflen, sizeof(linebuf) - linebuflen, "%s%08x: ", (prefix ? 
prefix : ""), (unsigned int)(ptr - buf)); hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, linebuf + linebuflen, sizeof(linebuf) - linebuflen, true); dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf); } } /* tracing code doesn't like null strings */ trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "", buf, len); } EXPORT_SYMBOL(ath11k_dbg_dump); #endif /* CONFIG_ATH11K_DEBUG */ diff --git a/debugfs.c b/debugfs.c index 5d46f8e4c231..977f945b6e66 100644 --- a/debugfs.c +++ b/debugfs.c @@ -1,1801 +1,1803 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include #include #include "debugfs.h" #include "core.h" #include "debug.h" #include "wmi.h" #include "hal_rx.h" #include "dp_tx.h" #include "debugfs_htt_stats.h" #include "peer.h" #include "hif.h" static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = { "REO2SW1_RING", "REO2SW2_RING", "REO2SW3_RING", "REO2SW4_RING", "WBM2REO_LINK_RING", "REO2TCL_RING", "REO2FW_RING", "RELEASE_RING", "PPE_RELEASE_RING", "TCL2TQM_RING", "TQM_RELEASE_RING", "REO_RELEASE_RING", "WBM2SW0_RELEASE_RING", "WBM2SW1_RELEASE_RING", "WBM2SW2_RELEASE_RING", "WBM2SW3_RELEASE_RING", "REO_CMD_RING", "REO_STATUS_RING", }; static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = { "FW2RXDMA_BUF_RING", "FW2RXDMA_STATUS_RING", "FW2RXDMA_LINK_RING", "SW2RXDMA_BUF_RING", "WBM2RXDMA_LINK_RING", "RXDMA2FW_RING", "RXDMA2SW_RING", "RXDMA2RELEASE_RING", "RXDMA2REO_RING", "MONITOR_STATUS_RING", "MONITOR_BUF_RING", "MONITOR_DESC_RING", "MONITOR_DEST_RING", }; void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, enum wmi_direct_buffer_module id, enum ath11k_dbg_dbr_event event, struct hal_srng *srng) { struct ath11k_debug_dbr *dbr_debug; struct ath11k_dbg_dbr_data *dbr_data; struct ath11k_dbg_dbr_entry *entry; if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX) return; dbr_debug = ar->debug.dbr_debug[id]; if (!dbr_debug) return; if (!dbr_debug->dbr_debug_enabled) return; dbr_data = &dbr_debug->dbr_dbg_data; spin_lock_bh(&dbr_data->lock); if (dbr_data->entries) { entry = &dbr_data->entries[dbr_data->dbr_debug_idx]; entry->hp = srng->u.src_ring.hp; entry->tp = *srng->u.src_ring.tp_addr; entry->timestamp = jiffies; entry->event = event; dbr_data->dbr_debug_idx++; if (dbr_data->dbr_debug_idx == dbr_data->num_ring_debug_entries) dbr_data->dbr_debug_idx = 0; } spin_unlock_bh(&dbr_data->lock); } void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats) { struct ath11k_base *ab = ar->ab; bool is_end = true; /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and * WMI_REQUEST_VDEV_STAT requests have been already processed. 
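 * Only WMI_REQUEST_BCN_STAT replies are accumulated below; fw_stats_done
 * is completed once one beacon-stats reply per started vdev has arrived.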
*/ if (stats->stats_id == WMI_REQUEST_BCN_STAT) { if (list_empty(&stats->bcn)) { ath11k_warn(ab, "empty bcn stats"); return; } /* Mark end until we reached the count of all started VDEVs * within the PDEV */ if (ar->num_started_vdevs) is_end = ((++ar->fw_stats.num_bcn_recvd) == ar->num_started_vdevs); list_splice_tail_init(&stats->bcn, &ar->fw_stats.bcn); if (is_end) complete(&ar->fw_stats_done); } } static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; struct ath11k_base *ab = ar->ab; struct stats_request_params req_param; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } req_param.pdev_id = ar->pdev->pdev_id; req_param.vdev_id = 0; req_param.stats_id = WMI_REQUEST_PDEV_STAT; ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); goto err_free; } ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_release_pdev_stats(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath11k_read_pdev_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_pdev_stats = { .open = ath11k_open_pdev_stats, .release = ath11k_release_pdev_stats, .read = ath11k_read_pdev_stats, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_open_vdev_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; struct stats_request_params req_param; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } req_param.pdev_id = ar->pdev->pdev_id; /* VDEV stats is always sent for all active VDEVs from FW */ req_param.vdev_id = 0; req_param.stats_id = WMI_REQUEST_VDEV_STAT; ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); goto err_free; } ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_release_vdev_stats(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath11k_read_vdev_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_vdev_stats = { .open = ath11k_open_vdev_stats, .release = ath11k_release_vdev_stats, .read = ath11k_read_vdev_stats, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_open_bcn_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; struct ath11k_vif *arvif; struct stats_request_params req_param; void *buf = NULL; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != 
ATH11K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); if (!buf) { ret = -ENOMEM; goto err_unlock; } req_param.stats_id = WMI_REQUEST_BCN_STAT; req_param.pdev_id = ar->pdev->pdev_id; /* loop all active VDEVs for bcn stats */ list_for_each_entry(arvif, &ar->arvifs, list) { if (!arvif->is_up) continue; req_param.vdev_id = arvif->vdev_id; ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) { ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret); goto err_free; } } ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf); /* since beacon stats request is looped for all active VDEVs, saved fw * stats are not freed for each request until done for all active VDEVs */ spin_lock_bh(&ar->data_lock); ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn); spin_unlock_bh(&ar->data_lock); file->private_data = buf; mutex_unlock(&ar->conf_mutex); return 0; err_free: vfree(buf); err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_release_bcn_stats(struct inode *inode, struct file *file) { vfree(file->private_data); return 0; } static ssize_t ath11k_read_bcn_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char *buf = file->private_data; size_t len = strlen(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_bcn_stats = { .open = ath11k_open_bcn_stats, .release = ath11k_release_bcn_stats, .read = ath11k_read_bcn_stats, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_read_simulate_fw_crash(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { const char buf[] = "To simulate firmware crash write one of the keywords to this file:\n" "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n" "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } /* Simulate firmware crash: * 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to cause an assert; * recovery goes through the normal firmware restart path. * 'hw-restart': queue a hw restart without the firmware actually crashing.
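 *
 * Hypothetical usage from userspace (debugfs path abbreviated):
 *   echo assert > /sys/kernel/debug/ath11k/.../simulate_fw_crash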
*/ static ssize_t ath11k_write_simulate_fw_crash(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k_base *ab = file->private_data; struct ath11k_pdev *pdev; struct ath11k *ar = ab->pdevs[0].ar; - char buf[32] = {0}; + char buf[32] = {}; ssize_t rc; int i, ret, radioup = 0; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (ar && ar->state == ATH11K_STATE_ON) { radioup = 1; break; } } /* filter partial writes and invalid commands */ if (*ppos != 0 || count >= sizeof(buf) || count == 0) return -EINVAL; rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (rc < 0) return rc; /* drop the possible '\n' from the end */ if (buf[*ppos - 1] == '\n') buf[*ppos - 1] = '\0'; if (radioup == 0) { ret = -ENETDOWN; goto exit; } if (!strcmp(buf, "assert")) { ath11k_info(ab, "simulating firmware assert crash\n"); ret = ath11k_wmi_force_fw_hang_cmd(ar, ATH11K_WMI_FW_HANG_ASSERT_TYPE, ATH11K_WMI_FW_HANG_DELAY); } else if (!strcmp(buf, "hw-restart")) { ath11k_info(ab, "user requested hw restart\n"); queue_work(ab->workqueue_aux, &ab->reset_work); ret = 0; } else { ret = -EINVAL; goto exit; } if (ret) { ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret); goto exit; } ret = count; exit: return ret; } static const struct file_operations fops_simulate_fw_crash = { .read = ath11k_read_simulate_fw_crash, .write = ath11k_write_simulate_fw_crash, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; u32 filter; int ret; if (kstrtouint_from_user(ubuf, count, 0, &filter)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto out; } if (filter == ar->debug.extd_tx_stats) { ret = count; goto out; } ar->debug.extd_tx_stats = filter; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { - char buf[32] = {0}; + char buf[32] = {}; struct ath11k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x\n", ar->debug.extd_tx_stats); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_extd_tx_stats = { .read = ath11k_read_enable_extd_tx_stats, .write = ath11k_write_enable_extd_tx_stats, .open = simple_open }; static ssize_t ath11k_write_extd_rx_stats(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; struct ath11k_base *ab = ar->ab; - struct htt_rx_ring_tlv_filter tlv_filter = {0}; + struct htt_rx_ring_tlv_filter tlv_filter = {}; u32 enable, rx_filter = 0, ring_id; int i; int ret; if (kstrtouint_from_user(ubuf, count, 0, &enable)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto exit; } if (enable > 1) { ret = -EINVAL; goto exit; } if (enable == ar->debug.extd_rx_stats) { ret = count; goto exit; } if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ar->debug.extd_rx_stats = enable; ret = count; goto exit; } if (enable) { rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START; rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START; rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END; rx_filter |= 
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS; rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT; rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE; tlv_filter.rx_filter = rx_filter; tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | HTT_RX_FP_DATA_FILTER_FLASG3; } else { tlv_filter = ath11k_mac_mon_status_filter_default; } ar->debug.rx_filter = tlv_filter.rx_filter; for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter); if (ret) { ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); goto exit; } } ar->debug.extd_rx_stats = enable; ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_read_extd_rx_stats(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%d\n", ar->debug.extd_rx_stats); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_extd_rx_stats = { .read = ath11k_read_extd_rx_stats, .write = ath11k_write_extd_rx_stats, .open = simple_open, }; static int ath11k_fill_bp_stats(struct ath11k_base *ab, struct ath11k_bp_stats *bp_stats, char *buf, int len, int size) { lockdep_assert_held(&ab->base_lock); len += scnprintf(buf + len, size - len, "count: %u\n", bp_stats->count); len += scnprintf(buf + len, size - len, "hp: %u\n", bp_stats->hp); len += scnprintf(buf + len, size - len, "tp: %u\n", bp_stats->tp); len += scnprintf(buf + len, size - len, "seen before: %ums\n\n", jiffies_to_msecs(jiffies - bp_stats->jiffies)); return len; } static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab, char *buf, int size) { struct ath11k_bp_stats *bp_stats; bool stats_rxd = false; u8 i, pdev_idx; int len = 0; len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n"); len += scnprintf(buf + len, size - len, "==================\n"); spin_lock_bh(&ab->base_lock); for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) { bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i]; if (!bp_stats->count) continue; len += scnprintf(buf + len, size - len, "Ring: %s\n", htt_bp_umac_ring[i]); len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); stats_rxd = true; } for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) { for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) { bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx]; if (!bp_stats->count) continue; len += scnprintf(buf + len, size - len, "Ring: %s\n", htt_bp_lmac_ring[i]); len += scnprintf(buf + len, size - len, "pdev: %d\n", pdev_idx); len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); stats_rxd = true; } } spin_unlock_bh(&ab->base_lock); if (!stats_rxd) len += scnprintf(buf + len, size - len, "No Ring Backpressure stats received\n\n"); return len; } static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k_base *ab = file->private_data; struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats; int len = 0, i, retval; const int size = 4096; static const 
char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = { "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC", "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse", "AMSDU parse", "SA timeout", "DA timeout", "Flow timeout", "Flush req"}; static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = { "Desc addr zero", "Desc inval", "AMPDU in non BA", "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump", "Frame OOR", "BAR OOR", "No BA session", "Frame SN equal SSN", "PN check fail", "2k err", "PN err", "Desc blocked"}; char *buf; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n"); len += scnprintf(buf + len, size - len, "err ring pkts: %u\n", soc_stats->err_ring_pkts); len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n", soc_stats->invalid_rbm); len += scnprintf(buf + len, size - len, "RXDMA errors:\n"); for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++) len += scnprintf(buf + len, size - len, "%s: %u\n", rxdma_err[i], soc_stats->rxdma_error[i]); len += scnprintf(buf + len, size - len, "\nREO errors:\n"); for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++) len += scnprintf(buf + len, size - len, "%s: %u\n", reo_err[i], soc_stats->reo_error[i]); len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n"); len += scnprintf(buf + len, size - len, "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n", soc_stats->hal_reo_error[0], soc_stats->hal_reo_error[1], soc_stats->hal_reo_error[2], soc_stats->hal_reo_error[3]); len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n"); len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n"); for (i = 0; i < ab->hw_params.max_tx_ring; i++) len += scnprintf(buf + len, size - len, "ring%d: %u\n", i, soc_stats->tx_err.desc_na[i]); len += scnprintf(buf + len, size - len, "\nMisc Transmit Failures: %d\n", atomic_read(&soc_stats->tx_err.misc_fail)); len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return retval; } static const struct file_operations fops_soc_dp_stats = { .read = ath11k_debugfs_dump_soc_dp_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_write_fw_dbglog(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; - char buf[128] = {0}; + char buf[128] = {}; struct ath11k_fw_dbglog dbglog; unsigned int param, mod_id_index, is_end; u64 value; int ret, num; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end); if (num < 2) return -EINVAL; mutex_lock(&ar->conf_mutex); if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP || param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) { if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) { ret = -EINVAL; goto out; } ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value); if (!is_end) { ret = count; goto out; } } else { if (num != 2) { ret = -EINVAL; goto out; } } dbglog.param = param; dbglog.value = lower_32_bits(value); ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog); if (ret) { ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n", ret); goto out; } ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_fw_dbglog = { .write = ath11k_write_fw_dbglog, .open = 
simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_open_sram_dump(struct inode *inode, struct file *file) { struct ath11k_base *ab = inode->i_private; u8 *buf; u32 start, end; int ret; start = ab->hw_params.sram_dump.start; end = ab->hw_params.sram_dump.end; buf = vmalloc(end - start + 1); if (!buf) return -ENOMEM; ret = ath11k_hif_read(ab, buf, start, end); if (ret) { ath11k_warn(ab, "failed to dump sram: %d\n", ret); vfree(buf); return ret; } file->private_data = buf; return 0; } static ssize_t ath11k_read_sram_dump(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k_base *ab = file->f_inode->i_private; const char *buf = file->private_data; int len; u32 start, end; start = ab->hw_params.sram_dump.start; end = ab->hw_params.sram_dump.end; len = end - start + 1; return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static int ath11k_release_sram_dump(struct inode *inode, struct file *file) { vfree(file->private_data); file->private_data = NULL; return 0; } static const struct file_operations fops_sram_dump = { .open = ath11k_open_sram_dump, .read = ath11k_read_sram_dump, .release = ath11k_release_sram_dump, .owner = THIS_MODULE, .llseek = default_llseek, }; int ath11k_debugfs_pdev_create(struct ath11k_base *ab) { if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); debugfs_create_file("soc_dp_stats", 0400, ab->debugfs_soc, ab, &fops_soc_dp_stats); if (ab->hw_params.sram_dump.start != 0) debugfs_create_file("sram", 0400, ab->debugfs_soc, ab, &fops_sram_dump); return 0; } void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab) { debugfs_remove_recursive(ab->debugfs_soc); ab->debugfs_soc = NULL; } int ath11k_debugfs_soc_create(struct ath11k_base *ab) { struct dentry *root; bool dput_needed; char name[64]; int ret; root = debugfs_lookup("ath11k", NULL); if (!root) { root = debugfs_create_dir("ath11k", NULL); if (IS_ERR_OR_NULL(root)) return PTR_ERR(root); dput_needed = false; } else { /* a dentry from lookup() needs dput() after we don't use it */ dput_needed = true; } scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus), dev_name(ab->dev)); ab->debugfs_soc = debugfs_create_dir(name, root); if (IS_ERR_OR_NULL(ab->debugfs_soc)) { ret = PTR_ERR(ab->debugfs_soc); goto out; } ret = 0; out: if (dput_needed) dput(root); return ret; } void ath11k_debugfs_soc_destroy(struct ath11k_base *ab) { debugfs_remove_recursive(ab->debugfs_soc); ab->debugfs_soc = NULL; /* We are not removing ath11k directory on purpose, even if it * would be empty. This simplifies the directory handling and it's * a minor cosmetic issue to leave an empty ath11k directory to * debugfs. 
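 *
 * For reference, the per-device directory created by
 * ath11k_debugfs_soc_create() above is named "<bus>-<device>" from
 * ath11k_bus_str() and dev_name(), so a PCI device would appear as e.g.
 * /sys/kernel/debug/ath11k/pci-0000:06:00.0 (example name only).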
*/ } EXPORT_SYMBOL(ath11k_debugfs_soc_destroy); void ath11k_debugfs_fw_stats_init(struct ath11k *ar) { struct dentry *fwstats_dir = debugfs_create_dir("fw_stats", ar->debug.debugfs_pdev); ar->fw_stats.debugfs_fwstats = fwstats_dir; /* all stats debugfs files created are under "fw_stats" directory * created per PDEV */ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar, &fops_pdev_stats); debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar, &fops_vdev_stats); debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar, &fops_bcn_stats); } static ssize_t ath11k_write_pktlog_filter(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; struct ath11k_base *ab = ar->ab; - struct htt_rx_ring_tlv_filter tlv_filter = {0}; + struct htt_rx_ring_tlv_filter tlv_filter = {}; u32 rx_filter = 0, ring_id, filter, mode; - u8 buf[128] = {0}; + u8 buf[128] = {}; int i, ret, rx_buf_sz = 0; ssize_t rc; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto out; } rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (rc < 0) { ret = rc; goto out; } buf[rc] = '\0'; ret = sscanf(buf, "0x%x %u", &filter, &mode); if (ret != 2) { ret = -EINVAL; goto out; } if (filter) { ret = ath11k_wmi_pdev_pktlog_enable(ar, filter); if (ret) { ath11k_warn(ar->ab, "failed to enable pktlog filter %x: %d\n", ar->debug.pktlog_filter, ret); goto out; } } else { ret = ath11k_wmi_pdev_pktlog_disable(ar); if (ret) { ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret); goto out; } } /* Clear rx filter set for monitor mode and rx status */ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, HAL_RXDMA_MONITOR_STATUS, rx_buf_sz, &tlv_filter); if (ret) { ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); goto out; } } #define HTT_RX_FILTER_TLV_LITE_MODE \ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \ HTT_RX_FILTER_TLV_FLAGS_MPDU_START) if (mode == ATH11K_PKTLOG_MODE_FULL) { rx_filter = HTT_RX_FILTER_TLV_LITE_MODE | HTT_RX_FILTER_TLV_FLAGS_MSDU_START | HTT_RX_FILTER_TLV_FLAGS_MSDU_END | HTT_RX_FILTER_TLV_FLAGS_MPDU_END | HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | HTT_RX_FILTER_TLV_FLAGS_ATTENTION; rx_buf_sz = DP_RX_BUFFER_SIZE; } else if (mode == ATH11K_PKTLOG_MODE_LITE) { ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_PKTLOG); if (ret) { ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret); goto out; } rx_filter = HTT_RX_FILTER_TLV_LITE_MODE; rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; } else { rx_buf_sz = DP_RX_BUFFER_SIZE; tlv_filter = ath11k_mac_mon_status_filter_default; rx_filter = tlv_filter.rx_filter; ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_DEFAULT); if (ret) { ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n", ret); goto out; } } tlv_filter.rx_filter = rx_filter; if (rx_filter) { tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | HTT_RX_FP_DATA_FILTER_FLASG3; } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = 
ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, ar->dp.mac_id + i, HAL_RXDMA_MONITOR_STATUS, rx_buf_sz, &tlv_filter); if (ret) { ath11k_warn(ab, "failed to set rx filter for monitor status ring\n"); goto out; } } ath11k_info(ab, "pktlog mode %s\n", ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite")); ar->debug.pktlog_filter = filter; ar->debug.pktlog_mode = mode; ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_read_pktlog_filter(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { - char buf[32] = {0}; + char buf[32] = {}; struct ath11k *ar = file->private_data; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n", ar->debug.pktlog_filter, ar->debug.pktlog_mode); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_pktlog_filter = { .read = ath11k_read_pktlog_filter, .write = ath11k_write_pktlog_filter, .open = simple_open }; static ssize_t ath11k_write_simulate_radar(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; int ret; ret = ath11k_wmi_simulate_radar(ar); if (ret) return ret; return count; } static const struct file_operations fops_simulate_radar = { .write = ath11k_write_simulate_radar, .open = simple_open }; static ssize_t ath11k_debug_dump_dbr_entries(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data; static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"}; int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100; char *buf; int i, ret; int len = 0; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; len += scnprintf(buf + len, size - len, "-----------------------------------------\n"); len += scnprintf(buf + len, size - len, "| idx | hp | tp | timestamp | event |\n"); len += scnprintf(buf + len, size - len, "-----------------------------------------\n"); spin_lock_bh(&dbr_dbg_data->lock); for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) { len += scnprintf(buf + len, size - len, "|%4u|%8u|%8u|%11llu|%8s|\n", i, dbr_dbg_data->entries[i].hp, dbr_dbg_data->entries[i].tp, dbr_dbg_data->entries[i].timestamp, event_id_to_string[dbr_dbg_data->entries[i].event]); } spin_unlock_bh(&dbr_dbg_data->lock); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); return ret; } static const struct file_operations fops_debug_dump_dbr_entries = { .read = ath11k_debug_dump_dbr_entries, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id) { struct ath11k_debug_dbr *dbr_debug; struct ath11k_dbg_dbr_data *dbr_dbg_data; if (!ar->debug.dbr_debug[dbr_id]) return; dbr_debug = ar->debug.dbr_debug[dbr_id]; dbr_dbg_data = &dbr_debug->dbr_dbg_data; debugfs_remove_recursive(dbr_debug->dbr_debugfs); kfree(dbr_dbg_data->entries); kfree(dbr_debug); ar->debug.dbr_debug[dbr_id] = NULL; } static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id) { struct ath11k_debug_dbr *dbr_debug; struct ath11k_dbg_dbr_data *dbr_dbg_data; static const char * const dbr_id_to_str[] = {"spectral", "CFR"}; if (ar->debug.dbr_debug[dbr_id]) return 0; ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug), GFP_KERNEL); if (!ar->debug.dbr_debug[dbr_id]) return -ENOMEM; dbr_debug = 
ar->debug.dbr_debug[dbr_id]; dbr_dbg_data = &dbr_debug->dbr_dbg_data; if (dbr_debug->dbr_debugfs) return 0; dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id], ar->debug.debugfs_pdev); if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) { if (IS_ERR(dbr_debug->dbr_debugfs)) return PTR_ERR(dbr_debug->dbr_debugfs); return -ENOMEM; } dbr_debug->dbr_debug_enabled = true; dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX; dbr_dbg_data->dbr_debug_idx = 0; dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX, sizeof(struct ath11k_dbg_dbr_entry), GFP_KERNEL); if (!dbr_dbg_data->entries) return -ENOMEM; spin_lock_init(&dbr_dbg_data->lock); debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs, dbr_dbg_data, &fops_debug_dump_dbr_entries); return 0; } static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; - char buf[32] = {0}; + char buf[32] = {}; u32 dbr_id, enable; int ret; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto out; } ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (ret < 0) goto out; buf[ret] = '\0'; ret = sscanf(buf, "%u %u", &dbr_id, &enable); if (ret != 2 || dbr_id > 1 || enable > 1) { ret = -EINVAL; ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n"); goto out; } if (enable) { ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id); if (ret) { ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n", ret); goto out; } } else { ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id); } ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_dbr_debug = { .write = ath11k_debugfs_write_enable_dbr_dbg, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; ssize_t ret; u8 ps_timekeeper_enable; if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto exit; } if (!ar->ps_state_enable) { ret = -EINVAL; goto exit; } ar->ps_timekeeper_enable = !!ps_timekeeper_enable; ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; int len; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ps_timekeeper_enable = { .read = ath11k_read_ps_timekeeper_enable, .write = ath11k_write_ps_timekeeper_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath11k_reset_peer_ps_duration(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->ps_total_duration = 0; spin_unlock_bh(&ar->data_lock); } static ssize_t ath11k_write_reset_ps_duration(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; int ret; u8 reset_ps_duration; if (kstrtou8_from_user(user_buf, count, 0, 
&reset_ps_duration)) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto exit; } if (!ar->ps_state_enable) { ret = -EINVAL; goto exit; } ieee80211_iterate_stations_atomic(ar->hw, ath11k_reset_peer_ps_duration, ar); ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_reset_ps_duration = { .write = ath11k_write_reset_ps_duration, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static void ath11k_peer_ps_state_disable(void *data, struct ieee80211_sta *sta) { struct ath11k *ar = data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; arsta->ps_start_time = 0; arsta->ps_total_duration = 0; spin_unlock_bh(&ar->data_lock); } static ssize_t ath11k_write_ps_state_enable(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; struct ath11k_pdev *pdev = ar->pdev; int ret; u32 param; u8 ps_state_enable; if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) return -EINVAL; mutex_lock(&ar->conf_mutex); ps_state_enable = !!ps_state_enable; if (ar->ps_state_enable == ps_state_enable) { ret = count; goto exit; } param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE; ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n", ret); goto exit; } ar->ps_state_enable = ps_state_enable; if (!ar->ps_state_enable) { ar->ps_timekeeper_enable = false; ieee80211_iterate_stations_atomic(ar->hw, ath11k_peer_ps_state_disable, ar); } ret = count; exit: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_read_ps_state_enable(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; int len; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_ps_state_enable = { .read = ath11k_read_ps_state_enable, .write = ath11k_write_ps_state_enable, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; int ath11k_debugfs_register(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; char pdev_name[10]; - char buf[100] = {0}; + char buf[100] = {}; snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx); ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); if (IS_ERR(ar->debug.debugfs_pdev)) return PTR_ERR(ar->debug.debugfs_pdev); /* Create a symlink under ieee80211/phy* */ snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev); debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf); ath11k_debugfs_htt_stats_init(ar); ath11k_debugfs_fw_stats_init(ar); debugfs_create_file("ext_tx_stats", 0644, ar->debug.debugfs_pdev, ar, &fops_extd_tx_stats); debugfs_create_file("ext_rx_stats", 0644, ar->debug.debugfs_pdev, ar, &fops_extd_rx_stats); debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_pdev, ar, &fops_pktlog_filter); debugfs_create_file("fw_dbglog_config", 0600, ar->debug.debugfs_pdev, ar, &fops_fw_dbglog); if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_pdev, ar, &fops_simulate_radar); debugfs_create_bool("dfs_block_radar_events", 0200, ar->debug.debugfs_pdev, 
&ar->dfs_block_radar_events); } if (ab->hw_params.dbr_debug_support) debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev, ar, &fops_dbr_debug); debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar, &fops_ps_state_enable); if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, ar->ab->wmi_ab.svc_map)) { debugfs_create_file("ps_timekeeper_enable", 0600, ar->debug.debugfs_pdev, ar, &fops_ps_timekeeper_enable); debugfs_create_file("reset_ps_duration", 0200, ar->debug.debugfs_pdev, ar, &fops_reset_ps_duration); } return 0; } void ath11k_debugfs_unregister(struct ath11k *ar) { struct ath11k_debug_dbr *dbr_debug; struct ath11k_dbg_dbr_data *dbr_dbg_data; int i; for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) { dbr_debug = ar->debug.dbr_debug[i]; if (!dbr_debug) continue; dbr_dbg_data = &dbr_debug->dbr_dbg_data; kfree(dbr_dbg_data->entries); debugfs_remove_recursive(dbr_debug->dbr_debugfs); kfree(dbr_debug); ar->debug.dbr_debug[i] = NULL; } } static ssize_t ath11k_write_twt_add_dialog(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k_vif *arvif = file->private_data; - struct wmi_twt_add_dialog_params params = { 0 }; - struct wmi_twt_enable_params twt_params = {0}; + struct wmi_twt_add_dialog_params params = {}; + struct wmi_twt_enable_params twt_params = {}; struct ath11k *ar = arvif->ar; - u8 buf[128] = {0}; + u8 buf[128] = {}; int ret; if (ar->twt_enabled == 0) { ath11k_err(ar->ab, "twt support is not enabled\n"); return -EOPNOTSUPP; } ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (ret < 0) return ret; buf[ret] = '\0'; ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu", &params.peer_macaddr[0], &params.peer_macaddr[1], &params.peer_macaddr[2], &params.peer_macaddr[3], &params.peer_macaddr[4], &params.peer_macaddr[5], &params.dialog_id, &params.wake_intvl_us, &params.wake_intvl_mantis, &params.wake_dura_us, &params.sp_offset_us, &params.twt_cmd, &params.flag_bcast, &params.flag_trigger, &params.flag_flow_type, &params.flag_protection); if (ret != 16) return -EINVAL; /* In the case of station vif, TWT is entirely handled by * the firmware based on the input parameters in the TWT enable * WMI command that is sent to the target during assoc. * For manually testing the TWT feature, we need to first disable * TWT and send enable command again with TWT input parameter * sta_cong_timer_ms set to 0. 
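 *
 * The write format follows the sscanf() above: the peer MAC address and
 * then dialog_id, wake_intvl_us, wake_intvl_mantis, wake_dura_us,
 * sp_offset_us, twt_cmd, flag_bcast, flag_trigger, flag_flow_type and
 * flag_protection. A sketch with illustrative values and path only:
 *
 *   echo '00:03:7f:01:02:03 1 102400 100 30720 20 4 0 1 0 0' > \
 *        /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/twt/add_dialog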
*/ if (arvif->vif->type == NL80211_IFTYPE_STATION) { ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); ath11k_wmi_fill_default_twt_params(&twt_params); twt_params.sta_cong_timer_ms = 0; ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params); } params.vdev_id = arvif->vdev_id; ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params); if (ret) goto err_twt_add_dialog; return count; err_twt_add_dialog: if (arvif->vif->type == NL80211_IFTYPE_STATION) { ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); ath11k_wmi_fill_default_twt_params(&twt_params); ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params); } return ret; } static ssize_t ath11k_write_twt_del_dialog(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k_vif *arvif = file->private_data; - struct wmi_twt_del_dialog_params params = { 0 }; - struct wmi_twt_enable_params twt_params = {0}; + struct wmi_twt_del_dialog_params params = {}; + struct wmi_twt_enable_params twt_params = {}; struct ath11k *ar = arvif->ar; - u8 buf[64] = {0}; + u8 buf[64] = {}; int ret; if (ar->twt_enabled == 0) { ath11k_err(ar->ab, "twt support is not enabled\n"); return -EOPNOTSUPP; } ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (ret < 0) return ret; buf[ret] = '\0'; ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u", &params.peer_macaddr[0], &params.peer_macaddr[1], &params.peer_macaddr[2], &params.peer_macaddr[3], &params.peer_macaddr[4], &params.peer_macaddr[5], &params.dialog_id); if (ret != 7) return -EINVAL; params.vdev_id = arvif->vdev_id; ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params); if (ret) return ret; if (arvif->vif->type == NL80211_IFTYPE_STATION) { ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); ath11k_wmi_fill_default_twt_params(&twt_params); ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params); } return count; } static ssize_t ath11k_write_twt_pause_dialog(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k_vif *arvif = file->private_data; - struct wmi_twt_pause_dialog_params params = { 0 }; - u8 buf[64] = {0}; + struct wmi_twt_pause_dialog_params params = {}; + u8 buf[64] = {}; int ret; if (arvif->ar->twt_enabled == 0) { ath11k_err(arvif->ar->ab, "twt support is not enabled\n"); return -EOPNOTSUPP; } ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (ret < 0) return ret; buf[ret] = '\0'; ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u", &params.peer_macaddr[0], &params.peer_macaddr[1], &params.peer_macaddr[2], &params.peer_macaddr[3], &params.peer_macaddr[4], &params.peer_macaddr[5], &params.dialog_id); if (ret != 7) return -EINVAL; params.vdev_id = arvif->vdev_id; ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params); if (ret) return ret; return count; } static ssize_t ath11k_write_twt_resume_dialog(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct ath11k_vif *arvif = file->private_data; - struct wmi_twt_resume_dialog_params params = { 0 }; - u8 buf[64] = {0}; + struct wmi_twt_resume_dialog_params params = {}; + u8 buf[64] = {}; int ret; if (arvif->ar->twt_enabled == 0) { ath11k_err(arvif->ar->ab, "twt support is not enabled\n"); return -EOPNOTSUPP; } ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); if (ret < 0) return ret; buf[ret] = '\0'; ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u", &params.peer_macaddr[0], &params.peer_macaddr[1], &params.peer_macaddr[2], &params.peer_macaddr[3], &params.peer_macaddr[4], 
&params.peer_macaddr[5], &params.dialog_id, &params.sp_offset_us, &params.next_twt_size); if (ret != 9) return -EINVAL; params.vdev_id = arvif->vdev_id; ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params); if (ret) return ret; return count; } static const struct file_operations ath11k_fops_twt_add_dialog = { .write = ath11k_write_twt_add_dialog, .open = simple_open }; static const struct file_operations ath11k_fops_twt_del_dialog = { .write = ath11k_write_twt_del_dialog, .open = simple_open }; static const struct file_operations ath11k_fops_twt_pause_dialog = { .write = ath11k_write_twt_pause_dialog, .open = simple_open }; static const struct file_operations ath11k_fops_twt_resume_dialog = { .write = ath11k_write_twt_resume_dialog, .open = simple_open }; void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_base *ab = arvif->ar->ab; struct dentry *debugfs_twt; if (arvif->vif->type != NL80211_IFTYPE_AP && !(arvif->vif->type == NL80211_IFTYPE_STATION && test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map))) return; debugfs_twt = debugfs_create_dir("twt", arvif->vif->debugfs_dir); debugfs_create_file("add_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_add_dialog); debugfs_create_file("del_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_del_dialog); debugfs_create_file("pause_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_pause_dialog); debugfs_create_file("resume_dialog", 0200, debugfs_twt, arvif, &ath11k_fops_twt_resume_dialog); } diff --git a/debugfs_htt_stats.c b/debugfs_htt_stats.c index 870e86a31bf8..11d28c42227e 100644 --- a/debugfs_htt_stats.c +++ b/debugfs_htt_stats.c @@ -1,4904 +1,4905 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include #include "core.h" #include "dp_tx.h" #include "dp_rx.h" #include "debug.h" #include "debugfs_htt_stats.h" #define HTT_MAX_PRINT_CHAR_PER_ELEM 15 #define HTT_TLV_HDR_LEN 4 #define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \ do { \ int index = 0; u8 i; const char *str_val = str; \ const char *new_line = newline; \ if (str_val) { \ index += scnprintf((out + buflen), \ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \ "%s = ", str_val); \ } \ for (i = 0; i < len; i++) { \ index += scnprintf((out + buflen) + index, \ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \ " %u:%u,", i, arr[i]); \ } \ index += scnprintf((out + buflen) + index, \ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \ "%s", new_line); \ buflen += index; \ } while (0) static inline void htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_stats_string_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i; tag_len = tag_len >> 2; len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "data = "); for (i = 0; i < tag_len; i++) { len += scnprintf(buf + len, buf_len - len, "%.*s", 4, (char *)&(htt_stats_buf->data[i])); } /* New lines are added for better display */ len += scnprintf(buf + len, buf_len - len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", htt_stats_buf->hw_queued); len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", htt_stats_buf->hw_reaped); len += scnprintf(buf + len, buf_len - len, "underrun = %u\n", htt_stats_buf->underrun); len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n", htt_stats_buf->hw_paused); len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n", htt_stats_buf->hw_flush); len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n", htt_stats_buf->hw_filt); len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", htt_stats_buf->tx_abort); len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n", htt_stats_buf->mpdu_requeued); len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n", htt_stats_buf->tx_xretry); len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n", htt_stats_buf->data_rc); len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n", htt_stats_buf->mpdu_dropped_xretry); len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n", htt_stats_buf->illgl_rate_phy_err); len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n", htt_stats_buf->cont_xretry); len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n", htt_stats_buf->tx_timeout); len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n", htt_stats_buf->pdev_resets); len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n", htt_stats_buf->phy_underrun); len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n", htt_stats_buf->txop_ovf); len += 
scnprintf(buf + len, buf_len - len, "seq_posted = %u\n", htt_stats_buf->seq_posted); len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n", htt_stats_buf->seq_failed_queueing); len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n", htt_stats_buf->seq_completed); len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n", htt_stats_buf->seq_restarted); len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n", htt_stats_buf->mu_seq_posted); len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n", htt_stats_buf->seq_switch_hw_paused); len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n", htt_stats_buf->next_seq_posted_dsr); len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n", htt_stats_buf->seq_posted_isr); len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n", htt_stats_buf->seq_ctrl_cached); len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n", htt_stats_buf->mpdu_count_tqm); len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n", htt_stats_buf->msdu_count_tqm); len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n", htt_stats_buf->mpdu_removed_tqm); len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n", htt_stats_buf->msdu_removed_tqm); len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n", htt_stats_buf->mpdus_sw_flush); len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", htt_stats_buf->mpdus_hw_filter); len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n", htt_stats_buf->mpdus_truncated); len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n", htt_stats_buf->mpdus_ack_failed); len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n", htt_stats_buf->mpdus_expired); len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n", htt_stats_buf->mpdus_seq_hw_retry); len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", htt_stats_buf->ack_tlv_proc); len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n", htt_stats_buf->coex_abort_mpdu_cnt_valid); len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n", htt_stats_buf->coex_abort_mpdu_cnt); len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n", htt_stats_buf->num_total_ppdus_tried_ota); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n", htt_stats_buf->num_data_ppdus_tried_ota); len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n", htt_stats_buf->local_ctrl_mgmt_enqued); len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n", htt_stats_buf->local_ctrl_mgmt_freed); len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n", htt_stats_buf->local_data_enqued); len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n", htt_stats_buf->local_data_freed); len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n", htt_stats_buf->mpdu_tried); len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n", htt_stats_buf->isr_wait_seq_posted); len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n", htt_stats_buf->tx_active_dur_us_low); len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n", htt_stats_buf->tx_active_dur_us_high); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf, u16 tag_len, 
struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status, "sifs_hist_status", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n"); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = 
%u\n", htt_stats_buf->num_data_ppdus_legacy_su); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n", htt_stats_buf->num_data_ppdus_ac_su); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n", htt_stats_buf->num_data_ppdus_ax_su); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n", htt_stats_buf->num_data_ppdus_ac_su_txbf); len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n", htt_stats_buf->num_data_ppdus_ax_su_txbf); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2); len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n"); len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n", htt_stats_buf->hist_bin_size); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist, "tried_mpdu_cnt_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0}; + char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {}; len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n"); memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]), HTT_STATS_MAX_HW_INTR_NAME_LEN); len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name); len += scnprintf(buf + len, buf_len - len, "mask = %u\n", htt_stats_buf->mask); len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0}; + char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {}; len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n"); memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]), HTT_STATS_MAX_HW_MODULE_NAME_LEN); len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n", hw_module_name); len += scnprintf(buf + len, buf_len - len, "count = %u\n", htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, 
"mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", htt_stats_buf->tx_abort); len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n", htt_stats_buf->tx_abort_fail_count); len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n", htt_stats_buf->rx_abort); len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n", htt_stats_buf->rx_abort_fail_count); len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n", htt_stats_buf->warm_reset); len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n", htt_stats_buf->cold_reset); len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n", htt_stats_buf->tx_flush); len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n", htt_stats_buf->tx_glb_reset); len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n", htt_stats_buf->tx_txq_reset); len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n", htt_stats_buf->rx_timeout_reset); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n", htt_stats_buf->last_update_timestamp); len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n", htt_stats_buf->last_add_timestamp); len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n", htt_stats_buf->last_remove_timestamp); len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n", htt_stats_buf->total_processed_msdu_count); len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n", htt_stats_buf->cur_msdu_count_in_flowq); len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", htt_stats_buf->sw_peer_id); len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n", FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO, htt_stats_buf->tx_flow_no__tid_num__drop_rule)); len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM, htt_stats_buf->tx_flow_no__tid_num__drop_rule)); len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n", FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE, htt_stats_buf->tx_flow_no__tid_num__drop_rule)); len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n", htt_stats_buf->last_cycle_enqueue_count); len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n", htt_stats_buf->last_cycle_dequeue_count); len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n", htt_stats_buf->last_cycle_drop_count); len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n", htt_stats_buf->current_drop_th); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char tid_name[MAX_HTT_TID_NAME + 1] = {0}; + char tid_name[MAX_HTT_TID_NAME + 1] = {}; len += scnprintf(buf + len, buf_len - len, 
"HTT_TX_TID_STATS_TLV:\n"); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID, htt_stats_buf->sw_peer_id__tid_num)); len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", FIELD_GET(HTT_TX_TID_STATS_TID_NUM, htt_stats_buf->sw_peer_id__tid_num)); len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n", FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING, htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n", FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ, htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n", htt_stats_buf->tid_flags); len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", htt_stats_buf->hw_queued); len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", htt_stats_buf->hw_reaped); len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", htt_stats_buf->mpdus_hw_filter); len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n", htt_stats_buf->qdepth_bytes); len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n", htt_stats_buf->qdepth_num_msdu); len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n", htt_stats_buf->qdepth_num_mpdu); len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n", htt_stats_buf->last_scheduled_tsmp); len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n", htt_stats_buf->pause_module_id); len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n", htt_stats_buf->block_module_id); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char tid_name[MAX_HTT_TID_NAME + 1] = {0}; + char tid_name[MAX_HTT_TID_NAME + 1] = {}; len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n"); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID, htt_stats_buf->sw_peer_id__tid_num)); len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM, htt_stats_buf->sw_peer_id__tid_num)); len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n", FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING, htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n", FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ, htt_stats_buf->num_sched_pending__num_ppdu_in_hwq)); len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n", htt_stats_buf->tid_flags); len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n", htt_stats_buf->max_qdepth_bytes); len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n", htt_stats_buf->max_qdepth_n_msdus); len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n", htt_stats_buf->rsvd); len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n", htt_stats_buf->qdepth_bytes); len += 
scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n", htt_stats_buf->qdepth_num_msdu); len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n", htt_stats_buf->qdepth_num_mpdu); len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n", htt_stats_buf->last_scheduled_tsmp); len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n", htt_stats_buf->pause_module_id); len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n", htt_stats_buf->block_module_id); len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n", htt_stats_buf->allow_n_flags); len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n", htt_stats_buf->sendn_frms_allowed); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; - char tid_name[MAX_HTT_TID_NAME + 1] = {0}; + char tid_name[MAX_HTT_TID_NAME + 1] = {}; len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n", FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID, htt_stats_buf->sw_peer_id__tid_num)); len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n", FIELD_GET(HTT_RX_TID_STATS_TID_NUM, htt_stats_buf->sw_peer_id__tid_num)); memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME); len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name); len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n", htt_stats_buf->dup_in_reorder); len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n", htt_stats_buf->dup_past_outside_window); len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n", htt_stats_buf->dup_past_within_window); len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n", htt_stats_buf->rxdesc_err_decrypt); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_counter_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_counter_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name, "counter_name", HTT_MAX_COUNTER_NAME, "\n"); len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", htt_stats_buf->count); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n", htt_stats_buf->ppdu_cnt); len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n", htt_stats_buf->mpdu_cnt); len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n", htt_stats_buf->msdu_cnt); len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n", htt_stats_buf->pause_bitmap); len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n", 
htt_stats_buf->block_bitmap); len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n", htt_stats_buf->rssi); len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n", htt_stats_buf->peer_enqueued_count_low | ((u64)htt_stats_buf->peer_enqueued_count_high << 32)); len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n", htt_stats_buf->peer_dequeued_count_low | ((u64)htt_stats_buf->peer_dequeued_count_high << 32)); len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n", htt_stats_buf->peer_dropped_count_low | ((u64)htt_stats_buf->peer_dropped_count_high << 32)); len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n", htt_stats_buf->ppdu_transmitted_bytes_low | ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32)); len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n", htt_stats_buf->peer_ttl_removed_count); len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n", htt_stats_buf->inactive_time); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_peer_details_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_peer_details_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n", htt_stats_buf->peer_type); len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", htt_stats_buf->sw_peer_id); len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n", FIELD_GET(HTT_PEER_DETAILS_VDEV_ID, htt_stats_buf->vdev_pdev_ast_idx)); len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n", FIELD_GET(HTT_PEER_DETAILS_PDEV_ID, htt_stats_buf->vdev_pdev_ast_idx)); len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n", FIELD_GET(HTT_PEER_DETAILS_AST_IDX, htt_stats_buf->vdev_pdev_ast_idx)); len += scnprintf(buf + len, buf_len - len, "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n", FIELD_GET(HTT_MAC_ADDR_L32_0, htt_stats_buf->mac_addr.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_1, htt_stats_buf->mac_addr.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_2, htt_stats_buf->mac_addr.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_3, htt_stats_buf->mac_addr.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_H16_0, htt_stats_buf->mac_addr.mac_addr_h16), FIELD_GET(HTT_MAC_ADDR_H16_1, htt_stats_buf->mac_addr.mac_addr_h16)); len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n", htt_stats_buf->peer_flags); len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n", htt_stats_buf->qpeer_flags); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 j; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n", htt_stats_buf->tx_ldpc); len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", htt_stats_buf->rts_cnt); len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n", htt_stats_buf->ack_rssi); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, 
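/* The mac_addr print above unpacks the address from two words: the
 * HTT_MAC_ADDR_L32_0..3 masks pull the four low octets out of
 * mac_addr_l32 and HTT_MAC_ADDR_H16_0..1 the remaining two out of
 * mac_addr_h16, one FIELD_GET() per "%02lx" conversion.
 */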
htt_stats_buf->tx_su_mcs, "tx_su_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss", HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream", HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, buf_len - len, "tx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL, HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n"); } PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm", HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 j; len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "nsts = %u\n", htt_stats_buf->nsts); len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n", htt_stats_buf->rx_ldpc); len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", htt_stats_buf->rts_cnt); len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n", htt_stats_buf->rssi_mgmt); len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n", htt_stats_buf->rssi_data); len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n", htt_stats_buf->rssi_comb); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss", HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm", HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw", HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) { len += scnprintf(buf + len, (buf_len - len), "rssi_chain[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL, HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n"); } for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, (buf_len - len), "rx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream", HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, 
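/* A note on PRINT_ARRAY_TO_BUF() as used throughout this file (a
 * description of its contract as visible from the call sites, not of its
 * exact definition): it appends an optional "name = " label, one entry
 * per array element, and then the terminator string passed as the last
 * argument. Passing NULL as the label, as in the tx_gi[]/rx_gi[] and
 * rssi_chain[] loops above, lets the caller emit its own per-row prefix
 * first.
 */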
"mu_mimo_sch_posted = %u\n", htt_stats_buf->mu_mimo_sch_posted); len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", htt_stats_buf->mu_mimo_sch_failed); len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n", htt_stats_buf->mu_mimo_ppdu_posted); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n", htt_stats_buf->mu_mimo_mpdus_queued_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n", htt_stats_buf->mu_mimo_mpdus_tried_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n", htt_stats_buf->mu_mimo_mpdus_failed_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n", htt_stats_buf->mu_mimo_mpdus_requeued_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n", htt_stats_buf->mu_mimo_err_no_ba_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n", htt_stats_buf->mu_mimo_mpdu_underrun_usr); len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n", htt_stats_buf->mu_mimo_ampdu_underrun_usr); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID, htt_stats_buf->mac_id__hwq_id__word)); len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n", FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID, htt_stats_buf->mac_id__hwq_id__word)); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; /* TODO: HKDBG */ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID, htt_stats_buf->mac_id__hwq_id__word)); len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n", FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID, htt_stats_buf->mac_id__hwq_id__word)); len += scnprintf(buf + len, buf_len - len, "xretry = %u\n", htt_stats_buf->xretry); len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n", htt_stats_buf->underrun_cnt); len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n", htt_stats_buf->flush_cnt); len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n", htt_stats_buf->filt_cnt); len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n", htt_stats_buf->null_mpdu_bmap); len += scnprintf(buf + len, buf_len - len, 
"user_ack_failure = %u\n", htt_stats_buf->user_ack_failure); len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", htt_stats_buf->ack_tlv_proc); len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n", htt_stats_buf->sched_id_proc); len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n", htt_stats_buf->null_mpdu_tx_count); len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n", htt_stats_buf->mpdu_bmap_not_recvd); len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n", htt_stats_buf->num_bar); len += scnprintf(buf + len, buf_len - len, "rts = %u\n", htt_stats_buf->rts); len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", htt_stats_buf->cts2self); len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", htt_stats_buf->qos_null); len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n", htt_stats_buf->mpdu_tried_cnt); len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n", htt_stats_buf->mpdu_queued_cnt); len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n", htt_stats_buf->mpdu_ack_fail_cnt); len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n", htt_stats_buf->mpdu_filt_cnt); len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n", htt_stats_buf->false_mpdu_ack_count); len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n", htt_stats_buf->txq_timeout); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n"); len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n", htt_stats_buf->hist_intvl); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist, "difs_latency_hist", data_len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 data_len; data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result", data_len, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems; num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status, "cmd_stall_status", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 
0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems; num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2); len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n"); len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n", htt_stats_buf->hist_bin_size); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist, "tried_mpdu_cnt_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u32 num_elements = tag_len >> 2; len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist, "txop_used_cnt_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { s32 i; const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; const u32 *cbf_20 = htt_stats_buf->cbf_20; const u32 *cbf_40 = htt_stats_buf->cbf_40; const u32 *cbf_80 = htt_stats_buf->cbf_80; const u32 *cbf_160 = htt_stats_buf->cbf_160; if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) { len += scnprintf(buf + len, buf_len - len, "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n"); len += scnprintf(buf + len, buf_len - len, "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", 
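/* In this sounding printer, tx_sounding_mode selects the 11ac or 11ax
 * interpretation of the same TLV, and cbf_20/40/80/160 count
 * compressed-beamforming-feedback exchanges per bandwidth, broken down
 * by steering type: implicit TXBF (IBF) versus explicit SU/MU sounding,
 * the latter split by whether the report came back SIFS-separated or via
 * a regular back-off (RBO); these expansions are inferred from the index
 * names, not defined in this file.
 */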
cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n", i, htt_stats_buf->sounding[0], htt_stats_buf->sounding[1], htt_stats_buf->sounding[2], htt_stats_buf->sounding[3]); } } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) { len += scnprintf(buf + len, buf_len - len, "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); len += scnprintf(buf + len, buf_len - len, "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n", cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS], cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n", i, htt_stats_buf->sounding[0], htt_stats_buf->sounding[1], htt_stats_buf->sounding[2], htt_stats_buf->sounding[3]); } } if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n", htt_stats_buf->su_bar); len += scnprintf(buf + len, buf_len - len, "rts = %u\n", htt_stats_buf->rts); len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", htt_stats_buf->cts2self); 
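/* A note on the "if (len >= buf_len) ... else buf[len] = 0;" epilogue
 * that every printer in this file repeats: scnprintf(), unlike
 * snprintf(), returns the number of bytes actually written, so len never
 * grows past buf_len; the conditional only decides where the terminating
 * NUL goes, using buf[buf_len - 1] instead of the out-of-bounds buf[len]
 * when the buffer is exactly full.
 */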
len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", htt_stats_buf->qos_null); len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n", htt_stats_buf->delayed_bar_1); len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n", htt_stats_buf->delayed_bar_2); len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n", htt_stats_buf->delayed_bar_3); len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n", htt_stats_buf->delayed_bar_4); len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n", htt_stats_buf->delayed_bar_5); len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n", htt_stats_buf->delayed_bar_6); len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n", htt_stats_buf->delayed_bar_7); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n", htt_stats_buf->ac_su_ndpa); len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n", htt_stats_buf->ac_su_ndp); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n", htt_stats_buf->ac_mu_mimo_ndpa); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n", htt_stats_buf->ac_mu_mimo_ndp); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n", htt_stats_buf->ac_mu_mimo_brpoll_1); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n", htt_stats_buf->ac_mu_mimo_brpoll_2); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n", htt_stats_buf->ac_mu_mimo_brpoll_3); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n", htt_stats_buf->ax_su_ndpa); len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n", htt_stats_buf->ax_su_ndp); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n", htt_stats_buf->ax_mu_mimo_ndpa); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n", htt_stats_buf->ax_mu_mimo_ndp); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_1); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_2); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_3); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_4); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_5); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_6); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n", htt_stats_buf->ax_mu_mimo_brpoll_7); len += 
scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n", htt_stats_buf->ax_basic_trigger); len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n", htt_stats_buf->ax_ulmumimo_trigger); len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n", htt_stats_buf->ax_bsr_trigger); len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n", htt_stats_buf->ax_mu_bar_trigger); len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n", htt_stats_buf->ax_mu_rts_trigger); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n", htt_stats_buf->ac_su_ndp_err); len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n", htt_stats_buf->ac_su_ndpa_err); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n", htt_stats_buf->ac_mu_mimo_ndpa_err); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n", htt_stats_buf->ac_mu_mimo_ndp_err); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n", htt_stats_buf->ac_mu_mimo_brp1_err); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n", htt_stats_buf->ac_mu_mimo_brp2_err); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n", htt_stats_buf->ac_mu_mimo_brp3_err); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n", htt_stats_buf->ax_su_ndp_err); len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n", htt_stats_buf->ax_su_ndpa_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n", htt_stats_buf->ax_mu_mimo_ndpa_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n", htt_stats_buf->ax_mu_mimo_ndp_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n", htt_stats_buf->ax_mu_mimo_brp1_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n", htt_stats_buf->ax_mu_mimo_brp2_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n", htt_stats_buf->ax_mu_mimo_brp3_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n", htt_stats_buf->ax_mu_mimo_brp4_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n", htt_stats_buf->ax_mu_mimo_brp5_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n", htt_stats_buf->ax_mu_mimo_brp6_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n", htt_stats_buf->ax_mu_mimo_brp7_err); len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n", htt_stats_buf->ax_basic_trigger_err); len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = 
%u\n", htt_stats_buf->ax_ulmumimo_trigger_err); len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n", htt_stats_buf->ax_bsr_trigger_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n", htt_stats_buf->ax_mu_bar_trigger_err); len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n", htt_stats_buf->ax_mu_rts_trigger_err); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n", htt_stats_buf->mu_mimo_sch_posted); len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", htt_stats_buf->mu_mimo_sch_failed); len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n", htt_stats_buf->mu_mimo_ppdu_posted); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_posted_per_group_index %u = %u\n", i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_posted_per_group_index %u = %u\n", i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]); len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_nusers_%u = %u\n", i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ofdma_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ofdma_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "ax_ul_ofdma_basic_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "ax_ul_ofdma_sch_bar_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "ax_ul_ofdma_brp_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]); } len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIO SCH STATS:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]); len += scnprintf(buf + len, buf_len - len, "ax_ul_mumimo_brp_sch_nusers_%u = %u\n", i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]); } if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, struct debug_htt_stats_req 
*stats_req) { const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) { if (!htt_stats_buf->user_index) len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) { len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_mpdus_queued_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_queued_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_mpdus_tried_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_tried_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_mpdus_failed_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_failed_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_requeued_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_err_no_ba_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->err_no_ba_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdu_underrun_usr); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", htt_stats_buf->user_index, htt_stats_buf->ampdu_underrun_usr); } } if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) { if (!htt_stats_buf->user_index) len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) { len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_mpdus_queued_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_queued_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_mpdus_tried_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_tried_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_mpdus_failed_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_failed_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_requeued_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_err_no_ba_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->err_no_ba_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdu_underrun_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", htt_stats_buf->user_index, htt_stats_buf->ampdu_underrun_usr); } } if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) { if (!htt_stats_buf->user_index) len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) { len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_queued_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_tried_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdus_failed_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n", 
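/* One instance of this MPDU-stats TLV arrives per user: tx_sched_mode
 * selects the AC MU-MIMO, AX MU-MIMO, or AX MU-OFDMA labeling, the
 * section header is printed only for user_index 0, and the range check
 * on user_index simply skips entries beyond the per-mode user count.
 */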
htt_stats_buf->user_index, htt_stats_buf->mpdus_requeued_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_err_no_ba_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->err_no_ba_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n", htt_stats_buf->user_index, htt_stats_buf->mpdu_underrun_usr); len += scnprintf(buf + len, buf_len - len, "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n", htt_stats_buf->user_index, htt_stats_buf->ampdu_underrun_usr); } } if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX); len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted, "sched_cmd_posted", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX); len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped, "sched_cmd_reaped", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; /* each entry is u32, i.e. 4 bytes */ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG); len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su", sched_order_su_num_entries, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; /* each entry is u32, i.e. 
4 bytes */ u32 sched_ineligibility_num_entries = tag_len >> 2; len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility, "sched_ineligibility", sched_ineligibility_num_entries, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID, htt_stats_buf->mac_id__txq_id__word)); len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n", FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID, htt_stats_buf->mac_id__txq_id__word)); len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n", htt_stats_buf->sched_policy); len += scnprintf(buf + len, buf_len - len, "last_sched_cmd_posted_timestamp = %u\n", htt_stats_buf->last_sched_cmd_posted_timestamp); len += scnprintf(buf + len, buf_len - len, "last_sched_cmd_compl_timestamp = %u\n", htt_stats_buf->last_sched_cmd_compl_timestamp); len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n", htt_stats_buf->sched_2_tac_lwm_count); len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n", htt_stats_buf->sched_2_tac_ring_full); len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n", htt_stats_buf->sched_cmd_post_failure); len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n", htt_stats_buf->num_active_tids); len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n", htt_stats_buf->num_ps_schedules); len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n", htt_stats_buf->sched_cmds_pending); len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n", htt_stats_buf->num_tid_register); len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n", htt_stats_buf->num_tid_unregister); len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n", htt_stats_buf->num_qstats_queried); len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n", htt_stats_buf->qstats_update_pending); len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n", htt_stats_buf->last_qstats_query_timestamp); len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n", htt_stats_buf->num_tqm_cmdq_full); len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n", htt_stats_buf->num_de_sched_algo_trigger); len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n", htt_stats_buf->num_rt_sched_algo_trigger); len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n", htt_stats_buf->num_tqm_sched_algo_trigger); len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n", htt_stats_buf->notify_sched); len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n", htt_stats_buf->dur_based_sendn_term); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_stats_tx_sched_cmn_tlv 
*htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n", htt_stats_buf->current_timestamp); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason, "gen_mpdu_end_reason", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason, "list_mpdu_end_reason", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist, "list_mpdu_cnt_hist", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n"); len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n", htt_stats_buf->msdu_count); len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n", htt_stats_buf->mpdu_count); len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n", htt_stats_buf->remove_msdu); len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n", htt_stats_buf->remove_mpdu); len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n", htt_stats_buf->remove_msdu_ttl); len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n", htt_stats_buf->send_bar); len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n", htt_stats_buf->bar_sync); len += scnprintf(buf + len, buf_len 
- len, "notify_mpdu = %u\n", htt_stats_buf->notify_mpdu); len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", htt_stats_buf->sync_cmd); len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", htt_stats_buf->write_cmd); len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n", htt_stats_buf->hwsch_trigger); len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", htt_stats_buf->ack_tlv_proc); len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", htt_stats_buf->gen_mpdu_cmd); len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n", htt_stats_buf->gen_list_cmd); len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", htt_stats_buf->remove_mpdu_cmd); len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n", htt_stats_buf->remove_mpdu_tried_cmd); len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", htt_stats_buf->mpdu_queue_stats_cmd); len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", htt_stats_buf->mpdu_head_info_cmd); len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", htt_stats_buf->msdu_flow_stats_cmd); len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", htt_stats_buf->remove_msdu_cmd); len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n", htt_stats_buf->remove_msdu_ttl_cmd); len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", htt_stats_buf->flush_cache_cmd); len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", htt_stats_buf->update_mpduq_cmd); len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n", htt_stats_buf->enqueue); len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n", htt_stats_buf->enqueue_notify); len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n", htt_stats_buf->notify_mpdu_at_head); len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n", htt_stats_buf->notify_mpdu_state_valid); len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n", htt_stats_buf->sched_udp_notify1); len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n", htt_stats_buf->sched_udp_notify2); len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n", htt_stats_buf->sched_nonudp_notify1); len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n", htt_stats_buf->sched_nonudp_notify2); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n", htt_stats_buf->max_cmdq_id); len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n", htt_stats_buf->list_mpdu_cnt_hist_intvl); len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n", htt_stats_buf->add_msdu); len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n", htt_stats_buf->q_empty); len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n", htt_stats_buf->q_not_empty); len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n", 
htt_stats_buf->drop_notification); len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n", htt_stats_buf->desc_threshold); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n", htt_stats_buf->q_empty_failure); len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n", htt_stats_buf->q_not_empty_failure); len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n", htt_stats_buf->add_msdu_failure); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID, htt_stats_buf->mac_id__cmdq_id__word)); len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n", FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID, htt_stats_buf->mac_id__cmdq_id__word)); len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", htt_stats_buf->sync_cmd); len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", htt_stats_buf->write_cmd); len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", htt_stats_buf->gen_mpdu_cmd); len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", htt_stats_buf->mpdu_queue_stats_cmd); len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", htt_stats_buf->mpdu_head_info_cmd); len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", htt_stats_buf->msdu_flow_stats_cmd); len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", htt_stats_buf->remove_mpdu_cmd); len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", htt_stats_buf->remove_msdu_cmd); len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", htt_stats_buf->flush_cache_cmd); len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", htt_stats_buf->update_mpduq_cmd); len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n", htt_stats_buf->update_msduq_cmd); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n", htt_stats_buf->m1_packets); len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n", htt_stats_buf->m2_packets); len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n", htt_stats_buf->m3_packets); len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n", 
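/* The EAPOL counters here follow the usual key-exchange numbering:
 * m1_packets..m4_packets are messages 1-4 of the WPA 4-way handshake,
 * and g1_packets/g2_packets are the two messages of the group-key
 * handshake.
 */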
htt_stats_buf->m4_packets); len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n", htt_stats_buf->g1_packets); len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n", htt_stats_buf->g2_packets); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n", htt_stats_buf->ap_bss_peer_not_found); len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n", htt_stats_buf->ap_bcast_mcast_no_peer); len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n", htt_stats_buf->sta_delete_in_progress); len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n", htt_stats_buf->ibss_no_bss_peer); len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n", htt_stats_buf->invalid_vdev_type); len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n", htt_stats_buf->invalid_ast_peer_entry); len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n", htt_stats_buf->peer_entry_invalid); len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n", htt_stats_buf->ethertype_not_ip); len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n", htt_stats_buf->eapol_lookup_failed); len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n", htt_stats_buf->qpeer_not_allow_data); len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n", htt_stats_buf->fse_tid_override); len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n", htt_stats_buf->ipv6_jumbogram_zero_length); len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n", htt_stats_buf->qos_to_non_qos_in_prog); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_classify_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n", htt_stats_buf->arp_packets); len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n", htt_stats_buf->igmp_packets); len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n", htt_stats_buf->dhcp_packets); len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n", htt_stats_buf->host_inspected); len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n", htt_stats_buf->htt_included); len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n", htt_stats_buf->htt_valid_mcs); len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n", htt_stats_buf->htt_valid_nss); len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n", htt_stats_buf->htt_valid_preamble_type); len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n", htt_stats_buf->htt_valid_chainmask); len += scnprintf(buf + len, buf_len - len, 
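/* In this classify printer, "fse" appears to stand for the datapath's
 * flow search engine; the expansion is inferred from related Qualcomm
 * datapath code rather than defined in this file. The fse_priority_* and
 * fse_traffic_ptrn_* counters record how flows were classified, and the
 * fse_hwqueue_* counters how the resulting hardware queues were handled.
 */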
"htt_valid_guard_interval = %u\n", htt_stats_buf->htt_valid_guard_interval); len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n", htt_stats_buf->htt_valid_retries); len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n", htt_stats_buf->htt_valid_bw_info); len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n", htt_stats_buf->htt_valid_power); len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n", htt_stats_buf->htt_valid_key_flags); len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n", htt_stats_buf->htt_valid_no_encryption); len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n", htt_stats_buf->fse_entry_count); len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n", htt_stats_buf->fse_priority_be); len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n", htt_stats_buf->fse_priority_high); len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n", htt_stats_buf->fse_priority_low); len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n", htt_stats_buf->fse_traffic_ptrn_be); len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n", htt_stats_buf->fse_traffic_ptrn_over_sub); len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n", htt_stats_buf->fse_traffic_ptrn_bursty); len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n", htt_stats_buf->fse_traffic_ptrn_interactive); len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n", htt_stats_buf->fse_traffic_ptrn_periodic); len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n", htt_stats_buf->fse_hwqueue_alloc); len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n", htt_stats_buf->fse_hwqueue_created); len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n", htt_stats_buf->fse_hwqueue_send_to_host); len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n", htt_stats_buf->mcast_entry); len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n", htt_stats_buf->bcast_entry); len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n", htt_stats_buf->htt_update_peer_cache); len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n", htt_stats_buf->htt_learning_frame); len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n", htt_stats_buf->fse_invalid_peer); len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n", htt_stats_buf->mec_notify); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "eok = %u\n", htt_stats_buf->eok); len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n", htt_stats_buf->classify_done); len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n", htt_stats_buf->lookup_failed); len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n", htt_stats_buf->send_host_dhcp); len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n", htt_stats_buf->send_host_mcast); len += scnprintf(buf + len, 
buf_len - len, "send_host_unknown_dest = %u\n", htt_stats_buf->send_host_unknown_dest); len += scnprintf(buf + len, buf_len - len, "send_host = %u\n", htt_stats_buf->send_host); len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n", htt_stats_buf->status_invalid); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n", htt_stats_buf->enqueued_pkts); len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n", htt_stats_buf->to_tqm); len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n", htt_stats_buf->to_tqm_bypass); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n", htt_stats_buf->discarded_pkts); len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n", htt_stats_buf->local_frames); len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n", htt_stats_buf->is_ext_msdu); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n", htt_stats_buf->tcl_dummy_frame); len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n", htt_stats_buf->tqm_dummy_frame); len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n", htt_stats_buf->tqm_notify_frame); len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n", htt_stats_buf->fw2wbm_enq); len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n", htt_stats_buf->tqm_bypass_frame); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elements = tag_len >> 2; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist, "fw2wbm_ring_full_hist", num_elements, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req 
*stats_req) { const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n", htt_stats_buf->tcl2fw_entry_count); len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n", htt_stats_buf->not_to_fw); len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n", htt_stats_buf->invalid_pdev_vdev_peer); len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n", htt_stats_buf->tcl_res_invalid_addrx); len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n", htt_stats_buf->wbm2fw_entry_count); len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n", htt_stats_buf->invalid_pdev); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_ring_if_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n", htt_stats_buf->base_addr); len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", htt_stats_buf->elem_size); len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n", FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS, htt_stats_buf->num_elems__prefetch_tail_idx)); len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n", FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX, htt_stats_buf->num_elems__prefetch_tail_idx)); len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n", FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX, htt_stats_buf->head_idx__tail_idx)); len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n", FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX, htt_stats_buf->head_idx__tail_idx)); len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n", FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX, htt_stats_buf->shadow_head_idx__shadow_tail_idx)); len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n", FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX, htt_stats_buf->shadow_head_idx__shadow_tail_idx)); len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n", htt_stats_buf->num_tail_incr); len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n", FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH, htt_stats_buf->lwm_thresh__hwm_thresh)); len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n", FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH, htt_stats_buf->lwm_thresh__hwm_thresh)); len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n", htt_stats_buf->overrun_hit_count); len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n", htt_stats_buf->underrun_hit_count); len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n", htt_stats_buf->prod_blockwait_count); len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n", htt_stats_buf->cons_blockwait_count); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count, "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count, 
"high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = tag_len >> 2; len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n, "dwords_used_by_user_n", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sfm_client_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "client_id = %u\n", htt_stats_buf->client_id); len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n", htt_stats_buf->buf_min); len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n", htt_stats_buf->buf_max); len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n", htt_stats_buf->buf_busy); len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n", htt_stats_buf->buf_alloc); len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n", htt_stats_buf->buf_avail); len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n", htt_stats_buf->num_users); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sfm_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n", htt_stats_buf->buf_total); len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n", htt_stats_buf->mem_empty); len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n", htt_stats_buf->deallocate_bufs); len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sring_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 
buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_SRING_STATS_MAC_ID, htt_stats_buf->mac_id__ring_id__arena__ep)); len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n", FIELD_GET(HTT_SRING_STATS_RING_ID, htt_stats_buf->mac_id__ring_id__arena__ep)); len += scnprintf(buf + len, buf_len - len, "arena = %lu\n", FIELD_GET(HTT_SRING_STATS_ARENA, htt_stats_buf->mac_id__ring_id__arena__ep)); len += scnprintf(buf + len, buf_len - len, "ep = %lu\n", FIELD_GET(HTT_SRING_STATS_EP, htt_stats_buf->mac_id__ring_id__arena__ep)); len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n", htt_stats_buf->base_addr_lsb); len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n", htt_stats_buf->base_addr_msb); len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n", htt_stats_buf->ring_size); len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", htt_stats_buf->elem_size); len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n", FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS, htt_stats_buf->num_avail_words__num_valid_words)); len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n", FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS, htt_stats_buf->num_avail_words__num_valid_words)); len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n", FIELD_GET(HTT_SRING_STATS_HEAD_PTR, htt_stats_buf->head_ptr__tail_ptr)); len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n", FIELD_GET(HTT_SRING_STATS_TAIL_PTR, htt_stats_buf->head_ptr__tail_ptr)); len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n", FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY, htt_stats_buf->consumer_empty__producer_full)); len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n", FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL, htt_stats_buf->consumer_empty__producer_full)); len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n", FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT, htt_stats_buf->prefetch_count__internal_tail_ptr)); len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n", FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR, htt_stats_buf->prefetch_count__internal_tail_ptr)); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_sring_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", htt_stats_buf->num_records); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 j; len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n", htt_stats_buf->tx_ldpc); len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n", 
htt_stats_buf->ac_mu_mimo_tx_ldpc); len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n", htt_stats_buf->ax_mu_mimo_tx_ldpc); len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n", htt_stats_buf->ofdma_tx_ldpc); len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", htt_stats_buf->rts_cnt); len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n", htt_stats_buf->rts_success); len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n", htt_stats_buf->ack_rssi); len += scnprintf(buf + len, buf_len - len, "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n", htt_stats_buf->tx_legacy_cck_rate[0], htt_stats_buf->tx_legacy_cck_rate[1], htt_stats_buf->tx_legacy_cck_rate[2], htt_stats_buf->tx_legacy_cck_rate[3]); len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n" " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n", htt_stats_buf->tx_legacy_ofdm_rate[0], htt_stats_buf->tx_legacy_ofdm_rate[1], htt_stats_buf->tx_legacy_ofdm_rate[2], htt_stats_buf->tx_legacy_ofdm_rate[3], htt_stats_buf->tx_legacy_ofdm_rate[4], htt_stats_buf->tx_legacy_ofdm_rate[5], htt_stats_buf->tx_legacy_ofdm_rate[6], htt_stats_buf->tx_legacy_ofdm_rate[7]); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs, "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs, "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss", HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss, "ac_mu_mimo_tx_nss", HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss, "ax_mu_mimo_tx_nss", HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss", HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw, "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw, "ax_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream", HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n", htt_stats_buf->tx_he_ltf[1], htt_stats_buf->tx_he_ltf[2], htt_stats_buf->tx_he_ltf[3]); /* SU GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, (buf_len - len), "tx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* AC MU-MIMO GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, (buf_len - len), "ac_mu_mimo_tx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j], NULL, 
HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* AX MU-MIMO GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, (buf_len - len), "ax_mu_mimo_tx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j], NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } /* DL OFDMA GI Stats */ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, (buf_len - len), "ofdma_tx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm", HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u8 i, j; len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "nsts = %u\n", htt_stats_buf->nsts); len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n", htt_stats_buf->rx_ldpc); len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", htt_stats_buf->rts_cnt); len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n", htt_stats_buf->rssi_mgmt); len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n", htt_stats_buf->rssi_data); len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n", htt_stats_buf->rssi_comb); len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n", htt_stats_buf->rssi_in_dbm); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss", HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm", HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw", HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n", htt_stats_buf->nss_count); len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n", htt_stats_buf->pilot_count); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { len += scnprintf(buf + len, buf_len - len, "pilot_evm_db[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++) len += scnprintf(buf + len, buf_len - len, " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db[j][i]); len += scnprintf(buf + len, buf_len - len, "\n"); } len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean = "); for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) len += scnprintf(buf + len, buf_len - len, " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]); len += scnprintf(buf + len, buf_len - len, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { len += scnprintf(buf + len, buf_len - len, "rssi_chain[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL, HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); } for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, 
buf_len - len, "rx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream", HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n", htt_stats_buf->rx_11ax_su_ext); len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n", htt_stats_buf->rx_11ac_mumimo); len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n", htt_stats_buf->rx_11ax_mumimo); len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n", htt_stats_buf->rx_11ax_ofdma); len += scnprintf(buf + len, buf_len - len, "txbf = %u\n", htt_stats_buf->txbf); len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u", htt_stats_buf->rx_su_ndpa); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs, "rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u", htt_stats_buf->rx_mu_ndpa); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs, "rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u", htt_stats_buf->rx_br_poll); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate, "rx_legacy_cck_rate", HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate, "rx_legacy_ofdm_rate", HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n"); len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n", htt_stats_buf->rx_active_dur_us_low); len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n", htt_stats_buf->rx_active_dur_us_high); len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n", htt_stats_buf->rx_11ax_ul_ofdma); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs, "ul_ofdma_rx_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = ", j); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL, HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); } PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss, "ul_ofdma_rx_nss", HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw", HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n", htt_stats_buf->ul_ofdma_rx_stbc); len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n", htt_stats_buf->ul_ofdma_rx_ldpc); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu, "rx_ulofdma_non_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu, "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok, "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail, "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { len += scnprintf(buf + len, buf_len - len, "rx_ul_fd_rssi: nss[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) len += scnprintf(buf + len, buf_len - len, " %u:%d,", i, htt_stats_buf->rx_ul_fd_rssi[j][i]); len += scnprintf(buf + len, buf_len - len, "\n"); } 
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers, "rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers, "rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs, "rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru, "rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu, "rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu, "rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok, "rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail, "rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n", htt_stats_buf->per_chain_rssi_pkt_type); for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { len += scnprintf(buf + len, buf_len - len, "rx_per_chain_rssi_in_dbm[%u] = ", j); for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, " %u:%d,", i, htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]); len += scnprintf(buf + len, buf_len - len, "\n"); } len += scnprintf(buf + len, buf_len - len, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n", htt_stats_buf->fw_reo_ring_data_msdu); len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n", htt_stats_buf->fw_to_host_data_msdu_bcmc); len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n", htt_stats_buf->fw_to_host_data_msdu_uc); len += scnprintf(buf + len, buf_len - len, "ofld_remote_data_buf_recycle_cnt = %u\n", htt_stats_buf->ofld_remote_data_buf_recycle_cnt); len += scnprintf(buf + len, buf_len - len, "ofld_remote_free_buf_indication_cnt = %u\n", htt_stats_buf->ofld_remote_free_buf_indication_cnt); len += scnprintf(buf + len, buf_len - len, "ofld_buf_to_host_data_msdu_uc = %u\n", htt_stats_buf->ofld_buf_to_host_data_msdu_uc); len += scnprintf(buf + len, buf_len - len, "reo_fw_ring_to_host_data_msdu_uc = %u\n", htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc); len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n", htt_stats_buf->wbm_sw_ring_reap); len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n", htt_stats_buf->wbm_forward_to_host_cnt); len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n", htt_stats_buf->wbm_target_recycle_cnt); len += scnprintf(buf + len, buf_len - len, "target_refill_ring_recycle_cnt = %u\n", htt_stats_buf->target_refill_ring_recycle_cnt); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void 
htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING); /* clamp the firmware-reported word count to the array size */ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt, "refill_ring_empty_cnt", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE); len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE); len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n", htt_stats_buf->sample_id); len += scnprintf(buf + len, buf_len - len, "total_max = %u\n", htt_stats_buf->total_max); len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n", htt_stats_buf->total_avg); len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n", htt_stats_buf->total_sample); len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n", htt_stats_buf->non_zeros_avg); len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n", htt_stats_buf->non_zeros_sample); len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n", htt_stats_buf->last_non_zeros_max); len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min = %u\n", htt_stats_buf->last_non_zeros_min); len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg = %u\n", htt_stats_buf->last_non_zeros_avg); len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample = %u\n\n", htt_stats_buf->last_non_zeros_sample); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf, u16 tag_len, struct 
debug_htt_stats_req *stats_req) { const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING); len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill, "refill_ring_num_refill", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n", htt_stats_buf->ppdu_recvd); len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n", htt_stats_buf->mpdu_cnt_fcs_ok); len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n", htt_stats_buf->mpdu_cnt_fcs_err); len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n", htt_stats_buf->tcp_msdu_cnt); len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n", htt_stats_buf->tcp_ack_msdu_cnt); len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n", htt_stats_buf->udp_msdu_cnt); len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n", htt_stats_buf->other_msdu_cnt); len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n", htt_stats_buf->fw_ring_mpdu_ind); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype, "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype, "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n"); len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n", htt_stats_buf->fw_ring_mcast_data_msdu); len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n", htt_stats_buf->fw_ring_bcast_data_msdu); len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n", htt_stats_buf->fw_ring_ucast_data_msdu); len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n", htt_stats_buf->fw_ring_null_data_msdu); len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n", htt_stats_buf->fw_ring_mpdu_drop); len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n", htt_stats_buf->ofld_local_data_ind_cnt); len += scnprintf(buf + len, buf_len - len, "ofld_local_data_buf_recycle_cnt = %u\n", htt_stats_buf->ofld_local_data_buf_recycle_cnt); len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n", htt_stats_buf->drx_local_data_ind_cnt); len += scnprintf(buf + len, buf_len - len, "drx_local_data_buf_recycle_cnt = %u\n", htt_stats_buf->drx_local_data_buf_recycle_cnt); len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n", htt_stats_buf->local_nondata_ind_cnt); len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n", htt_stats_buf->local_nondata_buf_recycle_cnt); len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n", htt_stats_buf->fw_status_buf_ring_refill_cnt); len += 
scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n", htt_stats_buf->fw_status_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n", htt_stats_buf->fw_pkt_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n", htt_stats_buf->fw_pkt_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n", htt_stats_buf->fw_link_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n", htt_stats_buf->fw_link_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n", htt_stats_buf->host_pkt_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n", htt_stats_buf->host_pkt_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n", htt_stats_buf->mon_pkt_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n", htt_stats_buf->mon_pkt_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_refill_cnt = %u\n", htt_stats_buf->mon_status_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n", htt_stats_buf->mon_status_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n", htt_stats_buf->mon_desc_buf_ring_refill_cnt); len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n", htt_stats_buf->mon_desc_buf_ring_empty_cnt); len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n", htt_stats_buf->mon_dest_ring_update_cnt); len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n", htt_stats_buf->mon_dest_ring_full_cnt); len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n", htt_stats_buf->rx_suspend_cnt); len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n", htt_stats_buf->rx_suspend_fail_cnt); len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n", htt_stats_buf->rx_resume_cnt); len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n", htt_stats_buf->rx_resume_fail_cnt); len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n", htt_stats_buf->rx_ring_switch_cnt); len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n", htt_stats_buf->rx_ring_restore_cnt); len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n", htt_stats_buf->rx_flush_cnt); len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n", htt_stats_buf->rx_recovery_reset_cnt); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err, "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct 
htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX); len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n"); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop", num_elems, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n", htt_stats_buf->mac_id__word); len += scnprintf(buf + len, buf_len - len, "total_phy_err_cnt = %u\n", htt_stats_buf->total_phy_err_cnt); PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs", HTT_STATS_PHY_ERR_MAX, "\n\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", htt_stats_buf->chan_num); len += scnprintf(buf + len, buf_len - len, "num_records = %u\n", htt_stats_buf->num_records); len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n", htt_stats_buf->valid_cca_counters_bitmap); len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n", htt_stats_buf->collection_interval); len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n"); len += scnprintf(buf + len, buf_len - len, "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n"); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; /* each CCA counters TLV emits one row under the table header printed by the preceding CCA_STATS_HIST TLV */ len += scnprintf(buf + len, buf_len - len, "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n", htt_stats_buf->tx_frame_usec, htt_stats_buf->rx_frame_usec, htt_stats_buf->rx_clear_usec, htt_stats_buf->my_rx_frame_usec, htt_stats_buf->usec_cnt, htt_stats_buf->med_rx_idle_usec, htt_stats_buf->med_tx_idle_global_usec, htt_stats_buf->cca_obss_usec); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n", FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word)); len 
+= scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n", htt_stats_buf->last_unpause_ppdu_id); len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n", htt_stats_buf->hwsch_unpause_wait_tqm_write); len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n", htt_stats_buf->hwsch_dummy_tlv_skipped); len += scnprintf(buf + len, buf_len - len, "hwsch_misaligned_offset_received = %u\n", htt_stats_buf->hwsch_misaligned_offset_received); len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n", htt_stats_buf->hwsch_reset_count); len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n", htt_stats_buf->hwsch_dev_reset_war); len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n", htt_stats_buf->hwsch_delayed_pause); len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n", htt_stats_buf->hwsch_long_delayed_pause); len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n", htt_stats_buf->sch_rx_ppdu_no_response); len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n", htt_stats_buf->sch_selfgen_response); len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n", htt_stats_buf->sch_rx_sifs_resp_trigger); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", htt_stats_buf->pdev_id); len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n", htt_stats_buf->num_sessions); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_pdev_stats_twt_session_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n", htt_stats_buf->vdev_id); len += scnprintf(buf + len, buf_len - len, "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n", FIELD_GET(HTT_MAC_ADDR_L32_0, htt_stats_buf->peer_mac.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_1, htt_stats_buf->peer_mac.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_2, htt_stats_buf->peer_mac.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_L32_3, htt_stats_buf->peer_mac.mac_addr_l32), FIELD_GET(HTT_MAC_ADDR_H16_0, htt_stats_buf->peer_mac.mac_addr_h16), FIELD_GET(HTT_MAC_ADDR_H16_1, htt_stats_buf->peer_mac.mac_addr_h16)); len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n", htt_stats_buf->flow_id_flags); len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n", htt_stats_buf->dialog_id); len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n", htt_stats_buf->wake_dura_us); len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n", htt_stats_buf->wake_intvl_us); len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n", htt_stats_buf->sp_offset_us); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } 
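/* Every printer in this file ends with the same epilogue: NUL-terminate the
 * scratch buffer and publish the new length. Since scnprintf() (unlike
 * snprintf()) returns the number of bytes it actually wrote, len can never
 * exceed buf_len - 1 here, so the truncation branch is purely defensive.
 * A hypothetical helper capturing the pattern could look like this sketch
 * (illustration only, not part of the driver):
 *
 *	static inline void htt_stats_buf_finalize(struct debug_htt_stats_req *stats_req,
 *						  u32 len)
 *	{
 *		u8 *buf = stats_req->buf;
 *
 *		if (len >= ATH11K_HTT_STATS_BUF_SIZE)
 *			buf[ATH11K_HTT_STATS_BUF_SIZE - 1] = 0;
 *		else
 *			buf[len] = 0;
 *		stats_req->buf_len = len;
 *	}
 */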
static inline void htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n", htt_stats_buf->num_obss_tx_ppdu_success); len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n", htt_stats_buf->num_obss_tx_ppdu_failure); len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n", htt_stats_buf->num_non_srg_opportunities); len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_tried); len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n", htt_stats_buf->num_non_srg_ppdu_success); len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n", htt_stats_buf->num_srg_opportunities); len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n", htt_stats_buf->num_srg_ppdu_tried); len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n", htt_stats_buf->num_srg_ppdu_success); if (len >= buf_len) buf[buf_len - 1] = 0; else buf[len] = 0; stats_req->buf_len = len; } static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf, u8 *data) { struct debug_htt_stats_req *stats_req = (struct debug_htt_stats_req *)data; struct htt_ring_backpressure_stats_tlv *htt_stats_buf = (struct htt_ring_backpressure_stats_tlv *)tag_buf; int i; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", htt_stats_buf->pdev_id); len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n", htt_stats_buf->current_head_idx); len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n", htt_stats_buf->current_tail_idx); len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n", htt_stats_buf->num_htt_msgs_sent); len += scnprintf(buf + len, buf_len - len, "backpressure_time_ms = %u\n", htt_stats_buf->backpressure_time_ms); for (i = 0; i < 5; i++) len += scnprintf(buf + len, buf_len - len, "backpressure_hist_%u = %u\n", i + 1, htt_stats_buf->backpressure_hist[i]); len += scnprintf(buf + len, buf_len - len, "============================\n"); if (len >= buf_len) { buf[buf_len - 1] = 0; stats_req->buf_len = buf_len - 1; } else { buf[len] = 0; stats_req->buf_len = len; } } static inline void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n"); len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]); len--; /* back up over the trailing comma so the next write overwrites it */ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]); len--; len += 
scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = "); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = "); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = "); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]); len--; len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = "); for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++) len += scnprintf(buf + len, buf_len - len, "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]); len--; len += scnprintf(buf + len, buf_len - len, "\n"); stats_req->buf_len = len; } static inline void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndpa_queued[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_tried_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndpa_tried[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_flushed_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_err_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndpa_err[i]); len += scnprintf(buf + len, buf_len - len, "\n"); } stats_req->buf_len = len; } static inline void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndp_queued[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_tried_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndp_tried[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_flushed_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndp_flushed[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_err_user%d = %u\n", i, htt_stats_buf->ax_ofdma_ndp_err[i]); len += scnprintf(buf + len, buf_len - len, "\n"); } stats_req->buf_len = len; } static inline void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_txbf_ofdma_brp_stats_tlv 
*htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued_user%d = %u\n", i, htt_stats_buf->ax_ofdma_brpoll_queued[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_tried_user%d = %u\n", i, htt_stats_buf->ax_ofdma_brpoll_tried[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_flushed_user%d = %u\n", i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brp_err_user%d = %u\n", i, htt_stats_buf->ax_ofdma_brp_err[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n", i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]); len += scnprintf(buf + len, buf_len - len, "\n"); } stats_req->buf_len = len; } static inline void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n"); for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) { len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer_user%d = %u\n", i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_ol_user%d = %u\n", i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_prefetch_user%d = %u\n", i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_sound_user%d = %u\n", i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]); len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_usrs_force_sound_user%d = %u\n", i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]); len += scnprintf(buf + len, buf_len - len, "\n"); } stats_req->buf_len = len; } static inline void htt_print_phy_counters_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n", htt_stats_buf->rx_ofdma_timing_err_cnt); len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n", htt_stats_buf->rx_cck_fail_cnt); len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n", htt_stats_buf->mactx_abort_cnt); len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n", htt_stats_buf->macrx_abort_cnt); len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n", htt_stats_buf->phytx_abort_cnt); len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n", htt_stats_buf->phyrx_abort_cnt); len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n", htt_stats_buf->phyrx_defer_abort_cnt); len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n", htt_stats_buf->rx_gain_adj_lstf_event_cnt); len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n", htt_stats_buf->rx_gain_adj_non_legacy_cnt); for (i = 0; i < HTT_MAX_RX_PKT_CNT; 
i++) len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n", i, htt_stats_buf->rx_pkt_cnt[i]); for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++) len += scnprintf(buf + len, buf_len - len, "rx_pkt_crc_pass_cnt[%d] = %u\n", i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]); for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++) len += scnprintf(buf + len, buf_len - len, "per_blk_err_cnt[%d] = %u\n", i, htt_stats_buf->per_blk_err_cnt[i]); for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++) len += scnprintf(buf + len, buf_len - len, "rx_ota_err_cnt[%d] = %u\n", i, htt_stats_buf->rx_ota_err_cnt[i]); stats_req->buf_len = len; } static inline void htt_print_phy_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n"); for (i = 0; i < HTT_STATS_MAX_CHAINS; i++) len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n", i, htt_stats_buf->nf_chain[i]); len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n", htt_stats_buf->false_radar_cnt); len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n", htt_stats_buf->radar_cs_cnt); len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n", htt_stats_buf->ani_level); len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n", htt_stats_buf->fw_run_time); stats_req->buf_len = len; } static inline void htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; if (tag_len < sizeof(*htt_stats_buf)) return; len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", htt_stats_buf->pdev_id); len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n", htt_stats_buf->cf_active_low_fail_cnt); len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n", htt_stats_buf->cf_active_low_pass_cnt); len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n", htt_stats_buf->phy_off_through_vreg_cnt); len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n", htt_stats_buf->force_calibration_cnt); len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n", htt_stats_buf->rf_mode_switch_phy_off_cnt); stats_req->buf_len = len; } static inline void htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) { const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; if (tag_len < sizeof(*htt_stats_buf)) return; len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n"); len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", htt_stats_buf->pdev_id); len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n", htt_stats_buf->chan_mhz); len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n", htt_stats_buf->chan_band_center_freq1); len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n", htt_stats_buf->chan_band_center_freq2); len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n", htt_stats_buf->chan_phy_mode); len += scnprintf(buf + 
len, buf_len - len, "chan_flags = 0x%0x\n", htt_stats_buf->chan_flags); len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", htt_stats_buf->chan_num); len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n", htt_stats_buf->reset_cause); len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n", htt_stats_buf->prev_reset_cause); len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n", htt_stats_buf->phy_warm_reset_src); len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n", htt_stats_buf->rx_gain_tbl_mode); len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n", htt_stats_buf->xbar_val); len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n", htt_stats_buf->force_calibration); len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n", htt_stats_buf->phyrf_mode); len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n", htt_stats_buf->phy_homechan); len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n", htt_stats_buf->phy_tx_ch_mask); len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n", htt_stats_buf->phy_rx_ch_mask); len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n", htt_stats_buf->phybb_ini_mask); len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n", htt_stats_buf->phyrf_ini_mask); len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n", htt_stats_buf->phy_dfs_en_mask); len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n", htt_stats_buf->phy_sscan_en_mask); len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n", htt_stats_buf->phy_synth_sel_mask); len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n", htt_stats_buf->phy_adfs_freq); len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n", htt_stats_buf->cck_fir_settings); len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n", htt_stats_buf->phy_dyn_pri_chan); len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n", htt_stats_buf->cca_thresh); len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n", htt_stats_buf->dyn_cca_status); len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n", htt_stats_buf->rxdesense_thresh_hw); len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n", htt_stats_buf->rxdesense_thresh_sw); stats_req->buf_len = len; } static inline void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) { const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf; u8 *buf = stats_req->buf; u32 len = stats_req->buf_len; u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE; int i; const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = { "assoc_req", "assoc_resp", "reassoc_req", "reassoc_resp", "probe_req", "probe_resp", "timing_advertisement", "reserved", "beacon", "atim", "disassoc", "auth", "deauth", "action", "action_no_ack"}; len += scnprintf(buf + len, buf_len - len, "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n"); len += scnprintf(buf + len, buf_len - len, "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1], htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3], htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]); len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n"); for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++) len += scnprintf(buf + len, 
buf_len - len, "%s:%u\n", mgmt_frm_type[i], htt_stat_buf->peer_rx_mgmt_subtype[i]); len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n"); for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++) len += scnprintf(buf + len, buf_len - len, "%s:%u\n", mgmt_frm_type[i], htt_stat_buf->peer_rx_mgmt_subtype[i]); len += scnprintf(buf + len, buf_len - len, "\n"); stats_req->buf_len = len; } static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *tag_buf, void *user_data) { struct debug_htt_stats_req *stats_req = user_data; switch (tag) { case HTT_STATS_TX_PDEV_CMN_TAG: htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_UNDERRUN_TAG: htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_PDEV_SIFS_TAG: htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_PDEV_FLUSH_TAG: htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_PDEV_PHY_ERR_TAG: htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_PDEV_SIFS_HIST_TAG: htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG: htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG: htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_STRING_TAG: htt_print_stats_string_tlv(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_CMN_TAG: htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG: htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_CMD_RESULT_TAG: htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_CMD_STALL_TAG: htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_FES_STATUS_TAG: htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG: htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG: htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_TQM_GEN_MPDU_TAG: htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_TQM_LIST_MPDU_TAG: htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG: htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_TX_TQM_CMN_TAG: htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_TQM_PDEV_TAG: htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req); break; case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG: htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG: htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG: htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG: htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG: htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG: htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG: 
htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG: htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req); break; case HTT_STATS_TX_DE_CMN_TAG: htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_RING_IF_TAG: htt_print_ring_if_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG: htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_SFM_CMN_TAG: htt_print_sfm_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_SRING_STATS_TAG: htt_print_sring_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_RX_PDEV_FW_STATS_TAG: htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG: htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req); break; case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG: htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_RX_SOC_FW_STATS_TAG: htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG: htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG: htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v( tag_buf, len, stats_req); break; case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG: htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v( tag_buf, len, stats_req); break; case HTT_STATS_RX_REFILL_REO_ERR_TAG: htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v( tag_buf, len, stats_req); break; case HTT_STATS_RX_REO_RESOURCE_STATS_TAG: htt_print_rx_reo_debug_stats_tlv_v( tag_buf, stats_req); break; case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG: htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_RATE_STATS_TAG: htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_RX_PDEV_RATE_STATS_TAG: htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG: htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SCHED_CMN_TAG: htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_PDEV_MPDU_STATS_TAG: htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG: htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_RING_IF_CMN_TAG: htt_print_ring_if_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_SFM_CLIENT_USER_TAG: htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_SFM_CLIENT_TAG: htt_print_sfm_client_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_TQM_ERROR_STATS_TAG: htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG: htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_SRING_CMN_TAG: htt_print_sring_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SOUNDING_STATS_TAG: htt_print_tx_sounding_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG: htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG: htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SELFGEN_AC_STATS_TAG: htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SELFGEN_AX_STATS_TAG: htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG: 
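/* 802.11ax self-generated frame (NDPA/NDP/BRP/trigger) error counters */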
htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG: htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG: htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG: htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_HW_INTR_MISC_TAG: htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req); break; case HTT_STATS_HW_WD_TIMEOUT_TAG: htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req); break; case HTT_STATS_HW_PDEV_ERRS_TAG: htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req); break; case HTT_STATS_COUNTER_NAME_TAG: htt_print_counter_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_TID_DETAILS_TAG: htt_print_tx_tid_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_TID_DETAILS_V1_TAG: htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req); break; case HTT_STATS_RX_TID_DETAILS_TAG: htt_print_rx_tid_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PEER_STATS_CMN_TAG: htt_print_peer_stats_cmn_tlv(tag_buf, stats_req); break; case HTT_STATS_PEER_DETAILS_TAG: htt_print_peer_details_tlv(tag_buf, stats_req); break; case HTT_STATS_PEER_MSDU_FLOWQ_TAG: htt_print_msdu_flow_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PEER_TX_RATE_STATS_TAG: htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PEER_RX_RATE_STATS_TAG: htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TX_DE_COMPL_STATS_TAG: htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG: case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG: case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG: htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req); break; case HTT_STATS_PDEV_CCA_COUNTERS_TAG: htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req); break; case HTT_STATS_WHAL_TX_TAG: htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req); break; case HTT_STATS_PDEV_TWT_SESSIONS_TAG: htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req); break; case HTT_STATS_PDEV_TWT_SESSION_TAG: htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req); break; case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG: htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG: htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req); break; case HTT_STATS_PDEV_OBSS_PD_TAG: htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req); break; case HTT_STATS_RING_BACKPRESSURE_STATS_TAG: htt_print_backpressure_stats_tlv_v(tag_buf, user_data); break; case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG: htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG: htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG: htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG: htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PHY_COUNTERS_TAG: htt_print_phy_counters_tlv(tag_buf, stats_req); break; case HTT_STATS_PHY_STATS_TAG: htt_print_phy_stats_tlv(tag_buf, stats_req); break; case HTT_STATS_PHY_RESET_COUNTERS_TAG: htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req); break; case HTT_STATS_PHY_RESET_STATS_TAG: htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req); 
break; case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG: htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req); break; default: break; } return 0; } void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_htt_extd_stats_msg *msg; struct debug_htt_stats_req *stats_req; struct ath11k *ar; u32 len; u64 cookie; int ret; bool send_completion = false; u8 pdev_id; msg = (struct ath11k_htt_extd_stats_msg *)skb->data; cookie = msg->cookie; if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) { ath11k_warn(ab, "received invalid htt ext stats event\n"); return; } pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); rcu_read_unlock(); if (!ar) { ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id); return; } stats_req = ar->debug.htt_stats.stats_req; if (!stats_req) return; spin_lock_bh(&ar->debug.htt_stats.lock); stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1); if (stats_req->done) send_completion = true; spin_unlock_bh(&ar->debug.htt_stats.lock); len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1); ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, ath11k_dbg_htt_ext_stats_parse, stats_req); if (ret) ath11k_warn(ab, "Failed to parse tlv %d\n", ret); if (send_completion) complete(&stats_req->cmpln); } static ssize_t ath11k_read_htt_stats_type(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath11k_write_htt_stats_type(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; u8 type; int ret; ret = kstrtou8_from_user(user_buf, count, 0, &type); if (ret) return ret; if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS) return -E2BIG; if (type == ATH11K_DBG_HTT_EXT_STATS_RESET) return -EPERM; ar->debug.htt_stats.type = type; ret = count; return ret; } static const struct file_operations fops_htt_stats_type = { .read = ath11k_read_htt_stats_type, .write = ath11k_write_htt_stats_type, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type, const u8 *mac_addr, struct htt_ext_stats_cfg_params *cfg_params) { if (!cfg_params) return -EINVAL; switch (type) { case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ: case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS; break; case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS; break; case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS; break; case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO: cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR; cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1), HTT_PEER_STATS_REQ_MODE_FLUSH_TQM); cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE; cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]); cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]); cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]); cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]); cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]); cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]); break; case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO: case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO: 
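/* ring interface and SRNG info requests cover all rings by default */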
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS; break; case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS; break; case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE; break; case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO: cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS; break; case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS: cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR; cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]); cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]); cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]); cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]); cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]); cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]); break; default: break; } return 0; } int ath11k_debugfs_htt_stats_req(struct ath11k *ar) { struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req; u8 type = stats_req->type; u64 cookie = 0; int ret, pdev_id = ar->pdev->pdev_id; - struct htt_ext_stats_cfg_params cfg_params = { 0 }; + struct htt_ext_stats_cfg_params cfg_params = {}; init_completion(&stats_req->cmpln); stats_req->done = false; stats_req->pdev_id = pdev_id; cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) | FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id); ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr, &cfg_params); if (ret) { ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret); return ret; } ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie); if (ret) { ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); return ret; } while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) { spin_lock_bh(&ar->debug.htt_stats.lock); if (!stats_req->done) { stats_req->done = true; spin_unlock_bh(&ar->debug.htt_stats.lock); ath11k_warn(ar->ab, "stats request timed out\n"); return -ETIMEDOUT; } spin_unlock_bh(&ar->debug.htt_stats.lock); } return 0; } static int ath11k_open_htt_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; struct debug_htt_stats_req *stats_req; u8 type = ar->debug.htt_stats.type; int ret; if (type == ATH11K_DBG_HTT_EXT_STATS_RESET || type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO || type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) return -EPERM; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto err_unlock; } if (ar->debug.htt_stats.stats_req) { ret = -EAGAIN; goto err_unlock; } stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE); if (!stats_req) { ret = -ENOMEM; goto err_unlock; } ar->debug.htt_stats.stats_req = stats_req; stats_req->type = type; ret = ath11k_debugfs_htt_stats_req(ar); if (ret < 0) goto out; file->private_data = stats_req; mutex_unlock(&ar->conf_mutex); return 0; out: vfree(stats_req); ar->debug.htt_stats.stats_req = NULL; err_unlock: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_release_htt_stats(struct inode *inode, struct file *file) { struct ath11k *ar = inode->i_private; mutex_lock(&ar->conf_mutex); vfree(file->private_data); ar->debug.htt_stats.stats_req = NULL; mutex_unlock(&ar->conf_mutex); return 0; } static ssize_t ath11k_read_htt_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct debug_htt_stats_req *stats_req = file->private_data; char *buf; u32 length = 0; buf = stats_req->buf; length = min_t(u32, 
stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE); return simple_read_from_buffer(user_buf, count, ppos, buf, length); } static const struct file_operations fops_dump_htt_stats = { .open = ath11k_open_htt_stats, .release = ath11k_release_htt_stats, .read = ath11k_read_htt_stats, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_read_htt_stats_reset(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; size_t len; len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath11k_write_htt_stats_reset(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; u8 type; - struct htt_ext_stats_cfg_params cfg_params = { 0 }; + struct htt_ext_stats_cfg_params cfg_params = {}; int ret; ret = kstrtou8_from_user(user_buf, count, 0, &type); if (ret) return ret; if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS || type == ATH11K_DBG_HTT_EXT_STATS_RESET) return -E2BIG; mutex_lock(&ar->conf_mutex); cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET; cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type); ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, ATH11K_DBG_HTT_EXT_STATS_RESET, &cfg_params, 0ULL); if (ret) { ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); mutex_unlock(&ar->conf_mutex); return ret; } ar->debug.htt_stats.reset = type; mutex_unlock(&ar->conf_mutex); ret = count; return ret; } static const struct file_operations fops_htt_stats_reset = { .read = ath11k_read_htt_stats_reset, .write = ath11k_write_htt_stats_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath11k_debugfs_htt_stats_init(struct ath11k *ar) { spin_lock_init(&ar->debug.htt_stats.lock); debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev, ar, &fops_htt_stats_type); debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev, ar, &fops_dump_htt_stats); debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev, ar, &fops_htt_stats_reset); } diff --git a/debugfs_sta.c b/debugfs_sta.c index f56a24b6c8da..d89d0f28d890 100644 --- a/debugfs_sta.c +++ b/debugfs_sta.c @@ -1,889 +1,890 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include #include "debugfs_sta.h" #include "core.h" #include "peer.h" #include "debug.h" #include "dp_tx.h" #include "debugfs_htt_stats.h" void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, struct ath11k_per_peer_tx_stats *peer_stats, u8 legacy_rate_idx) { struct rate_info *txrate = &arsta->txrate; struct ath11k_htt_tx_stats *tx_stats; int gi, mcs, bw, nss; if (!arsta->tx_stats) return; tx_stats = arsta->tx_stats; gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags); mcs = txrate->mcs; bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw); nss = txrate->nss - 1; #define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name] if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) { STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts; STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts; STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts; } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) { STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts; STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts; STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts; } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts; STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts; STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts; } else { mcs = legacy_rate_idx; STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts; STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts; STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts; } if (peer_stats->is_ampdu) { tx_stats->ba_fails += peer_stats->ba_fails; if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) { STATS_OP_FMT(AMPDU).he[0][mcs] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).he[1][mcs] += peer_stats->succ_pkts + peer_stats->retry_pkts; } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { STATS_OP_FMT(AMPDU).ht[0][mcs] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).ht[1][mcs] += peer_stats->succ_pkts + peer_stats->retry_pkts; } else { STATS_OP_FMT(AMPDU).vht[0][mcs] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).vht[1][mcs] += peer_stats->succ_pkts + peer_stats->retry_pkts; } STATS_OP_FMT(AMPDU).bw[0][bw] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).nss[0][nss] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).gi[0][gi] += peer_stats->succ_bytes + peer_stats->retry_bytes; STATS_OP_FMT(AMPDU).bw[1][bw] += peer_stats->succ_pkts + peer_stats->retry_pkts; STATS_OP_FMT(AMPDU).nss[1][nss] += peer_stats->succ_pkts + peer_stats->retry_pkts; STATS_OP_FMT(AMPDU).gi[1][gi] += peer_stats->succ_pkts + peer_stats->retry_pkts; } else { tx_stats->ack_fails += peer_stats->ba_fails; } STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes; 
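/* bw/nss/gi buckets (index 0 accumulates bytes, index 1 packets) are updated for every completion, regardless of the rate family handled above */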
STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes; STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts; STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts; STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts; STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes; STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts; STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts; STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts; STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes; STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts; STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts; STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts; tx_stats->tx_duration += peer_stats->duration; } void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) { ath11k_dp_tx_update_txcompl(ar, ts); } static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_htt_data_stats *stats; static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail", "retry", "ampdu"}; static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"}; int len = 0, i, j, k, retval = 0; const int size = 2 * 4096; char *buf; if (!arsta->tx_stats) return -ENOENT; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) { for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) { stats = &arsta->tx_stats->stats[k]; len += scnprintf(buf + len, size - len, "%s_%s\n", str_name[k], str[j]); len += scnprintf(buf + len, size - len, " HE MCS %s\n", str[j]); for (i = 0; i < ATH11K_HE_MCS_NUM; i++) len += scnprintf(buf + len, size - len, " %llu ", stats->he[j][i]); len += scnprintf(buf + len, size - len, "\n"); len += scnprintf(buf + len, size - len, " VHT MCS %s\n", str[j]); for (i = 0; i < ATH11K_VHT_MCS_NUM; i++) len += scnprintf(buf + len, size - len, " %llu ", stats->vht[j][i]); len += scnprintf(buf + len, size - len, "\n"); len += scnprintf(buf + len, size - len, " HT MCS %s\n", str[j]); for (i = 0; i < ATH11K_HT_MCS_NUM; i++) len += scnprintf(buf + len, size - len, " %llu ", stats->ht[j][i]); len += scnprintf(buf + len, size - len, "\n"); len += scnprintf(buf + len, size - len, " BW %s (20,40,80,160 MHz)\n", str[j]); len += scnprintf(buf + len, size - len, " %llu %llu %llu %llu\n", stats->bw[j][0], stats->bw[j][1], stats->bw[j][2], stats->bw[j][3]); len += scnprintf(buf + len, size - len, " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]); len += scnprintf(buf + len, size - len, " %llu %llu %llu %llu\n", stats->nss[j][0], stats->nss[j][1], stats->nss[j][2], stats->nss[j][3]); len += scnprintf(buf + len, size - len, " GI %s (0.4us,0.8us,1.6us,3.2us)\n", str[j]); len += scnprintf(buf + len, size - len, " %llu %llu %llu %llu\n", stats->gi[j][0], stats->gi[j][1], stats->gi[j][2], stats->gi[j][3]); len += scnprintf(buf + len, size - len, " legacy rate %s (1,2 ... 
Mbps)\n ", str[j]); for (i = 0; i < ATH11K_LEGACY_NUM; i++) len += scnprintf(buf + len, size - len, "%llu ", stats->legacy[j][i]); len += scnprintf(buf + len, size - len, "\n"); } } len += scnprintf(buf + len, size - len, "\nTX duration\n %llu usecs\n", arsta->tx_stats->tx_duration); len += scnprintf(buf + len, size - len, "BA fails\n %llu\n", arsta->tx_stats->ba_fails); len += scnprintf(buf + len, size - len, "ack fails\n %llu\n", arsta->tx_stats->ack_fails); spin_unlock_bh(&ar->data_lock); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); mutex_unlock(&ar->conf_mutex); return retval; } static const struct file_operations fops_tx_stats = { .read = ath11k_dbg_sta_dump_tx_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; int len = 0, i, retval = 0; const int size = 4096; char *buf; if (!rx_stats) return -ENOENT; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->ab->base_lock); len += scnprintf(buf + len, size - len, "RX peer stats:\n"); len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n", rx_stats->num_msdu); len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n", rx_stats->tcp_msdu_count); len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n", rx_stats->udp_msdu_count); len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n", rx_stats->ampdu_msdu_count); len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n", rx_stats->non_ampdu_msdu_count); len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n", rx_stats->stbc_count); len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n", rx_stats->beamformed_count); len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n", rx_stats->num_mpdu_fcs_ok); len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n", rx_stats->num_mpdu_fcs_err); len += scnprintf(buf + len, size - len, "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n", rx_stats->gi_count[0], rx_stats->gi_count[1], rx_stats->gi_count[2], rx_stats->gi_count[3]); len += scnprintf(buf + len, size - len, "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n", rx_stats->bw_count[0], rx_stats->bw_count[1], rx_stats->bw_count[2], rx_stats->bw_count[3]); len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n", rx_stats->coding_count[0], rx_stats->coding_count[1]); len += scnprintf(buf + len, size - len, "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n", rx_stats->pream_cnt[0], rx_stats->pream_cnt[1], rx_stats->pream_cnt[2], rx_stats->pream_cnt[3], rx_stats->pream_cnt[4]); len += scnprintf(buf + len, size - len, "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n", rx_stats->reception_type[0], rx_stats->reception_type[1], rx_stats->reception_type[2], rx_stats->reception_type[3]); len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):"); for (i = 0; i <= IEEE80211_NUM_TIDS; i++) len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]); len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):"); for (i = 0; i < 
HAL_RX_MAX_MCS + 1; i++) len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]); len += scnprintf(buf + len, size - len, "\nNSS(1-8):"); for (i = 0; i < HAL_RX_MAX_NSS; i++) len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]); len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ", rx_stats->rx_duration); len += scnprintf(buf + len, size - len, "\nDCM: %llu\nRU: 26 %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n", rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0], rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2], rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4], rx_stats->ru_alloc_cnt[5]); len += scnprintf(buf + len, size - len, "\n"); spin_unlock_bh(&ar->ab->base_lock); if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); kfree(buf); mutex_unlock(&ar->conf_mutex); return retval; } static const struct file_operations fops_rx_stats = { .read = ath11k_dbg_sta_dump_rx_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; struct debug_htt_stats_req *stats_req; int type = ar->debug.htt_stats.type; int ret; if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO && type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) || type == ATH11K_DBG_HTT_EXT_STATS_RESET) return -EPERM; stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE); if (!stats_req) return -ENOMEM; mutex_lock(&ar->conf_mutex); ar->debug.htt_stats.stats_req = stats_req; stats_req->type = type; memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN); ret = ath11k_debugfs_htt_stats_req(ar); mutex_unlock(&ar->conf_mutex); if (ret < 0) goto out; file->private_data = stats_req; return 0; out: vfree(stats_req); ar->debug.htt_stats.stats_req = NULL; return ret; } static int ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) { struct ieee80211_sta *sta = inode->i_private; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; mutex_lock(&ar->conf_mutex); vfree(file->private_data); ar->debug.htt_stats.stats_req = NULL; mutex_unlock(&ar->conf_mutex); return 0; } static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct debug_htt_stats_req *stats_req = file->private_data; char *buf; u32 length = 0; buf = stats_req->buf; length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE); return simple_read_from_buffer(user_buf, count, ppos, buf, length); } static const struct file_operations fops_htt_peer_stats = { .open = ath11k_dbg_sta_open_htt_peer_stats, .release = ath11k_dbg_sta_release_htt_peer_stats, .read = ath11k_dbg_sta_read_htt_peer_stats, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; int ret, enable; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) { ret = -ENETDOWN; goto out; } ret = kstrtoint_from_user(buf, count, 0, &enable); if (ret) goto out; ar->debug.pktlog_peer_valid = enable; memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN); /* Send peer based pktlog 
enable/disable */ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable); if (ret) { ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n", sta->addr, ret); goto out; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n", enable); ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; - char buf[32] = {0}; + char buf[32] = {}; int len; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%08x %pM\n", ar->debug.pktlog_peer_valid, ar->debug.pktlog_peer_addr); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(ubuf, count, ppos, buf, len); } static const struct file_operations fops_peer_pktlog = { .write = ath11k_dbg_sta_write_peer_pktlog, .read = ath11k_dbg_sta_read_peer_pktlog, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_write_delba(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, initiator, reason; int ret; - char buf[64] = {0}; + char buf[64] = {}; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason); if (ret != 3) return -EINVAL; /* Valid TID values are 0 through 15 */ if (tid > HAL_DESC_REO_NON_QOS_TID - 1) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON || arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) { ret = count; goto out; } ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr, tid, initiator, reason); if (ret) { ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n", arsta->arvif->vdev_id, sta->addr, tid, initiator, reason); } ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_delba = { .write = ath11k_dbg_sta_write_delba, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, status; int ret; - char buf[64] = {0}; + char buf[64] = {}; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; ret = sscanf(buf, "%u %u", &tid, &status); if (ret != 2) return -EINVAL; /* Valid TID values are 0 through 15 */ if (tid > HAL_DESC_REO_NON_QOS_TID - 1) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON || arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) { ret = count; goto out; } ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr, tid, status); if (ret) { ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n", arsta->arvif->vdev_id, sta->addr, tid, status); } ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_addba_resp = { .write = ath11k_dbg_sta_write_addba_resp, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t 
ath11k_dbg_sta_write_addba(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 tid, buf_size; int ret; - char buf[64] = {0}; + char buf[64] = {}; ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); if (ret <= 0) return ret; ret = sscanf(buf, "%u %u", &tid, &buf_size); if (ret != 2) return -EINVAL; /* Valid TID values are 0 through 15 */ if (tid > HAL_DESC_REO_NON_QOS_TID - 1) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON || arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) { ret = count; goto out; } ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr, tid, buf_size); if (ret) { ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n", arsta->arvif->vdev_id, sta->addr, tid, buf_size); } ret = count; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_addba = { .write = ath11k_dbg_sta_write_addba, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[64]; int len = 0; mutex_lock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n\n%s\n%s\n", (arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ? "auto" : "manual", "auto = 0", "manual = 1"); mutex_unlock(&ar->conf_mutex); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u32 aggr_mode; int ret; if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode)) return -EINVAL; if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX) return -EINVAL; mutex_lock(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON || aggr_mode == arsta->aggr_mode) { ret = count; goto out; } ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr); if (ret) { ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n", ret); goto out; } arsta->aggr_mode = aggr_mode; out: mutex_unlock(&ar->conf_mutex); return ret; } static const struct file_operations fops_aggr_mode = { .read = ath11k_dbg_sta_read_aggr_mode, .write = ath11k_dbg_sta_write_aggr_mode, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_write_htt_peer_stats_reset(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; - struct htt_ext_stats_cfg_params cfg_params = { 0 }; + struct htt_ext_stats_cfg_params cfg_params = {}; int ret; u8 type; ret = kstrtou8_from_user(user_buf, count, 0, &type); if (ret) return ret; if (!type) return ret; mutex_lock(&ar->conf_mutex); cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR; cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1), HTT_PEER_STATS_REQ_MODE_FLUSH_TQM); cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE; cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]); cfg_params.cfg2 |= 
FIELD_PREP(GENMASK(15, 8), sta->addr[1]); cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]); cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]); cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]); cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]); cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET; ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, ATH11K_DBG_HTT_EXT_STATS_PEER_INFO, &cfg_params, 0ULL); if (ret) { ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret); mutex_unlock(&ar->conf_mutex); return ret; } mutex_unlock(&ar->conf_mutex); ret = count; return ret; } static const struct file_operations fops_htt_peer_stats_reset = { .write = ath11k_write_htt_peer_stats_reset, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; int len; spin_lock_bh(&ar->data_lock); len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state); spin_unlock_bh(&ar->data_lock); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_peer_ps_state = { .open = simple_open, .read = ath11k_dbg_sta_read_peer_ps_state, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; u64 time_since_station_in_power_save; char buf[20]; int len; spin_lock_bh(&ar->data_lock); if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON && arsta->peer_current_ps_valid) time_since_station_in_power_save = jiffies_to_msecs(jiffies - arsta->ps_start_jiffies); else time_since_station_in_power_save = 0; len = scnprintf(buf, sizeof(buf), "%llu\n", time_since_station_in_power_save); spin_unlock_bh(&ar->data_lock); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_current_ps_duration = { .open = simple_open, .read = ath11k_dbg_sta_read_current_ps_duration, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_sta *sta = file->private_data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; char buf[20]; u64 power_save_duration; int len; spin_lock_bh(&ar->data_lock); if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON && arsta->peer_current_ps_valid) power_save_duration = jiffies_to_msecs(jiffies - arsta->ps_start_jiffies) + arsta->ps_total_duration; else power_save_duration = arsta->ps_total_duration; len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration); spin_unlock_bh(&ar->data_lock); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct file_operations fops_total_ps_duration = { .open = simple_open, .read = ath11k_dbg_sta_read_total_ps_duration, .owner = THIS_MODULE, .llseek = default_llseek, }; void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { struct ath11k *ar = hw->priv; if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 
debugfs_create_file("tx_stats", 0400, dir, sta, &fops_tx_stats); if (ath11k_debugfs_is_extd_rx_stats_enabled(ar)) debugfs_create_file("rx_stats", 0400, dir, sta, &fops_rx_stats); debugfs_create_file("htt_peer_stats", 0400, dir, sta, &fops_htt_peer_stats); debugfs_create_file("peer_pktlog", 0644, dir, sta, &fops_peer_pktlog); debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode); debugfs_create_file("addba", 0200, dir, sta, &fops_addba); debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp); debugfs_create_file("delba", 0200, dir, sta, &fops_delba); if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET, ar->ab->wmi_ab.svc_map)) debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta, &fops_htt_peer_stats_reset); debugfs_create_file("peer_ps_state", 0400, dir, sta, &fops_peer_ps_state); if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, ar->ab->wmi_ab.svc_map)) { debugfs_create_file("current_ps_duration", 0440, dir, sta, &fops_current_ps_duration); debugfs_create_file("total_ps_duration", 0440, dir, sta, &fops_total_ps_duration); } } diff --git a/dp.c b/dp.c index bf3928ada995..56b1a657e0b0 100644 --- a/dp.c +++ b/dp.c @@ -1,1192 +1,1194 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include +#include #include "core.h" #include "dp_tx.h" #include "hal_tx.h" #include "hif.h" #include "debug.h" #include "dp_rx.h" #include "peer.h" static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { dev_kfree_skb_any(skb); } void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; /* TODO: Any other peer specific DP cleanup */ spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, addr); if (!peer) { ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n", addr, vdev_id); spin_unlock_bh(&ab->base_lock); return; } ath11k_peer_rx_tid_cleanup(ar, peer); peer->dp_setup_done = false; crypto_free_shash(peer->tfm_mmic); spin_unlock_bh(&ab->base_lock); } int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; u32 reo_dest; int ret = 0, tid; /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */ reo_dest = ar->dp.mac_id + 1; ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id, WMI_PEER_SET_DEFAULT_ROUTING, DP_RX_HASH_ENABLE | (reo_dest << 1)); if (ret) { ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n", ret, addr, vdev_id); return ret; } for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0, HAL_PN_TYPE_NONE); if (ret) { ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n", tid, ret); goto peer_clean; } } ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id); if (ret) { ath11k_warn(ab, "failed to setup rx defrag context\n"); tid--; goto peer_clean; } /* TODO: Setup other peer specific resource used in data path */ return 0; peer_clean: spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, addr); if (!peer) { ath11k_warn(ab, "failed to find the peer to del rx tid\n"); spin_unlock_bh(&ab->base_lock); return -ENOENT; } for (; tid >= 0; tid--) ath11k_peer_rx_tid_delete(ar, peer, tid); spin_unlock_bh(&ab->base_lock); return ret; } 
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) { if (!ring->vaddr_unaligned) return; if (ring->cached) dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned, ring->paddr_unaligned, DMA_FROM_DEVICE); else dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, ring->paddr_unaligned); ring->vaddr_unaligned = NULL; } static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask) { int ext_group_num; u8 mask = 1 << ring_num; for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX; ext_group_num++) { if (mask & grp_mask[ext_group_num]) return ext_group_num; } return -ENOENT; } static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab, enum hal_ring_type type, int ring_num) { const u8 *grp_mask; switch (type) { case HAL_WBM2SW_RELEASE: if (ring_num == DP_RX_RELEASE_RING_NUM) { grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0]; ring_num = 0; } else { grp_mask = &ab->hw_params.ring_mask->tx[0]; } break; case HAL_REO_EXCEPTION: grp_mask = &ab->hw_params.ring_mask->rx_err[0]; break; case HAL_REO_DST: grp_mask = &ab->hw_params.ring_mask->rx[0]; break; case HAL_REO_STATUS: grp_mask = &ab->hw_params.ring_mask->reo_status[0]; break; case HAL_RXDMA_MONITOR_STATUS: case HAL_RXDMA_MONITOR_DST: grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0]; break; case HAL_RXDMA_DST: grp_mask = &ab->hw_params.ring_mask->rxdma2host[0]; break; case HAL_RXDMA_BUF: grp_mask = &ab->hw_params.ring_mask->host2rxdma[0]; break; case HAL_RXDMA_MONITOR_BUF: case HAL_TCL_DATA: case HAL_TCL_CMD: case HAL_REO_CMD: case HAL_SW2WBM_RELEASE: case HAL_WBM_IDLE_LINK: case HAL_TCL_STATUS: case HAL_REO_REINJECT: case HAL_CE_SRC: case HAL_CE_DST: case HAL_CE_DST_STATUS: default: return -ENOENT; } return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask); } static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab, struct hal_srng_params *ring_params, enum hal_ring_type type, int ring_num) { int msi_group_number, msi_data_count; u32 msi_data_start, msi_irq_start, addr_lo, addr_hi; int ret; ret = ath11k_get_user_msi_vector(ab, "DP", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return; msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type, ring_num); if (msi_group_number < 0) { ath11k_dbg(ab, ATH11K_DBG_PCI, "ring not part of an ext_group; ring_type: %d,ring_num %d", type, ring_num); ring_params->msi_addr = 0; ring_params->msi_data = 0; return; } if (msi_group_number > msi_data_count) { ath11k_dbg(ab, ATH11K_DBG_PCI, "multiple msi_groups share one msi, msi_group_num %d", msi_group_number); } ath11k_get_msi_address(ab, &addr_lo, &addr_hi); ring_params->msi_addr = addr_lo; ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32); ring_params->msi_data = (msi_group_number % msi_data_count) + msi_data_start; ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR; } int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, enum hal_ring_type type, int ring_num, int mac_id, int num_entries) { - struct hal_srng_params params = { 0 }; + struct hal_srng_params params = {}; int entry_sz = ath11k_hal_srng_get_entrysize(ab, type); int max_entries = ath11k_hal_srng_get_max_entries(ab, type); int ret; bool cached = false; if (max_entries < 0 || entry_sz < 0) return -EINVAL; if (num_entries > max_entries) num_entries = max_entries; ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1; if (ab->hw_params.alloc_cacheable_memory) { /* Allocate the reo dst and tx completion rings from cacheable memory */ switch (type) { case 
HAL_REO_DST: case HAL_WBM2SW_RELEASE: cached = true; break; default: cached = false; } } if (cached) ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size, &ring->paddr_unaligned, DMA_FROM_DEVICE, GFP_KERNEL); else ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, &ring->paddr_unaligned, GFP_KERNEL); if (!ring->vaddr_unaligned) return -ENOMEM; ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN); ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr - (unsigned long)ring->vaddr_unaligned); params.ring_base_vaddr = ring->vaddr; params.ring_base_paddr = ring->paddr; params.num_entries = num_entries; ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id); switch (type) { case HAL_REO_DST: params.intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_RX; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; break; case HAL_RXDMA_BUF: case HAL_RXDMA_MONITOR_BUF: case HAL_RXDMA_MONITOR_STATUS: params.low_threshold = num_entries >> 3; params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN; params.intr_batch_cntr_thres_entries = 0; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX; break; case HAL_WBM2SW_RELEASE: if (ring_num < 3) { params.intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_TX; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_TX; break; } /* fall through when ring_num >= 3 */ fallthrough; case HAL_REO_EXCEPTION: case HAL_REO_REINJECT: case HAL_REO_CMD: case HAL_REO_STATUS: case HAL_TCL_DATA: case HAL_TCL_CMD: case HAL_TCL_STATUS: case HAL_WBM_IDLE_LINK: case HAL_SW2WBM_RELEASE: case HAL_RXDMA_DST: case HAL_RXDMA_MONITOR_DST: case HAL_RXDMA_MONITOR_DESC: params.intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_OTHER; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER; break; case HAL_RXDMA_DIR_BUF: break; default: ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type); return -EINVAL; } if (cached) { params.flags |= HAL_SRNG_FLAGS_CACHED; ring->cached = 1; } ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params); if (ret < 0) { ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n", ret, ring_num); return ret; } ring->ring_id = ret; return 0; } void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab) { int i; if (!ab->hw_params.supports_shadow_regs) return; for (i = 0; i < ab->hw_params.max_tx_ring; i++) ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]); ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer); } static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int i; ath11k_dp_stop_shadow_timers(ab); ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring); for (i = 0; i < ab->hw_params.max_tx_ring; i++) { ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); } ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring); ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring); ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring); ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring); ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring); } static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng; int i, ret; u8 tcl_num, wbm_num; ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring, HAL_SW2WBM_RELEASE, 0, 0, DP_WBM_RELEASE_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n", ret); goto
err; } ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0, DP_TCL_CMD_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret); goto err; } ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS, 0, 0, DP_TCL_STATUS_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret); goto err; } for (i = 0; i < ab->hw_params.max_tx_ring; i++) { tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num; wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num; ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring, HAL_TCL_DATA, tcl_num, 0, ab->hw_params.tx_ring_size); if (ret) { ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n", i, ret); goto err; } ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring, HAL_WBM2SW_RELEASE, wbm_num, 0, DP_TX_COMP_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n", i, ret); goto err; } srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id]; ath11k_hal_tx_init_data_ring(ab, srng); ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i], ATH11K_SHADOW_DP_TIMER_INTERVAL, dp->tx_ring[i].tcl_data_ring.ring_id); } ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT, 0, 0, DP_REO_REINJECT_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n", ret); goto err; } ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE, DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret); goto err; } ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION, 0, 0, DP_REO_EXCEPTION_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up reo_exception ring :%d\n", ret); goto err; } ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD, 0, 0, DP_REO_CMD_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret); goto err; } srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; ath11k_hal_reo_init_cmd_ring(ab, srng); ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer, ATH11K_SHADOW_CTRL_TIMER_INTERVAL, dp->reo_cmd_ring.ring_id); ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS, 0, 0, DP_REO_STATUS_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret); goto err; } /* When hash based routing of rx packet is enabled, 32 entries to map * the hash values to the ring will be configured. 
*/ ab->hw_params.hw_ops->reo_setup(ab); return 0; err: ath11k_dp_srng_common_cleanup(ab); return ret; } static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; int i; for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) { if (!slist[i].vaddr) continue; dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, slist[i].vaddr, slist[i].paddr); slist[i].vaddr = NULL; } } static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab, int size, u32 n_link_desc_bank, u32 n_link_desc, u32 last_bank_sz) { struct ath11k_dp *dp = &ab->dp; struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks; struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; u32 n_entries_per_buf; int num_scatter_buf, scatter_idx; struct hal_wbm_link_desc *scatter_buf; int align_bytes, n_entries; dma_addr_t paddr; int rem_entries; int i; int ret = 0; u32 end_offset; n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE / ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK); num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE); if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX) return -EINVAL; for (i = 0; i < num_scatter_buf; i++) { slist[i].vaddr = dma_alloc_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX, &slist[i].paddr, GFP_KERNEL); if (!slist[i].vaddr) { ret = -ENOMEM; goto err; } } scatter_idx = 0; scatter_buf = slist[scatter_idx].vaddr; rem_entries = n_entries_per_buf; for (i = 0; i < n_link_desc_bank; i++) { align_bytes = link_desc_banks[i].vaddr - link_desc_banks[i].vaddr_unaligned; n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) / HAL_LINK_DESC_SIZE; paddr = link_desc_banks[i].paddr; while (n_entries) { ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr); n_entries--; paddr += HAL_LINK_DESC_SIZE; if (rem_entries) { rem_entries--; scatter_buf++; continue; } rem_entries = n_entries_per_buf; scatter_idx++; scatter_buf = slist[scatter_idx].vaddr; } } end_offset = (scatter_buf - slist[scatter_idx].vaddr) * sizeof(struct hal_wbm_link_desc); ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf, n_link_desc, end_offset); return 0; err: ath11k_dp_scatter_idle_link_desc_cleanup(ab); return ret; } static void ath11k_dp_link_desc_bank_free(struct ath11k_base *ab, struct dp_link_desc_bank *link_desc_banks) { int i; for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) { if (link_desc_banks[i].vaddr_unaligned) { dma_free_coherent(ab->dev, link_desc_banks[i].size, link_desc_banks[i].vaddr_unaligned, link_desc_banks[i].paddr_unaligned); link_desc_banks[i].vaddr_unaligned = NULL; } } } static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab, struct dp_link_desc_bank *desc_bank, int n_link_desc_bank, int last_bank_sz) { struct ath11k_dp *dp = &ab->dp; int i; int ret = 0; int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH; for (i = 0; i < n_link_desc_bank; i++) { if (i == (n_link_desc_bank - 1) && last_bank_sz) desc_sz = last_bank_sz; desc_bank[i].vaddr_unaligned = dma_alloc_coherent(ab->dev, desc_sz, &desc_bank[i].paddr_unaligned, GFP_KERNEL); if (!desc_bank[i].vaddr_unaligned) { ret = -ENOMEM; goto err; } desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned, HAL_LINK_DESC_ALIGN); desc_bank[i].paddr = desc_bank[i].paddr_unaligned + ((unsigned long)desc_bank[i].vaddr - (unsigned long)desc_bank[i].vaddr_unaligned); desc_bank[i].size = desc_sz; } return 0; err: ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks); return ret; } void ath11k_dp_link_desc_cleanup(struct 
ath11k_base *ab, struct dp_link_desc_bank *desc_bank, u32 ring_type, struct dp_srng *ring) { ath11k_dp_link_desc_bank_free(ab, desc_bank); if (ring_type != HAL_RXDMA_MONITOR_DESC) { ath11k_dp_srng_cleanup(ab, ring); ath11k_dp_scatter_idle_link_desc_cleanup(ab); } } static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc) { struct ath11k_dp *dp = &ab->dp; u32 n_mpdu_link_desc, n_mpdu_queue_desc; u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc; int ret = 0; n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) / HAL_NUM_MPDUS_PER_LINK_DESC; n_mpdu_queue_desc = n_mpdu_link_desc / HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC; n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID * DP_AVG_MSDUS_PER_FLOW) / HAL_NUM_TX_MSDUS_PER_LINK_DESC; n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX * DP_AVG_MSDUS_PER_MPDU) / HAL_NUM_RX_MSDUS_PER_LINK_DESC; *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc + n_tx_msdu_link_desc + n_rx_msdu_link_desc; if (*n_link_desc & (*n_link_desc - 1)) *n_link_desc = 1 << fls(*n_link_desc); ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring, HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc); if (ret) { ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret); return ret; } return ret; } int ath11k_dp_link_desc_setup(struct ath11k_base *ab, struct dp_link_desc_bank *link_desc_banks, u32 ring_type, struct hal_srng *srng, u32 n_link_desc) { u32 tot_mem_sz; u32 n_link_desc_bank, last_bank_sz; u32 entry_sz, align_bytes, n_entries; u32 paddr; u32 *desc; int i, ret; tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE; tot_mem_sz += HAL_LINK_DESC_ALIGN; if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) { n_link_desc_bank = 1; last_bank_sz = tot_mem_sz; } else { n_link_desc_bank = tot_mem_sz / (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN); last_bank_sz = tot_mem_sz % (DP_LINK_DESC_ALLOC_SIZE_THRESH - HAL_LINK_DESC_ALIGN); if (last_bank_sz) n_link_desc_bank += 1; } if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX) return -EINVAL; ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks, n_link_desc_bank, last_bank_sz); if (ret) return ret; /* Setup link desc idle list for HW internal usage */ entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type); tot_mem_sz = entry_sz * n_link_desc; /* Set up the scatter desc list when the total memory requirement exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH */ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH && ring_type != HAL_RXDMA_MONITOR_DESC) { ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz, n_link_desc_bank, n_link_desc, last_bank_sz); if (ret) { ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n", ret); goto fail_desc_bank_free; } return 0; } spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); for (i = 0; i < n_link_desc_bank; i++) { align_bytes = link_desc_banks[i].vaddr - link_desc_banks[i].vaddr_unaligned; n_entries = (link_desc_banks[i].size - align_bytes) / HAL_LINK_DESC_SIZE; paddr = link_desc_banks[i].paddr; while (n_entries && (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) { ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc, i, paddr); n_entries--; paddr += HAL_LINK_DESC_SIZE; } } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return 0; fail_desc_bank_free: ath11k_dp_link_desc_bank_free(ab, link_desc_banks); return ret; } int ath11k_dp_service_srng(struct ath11k_base *ab, struct ath11k_ext_irq_grp *irq_grp, int budget) { struct napi_struct *napi = &irq_grp->napi; const struct ath11k_hw_hal_params *hal_params; int grp_id = irq_grp->grp_id;
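/* Each ring handler below consumes part of the NAPI budget; once the remaining budget is exhausted, processing stops and the total work done is returned so NAPI can poll again. */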
int work_done = 0; int i, j; int tot_work_done = 0; for (i = 0; i < ab->hw_params.max_tx_ring; i++) { if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) & ab->hw_params.ring_mask->tx[grp_id]) ath11k_dp_tx_completion_handler(ab, i); } if (ab->hw_params.ring_mask->rx_err[grp_id]) { work_done = ath11k_dp_process_rx_err(ab, napi, budget); budget -= work_done; tot_work_done += work_done; if (budget <= 0) goto done; } if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) { work_done = ath11k_dp_rx_process_wbm_err(ab, napi, budget); budget -= work_done; tot_work_done += work_done; if (budget <= 0) goto done; } if (ab->hw_params.ring_mask->rx[grp_id]) { i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1; work_done = ath11k_dp_process_rx(ab, i, napi, budget); budget -= work_done; tot_work_done += work_done; if (budget <= 0) goto done; } if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) { for (i = 0; i < ab->num_radios; i++) { for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { int id = i * ab->hw_params.num_rxdma_per_pdev + j; if (ab->hw_params.ring_mask->rx_mon_status[grp_id] & BIT(id)) { work_done = ath11k_dp_rx_process_mon_rings(ab, id, napi, budget); budget -= work_done; tot_work_done += work_done; if (budget <= 0) goto done; } } } } if (ab->hw_params.ring_mask->reo_status[grp_id]) ath11k_dp_process_reo_status(ab); for (i = 0; i < ab->num_radios; i++) { for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { int id = i * ab->hw_params.num_rxdma_per_pdev + j; if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) { work_done = ath11k_dp_process_rxdma_err(ab, id, budget); budget -= work_done; tot_work_done += work_done; } if (budget <= 0) goto done; if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) { struct ath11k *ar = ath11k_ab_to_ar(ab, id); struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; hal_params = ab->hw_params.hal_params; ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0, hal_params->rx_buf_rbm); } } } /* TODO: Implement handler for other interrupts */ done: return tot_work_done; } EXPORT_SYMBOL(ath11k_dp_service_srng); void ath11k_dp_pdev_free(struct ath11k_base *ab) { struct ath11k *ar; int i; timer_delete_sync(&ab->mon_reap_timer); for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; ath11k_dp_rx_pdev_free(ab, i); ath11k_debugfs_unregister(ar); ath11k_dp_rx_pdev_mon_detach(ar); } } void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev_dp *dp; int i; int j; for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; dp = &ar->dp; dp->mac_id = i; idr_init(&dp->rx_refill_buf_ring.bufs_idr); spin_lock_init(&dp->rx_refill_buf_ring.idr_lock); atomic_set(&dp->num_tx_pending, 0); init_waitqueue_head(&dp->tx_empty_waitq); for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr); spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock); } idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); } } int ath11k_dp_pdev_alloc(struct ath11k_base *ab) { struct ath11k *ar; int ret; int i; /* TODO: Per-pdev rx ring, unlike the tx rings which are mapped to different ACs */ for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; ret = ath11k_dp_rx_pdev_alloc(ab, i); if (ret) { ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n", i); goto err; } ret = ath11k_dp_rx_pdev_mon_attach(ar); if (ret) { ath11k_warn(ab, "failed to initialize mon pdev %d\n", i); goto err; } } return 0; err:
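/* Unwind: free DP state for all pdevs, including those that were set up successfully before the failure. */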
ath11k_dp_pdev_free(ab); return ret; } int ath11k_dp_htt_connect(struct ath11k_dp *dp) { struct ath11k_htc_svc_conn_req conn_req; struct ath11k_htc_svc_conn_resp conn_resp; int status; memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete; conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler; /* connect to control service */ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG; status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req, &conn_resp); if (status) return status; dp->eid = conn_resp.eid; return 0; } static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif) { /* When v2_map_support is true: for STA mode, enable the address search index; tcl uses the ast_hash value in the descriptor. When v2_map_support is false: for STA mode, don't enable the address search index. */ switch (arvif->vdev_type) { case WMI_VDEV_TYPE_STA: if (arvif->ar->ab->hw_params.htt_peer_map_v2) { arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN; arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX; } else { arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN; arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; } break; case WMI_VDEV_TYPE_AP: case WMI_VDEV_TYPE_IBSS: arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN; arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT; break; case WMI_VDEV_TYPE_MONITOR: default: return; } } void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif) { arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) | FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID, arvif->vdev_id) | FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID, ar->pdev->pdev_id); /* set HTT extension valid bit to 0 by default */ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT; ath11k_dp_update_vdev_search(arvif); } static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx) { struct ath11k_base *ab = ctx; struct sk_buff *msdu = skb; dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); dev_kfree_skb_any(msdu); return 0; } void ath11k_dp_free(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int i; ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks, HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring); ath11k_dp_srng_common_cleanup(ab); ath11k_dp_reo_cmd_list_cleanup(ab); for (i = 0; i < ab->hw_params.max_tx_ring; i++) { spin_lock_bh(&dp->tx_ring[i].tx_idr_lock); idr_for_each(&dp->tx_ring[i].txbuf_idr, ath11k_dp_tx_pending_cleanup, ab); idr_destroy(&dp->tx_ring[i].txbuf_idr); spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock); kfree(dp->tx_ring[i].tx_status); } /* Deinit any SOC level resource */ } int ath11k_dp_alloc(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng = NULL; size_t size = 0; u32 n_link_desc = 0; int ret; int i; dp->ab = ab; INIT_LIST_HEAD(&dp->reo_cmd_list); INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list); INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list); spin_lock_init(&dp->reo_cmd_lock); dp->reo_cmd_cache_flush_count = 0; ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc); if (ret) { ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret); return ret; } srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id]; ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks, HAL_WBM_IDLE_LINK, srng, n_link_desc); if (ret) { ath11k_warn(ab, "failed to setup link desc: %d\n", ret); return ret; } ret = ath11k_dp_srng_common_setup(ab); if (ret) goto fail_link_desc_cleanup; size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
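/* Per TCL data ring: the IDR tracks in-flight tx buffers, while tx_status is a DP_TX_COMP_RING_SIZE-entry completion buffer indexed by tx_status_head/tx_status_tail. */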
for (i = 0; i < ab->hw_params.max_tx_ring; i++) { idr_init(&dp->tx_ring[i].txbuf_idr); spin_lock_init(&dp->tx_ring[i].tx_idr_lock); dp->tx_ring[i].tcl_data_ring_id = i; dp->tx_ring[i].tx_status_head = 0; dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1; dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL); if (!dp->tx_ring[i].tx_status) { ret = -ENOMEM; goto fail_cmn_srng_cleanup; } } for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++) ath11k_hal_tx_set_dscp_tid_map(ab, i); /* Init any SOC level resource for DP */ return 0; fail_cmn_srng_cleanup: ath11k_dp_srng_common_cleanup(ab); fail_link_desc_cleanup: ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks, HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring); return ret; } static void ath11k_dp_shadow_timer_handler(struct timer_list *t) { struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer, t, timer); struct ath11k_base *ab = update_timer->ab; struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id]; spin_lock_bh(&srng->lock); /* When the timer fires, the handler checks whether new TX happened during the timeout interval. If it did, the timer is re-armed; if not, the handler updates the shadow HP and stops the timer. The timer is started again when TX resumes. */ if (update_timer->timer_tx_num != update_timer->tx_num) { update_timer->timer_tx_num = update_timer->tx_num; mod_timer(&update_timer->timer, jiffies + msecs_to_jiffies(update_timer->interval)); } else { update_timer->started = false; ath11k_hal_srng_shadow_update_hp_tp(ab, srng); } spin_unlock_bh(&srng->lock); } void ath11k_dp_shadow_start_timer(struct ath11k_base *ab, struct hal_srng *srng, struct ath11k_hp_update_timer *update_timer) { lockdep_assert_held(&srng->lock); if (!ab->hw_params.supports_shadow_regs) return; update_timer->tx_num++; if (update_timer->started) return; update_timer->started = true; update_timer->timer_tx_num = update_timer->tx_num; mod_timer(&update_timer->timer, jiffies + msecs_to_jiffies(update_timer->interval)); } void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab, struct ath11k_hp_update_timer *update_timer) { if (!ab->hw_params.supports_shadow_regs) return; if (!update_timer->init) return; timer_delete_sync(&update_timer->timer); } void ath11k_dp_shadow_init_timer(struct ath11k_base *ab, struct ath11k_hp_update_timer *update_timer, u32 interval, u32 ring_id) { if (!ab->hw_params.supports_shadow_regs) return; update_timer->tx_num = 0; update_timer->timer_tx_num = 0; update_timer->ab = ab; update_timer->ring_id = ring_id; update_timer->interval = interval; update_timer->init = true; timer_setup(&update_timer->timer, ath11k_dp_shadow_timer_handler, 0); } diff --git a/dp_rx.c b/dp_rx.c index 9230a965f6f0..ffc7482c77b6 100644 --- a/dp_rx.c +++ b/dp_rx.c @@ -1,5802 +1,5795 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */
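/* dp_rx.c: receive-side datapath - rx descriptor field accessors, rx buffer replenishment, REO (rx TID) queue setup and teardown, fragment handling, monitor-ring servicing, and HTT PPDU-stats/pktlog event parsing. */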
#include <linux/ieee80211.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <crypto/hash.h> #include "core.h" #include "debug.h" #include "debugfs_htt_stats.h" #include "debugfs_sta.h" #include "hal_desc.h" #include "hw.h" #include "dp_rx.h" #include "hal_rx.h" #include "dp_tx.h" #include "peer.h" #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) static inline u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc); } static inline enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab, struct hal_rx_desc *desc) { if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc)) return HAL_ENCRYPT_TYPE_OPEN; return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc); } static inline bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc); } static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab, struct sk_buff *skb) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return ieee80211_has_morefrags(hdr->frame_control); } static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab, struct sk_buff *skb) { struct ieee80211_hdr *hdr; hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz); return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; } static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc); } static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_attention(desc); } static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE, __le32_to_cpu(attn->info2)); } static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn) { return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL, __le32_to_cpu(attn->info1)); } static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn) { return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, __le32_to_cpu(attn->info2)) == RX_DESC_DECRYPT_STATUS_CODE_OK); } static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn) { u32 info = __le32_to_cpu(attn->info1); u32 errmap = 0; if (info & RX_ATTENTION_INFO1_FCS_ERR) errmap |= DP_RX_MPDU_ERR_FCS; if (info & RX_ATTENTION_INFO1_DECRYPT_ERR) errmap |= DP_RX_MPDU_ERR_DECRYPT; if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR) errmap |= DP_RX_MPDU_ERR_TKIP_MIC; if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR) errmap |= DP_RX_MPDU_ERR_AMSDU_ERR; if
(info & RX_ATTENTION_INFO1_OVERFLOW_ERR) errmap |= DP_RX_MPDU_ERR_OVERFLOW; if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR) errmap |= DP_RX_MPDU_ERR_MSDU_LEN; if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR) errmap |= DP_RX_MPDU_ERR_MPDU_LEN; return errmap; } static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab, struct hal_rx_desc *desc) { struct rx_attention *rx_attention; u32 errmap; rx_attention = ath11k_dp_rx_get_attention(ab, desc); errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); return errmap & DP_RX_MPDU_ERR_MSDU_LEN; } static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc); } static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc); } static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab, struct hal_rx_desc *desc) { return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc)); } static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc); } static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc); } static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc); } static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc); } static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc); } static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab, struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc); } static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn) { return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR, __le32_to_cpu(attn->info1)); } static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { u8 *rx_pkt_hdr; rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc); return rx_pkt_hdr; } static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { u32 tlv_tag; tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc); return tlv_tag == HAL_RX_MPDU_START; } static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab, struct hal_rx_desc *rx_desc) { return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc); } static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab, struct hal_rx_desc *desc, 
u16 len) { ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len); } static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab, struct hal_rx_desc *desc) { struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc); return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) && (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST, __le32_to_cpu(attn->info1))); } static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc); } static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab, struct hal_rx_desc *desc) { return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc); } static void ath11k_dp_service_mon_ring(struct timer_list *t) { struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer); int i; for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); mod_timer(&ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); } static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) { int i, reaped = 0; unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); do { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) reaped += ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); /* nothing more to reap */ if (reaped < DP_MON_SERVICE_BUDGET) return 0; } while (time_before(jiffies, timeout)); ath11k_warn(ab, "dp mon ring purge timeout"); return -ETIMEDOUT; } /* Returns number of Rx buffers replenished */ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; struct sk_buff *skb; int num_free; int num_remain; int buf_id; u32 cookie; dma_addr_t paddr; req_entries = min(req_entries, rx_ring->bufs_max); srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); num_free = ath11k_hal_srng_src_num_free(ab, srng, true); if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) req_entries = num_free; req_entries = min(num_free, req_entries); num_remain = req_entries; while (num_remain > 0) { skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); if (!skb) break; if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) { skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - skb->data); } paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (dma_mapping_error(ab->dev, paddr)) goto fail_free_skb; spin_lock_bh(&rx_ring->idr_lock); buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1, (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (buf_id <= 0) goto fail_dma_unmap; desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) goto fail_idr_remove; ATH11K_SKB_RXCB(skb)->paddr = paddr; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); num_remain--; ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; fail_idr_remove: spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); ath11k_hal_srng_access_end(ab, srng); 
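/* Even on failure, report how many buffers were actually queued; a partial replenish still counts as progress and the caller retries later. */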
spin_unlock_bh(&srng->lock); return req_entries - num_remain; } static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, struct dp_rxdma_ring *rx_ring) { struct sk_buff *skb; int buf_id; spin_lock_bh(&rx_ring->idr_lock); idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { idr_remove(&rx_ring->bufs_idr, buf_id); /* TODO: Understand where internal driver does this dma_unmap * of rxdma_buffer. */ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } idr_destroy(&rx_ring->bufs_idr); spin_unlock_bh(&rx_ring->idr_lock); return 0; } static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; int i; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); rx_ring = &dp->rxdma_mon_buf_ring; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); } return 0; } static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, struct dp_rxdma_ring *rx_ring, u32 ringtype) { struct ath11k_pdev_dp *dp = &ar->dp; int num_entries; num_entries = rx_ring->refill_buf_ring.size / ath11k_hal_srng_get_entrysize(ar->ab, ringtype); rx_ring->bufs_max = num_entries; ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, ar->ab->hw_params.hal_params->rx_buf_rbm); return 0; } static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; int i; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF); if (ar->ab->hw_params.rxdma1_enable) { rx_ring = &dp->rxdma_mon_buf_ring; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); } return 0; } static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; int i; ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { if (ab->hw_params.rx_mac_buf_ring) ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); ath11k_dp_srng_cleanup(ab, &dp->rx_mon_status_refill_ring[i].refill_buf_ring); } ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); } void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int i; for (i = 0; i < DP_REO_DST_RING_MAX; i++) ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]); } int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; int ret; int i; for (i = 0; i < DP_REO_DST_RING_MAX; i++) { ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i], HAL_REO_DST, i, 0, DP_REO_DST_RING_SIZE); if (ret) { ath11k_warn(ab, "failed to setup reo_dst_ring\n"); goto err_reo_cleanup; } } return 0; err_reo_cleanup: ath11k_dp_pdev_reo_cleanup(ab); return ret; } static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; struct dp_srng *srng = NULL; int i; int ret; ret = ath11k_dp_srng_setup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring, HAL_RXDMA_BUF, 0, dp->mac_id, 
DP_RXDMA_BUF_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n"); return ret; } if (ar->ab->hw_params.rx_mac_buf_ring) { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ret = ath11k_dp_srng_setup(ar->ab, &dp->rx_mac_buf_ring[i], HAL_RXDMA_BUF, 1, dp->mac_id + i, 1024); if (ret) { ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n", i); return ret; } } } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i], HAL_RXDMA_DST, 0, dp->mac_id + i, DP_RXDMA_ERR_DST_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i); return ret; } } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; ret = ath11k_dp_srng_setup(ar->ab, srng, HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i, DP_RXDMA_MON_STATUS_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup rx_mon_status_refill_ring %d\n", i); return ret; } } /* If rxdma1_enable is false, there is no need to set up rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring; just init the reap timer for QCA6390. */ if (!ar->ab->hw_params.rxdma1_enable) { /* init mon status buffer reap timer */ timer_setup(&ar->ab->mon_reap_timer, ath11k_dp_service_mon_ring, 0); return 0; } ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring, HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id, DP_RXDMA_MONITOR_BUF_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, HAL_RXDMA_MONITOR_DST, 0, dp->mac_id, DP_RXDMA_MONITOR_DST_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DST\n"); return ret; } ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id, DP_RXDMA_MONITOR_DESC_RING_SIZE); if (ret) { ath11k_warn(ar->ab, "failed to setup HAL_RXDMA_MONITOR_DESC\n"); return ret; } return 0; } void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct dp_reo_cmd *cmd, *tmp; struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; struct dp_rx_tid *rx_tid; spin_lock_bh(&dp->reo_cmd_lock); list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { list_del(&cmd->list); rx_tid = &cmd->data; if (rx_tid->vaddr_unaligned) { dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } kfree(cmd); } list_for_each_entry_safe(cmd_cache, tmp_cache, &dp->reo_cmd_cache_flush_list, list) { list_del(&cmd_cache->list); dp->reo_cmd_cache_flush_count--; rx_tid = &cmd_cache->data; if (rx_tid->vaddr_unaligned) { dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } kfree(cmd_cache); } spin_unlock_bh(&dp->reo_cmd_lock); } static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, enum hal_reo_cmd_status status) { struct dp_rx_tid *rx_tid = ctx; if (status != HAL_REO_CMD_SUCCESS) ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", rx_tid->tid, status); if (rx_tid->vaddr_unaligned) { dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } } static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, struct dp_rx_tid *rx_tid) { - struct
ath11k_hal_reo_cmd cmd = {0}; + struct ath11k_hal_reo_cmd cmd = {}; unsigned long tot_desc_sz, desc_sz; int ret; tot_desc_sz = rx_tid->size; desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID); while (tot_desc_sz > desc_sz) { tot_desc_sz -= desc_sz; cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); cmd.addr_hi = upper_32_bits(rx_tid->paddr); ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE, &cmd, NULL); if (ret) ath11k_warn(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n", rx_tid->tid, ret); } memset(&cmd, 0, sizeof(cmd)); cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE, &cmd, ath11k_dp_reo_cmd_free); if (ret) { ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", rx_tid->tid, ret); dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } } static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, enum hal_reo_cmd_status status) { struct ath11k_base *ab = dp->ab; struct dp_rx_tid *rx_tid = ctx; struct dp_reo_cache_flush_elem *elem, *tmp; if (status == HAL_REO_CMD_DRAIN) { goto free_desc; } else if (status != HAL_REO_CMD_SUCCESS) { /* Shouldn't happen! Cleanup in case of other failure? */ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n", rx_tid->tid, status); return; } elem = kzalloc(sizeof(*elem), GFP_ATOMIC); if (!elem) goto free_desc; elem->ts = jiffies; memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); spin_lock_bh(&dp->reo_cmd_lock); list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); dp->reo_cmd_cache_flush_count++; /* Flush and invalidate aged REO desc from HW cache */ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, list) { if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD || time_after(jiffies, elem->ts + msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) { list_del(&elem->list); dp->reo_cmd_cache_flush_count--; spin_unlock_bh(&dp->reo_cmd_lock); ath11k_dp_reo_cache_flush(ab, &elem->data); kfree(elem); spin_lock_bh(&dp->reo_cmd_lock); } } spin_unlock_bh(&dp->reo_cmd_lock); return; free_desc: dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } void ath11k_peer_rx_tid_delete(struct ath11k *ar, struct ath11k_peer *peer, u8 tid) { - struct ath11k_hal_reo_cmd cmd = {0}; + struct ath11k_hal_reo_cmd cmd = {}; struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; int ret; if (!rx_tid->active) return; rx_tid->active = false; cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.upd0 |= HAL_REO_CMD_UPD0_VLD; ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, ath11k_dp_rx_tid_del_func); if (ret) { if (ret != -ESHUTDOWN) ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", tid, ret); dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; } rx_tid->paddr = 0; rx_tid->paddr_unaligned = 0; rx_tid->size = 0; rx_tid->unaligned_size = 0; } static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, u32 *link_desc, enum hal_wbm_rel_bm_act action) { struct ath11k_dp *dp = &ab->dp; 
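/* Hand the consumed MSDU link descriptor back to the WBM idle list via the SW2WBM release ring so the hardware can reuse it. */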
struct hal_srng *srng; u32 *desc; int ret = 0; srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) { ret = -ENOBUFS; goto exit; } ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, action); exit: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return ret; } static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc) { struct ath11k_base *ab = rx_tid->ab; lockdep_assert_held(&ab->base_lock); if (rx_tid->dst_ring_desc) { if (rel_link_desc) ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); kfree(rx_tid->dst_ring_desc); rx_tid->dst_ring_desc = NULL; } rx_tid->cur_sn = 0; rx_tid->last_frag_no = 0; rx_tid->rx_frag_bitmap = 0; __skb_queue_purge(&rx_tid->rx_frags); } void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer) { struct dp_rx_tid *rx_tid; int i; lockdep_assert_held(&ar->ab->base_lock); for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; spin_unlock_bh(&ar->ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ar->ab->base_lock); ath11k_dp_rx_frags_cleanup(rx_tid, true); } } void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) { struct dp_rx_tid *rx_tid; int i; lockdep_assert_held(&ar->ab->base_lock); for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; ath11k_peer_rx_tid_delete(ar, peer, i); ath11k_dp_rx_frags_cleanup(rx_tid, true); spin_unlock_bh(&ar->ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ar->ab->base_lock); } } static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, struct ath11k_peer *peer, struct dp_rx_tid *rx_tid, u32 ba_win_sz, u16 ssn, bool update_ssn) { - struct ath11k_hal_reo_cmd cmd = {0}; + struct ath11k_hal_reo_cmd cmd = {}; int ret; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS; cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE; cmd.ba_window_size = ba_win_sz; if (update_ssn) { cmd.upd0 |= HAL_REO_CMD_UPD0_SSN; cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn); } ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL); if (ret) { ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n", rx_tid->tid, ret); return ret; } rx_tid->ba_win_sz = ba_win_sz; return 0; } static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, const u8 *peer_mac, int vdev_id, u8 tid) { struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n"); goto unlock_exit; } rx_tid = &peer->rx_tid[tid]; if (!rx_tid->active) goto unlock_exit; dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); rx_tid->vaddr_unaligned = NULL; rx_tid->active = false; unlock_exit: spin_unlock_bh(&ab->base_lock); } int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, u8 tid, u32 ba_win_sz, u16 ssn, enum hal_pn_type pn_type) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; u32 hw_desc_sz, *vaddr; void *vaddr_unaligned; dma_addr_t paddr; int ret; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to 
find the peer %pM to set up rx tid\n", peer_mac); spin_unlock_bh(&ab->base_lock); return -ENOENT; } rx_tid = &peer->rx_tid[tid]; /* Update the tid queue if it is already set up */ if (rx_tid->active) { paddr = rx_tid->paddr; ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, ba_win_sz, ssn, true); spin_unlock_bh(&ab->base_lock); if (ret) { ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d: %d\n", peer_mac, tid, ret); return ret; } ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, paddr, tid, 1, ba_win_sz); if (ret) ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n", peer_mac, tid, ret); return ret; } rx_tid->tid = tid; rx_tid->ba_win_sz = ba_win_sz; /* TODO: Optimize the memory allocation for qos tid based on the actual BA window size in REO tid update path. */ if (tid == HAL_DESC_REO_NON_QOS_TID) hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); else hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1; vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr, DMA_BIDIRECTIONAL, GFP_ATOMIC); if (!vaddr_unaligned) { spin_unlock_bh(&ab->base_lock); return -ENOMEM; } rx_tid->vaddr_unaligned = vaddr_unaligned; vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN); rx_tid->paddr_unaligned = paddr; rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr - (unsigned long)rx_tid->vaddr_unaligned); ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type); rx_tid->size = hw_desc_sz; rx_tid->active = true; /* The REO qdesc setup above wrote to vaddr after dma_alloc_noncoherent; those CPU writes are not yet visible to the device, so the driver must explicitly call dma_sync_single_for_device.
*/ dma_sync_single_for_device(ab->dev, rx_tid->paddr, rx_tid->size, DMA_TO_DEVICE); spin_unlock_bh(&ab->base_lock); ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr, tid, 1, ba_win_sz); if (ret) { ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", peer_mac, tid, ret); ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); } return ret; } int ath11k_dp_rx_ampdu_start(struct ath11k *ar, struct ieee80211_ampdu_params *params) { struct ath11k_base *ab = ar->ab; struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; int ret; ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, params->tid, params->buf_size, params->ssn, arsta->pn_type); if (ret) ath11k_warn(ab, "failed to setup rx tid %d\n", ret); return ret; } int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, struct ieee80211_ampdu_params *params) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta); int vdev_id = arsta->arvif->vdev_id; dma_addr_t paddr; bool active; int ret; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); if (!peer) { ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n"); spin_unlock_bh(&ab->base_lock); return -ENOENT; } paddr = peer->rx_tid[params->tid].paddr; active = peer->rx_tid[params->tid].active; if (!active) { spin_unlock_bh(&ab->base_lock); return 0; } ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false); spin_unlock_bh(&ab->base_lock); if (ret) { ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n", params->tid, ret); return ret; } ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, params->sta->addr, paddr, params->tid, 1, 1); if (ret) ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n", ret); return ret; } int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; - struct ath11k_hal_reo_cmd cmd = {0}; + struct ath11k_hal_reo_cmd cmd = {}; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; u8 tid; int ret = 0; /* NOTE: Enable PN/TSC replay check offload only for unicast frames. * We use mac80211 PN/TSC replay check functionality for bcast/mcast * for now. 
*/ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return 0; cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS; cmd.upd0 |= HAL_REO_CMD_UPD0_PN | HAL_REO_CMD_UPD0_PN_SIZE | HAL_REO_CMD_UPD0_PN_VALID | HAL_REO_CMD_UPD0_PN_CHECK | HAL_REO_CMD_UPD0_SVLD; switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (key_cmd == SET_KEY) { cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK; cmd.pn_size = 48; } break; default: break; } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); if (!peer) { ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n"); spin_unlock_bh(&ab->base_lock); return -ENOENT; } for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) { rx_tid = &peer->rx_tid[tid]; if (!rx_tid->active) continue; cmd.addr_lo = lower_32_bits(rx_tid->paddr); cmd.addr_hi = upper_32_bits(rx_tid->paddr); ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd, NULL); if (ret) { ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n", tid, ret); break; } } spin_unlock_bh(&ab->base_lock); return ret; } static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, u16 peer_id) { int i; for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) { if (ppdu_stats->user_stats[i].is_valid_peer_id) { if (peer_id == ppdu_stats->user_stats[i].peer_id) return i; } else { return i; } } return -EINVAL; } static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct htt_ppdu_stats_info *ppdu_info; struct htt_ppdu_user_stats *user_stats; int cur_user; u16 peer_id; ppdu_info = data; switch (tag) { case HTT_PPDU_STATS_TAG_COMMON: if (len < sizeof(struct htt_ppdu_stats_common)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, sizeof(struct htt_ppdu_stats_common)); break; case HTT_PPDU_STATS_TAG_USR_RATE: if (len < sizeof(struct htt_ppdu_stats_user_rate)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->rate, ptr, sizeof(struct htt_ppdu_stats_user_rate)); user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->cmpltn_cmn, ptr, sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); user_stats->tlv_flags |= BIT(tag); break; case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS: if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } peer_id = ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; cur_user = 
ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, peer_id); if (cur_user < 0) return -EINVAL; user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; user_stats->peer_id = peer_id; user_stats->is_valid_peer_id = true; memcpy((void *)&user_stats->ack_ba, ptr, sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); user_stats->tlv_flags |= BIT(tag); break; } return 0; } int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, const void *ptr, void *data), void *data) { const struct htt_tlv *tlv; const void *begin = ptr; u16 tlv_tag, tlv_len; int ret = -EINVAL; while (len > 0) { if (len < sizeof(*tlv)) { ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", ptr - begin, len, sizeof(*tlv)); return -EINVAL; } tlv = (struct htt_tlv *)ptr; tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header); tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header); ptr += sizeof(*tlv); len -= sizeof(*tlv); if (tlv_len > len) { ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", tlv_tag, ptr - begin, len, tlv_len); return -EINVAL; } ret = iter(ab, tlv_tag, tlv_len, ptr, data); if (ret == -ENOMEM) return ret; ptr += tlv_len; len -= tlv_len; } return 0; } static void ath11k_update_per_peer_tx_stats(struct ath11k *ar, struct htt_ppdu_stats *ppdu_stats, u8 user) { struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct ieee80211_sta *sta; struct ath11k_sta *arsta; struct htt_ppdu_stats_user_rate *user_rate; struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; struct htt_ppdu_stats_common *common = &ppdu_stats->common; int ret; u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0; u32 succ_bytes = 0; u16 rate = 0, succ_pkts = 0; u32 tx_duration = 0; u8 tid = HTT_PPDU_STATS_NON_QOS_TID; bool is_ampdu = false; if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE))) return; if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) is_ampdu = HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags); if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) { succ_bytes = usr_stats->ack_ba.success_bytes; succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M, usr_stats->ack_ba.info); tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM, usr_stats->ack_ba.info); } if (common->fes_duration_us) tx_duration = common->fes_duration_us; user_rate = &usr_stats->rate; flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags); bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2; nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1; mcs = HTT_USR_RATE_MCS(user_rate->rate_flags); sgi = HTT_USR_RATE_GI(user_rate->rate_flags); dcm = HTT_USR_RATE_DCM(user_rate->rate_flags); /* Note: If host configured fixed rates and in some other special * cases, the broadcast/management frames are sent in different rates. * Firmware rate's control to be skipped for this? 
*/ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) { ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs); return; } if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) { ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs); return; } if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) { ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats", mcs, nss); return; } if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) { ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, flags, &rate_idx, &rate); if (ret < 0) return; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); if (!peer || !peer->sta) { spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); return; } sta = peer->sta; arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); switch (flags) { case WMI_RATE_PREAMBLE_OFDM: arsta->txrate.legacy = rate; break; case WMI_RATE_PREAMBLE_CCK: arsta->txrate.legacy = rate; break; case WMI_RATE_PREAMBLE_HT: arsta->txrate.mcs = mcs + 8 * (nss - 1); arsta->txrate.flags = RATE_INFO_FLAGS_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; break; case WMI_RATE_PREAMBLE_VHT: arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; break; case WMI_RATE_PREAMBLE_HE: arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; arsta->txrate.he_dcm = dcm; arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc ((user_rate->ru_end - user_rate->ru_start) + 1); break; } arsta->txrate.nss = nss; arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); arsta->tx_duration += tx_duration; memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); /* PPDU stats reported for mgmt packet doesn't have valid tx bytes. * So skip peer stats update for mgmt packets. 
*/ if (tid < HTT_PPDU_STATS_NON_QOS_TID) { memset(peer_stats, 0, sizeof(*peer_stats)); peer_stats->succ_pkts = succ_pkts; peer_stats->succ_bytes = succ_bytes; peer_stats->is_ampdu = is_ampdu; peer_stats->duration = tx_duration; peer_stats->ba_fails = HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); } spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); } static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, struct htt_ppdu_stats *ppdu_stats) { u8 user; for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++) ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); } static struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, u32 ppdu_id) { struct htt_ppdu_stats_info *ppdu_info; lockdep_assert_held(&ar->data_lock); if (!list_empty(&ar->ppdu_stats_info)) { list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { if (ppdu_info->ppdu_id == ppdu_id) return ppdu_info; } if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) { ppdu_info = list_first_entry(&ar->ppdu_stats_info, typeof(*ppdu_info), list); list_del(&ppdu_info->list); ar->ppdu_stat_list_depth--; ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); kfree(ppdu_info); } } ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC); if (!ppdu_info) return NULL; list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); ar->ppdu_stat_list_depth++; return ppdu_info; } static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_htt_ppdu_stats_msg *msg; struct htt_ppdu_stats_info *ppdu_info; struct ath11k *ar; int ret; u8 pdev_id; u32 ppdu_id, len; msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info); pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info); ppdu_id = msg->ppdu_id; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ret = -EINVAL; goto out; } if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) trace_ath11k_htt_ppdu_stats(ar, skb->data, len); spin_lock_bh(&ar->data_lock); ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); if (!ppdu_info) { ret = -EINVAL; goto out_unlock_data; } ppdu_info->ppdu_id = ppdu_id; ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, ath11k_htt_tlv_ppdu_stats_parse, (void *)ppdu_info); if (ret) { ath11k_warn(ab, "Failed to parse tlv %d\n", ret); goto out_unlock_data; } out_unlock_data: spin_unlock_bh(&ar->data_lock); out: rcu_read_unlock(); return ret; } static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) { struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data; struct ath11k *ar; u8 pdev_id; pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id); goto out; } trace_ath11k_htt_pktlog(ar, data->payload, hdr->size, ar->ab->pktlog_defs_checksum); out: rcu_read_unlock(); } static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, struct sk_buff *skb) { u32 *data = (u32 *)skb->data; u8 pdev_id, ring_type, ring_id, pdev_idx; u16 hp, tp; u32 backpressure_time; struct ath11k_bp_stats *bp_stats; pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); ring_id 
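/* Plain-C sketch (not driver code) of the lookup-or-evict pattern in
 * ath11k_dp_htt_get_ppdu_desc() above: descriptors are kept in a FIFO
 * list capped at HTT_PPDU_DESC_MAX_DEPTH; once the cap is exceeded the
 * oldest entry is flushed into the tx-stats machinery and freed before
 * a new one is appended. malloc/free stand in for kzalloc/kfree.
 */
#include <stdint.h>
#include <stdlib.h>

struct ppdu_desc {
	uint32_t id;
	struct ppdu_desc *next;
};

static struct ppdu_desc *get_desc(struct ppdu_desc **head, int *depth,
				  int max_depth, uint32_t id)
{
	struct ppdu_desc *d, **pp;

	for (d = *head; d; d = d->next)
		if (d->id == id)
			return d;		/* PPDU already tracked */

	if (*depth > max_depth && *head) {
		d = *head;			/* oldest entry is at the head */
		*head = d->next;
		(*depth)--;
		free(d);			/* driver flushes stats first */
	}

	d = calloc(1, sizeof(*d));
	if (!d)
		return NULL;
	d->id = id;
	for (pp = head; *pp; pp = &(*pp)->next)
		;				/* append at tail, newest last */
	*pp = d;
	(*depth)++;
	return d;
}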
= FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data); ++data; hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data); tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data); ++data; backpressure_time = *data; ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", pdev_id, ring_type, ring_id, hp, tp, backpressure_time); if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) return; bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { pdev_idx = DP_HW2SW_MACID(pdev_id); if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) return; bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; } else { ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", ring_type); return; } spin_lock_bh(&ab->base_lock); bp_stats->hp = hp; bp_stats->tp = tp; bp_stats->count++; bp_stats->jiffies = jiffies; spin_unlock_bh(&ab->base_lock); } void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_dp *dp = &ab->dp; struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp); u16 peer_id; u8 vdev_id; u8 mac_addr[ETH_ALEN]; u16 peer_mac_h16; u16 ast_hash; u16 hw_peer_id; ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type); switch (type) { case HTT_T2H_MSG_TYPE_VERSION_CONF: dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, resp->version_msg.version); dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, resp->version_msg.version); complete(&dp->htt_tgt_version_received); break; case HTT_T2H_MSG_TYPE_PEER_MAP: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, resp->peer_map_ev.info1); ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, peer_mac_h16, mac_addr); ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0); break; case HTT_T2H_MSG_TYPE_PEER_MAP2: vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, resp->peer_map_ev.info); peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, resp->peer_map_ev.info); peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, resp->peer_map_ev.info1); ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, peer_mac_h16, mac_addr); ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL, resp->peer_map_ev.info2); hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID, resp->peer_map_ev.info1); ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash, hw_peer_id); break; case HTT_T2H_MSG_TYPE_PEER_UNMAP: case HTT_T2H_MSG_TYPE_PEER_UNMAP2: peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID, resp->peer_unmap_ev.info); ath11k_peer_unmap_event(ab, peer_id); break; case HTT_T2H_MSG_TYPE_PPDU_STATS_IND: ath11k_htt_pull_ppdu_stats(ab, skb); break; case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: ath11k_debugfs_htt_ext_stats_handler(ab, skb); break; case HTT_T2H_MSG_TYPE_PKTLOG: ath11k_htt_pktlog(ab, skb); break; case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND: ath11k_htt_backpressure_event_handler(ab, skb); break; default: ath11k_warn(ab, "htt event %d not handled\n", type); break; } dev_kfree_skb_any(skb); } static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, struct sk_buff_head *msdu_list, struct sk_buff *first, struct sk_buff *last, u8 l3pad_bytes, int 
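/* Sketch of the MAC reassembly that ath11k_dp_get_mac_addr() performs for
 * the peer-map events above: the address arrives split into a 32-bit low
 * part and a 16-bit high part. Assuming little-endian event words, the
 * six bytes come out in over-the-air order.
 */
#include <stdint.h>

static void rebuild_mac(uint32_t addr_l32, uint16_t addr_h16, uint8_t addr[6])
{
	addr[0] = addr_l32 & 0xff;
	addr[1] = (addr_l32 >> 8) & 0xff;
	addr[2] = (addr_l32 >> 16) & 0xff;
	addr[3] = (addr_l32 >> 24) & 0xff;
	addr[4] = addr_h16 & 0xff;
	addr[5] = (addr_h16 >> 8) & 0xff;
}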
msdu_len) { struct ath11k_base *ab = ar->ab; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); int buf_first_hdr_len, buf_first_len; struct hal_rx_desc *ldesc; int space_extra, rem_len, buf_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; /* As the msdu is spread across multiple rx buffers, * find the offset to the start of msdu for computing * the length of the msdu in the first buffer. */ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes; buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len; if (WARN_ON_ONCE(msdu_len <= buf_first_len)) { skb_put(first, buf_first_hdr_len + msdu_len); skb_pull(first, buf_first_hdr_len); return 0; } ldesc = (struct hal_rx_desc *)last->data; rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc); /* MSDU spans over multiple buffers because the length of the MSDU * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. */ skb_put(first, DP_RX_BUFFER_SIZE); skb_pull(first, buf_first_hdr_len); /* When an MSDU spread over multiple buffers attention, MSDU_END and * MPDU_END tlvs are valid only in the last buffer. Copy those tlvs. */ ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc); space_extra = msdu_len - (buf_first_len + skb_tailroom(first)); if (space_extra > 0 && (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) { /* Free up all buffers of the MSDU */ while ((skb = __skb_dequeue(msdu_list)) != NULL) { rxcb = ATH11K_SKB_RXCB(skb); if (!rxcb->is_continuation) { dev_kfree_skb_any(skb); break; } dev_kfree_skb_any(skb); } return -ENOMEM; } rem_len = msdu_len - buf_first_len; while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->is_continuation) buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz; else buf_len = rem_len; if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) { WARN_ON_ONCE(1); dev_kfree_skb_any(skb); return -EINVAL; } skb_put(skb, buf_len + hal_rx_desc_sz); skb_pull(skb, hal_rx_desc_sz); skb_copy_from_linear_data(skb, skb_put(first, buf_len), buf_len); dev_kfree_skb_any(skb); rem_len -= buf_len; if (!rxcb->is_continuation) break; } return 0; } static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, struct sk_buff *first) { struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first); if (!rxcb->is_continuation) return first; skb_queue_walk(msdu_list, skb) { rxcb = ATH11K_SKB_RXCB(skb); if (!rxcb->is_continuation) return skb; } return NULL; } static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct rx_attention *rx_attention; bool ip_csum_fail, l4_csum_fail; rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc); ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention); l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention); msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? 
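/* Worked example of the coalesce arithmetic above, with hypothetical
 * sizes (the real values come from DP_RX_BUFFER_SIZE and hal_desc_sz):
 * a 2048-byte RX buffer, a 192-byte HAL descriptor and 2 L3 pad bytes
 * leave 2048 - 194 = 1854 payload bytes in the first buffer, and each
 * continuation buffer carries 2048 - 192 = 1856 bytes. A 5000-byte MSDU
 * therefore needs ceil((5000 - 1854) / 1856) = 2 continuation buffers,
 * which is the rem_len/buf_len loop above in numbers.
 */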
CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return 0; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_MIC_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_MIC_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype); return 0; } static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_IV_LEN; case HAL_ENCRYPT_TYPE_CCMP_128: return IEEE80211_CCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_CCMP_256: return IEEE80211_CCMP_256_HDR_LEN; case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return IEEE80211_GCMP_HDR_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0; } static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: case HAL_ENCRYPT_TYPE_CCMP_128: case HAL_ENCRYPT_TYPE_CCMP_256: case HAL_ENCRYPT_TYPE_GCMP_128: case HAL_ENCRYPT_TYPE_AES_GCMP_256: return 0; case HAL_ENCRYPT_TYPE_TKIP_NO_MIC: case HAL_ENCRYPT_TYPE_TKIP_MIC: return IEEE80211_TKIP_ICV_LEN; case HAL_ENCRYPT_TYPE_WEP_40: case HAL_ENCRYPT_TYPE_WEP_104: case HAL_ENCRYPT_TYPE_WEP_128: case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4: case HAL_ENCRYPT_TYPE_WAPI: break; } ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype); return 0; } static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN]; struct ieee80211_hdr *hdr; size_t hdr_len; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; u16 qos_ctl = 0; u8 *qos; /* copy SA & DA and pull decapped header */ hdr = (struct ieee80211_hdr *)msdu->data; hdr_len = ieee80211_hdrlen(hdr->frame_control); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); if (rxcb->is_first_msdu) { /* original 802.11 header is valid for the first msdu * hence we can reuse the same header */ hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); /* Each A-MSDU subframe will be reported as a separate MSDU, * so strip the A-MSDU bit from QoS Ctl. 
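/* Illustrative helper (not in the driver): the three length tables earlier
 * in this block compose into the total per-MPDU crypto overhead that the
 * undecap paths add back or trim, e.g. CCMP-128 gives 8 (hdr) + 8 (MIC) +
 * 0 (ICV) = 16 bytes.
 */
static int crypto_total_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
	return ath11k_dp_rx_crypto_param_len(ar, enctype) +
	       ath11k_dp_rx_crypto_mic_len(ar, enctype) +
	       ath11k_dp_rx_crypto_icv_len(ar, enctype);
}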
*/ if (ieee80211_is_data_qos(hdr->frame_control)) { qos = ieee80211_get_qos_ctl(hdr); qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; } } else { /* Rebuild qos header if this is a middle/last msdu */ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA); /* Reset the order bit as the HT_Control header is stripped */ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER)); qos_ctl = rxcb->tid; if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc)) qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT; /* TODO Add other QoS ctl fields when required */ /* copy decap header before overwriting for reuse below */ memcpy(decap_hdr, (uint8_t *)hdr, hdr_len); } if (!(status->flag & RX_FLAG_IV_STRIPPED)) { memcpy(skb_push(msdu, ath11k_dp_rx_crypto_param_len(ar, enctype)), (void *)hdr + hdr_len, ath11k_dp_rx_crypto_param_len(ar, enctype)); } if (!rxcb->is_first_msdu) { memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN), &qos_ctl, IEEE80211_QOS_CTL_LEN); memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len); return; } memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); /* original 802.11 header has a different DA and in * case of 4addr it may also have different SA */ hdr = (struct ieee80211_hdr *)msdu->data; ether_addr_copy(ieee80211_get_DA(hdr), da); ether_addr_copy(ieee80211_get_SA(hdr), sa); } static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status, bool decrypted) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; if (!rxcb->is_first_msdu || !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { WARN_ON_ONCE(1); return; } skb_trim(msdu, msdu->len - FCS_LEN); if (!decrypted) return; hdr = (void *)msdu->data; /* Tail */ if (status->flag & RX_FLAG_IV_STRIPPED) { skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); } else { /* MIC */ if (status->flag & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); /* ICV */ if (status->flag & RX_FLAG_ICV_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); } /* MMIC */ if ((status->flag & RX_FLAG_MMIC_STRIPPED) && !ieee80211_has_morefrags(hdr->frame_control) && enctype == HAL_ENCRYPT_TYPE_TKIP_MIC) skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN); /* Head */ if (status->flag & RX_FLAG_IV_STRIPPED) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); memmove((void *)msdu->data + crypto_len, (void *)msdu->data, hdr_len); skb_pull(msdu, crypto_len); } } static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct ieee80211_hdr *hdr; size_t hdr_len, crypto_len; void *rfc1042; bool is_amsdu; is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc); rfc1042 = hdr; if (rxcb->is_first_msdu) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); rfc1042 += hdr_len + crypto_len; } if (is_amsdu) rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); return rfc1042; } static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, struct sk_buff *msdu, u8 *first_hdr, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status) { struct ieee80211_hdr *hdr; struct ethhdr 
*eth; size_t hdr_len; u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; void *rfc1042; rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); if (WARN_ON_ONCE(!rfc1042)) return; /* pull decapped header and copy SA & DA */ eth = (struct ethhdr *)msdu->data; ether_addr_copy(da, eth->h_dest); ether_addr_copy(sa, eth->h_source); skb_pull(msdu, sizeof(struct ethhdr)); /* push rfc1042/llc/snap */ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, sizeof(struct ath11k_dp_rfc1042_hdr)); /* push original 802.11 header */ hdr = (struct ieee80211_hdr *)first_hdr; hdr_len = ieee80211_hdrlen(hdr->frame_control); if (!(status->flag & RX_FLAG_IV_STRIPPED)) { memcpy(skb_push(msdu, ath11k_dp_rx_crypto_param_len(ar, enctype)), (void *)hdr + hdr_len, ath11k_dp_rx_crypto_param_len(ar, enctype)); } memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); /* original 802.11 header has a different DA and in * case of 4addr it may also have different SA */ hdr = (struct ieee80211_hdr *)msdu->data; ether_addr_copy(ieee80211_get_DA(hdr), da); ether_addr_copy(ieee80211_get_SA(hdr), sa); } static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *rx_desc, enum hal_encrypt_type enctype, struct ieee80211_rx_status *status, bool decrypted) { u8 *first_hdr; u8 decap; struct ethhdr *ehdr; first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc); switch (decap) { case DP_RX_DECAP_TYPE_NATIVE_WIFI: ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, enctype, status); break; case DP_RX_DECAP_TYPE_RAW: ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, decrypted); break; case DP_RX_DECAP_TYPE_ETHERNET2_DIX: ehdr = (struct ethhdr *)msdu->data; /* mac80211 allows fast path only for authorized STA */ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) { ATH11K_SKB_RXCB(msdu)->is_eapol = true; ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, enctype, status); break; } /* PN for mcast packets will be validated in mac80211; * remove eth header and add 802.11 header. 
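/* Frame layout produced by the ethernet undecap above; the head grows
 * leftwards with each skb_push():
 *
 *   [802.11 hdr][crypto param][rfc1042 llc/snap][payload]
 *       3rd push     2nd push       1st push
 *
 * (the crypto param push happens only when the IV was not stripped),
 * after which the DA/SA saved from the ethernet header are patched back
 * into the rebuilt 802.11 header.
 */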
*/ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted) ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, enctype, status); break; case DP_RX_DECAP_TYPE_8023: /* TODO: Handle undecap for these formats */ break; } } static struct ath11k_peer * ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); struct hal_rx_desc *rx_desc = rxcb->rx_desc; struct ath11k_peer *peer = NULL; lockdep_assert_held(&ab->base_lock); if (rxcb->peer_id) peer = ath11k_peer_find_by_id(ab, rxcb->peer_id); if (peer) return peer; if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc))) return NULL; peer = ath11k_peer_find_by_addr(ab, ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc)); return peer; } static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, struct sk_buff *msdu, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { bool fill_crypto_hdr; enum hal_encrypt_type enctype; bool is_decrypted = false; struct ath11k_skb_rxcb *rxcb; struct ieee80211_hdr *hdr; struct ath11k_peer *peer; struct rx_attention *rx_attention; u32 err_bitmap; /* PN for multicast packets will be checked in mac80211 */ rxcb = ATH11K_SKB_RXCB(msdu); fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); rxcb->is_mcbc = fill_crypto_hdr; if (rxcb->is_mcbc) { rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); } spin_lock_bh(&ar->ab->base_lock); peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); if (peer) { if (rxcb->is_mcbc) enctype = peer->sec_type_grp; else enctype = peer->sec_type; } else { enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); } spin_unlock_bh(&ar->ab->base_lock); rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap) is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); /* Clear per-MPDU flags while leaving per-PPDU flags intact */ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED); if (err_bitmap & DP_RX_MPDU_ERR_FCS) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC) rx_status->flag |= RX_FLAG_MMIC_ERROR; if (is_decrypted) { rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED; if (fill_crypto_hdr) rx_status->flag |= RX_FLAG_MIC_STRIPPED | RX_FLAG_ICV_STRIPPED; else rx_status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_PN_VALIDATED; } ath11k_dp_rx_h_csum_offload(ar, msdu); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, enctype, rx_status, is_decrypted); if (!is_decrypted || fill_crypto_hdr) return; if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) != DP_RX_DECAP_TYPE_ETHERNET2_DIX) { hdr = (void *)msdu->data; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); } } static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { struct ieee80211_supported_band *sband; enum rx_msdu_start_pkt_type pkt_type; u8 bw; u8 rate_mcs, nss; u8 sgi; bool is_cck, is_ldpc; pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc); bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc); rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc); nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc); sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc); switch (pkt_type) { case RX_MSDU_START_PKT_TYPE_11A: case RX_MSDU_START_PKT_TYPE_11B: is_cck = 
(pkt_type == RX_MSDU_START_PKT_TYPE_11B); sband = &ar->mac.sbands[rx_status->band]; rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, is_cck); break; case RX_MSDU_START_PKT_TYPE_11N: rx_status->encoding = RX_ENC_HT; if (rate_mcs > ATH11K_HT_MCS_MAX) { ath11k_warn(ar->ab, "Received with invalid mcs in HT mode %d\n", rate_mcs); break; } rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); if (sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); break; case RX_MSDU_START_PKT_TYPE_11AC: rx_status->encoding = RX_ENC_VHT; rx_status->rate_idx = rate_mcs; if (rate_mcs > ATH11K_VHT_MCS_MAX) { ath11k_warn(ar->ab, "Received with invalid mcs in VHT mode %d\n", rate_mcs); break; } rx_status->nss = nss; if (sgi) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc); if (is_ldpc) rx_status->enc_flags |= RX_ENC_FLAG_LDPC; break; case RX_MSDU_START_PKT_TYPE_11AX: rx_status->rate_idx = rate_mcs; if (rate_mcs > ATH11K_HE_MCS_MAX) { ath11k_warn(ar->ab, "Received with invalid mcs in HE mode %d\n", rate_mcs); break; } rx_status->encoding = RX_ENC_HE; rx_status->nss = nss; rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw); break; } } static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { u8 channel_num; u32 center_freq, meta_data; struct ieee80211_channel *channel; rx_status->freq = 0; rx_status->rate_idx = 0; rx_status->nss = 0; rx_status->encoding = RX_ENC_LEGACY; rx_status->bw = RATE_INFO_BW_20; rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc); channel_num = meta_data; center_freq = meta_data >> 16; if (center_freq >= ATH11K_MIN_6G_FREQ && center_freq <= ATH11K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 177) { rx_status->band = NL80211_BAND_5GHZ; } else { spin_lock_bh(&ar->data_lock); channel = ar->rx_channel; if (channel) { rx_status->band = channel->band; channel_num = ieee80211_frequency_to_channel(channel->center_freq); } spin_unlock_bh(&ar->data_lock); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ", rx_desc, sizeof(struct hal_rx_desc)); } if (rx_status->band != NL80211_BAND_6GHZ) rx_status->freq = ieee80211_channel_to_frequency(channel_num, rx_status->band); ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); } static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, struct sk_buff *msdu, struct ieee80211_rx_status *status) { static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), }; struct ieee80211_rx_status *rx_status; struct ieee80211_radiotap_he *he = NULL; struct ieee80211_sta *pubsta = NULL; struct ath11k_peer *peer; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u8 decap = DP_RX_DECAP_TYPE_RAW; bool is_mcbc = rxcb->is_mcbc; bool is_eapol = rxcb->is_eapol; if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) && !(status->flag & RX_FLAG_SKIP_MONITOR)) { he = skb_push(msdu, sizeof(known)); memcpy(he, &known, sizeof(known)); status->flag |= RX_FLAG_RADIOTAP_HE; } 
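/* Sketch of the band inference in ath11k_dp_rx_h_ppdu() above: the
 * msdu_start frequency word packs the channel number in its low byte and
 * the center frequency in MHz in the upper 16 bits. 6 GHz is recognised
 * by frequency range (the bounds below are illustrative), 2.4/5 GHz by
 * channel number, and anything else falls back to ar->rx_channel.
 */
#include <stdint.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ, BAND_UNKNOWN };

static enum band infer_band(uint32_t meta_data)
{
	uint8_t chan = meta_data & 0xff;
	uint16_t freq = meta_data >> 16;

	if (freq >= 5925 && freq <= 7115)	/* assumed 6 GHz window */
		return BAND_6GHZ;
	if (chan >= 1 && chan <= 14)
		return BAND_2GHZ;
	if (chan >= 36 && chan <= 177)
		return BAND_5GHZ;
	return BAND_UNKNOWN;
}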
if (!(status->flag & RX_FLAG_ONLY_MONITOR)) decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu); if (peer && peer->sta) pubsta = peer->sta; spin_unlock_bh(&ar->ab->base_lock); ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? peer->addr : NULL, rxcb->tid, is_mcbc ? "mcast" : "ucast", rxcb->seq_no, (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", (status->encoding == RX_ENC_HT) ? "ht" : "", (status->encoding == RX_ENC_VHT) ? "vht" : "", (status->encoding == RX_ENC_HE) ? "he" : "", (status->bw == RATE_INFO_BW_40) ? "40" : "", (status->bw == RATE_INFO_BW_80) ? "80" : "", (status->bw == RATE_INFO_BW_160) ? "160" : "", status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", status->rate_idx, status->nss, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), !!(status->flag & RX_FLAG_MMIC_ERROR), !!(status->flag & RX_FLAG_AMSDU_MORE)); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ", msdu->data, msdu->len); rx_status = IEEE80211_SKB_RXCB(msdu); *rx_status = *status; /* TODO: trace rx packet */ /* PN for multicast packets is not validated in HW, so skip the 802.3 rx path. Also, fast_rx expects the STA to be authorized, hence eapol packets are sent in slow path. */ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol && !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) rx_status->flag |= RX_FLAG_8023; ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); } static int ath11k_dp_rx_process_msdu(struct ath11k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list, struct ieee80211_rx_status *rx_status) { struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc, *lrx_desc; struct rx_attention *rx_attention; struct ath11k_skb_rxcb *rxcb; struct sk_buff *last_buf; u8 l3_pad_bytes; u8 *hdr_status; u16 msdu_len; int ret; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); if (!last_buf) { ath11k_warn(ab, "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n"); ret = -EIO; goto free_out; } rx_desc = (struct hal_rx_desc *)msdu->data; if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) { ath11k_warn(ar->ab, "msdu len not valid\n"); ret = -EIO; goto free_out; } lrx_desc = (struct hal_rx_desc *)last_buf->data; rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc); if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ab, "msdu_done bit in attention is not set\n"); ret = -EIO; goto free_out; } rxcb = ATH11K_SKB_RXCB(msdu); rxcb->rx_desc = rx_desc; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc); l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc); if (rxcb->is_frag) { skb_pull(msdu, hal_rx_desc_sz); } else if (!rxcb->is_continuation) { if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc); ret = -EINVAL; ath11k_warn(ab, "invalid msdu len %u\n", msdu_len); ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); goto free_out; } skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes); } else { ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, msdu, last_buf, l3_pad_bytes, msdu_len); if (ret) {
ath11k_warn(ab, "failed to coalesce msdu rx buffer%d\n", ret); goto free_out; } } ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED; return 0; free_out: return ret; } static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab, struct napi_struct *napi, struct sk_buff_head *msdu_list, int mac_id) { struct sk_buff *msdu; struct ath11k *ar; - struct ieee80211_rx_status rx_status = {0}; + struct ieee80211_rx_status rx_status = {}; int ret; if (skb_queue_empty(msdu_list)) return; if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) { __skb_queue_purge(msdu_list); return; } ar = ab->pdevs[mac_id].ar; if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) { __skb_queue_purge(msdu_list); return; } while ((msdu = __skb_dequeue(msdu_list))) { ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status); if (unlikely(ret)) { ath11k_dbg(ab, ATH11K_DBG_DATA, "Unable to process msdu %d", ret); dev_kfree_skb_any(msdu); continue; } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status); } } int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id, struct napi_struct *napi, int budget) { struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; - int num_buffs_reaped[MAX_RADIOS] = {0}; + int num_buffs_reaped[MAX_RADIOS] = {}; struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; int total_msdu_reaped = 0; struct hal_srng *srng; struct sk_buff *msdu; bool done = false; int buf_id, mac_id; struct ath11k *ar; struct hal_reo_dest_ring *desc; enum hal_reo_dest_ring_push_reason push_reason; - u32 cookie, info0, rx_msdu_info0, rx_mpdu_info0; + u32 cookie; int i; for (i = 0; i < MAX_RADIOS; i++) __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; spin_lock_bh(&srng->lock); try_again: ath11k_hal_srng_access_begin(ab, srng); - /* Make sure descriptor is read after the head pointer. 
*/ - dma_rmb(); - while (likely(desc = (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab, srng))) { cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, - READ_ONCE(desc->buf_addr_info.info1)); + desc->buf_addr_info.info1); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie); if (unlikely(buf_id == 0)) continue; ar = ab->pdevs[mac_id].ar; rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (unlikely(!msdu)) { ath11k_warn(ab, "frame rx with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; - info0 = READ_ONCE(desc->info0); push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, - info0); + desc->info0); if (unlikely(push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) { dev_kfree_skb_any(msdu); ab->soc_stats.hal_reo_error[ring_id]++; continue; } - rx_msdu_info0 = READ_ONCE(desc->rx_msdu_info.info0); - rx_mpdu_info0 = READ_ONCE(desc->rx_mpdu_info.info0); - - rxcb->is_first_msdu = !!(rx_msdu_info0 & + rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = !!(rx_msdu_info0 & + rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(rx_msdu_info0 & + rxcb->is_continuation = !!(desc->rx_msdu_info.info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, - READ_ONCE(desc->rx_mpdu_info.meta_data)); + desc->rx_mpdu_info.meta_data); rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM, - rx_mpdu_info0); + desc->rx_mpdu_info.info0); rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM, - info0); + desc->info0); rxcb->mac_id = mac_id; __skb_queue_tail(&msdu_list[mac_id], msdu); if (rxcb->is_continuation) { done = false; } else { total_msdu_reaped++; done = true; } if (total_msdu_reaped >= budget) break; } /* Hw might have updated the head pointer after we cached it. * In this case, even though there are entries in the ring we'll * get rx_desc NULL. Give the read another try with updated cached * head pointer so that we can reap complete MPDU in the current * rx processing. 
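 *
 * Shape of the retry, as a sketch:
 *
 *   try_again:
 *       access_begin()             // caches the hw head pointer
 *       while ((desc = next()))    // reap up to the cached head
 *           ...
 *       if (!done && head moved) { access_end(); goto try_again; }
 *       access_end()
 *
 * Entries posted after the snapshot stay invisible until the next
 * access_begin(), so re-entering re-reads the head and lets a partially
 * reaped MPDU (is_continuation still set) finish within this NAPI pass.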
*/ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) { ath11k_hal_srng_access_end(ab, srng); goto try_again; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (unlikely(!total_msdu_reaped)) goto exit; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i); ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } exit: return total_msdu_reaped; } static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, struct hal_rx_mon_ppdu_info *ppdu_info) { struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; u32 num_msdu; int i; if (!rx_stats) return; arsta->rssi_comb = ppdu_info->rssi_comb; ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; rx_stats->num_msdu += num_msdu; rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count; rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; rx_stats->other_msdu_count += ppdu_info->other_msdu_count; if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) { ppdu_info->nss = 1; ppdu_info->mcs = HAL_RX_MAX_MCS; ppdu_info->tid = IEEE80211_NUM_TIDS; } if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; if (ppdu_info->mcs <= HAL_RX_MAX_MCS) rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; if (ppdu_info->gi < HAL_RX_GI_MAX) rx_stats->gi_count[ppdu_info->gi] += num_msdu; if (ppdu_info->bw < HAL_RX_BW_MAX) rx_stats->bw_count[ppdu_info->bw] += num_msdu; if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX) rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; if (ppdu_info->tid <= IEEE80211_NUM_TIDS) rx_stats->tid_count[ppdu_info->tid] += num_msdu; if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX) rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX) rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; if (ppdu_info->is_stbc) rx_stats->stbc_count += num_msdu; if (ppdu_info->beamformed) rx_stats->beamformed_count += num_msdu; if (ppdu_info->num_mpdu_fcs_ok > 1) rx_stats->ampdu_msdu_count += num_msdu; else rx_stats->non_ampdu_msdu_count += num_msdu; rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; rx_stats->dcm_count += ppdu_info->dcm; rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++) arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i]; rx_stats->rx_duration += ppdu_info->rx_duration; arsta->rx_duration = rx_stats->rx_duration; } static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, struct dp_rxdma_ring *rx_ring, int *buf_id) { struct sk_buff *skb; dma_addr_t paddr; skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE); if (!skb) goto fail_alloc_skb; if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) { skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) - skb->data); } paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ab->dev, paddr))) goto 
fail_free_skb; spin_lock_bh(&rx_ring->idr_lock); *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, rx_ring->bufs_max, GFP_ATOMIC); spin_unlock_bh(&rx_ring->idr_lock); if (*buf_id < 0) goto fail_dma_unmap; ATH11K_SKB_RXCB(skb)->paddr = paddr; return skb; fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); fail_alloc_skb: return NULL; } int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, int req_entries, enum hal_rx_buf_return_buf_manager mgr) { struct hal_srng *srng; u32 *desc; struct sk_buff *skb; int num_free; int num_remain; int buf_id; u32 cookie; dma_addr_t paddr; req_entries = min(req_entries, rx_ring->bufs_max); srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); num_free = ath11k_hal_srng_src_num_free(ab, srng, true); req_entries = min(num_free, req_entries); num_remain = req_entries; while (num_remain > 0) { skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, &buf_id); if (!skb) break; paddr = ATH11K_SKB_RXCB(skb)->paddr; desc = ath11k_hal_srng_src_get_next_entry(ab, srng); if (!desc) goto fail_desc_get; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); num_remain--; ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; fail_desc_get: spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return req_entries - num_remain; } #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535 static void ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon, struct hal_tlv_hdr *tlv) { struct hal_rx_ppdu_start *ppdu_start; u16 ppdu_id_diff, ppdu_id, tlv_len; u8 *ptr; /* PPDU id is part of second tlv, move ptr to second tlv */ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl); ptr = (u8 *)tlv; ptr += sizeof(*tlv) + tlv_len; tlv = (struct hal_tlv_hdr *)ptr; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START) return; ptr += sizeof(*tlv); ppdu_start = (struct hal_rx_ppdu_start *)ptr; ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID, __le32_to_cpu(ppdu_start->info0)); if (pmon->sw_mon_entries.ppdu_id < ppdu_id) { pmon->buf_state = DP_MON_STATUS_LEAD; ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id; if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) pmon->buf_state = DP_MON_STATUS_LAG; } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) { pmon->buf_state = DP_MON_STATUS_LAG; ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id; if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP) pmon->buf_state = DP_MON_STATUS_LEAD; } } static enum dp_mon_status_buf_state ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng, struct dp_rxdma_ring *rx_ring) { struct ath11k_skb_rxcb *rxcb; struct hal_tlv_hdr *tlv; struct sk_buff *skb; void *status_desc; dma_addr_t paddr; u32 cookie; int buf_id; u8 rbm; status_desc = ath11k_hal_srng_src_next_peek(ab, srng); if (!status_desc) return DP_MON_STATUS_NO_DMA; ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); spin_lock_bh(&rx_ring->idr_lock); skb = 
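/* Wrap-aware ordering sketch for the LEAD/LAG decision above: 16-bit PPDU
 * ids wrap around, so a plain "<" can misread a freshly wrapped id as
 * older. A modulo-2^16 distance test is one compact way to express it;
 * the driver spells out both directions explicitly against
 * ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ppdu_id_is_ahead(uint16_t a, uint16_t b, uint16_t wrap_thresh)
{
	uint16_t diff = a - b;		/* distance modulo 2^16 */

	return diff != 0 && diff <= wrap_thresh;
}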
idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!skb) return DP_MON_STATUS_NO_DMA; rxcb = ATH11K_SKB_RXCB(skb); dma_sync_single_for_cpu(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) return DP_MON_STATUS_NO_DMA; return DP_MON_STATUS_REPLINISH; } static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, int *budget, struct sk_buff_head *skb_list) { struct ath11k *ar; const struct ath11k_hw_hal_params *hal_params; enum dp_mon_status_buf_state reap_status; struct ath11k_pdev_dp *dp; struct dp_rxdma_ring *rx_ring; struct ath11k_mon_data *pmon; struct hal_srng *srng; void *rx_mon_status_desc; struct sk_buff *skb; struct ath11k_skb_rxcb *rxcb; struct hal_tlv_hdr *tlv; u32 cookie; int buf_id, srng_id; dma_addr_t paddr; u8 rbm; int num_buffs_reaped = 0; ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; dp = &ar->dp; pmon = &dp->mon_data; srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id); rx_ring = &dp->rx_mon_status_refill_ring[srng_id]; srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (*budget) { *budget -= 1; rx_mon_status_desc = ath11k_hal_srng_src_peek(ab, srng); if (!rx_mon_status_desc) { pmon->buf_state = DP_MON_STATUS_REPLINISH; break; } ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, &cookie, &rbm); if (paddr) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!skb) { ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n", buf_id); pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } rxcb = ATH11K_SKB_RXCB(skb); dma_sync_single_for_cpu(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) { ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n", FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl), buf_id); /* RxDMA status done bit might not be set even * though tp is moved by HW. */ /* If done status is missing: * 1. As per MAC team's suggestion, * when HP + 1 entry is peeked and if DMA * is not done and if HP + 2 entry's DMA done * is set. skip HP + 1 entry and * start processing in next interrupt. * 2. If HP + 2 entry's DMA done is not set, * poll onto HP + 1 entry DMA done to be set. 
* Check status for same buffer for next time * dp_rx_mon_status_srng_process */ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng, rx_ring); if (reap_status == DP_MON_STATUS_NO_DMA) continue; spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); pmon->buf_state = DP_MON_STATUS_REPLINISH; goto move_next; } spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (ab->hw_params.full_monitor_mode) { ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv); if (paddr == pmon->mon_status_paddr) pmon->buf_state = DP_MON_STATUS_MATCH; } dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); __skb_queue_tail(skb_list, skb); } else { pmon->buf_state = DP_MON_STATUS_REPLINISH; } move_next: skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, &buf_id); if (!skb) { hal_params = ab->hw_params.hal_params; ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, hal_params->rx_buf_rbm); num_buffs_reaped++; break; } rxcb = ATH11K_SKB_RXCB(skb); cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); ath11k_hal_srng_src_get_next_entry(ab, srng); num_buffs_reaped++; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return num_buffs_reaped; } static void ath11k_dp_rx_frag_timer(struct timer_list *timer) { struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer, frag_timer); spin_lock_bh(&rx_tid->ab->base_lock); if (rx_tid->last_frag_no && rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) { spin_unlock_bh(&rx_tid->ab->base_lock); return; } ath11k_dp_rx_frags_cleanup(rx_tid, true); spin_unlock_bh(&rx_tid->ab->base_lock); } int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id) { struct ath11k_base *ab = ar->ab; struct crypto_shash *tfm; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; int i; tfm = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(tfm)) { ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, vdev_id, peer_mac); if (!peer) { ath11k_warn(ab, "failed to find the peer to set up fragment info\n"); spin_unlock_bh(&ab->base_lock); crypto_free_shash(tfm); return -ENOENT; } for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { rx_tid = &peer->rx_tid[i]; rx_tid->ab = ab; timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0); skb_queue_head_init(&rx_tid->rx_frags); } peer->tfm_mmic = tfm; peer->dp_setup_done = true; spin_unlock_bh(&ab->base_lock); return 0; } static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key, struct ieee80211_hdr *hdr, u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm); - u8 mic_hdr[16] = {0}; + u8 mic_hdr[16] = {}; u8 tid = 0; int ret; if (!tfm) return -EINVAL; desc->tfm = tfm; ret = crypto_shash_setkey(tfm, key, 8); if (ret) goto out; ret = crypto_shash_init(desc); if (ret) goto out; /* TKIP MIC header */ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN); if (ieee80211_is_data_qos(hdr->frame_control)) tid = ieee80211_get_tid(hdr); mic_hdr[12] = tid; ret = crypto_shash_update(desc, mic_hdr, 16); if (ret) goto out; ret = 
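/* TKIP Michael pseudo-header assembled above (16 bytes, zero padded):
 *
 *   bytes  0..5   DA       (ieee80211_get_DA)
 *   bytes  6..11  SA       (ieee80211_get_SA)
 *   byte   12     priority (TID, 0 for non-QoS frames)
 *   bytes 13..15  reserved, left as zero
 *
 * The 8-byte RX MIC key feeds crypto_shash_setkey(), and the resulting
 * MIC is michael(key, pseudo-header || MSDU payload).
 */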
crypto_shash_update(desc, data, data_len); if (ret) goto out; ret = crypto_shash_final(desc, mic); out: shash_desc_zero(desc); return ret; } static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer, struct sk_buff *msdu) { struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data; struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu); struct ieee80211_key_conf *key_conf; struct ieee80211_hdr *hdr; u8 mic[IEEE80211_CCMP_MIC_LEN]; int head_len, tail_len, ret; size_t data_len; u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; u8 *key, *data; u8 key_idx; if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC) return 0; hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); hdr_len = ieee80211_hdrlen(hdr->frame_control); head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN; tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN; if (!is_multicast_ether_addr(hdr->addr1)) key_idx = peer->ucast_keyidx; else key_idx = peer->mcast_keyidx; key_conf = peer->keys[key_idx]; data = msdu->data + head_len; data_len = msdu->len - head_len - tail_len; key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic); if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN)) goto mic_fail; return 0; mic_fail: (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true; (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true; rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; skb_pull(msdu, hal_rx_desc_sz); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); ieee80211_rx(ar->hw, msdu); return -EINVAL; } static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu, enum hal_encrypt_type enctype, u32 flags) { struct ieee80211_hdr *hdr; size_t hdr_len; size_t crypto_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; if (!flags) return; hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz); if (flags & RX_FLAG_MIC_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_mic_len(ar, enctype)); if (flags & RX_FLAG_ICV_STRIPPED) skb_trim(msdu, msdu->len - ath11k_dp_rx_crypto_icv_len(ar, enctype)); if (flags & RX_FLAG_IV_STRIPPED) { hdr_len = ieee80211_hdrlen(hdr->frame_control); crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len, (void *)msdu->data + hal_rx_desc_sz, hdr_len); skb_pull(msdu, crypto_len); } } static int ath11k_dp_rx_h_defrag(struct ath11k *ar, struct ath11k_peer *peer, struct dp_rx_tid *rx_tid, struct sk_buff **defrag_skb) { struct hal_rx_desc *rx_desc; struct sk_buff *skb, *first_frag, *last_frag; struct ieee80211_hdr *hdr; struct rx_attention *rx_attention; enum hal_encrypt_type enctype; bool is_decrypted = false; int msdu_len = 0; int extra_space; u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; first_frag = skb_peek(&rx_tid->rx_frags); last_frag = skb_peek_tail(&rx_tid->rx_frags); skb_queue_walk(&rx_tid->rx_frags, skb) { flags = 0; rx_desc = (struct hal_rx_desc *)skb->data; hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc); if (enctype != HAL_ENCRYPT_TYPE_OPEN) { rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc); is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention); } if (is_decrypted) { if (skb != first_frag) flags |= 
RX_FLAG_IV_STRIPPED; if (skb != last_frag) flags |= RX_FLAG_ICV_STRIPPED | RX_FLAG_MIC_STRIPPED; } /* RX fragments are always raw packets */ if (skb != last_frag) skb_trim(skb, skb->len - FCS_LEN); ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags); if (skb != first_frag) skb_pull(skb, hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control)); msdu_len += skb->len; } extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag)); if (extra_space > 0 && (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0)) return -ENOMEM; __skb_unlink(first_frag, &rx_tid->rx_frags); while ((skb = __skb_dequeue(&rx_tid->rx_frags))) { skb_put_data(first_frag, skb->data, skb->len); dev_kfree_skb_any(skb); } hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz); hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); ATH11K_SKB_RXCB(first_frag)->is_frag = 1; if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag)) first_frag = NULL; *defrag_skb = first_frag; return 0; } static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid, struct sk_buff *defrag_skb) { struct ath11k_base *ab = ar->ab; struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring; struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data; struct hal_reo_entrance_ring *reo_ent_ring; struct hal_reo_dest_ring *reo_dest_ring; struct dp_link_desc_bank *link_desc_banks; struct hal_rx_msdu_link *msdu_link; struct hal_rx_msdu_details *msdu0; struct hal_srng *srng; dma_addr_t paddr; u32 desc_bank, msdu_info, mpdu_info; u32 dst_idx, cookie, hal_rx_desc_sz; int ret, buf_id; hal_rx_desc_sz = ab->hw_params.hal_desc_sz; link_desc_banks = ab->dp.link_desc_banks; reo_dest_ring = rx_tid->dst_ring_desc; ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr)); msdu0 = &msdu_link->msdu_link[0]; dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0); memset(msdu0, 0, sizeof(*msdu0)); msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) | FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH, defrag_skb->len - hal_rx_desc_sz) | FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1); msdu0->rx_msdu_info.info0 = msdu_info; /* change msdu len in hal rx desc */ ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz); paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, paddr)) return -ENOMEM; spin_lock_bh(&rx_refill_ring->idr_lock); buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0, rx_refill_ring->bufs_max * 3, GFP_ATOMIC); spin_unlock_bh(&rx_refill_ring->idr_lock); if (buf_id < 0) { ret = -ENOMEM; goto err_unmap_dma; } ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr; cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) | FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, ab->hw_params.hal_params->rx_buf_rbm); /* Fill mpdu details into reo entrance ring */ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); reo_ent_ring = (struct hal_reo_entrance_ring *) 
ath11k_hal_srng_src_get_next_entry(ab, srng); if (!reo_ent_ring) { ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); ret = -ENOSPC; goto err_free_idr; } memset(reo_ent_ring, 0, sizeof(*reo_ent_ring)); ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank); ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank, HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST); mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) | FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) | FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1); reo_ent_ring->rx_mpdu_info.info0 = mpdu_info; reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data; reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo; reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI, FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI, reo_dest_ring->info0)) | FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx); ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); return 0; err_free_idr: spin_lock_bh(&rx_refill_ring->idr_lock); idr_remove(&rx_refill_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_refill_ring->idr_lock); err_unmap_dma: dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb), DMA_TO_DEVICE); return ret; } static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar, struct sk_buff *a, struct sk_buff *b) { int frag1, frag2; frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a); frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b); return frag1 - frag2; } static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar, struct sk_buff_head *frag_list, struct sk_buff *cur_frag) { struct sk_buff *skb; int cmp; skb_queue_walk(frag_list, skb) { cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag); if (cmp < 0) continue; __skb_queue_before(frag_list, skb, cur_frag); return; } __skb_queue_tail(frag_list, cur_frag); } static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u64 pn = 0; u8 *ehdr; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz); ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control); pn = ehdr[0]; pn |= (u64)ehdr[1] << 8; pn |= (u64)ehdr[4] << 16; pn |= (u64)ehdr[5] << 24; pn |= (u64)ehdr[6] << 32; pn |= (u64)ehdr[7] << 40; return pn; } static bool ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid) { enum hal_encrypt_type encrypt_type; struct sk_buff *first_frag, *skb; struct hal_rx_desc *desc; u64 last_pn; u64 cur_pn; first_frag = skb_peek(&rx_tid->rx_frags); desc = (struct hal_rx_desc *)first_frag->data; encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc); if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 && encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 && encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256) return true; last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag); skb_queue_walk(&rx_tid->rx_frags, skb) { if (skb == first_frag) continue; cur_pn = ath11k_dp_rx_h_get_pn(ar, skb); if (cur_pn != last_pn + 1) return false; last_pn = cur_pn; } return true; } static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, struct sk_buff *msdu, u32 *ring_desc) { struct ath11k_base *ab = ar->ab; struct hal_rx_desc *rx_desc; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; struct 
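/* The PN extraction above, restated standalone: a CCMP/GCMP header carries
 * PN0..PN1 in bytes 0-1, reserved/key-id in bytes 2-3 and PN2..PN5 in
 * bytes 4-7, yielding a 48-bit packet number. Fragments of one MPDU must
 * then carry strictly incrementing PNs, which is what
 * ath11k_dp_rx_h_defrag_validate_incr_pn() walks the fragment list to
 * verify.
 */
#include <stdint.h>

static uint64_t ccmp_pn(const uint8_t *ivhdr)
{
	return (uint64_t)ivhdr[0]       | (uint64_t)ivhdr[1] << 8  |
	       (uint64_t)ivhdr[4] << 16 | (uint64_t)ivhdr[5] << 24 |
	       (uint64_t)ivhdr[6] << 32 | (uint64_t)ivhdr[7] << 40;
}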
sk_buff *defrag_skb = NULL; u32 peer_id; u16 seqno, frag_no; u8 tid; int ret = 0; bool more_frags; bool is_mcbc; rx_desc = (struct hal_rx_desc *)msdu->data; peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc); tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc); seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc); frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu); more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu); is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc); /* Multicast/Broadcast fragments are not expected */ if (is_mcbc) return -EINVAL; if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) || !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) || tid > IEEE80211_NUM_TIDS) return -EINVAL; /* received unfragmented packet in reo * exception ring, this shouldn't happen * as these packets typically come from * reo2sw srngs. */ if (WARN_ON_ONCE(!frag_no && !more_frags)) return -EINVAL; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, peer_id); if (!peer) { ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n", peer_id); ret = -ENOENT; goto out_unlock; } if (!peer->dp_setup_done) { ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n", peer->addr, peer_id); ret = -ENOENT; goto out_unlock; } rx_tid = &peer->rx_tid[tid]; if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) || skb_queue_empty(&rx_tid->rx_frags)) { /* Flush stored fragments and start a new sequence */ ath11k_dp_rx_frags_cleanup(rx_tid, true); rx_tid->cur_sn = seqno; } if (rx_tid->rx_frag_bitmap & BIT(frag_no)) { /* Fragment already present */ ret = -EINVAL; goto out_unlock; } if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap))) __skb_queue_tail(&rx_tid->rx_frags, msdu); else ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu); rx_tid->rx_frag_bitmap |= BIT(frag_no); if (!more_frags) rx_tid->last_frag_no = frag_no; if (frag_no == 0) { rx_tid->dst_ring_desc = kmemdup(ring_desc, sizeof(*rx_tid->dst_ring_desc), GFP_ATOMIC); if (!rx_tid->dst_ring_desc) { ret = -ENOMEM; goto out_unlock; } } else { ath11k_dp_rx_link_desc_return(ab, ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } if (!rx_tid->last_frag_no || rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) { mod_timer(&rx_tid->frag_timer, jiffies + ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS); goto out_unlock; } spin_unlock_bh(&ab->base_lock); timer_delete_sync(&rx_tid->frag_timer); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, peer_id); if (!peer) goto err_frags_cleanup; if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid)) goto err_frags_cleanup; if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb)) goto err_frags_cleanup; if (!defrag_skb) goto err_frags_cleanup; if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb)) goto err_frags_cleanup; ath11k_dp_rx_frags_cleanup(rx_tid, false); goto out_unlock; err_frags_cleanup: dev_kfree_skb_any(defrag_skb); ath11k_dp_rx_frags_cleanup(rx_tid, true); out_unlock: spin_unlock_bh(&ab->base_lock); return ret; } static int ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop) { struct ath11k_pdev_dp *dp = &ar->dp; struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; struct sk_buff *msdu; struct ath11k_skb_rxcb *rxcb; struct hal_rx_desc *rx_desc; u8 *hdr_status; u16 msdu_len; u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { 
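/* No skb is mapped to this buf_id in the refill ring idr, i.e. the cookie carried by the error ring descriptor is stale; warn and bail out without touching any buffer. */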
ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); return -EINVAL; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); if (drop) { dev_kfree_skb_any(msdu); return 0; } rcu_read_lock(); if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { dev_kfree_skb_any(msdu); goto exit; } if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { dev_kfree_skb_any(msdu); goto exit; } rx_desc = (struct hal_rx_desc *)msdu->data; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc); if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) { hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc); ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status, sizeof(struct ieee80211_hdr)); ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc, sizeof(struct hal_rx_desc)); dev_kfree_skb_any(msdu); goto exit; } skb_put(msdu, hal_rx_desc_sz + msdu_len); if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) { dev_kfree_skb_any(msdu); ath11k_dp_rx_link_desc_return(ar->ab, ring_desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } exit: rcu_read_unlock(); return 0; } int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, int budget) { u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; struct dp_link_desc_bank *link_desc_banks; enum hal_rx_buf_return_buf_manager rbm; int tot_n_bufs_reaped, quota, ret, i; - int n_bufs_reaped[MAX_RADIOS] = {0}; + int n_bufs_reaped[MAX_RADIOS] = {}; struct dp_rxdma_ring *rx_ring; struct dp_srng *reo_except; u32 desc_bank, num_msdus; struct hal_srng *srng; struct ath11k_dp *dp; void *link_desc_va; int buf_id, mac_id; struct ath11k *ar; dma_addr_t paddr; u32 *desc; bool is_frag; u8 drop = 0; tot_n_bufs_reaped = 0; quota = budget; dp = &ab->dp; reo_except = &dp->reo_except_ring; link_desc_banks = dp->link_desc_banks; srng = &ab->hal.srng_list[reo_except->ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (budget && (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc; ab->soc_stats.err_ring_pkts++; ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, &desc_bank); if (ret) { ath11k_warn(ab, "failed to parse error reo desc %d\n", ret); continue; } link_desc_va = link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr); ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && rbm != HAL_RX_BUF_RBM_SW1_BM && rbm != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_REL_MSDU); continue; } is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG); /* Process only rx fragments with one msdu per link desc below, and drop * msdu's indicated due to error reasons. 
*/ if (!is_frag || num_msdus > 1) { drop = 1; /* Return the link desc back to wbm idle list */ ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_cookies[i]); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, msdu_cookies[i]); ar = ab->pdevs[mac_id].ar; if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) { n_bufs_reaped[mac_id]++; tot_n_bufs_reaped++; } } if (tot_n_bufs_reaped >= quota) { tot_n_bufs_reaped = quota; goto exit; } budget = quota - tot_n_bufs_reaped; } exit: ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); for (i = 0; i < ab->num_radios; i++) { if (!n_bufs_reaped[i]) continue; ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } return tot_n_bufs_reaped; } static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, int msdu_len, struct sk_buff_head *msdu_list) { struct sk_buff *skb, *tmp; struct ath11k_skb_rxcb *rxcb; int n_buffs; n_buffs = DIV_ROUND_UP(msdu_len, (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz)); skb_queue_walk_safe(msdu_list, skb, tmp) { rxcb = ATH11K_SKB_RXCB(skb); if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO && rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) { if (!n_buffs) break; __skb_unlink(skb, msdu_list); dev_kfree_skb_any(skb); n_buffs--; } } } static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status, struct sk_buff_head *msdu_list) { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; struct rx_attention *rx_attention; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) { /* First buffer will be freed by the caller, so deduct its length */ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz); ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); return -EINVAL; } rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc); if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) { ath11k_warn(ar->ab, "msdu_done bit not set in null_q_desc processing\n"); __skb_queue_purge(msdu_list); return -EIO; } /* Handle NULL queue descriptor violations arising out of a missing * REO queue for a given peer or a given TID. This typically * happens if a packet is received on a QoS-enabled TID before the * ADDBA negotiation for that TID, which is when the TID queue is set up. * It may also happen for MC/BC frames if they are not routed to the * non-QOS TID queue, in the absence of any other default TID queue. * This error can show up in either a REO destination ring or the WBM * release ring.
*/ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); if (rxcb->is_frag) { skb_pull(msdu, hal_rx_desc_sz); } else { l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) return -EINVAL; skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } ath11k_dp_rx_h_ppdu(ar, desc, status); ath11k_dp_rx_h_mpdu(ar, msdu, desc, status); rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc); /* Note that the caller still has access to the msdu and will complete * rx with mac80211; there is no need to clean up the amsdu_list. */ return 0; } static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status, struct sk_buff_head *msdu_list) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); bool drop = false; ar->ab->soc_stats.reo_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO: if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) drop = true; break; case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED: /* TODO: Do not drop PN failed packets in the driver; * instead, it is good to drop such packets in mac80211 * after incrementing the replay counters. */ fallthrough; default: /* TODO: Review other errors and report them to mac80211 * as appropriate. */ drop = true; break; } return drop; } static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { u16 msdu_len; struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; u8 l3pad_bytes; struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz; rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc); rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc); l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc); msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc); skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); ath11k_dp_rx_h_ppdu(ar, desc, status); status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | RX_FLAG_DECRYPTED); ath11k_dp_rx_h_undecap(ar, msdu, desc, HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); } static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); bool drop = false; ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; switch (rxcb->err_code) { case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); break; default: /* TODO: Review other rxdma error codes to check if anything is * worth reporting to mac80211 */ drop = true; break; } return drop; } static void ath11k_dp_rx_wbm_err(struct ath11k *ar, struct napi_struct *napi, struct sk_buff *msdu, struct sk_buff_head *msdu_list) { struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu); - struct ieee80211_rx_status rxs = {0}; + struct ieee80211_rx_status rxs = {}; bool drop = true; switch (rxcb->err_rel_src) { case HAL_WBM_REL_SRC_MODULE_REO: drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); break; case HAL_WBM_REL_SRC_MODULE_RXDMA: drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); break; default: /* msdu will get freed */ break; } if (drop) { dev_kfree_skb_any(msdu); return; } ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs); }
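/* The WBM error path below works in two passes: the release ring is first reaped under the srng lock, with each erroneous MSDU unmapped and sorted into a per-pdev list, and the reaped buffers are then replenished before the queued MSDUs are delivered or dropped through ath11k_dp_rx_wbm_err() under RCU protection. */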
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, struct napi_struct *napi, int budget) { struct ath11k *ar; struct ath11k_dp *dp = &ab->dp; struct dp_rxdma_ring *rx_ring; struct hal_rx_wbm_rel_info err_info; struct hal_srng *srng; struct sk_buff *msdu; struct sk_buff_head msdu_list[MAX_RADIOS]; struct ath11k_skb_rxcb *rxcb; u32 *rx_desc; int buf_id, mac_id; - int num_buffs_reaped[MAX_RADIOS] = {0}; + int num_buffs_reaped[MAX_RADIOS] = {}; int total_num_buffs_reaped = 0; int ret, i; for (i = 0; i < ab->num_radios; i++) __skb_queue_head_init(&msdu_list[i]); srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (budget) { rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); if (!rx_desc) break; ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); if (ret) { ath11k_warn(ab, "failed to parse rx error in wbm_rel ring desc %d\n", ret); continue; } buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie); mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie); ar = ab->pdevs[mac_id].ar; rx_ring = &ar->dp.rx_refill_buf_ring; spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n", buf_id, mac_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); dma_unmap_single(ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); num_buffs_reaped[mac_id]++; total_num_buffs_reaped++; budget--; if (err_info.push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { dev_kfree_skb_any(msdu); continue; } rxcb->err_rel_src = err_info.err_rel_src; rxcb->err_code = err_info.err_code; rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; __skb_queue_tail(&msdu_list[mac_id], msdu); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (!total_num_buffs_reaped) goto done; for (i = 0; i < ab->num_radios; i++) { if (!num_buffs_reaped[i]) continue; ar = ab->pdevs[i].ar; rx_ring = &ar->dp.rx_refill_buf_ring; ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], ab->hw_params.hal_params->rx_buf_rbm); } rcu_read_lock(); for (i = 0; i < ab->num_radios; i++) { if (!rcu_dereference(ab->pdevs_active[i])) { __skb_queue_purge(&msdu_list[i]); continue; } ar = ab->pdevs[i].ar; if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { __skb_queue_purge(&msdu_list[i]); continue; } while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL) ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); } rcu_read_unlock(); done: return total_num_buffs_reaped; } int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) { struct ath11k *ar; struct dp_srng *err_ring; struct dp_rxdma_ring *rx_ring; struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; struct hal_srng *srng; u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; enum hal_rx_buf_return_buf_manager rbm; enum hal_reo_entr_rxdma_ecode rxdma_err_code; struct ath11k_skb_rxcb *rxcb; struct sk_buff *skb; struct hal_reo_entrance_ring *entr_ring; void *desc; int num_buf_freed = 0; int quota = budget; dma_addr_t paddr; u32 desc_bank; void *link_desc_va; int num_msdus; int i; int buf_id; ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar; err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id)]; rx_ring = &ar->dp.rx_refill_buf_ring; srng = &ab->hal.srng_list[err_ring->ring_id]; 
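/* Each entry reaped from the rxdma error destination ring below points at an MSDU link descriptor; every buffer it references is unmapped and freed, the link descriptor itself is returned to the WBM idle list, and the refill ring is finally replenished by the number of buffers reclaimed. */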
spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while (quota-- && (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); entr_ring = (struct hal_reo_entrance_ring *)desc; rxdma_err_code = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, entr_ring->info1); ab->soc_stats.rxdma_error[rxdma_err_code]++; link_desc_va = link_desc_banks[desc_bank].vaddr + (paddr - link_desc_banks[desc_bank].paddr); ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_cookies[i]); spin_lock_bh(&rx_ring->idr_lock); skb = idr_find(&rx_ring->bufs_idr, buf_id); if (!skb) { ath11k_warn(ab, "rxdma error with invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); continue; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(skb); dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); num_buf_freed++; } ath11k_dp_rx_link_desc_return(ab, desc, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); if (num_buf_freed) ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, ab->hw_params.hal_params->rx_buf_rbm); return budget - quota; } void ath11k_dp_process_reo_status(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct hal_srng *srng; struct dp_reo_cmd *cmd, *tmp; bool found = false; u32 *reo_desc; u16 tag; struct hal_reo_status reo_status; srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; memset(&reo_status, 0, sizeof(reo_status)); spin_lock_bh(&srng->lock); ath11k_hal_srng_access_begin(ab, srng); while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc); switch (tag) { case HAL_REO_GET_QUEUE_STATS_STATUS: ath11k_hal_reo_status_queue_stats(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_QUEUE_STATUS: ath11k_hal_reo_flush_queue_status(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_CACHE_STATUS: ath11k_hal_reo_flush_cache_status(ab, reo_desc, &reo_status); break; case HAL_REO_UNBLOCK_CACHE_STATUS: ath11k_hal_reo_unblk_cache_status(ab, reo_desc, &reo_status); break; case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS: ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, &reo_status); break; case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS: ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, &reo_status); break; case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS: ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, &reo_status); break; default: ath11k_warn(ab, "Unknown reo status type %d\n", tag); continue; } spin_lock_bh(&dp->reo_cmd_lock); list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { found = true; list_del(&cmd->list); break; } } spin_unlock_bh(&dp->reo_cmd_lock); if (found) { cmd->handler(dp, (void *)&cmd->data, reo_status.uniform_hdr.cmd_status); kfree(cmd); } found = false; } ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); } void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) { struct ath11k *ar = ab->pdevs[mac_id].ar; ath11k_dp_rx_pdev_srng_free(ar); ath11k_dp_rxdma_pdev_buf_free(ar); } int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; u32 ring_id; int i; int ret; ret = 
ath11k_dp_rx_pdev_srng_alloc(ar); if (ret) { ath11k_warn(ab, "failed to setup rx srngs\n"); return ret; } ret = ath11k_dp_rxdma_pdev_buf_setup(ar); if (ret) { ath11k_warn(ab, "failed to setup rxdma ring\n"); return ret; } ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF); if (ret) { ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n", ret); return ret; } if (ab->hw_params.rx_mac_buf_ring) { for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_BUF); if (ret) { ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n", i, ret); return ret; } } } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rxdma_err_dst_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_DST); if (ret) { ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n", i, ret); return ret; } } if (!ab->hw_params.rxdma1_enable) goto config_refill_ring; ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_MONITOR_BUF); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", ret); return ret; } ret = ath11k_dp_tx_htt_srng_setup(ab, dp->rxdma_mon_dst_ring.ring_id, mac_id, HAL_RXDMA_MONITOR_DST); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n", ret); return ret; } ret = ath11k_dp_tx_htt_srng_setup(ab, dp->rxdma_mon_desc_ring.ring_id, mac_id, HAL_RXDMA_MONITOR_DESC); if (ret) { ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n", ret); return ret; } config_refill_ring: for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_MONITOR_STATUS); if (ret) { ath11k_warn(ab, "failed to configure mon_status_refill_ring%d %d\n", i, ret); return ret; } } return 0; } static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) { if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) { *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc); *total_len -= *frag_len; } else { *frag_len = *total_len; *total_len = 0; } } static int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, void *p_last_buf_addr_info, u8 mac_id) { struct ath11k_pdev_dp *dp = &ar->dp; struct dp_srng *dp_srng; void *hal_srng; void *src_srng_desc; int ret = 0; if (ar->ab->hw_params.rxdma1_enable) { dp_srng = &dp->rxdma_mon_desc_ring; hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; } else { dp_srng = &ar->ab->dp.wbm_desc_rel_ring; hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id]; } ath11k_hal_srng_access_begin(ar->ab, hal_srng); src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); if (src_srng_desc) { struct ath11k_buffer_addr *src_desc = src_srng_desc; *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); } else { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "Monitor Link Desc Ring %d Full", mac_id); ret = -ENOMEM; } ath11k_hal_srng_access_end(ar->ab, hal_srng); return ret; } static void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, dma_addr_t *paddr, u32 *sw_cookie, u8 *rbm, void **pp_buf_addr_info) { struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc; struct ath11k_buffer_addr *buf_addr_info; buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
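/* The buf_addr_info at the head of an MSDU link descriptor chains to the next link descriptor of the same MPDU; extract its paddr, cookie and RBM so the caller can walk the chain and later hand this descriptor back. */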
ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm); *pp_buf_addr_info = (void *)buf_addr_info; } static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) { if (skb->len > len) { skb_trim(skb, len); } else { if (skb_tailroom(skb) < len - skb->len) { if ((pskb_expand_head(skb, 0, len - skb->len - skb_tailroom(skb), GFP_ATOMIC))) { dev_kfree_skb_any(skb); return -ENOMEM; } } skb_put(skb, (len - skb->len)); } return 0; } static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, void *msdu_link_desc, struct hal_rx_msdu_list *msdu_list, u16 *num_msdus) { struct hal_rx_msdu_details *msdu_details = NULL; struct rx_msdu_desc *msdu_desc_info = NULL; struct hal_rx_msdu_link *msdu_link = NULL; int i; u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1); u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1); u8 tmp = 0; msdu_link = msdu_link_desc; msdu_details = &msdu_link->msdu_link[0]; for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) { if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR, msdu_details[i].buf_addr_info.info0) == 0) { msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; msdu_desc_info->info0 |= last; break; } msdu_desc_info = &msdu_details[i].rx_msdu_info; if (!i) msdu_desc_info->info0 |= first; else if (i == (HAL_RX_NUM_MSDU_DESC - 1)) msdu_desc_info->info0 |= last; msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; msdu_list->msdu_info[i].msdu_len = HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0); msdu_list->sw_cookie[i] = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, msdu_details[i].buf_addr_info.info1); tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, msdu_details[i].buf_addr_info.info1); msdu_list->rbm[i] = tmp; } *num_msdus = i; } static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, u32 *rx_bufs_used) { u32 ret = 0; if ((*ppdu_id < msdu_ppdu_id) && ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) { *ppdu_id = msdu_ppdu_id; ret = msdu_ppdu_id; } else if ((*ppdu_id > msdu_ppdu_id) && ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) { /* mon_dst is behind mon_status, * skip dst_ring and free it */ *rx_bufs_used += 1; *ppdu_id = msdu_ppdu_id; ret = msdu_ppdu_id; } return ret; } static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, bool *is_frag, u32 *total_len, u32 *frag_len, u32 *msdu_cnt) { if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) { if (!*is_frag) { *total_len = info->msdu_len; *is_frag = true; } ath11k_dp_mon_set_frag_len(total_len, frag_len); } else { if (*is_frag) { ath11k_dp_mon_set_frag_len(total_len, frag_len); } else { *frag_len = info->msdu_len; } *is_frag = false; *msdu_cnt -= 1; } } /* clang stack usage explodes if this is inlined */ static noinline_for_stack u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, void *ring_entry, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, u32 *npackets, u32 *ppdu_id) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; struct sk_buff *msdu = NULL, *last = NULL; struct hal_rx_msdu_list msdu_list; void *p_buf_addr_info, *p_last_buf_addr_info; struct hal_rx_desc *rx_desc; void *rx_msdu_link_desc; dma_addr_t paddr; u16 num_msdus = 0; u32 rx_buf_size, rx_pkt_offset, sw_cookie; u32 rx_bufs_used = 0, i = 0; u32 msdu_ppdu_id = 0, msdu_cnt = 0; u32 total_len = 0, frag_len = 0; bool is_frag, is_first_msdu; bool drop_mpdu = false; struct ath11k_skb_rxcb *rxcb; struct hal_reo_entrance_ring *ent_desc =
ring_entry; int buf_id; u32 rx_link_buf_info[2]; u8 rbm; if (!ar->ab->hw_params.rxdma1_enable) rx_ring = &dp->rx_refill_buf_ring; ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, &sw_cookie, &p_last_buf_addr_info, &rbm, &msdu_cnt); if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON, ent_desc->info1) == HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { u8 rxdma_err = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, ent_desc->info1); if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { drop_mpdu = true; pmon->rx_mon_stats.dest_mpdu_drop++; } } is_frag = false; is_first_msdu = true; do { if (pmon->mon_last_linkdesc_paddr == paddr) { pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; return rx_bufs_used; } if (ar->ab->hw_params.rxdma1_enable) rx_msdu_link_desc = (void *)pmon->link_desc_banks[sw_cookie].vaddr + (paddr - pmon->link_desc_banks[sw_cookie].paddr); else rx_msdu_link_desc = (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr + (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr); ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, &num_msdus); for (i = 0; i < num_msdus; i++) { u32 l2_hdr_offset; if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "i %d last_cookie %d is same\n", i, pmon->mon_last_buf_cookie); drop_mpdu = true; pmon->rx_mon_stats.dup_mon_buf_cnt++; continue; } buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_list.sw_cookie[i]); spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "msdu_pop: invalid buf_id %d\n", buf_id); goto next_msdu; } rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); rxcb->unmapped = 1; } if (drop_mpdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "i %d drop msdu %p *ppdu_id %x\n", i, msdu, *ppdu_id); dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; pmon->mon_last_linkdesc_paddr = paddr; goto next_msdu; } msdu_ppdu_id = ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc); if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, ppdu_id, &rx_bufs_used)) { if (rx_bufs_used) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } return rx_bufs_used; } pmon->mon_last_linkdesc_paddr = paddr; is_first_msdu = false; } ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], &is_frag, &total_len, &frag_len, &msdu_cnt); rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); if (!(*head_msdu)) *head_msdu = msdu; else if (last) last->next = msdu; last = msdu; next_msdu: pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; rx_bufs_used++; spin_lock_bh(&rx_ring->idr_lock); idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); } ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm); ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, &sw_cookie, &rbm, &p_buf_addr_info); if (ar->ab->hw_params.rxdma1_enable) { if (ath11k_dp_rx_monitor_link_desc_return(ar, p_last_buf_addr_info, dp->mac_id)) 
ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "dp_rx_monitor_link_desc_return failed"); } else { ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info, HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); } p_last_buf_addr_info = p_buf_addr_info; } while (paddr && msdu_cnt); if (last) last->next = NULL; *tail_msdu = msdu; if (msdu_cnt == 0) *npackets = 1; return rx_bufs_used; } static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu) { u32 rx_pkt_offset, l2_hdr_offset; rx_pkt_offset = ar->ab->hw_params.hal_desc_sz; l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, (struct hal_rx_desc *)msdu->data); skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); } static struct sk_buff * ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct sk_buff *last_msdu, struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath11k_base *ab = ar->ab; struct sk_buff *msdu, *prev_buf; struct hal_rx_desc *rx_desc; char *hdr_desc; u8 *dest, decap_format; struct ieee80211_hdr_3addr *wh; struct rx_attention *rx_attention; u32 err_bitmap; if (!head_msdu) goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc); err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention); if (err_bitmap & DP_RX_MPDU_ERR_FCS) *fcs_err = true; if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention)) return NULL; decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc); ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { ath11k_dp_rx_msdus_set_payload(ar, head_msdu); prev_buf = head_msdu; msdu = head_msdu->next; while (msdu) { ath11k_dp_rx_msdus_set_payload(ar, msdu); prev_buf = msdu; msdu = msdu->next; } prev_buf->next = NULL; skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { u8 qos_pkt = 0; rx_desc = (struct hal_rx_desc *)head_msdu->data; hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc); /* Base size */ wh = (struct ieee80211_hdr_3addr *)hdr_desc; if (ieee80211_is_data_qos(wh->frame_control)) qos_pkt = 1; msdu = head_msdu; while (msdu) { ath11k_dp_rx_msdus_set_payload(ar, msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) goto err_merge_fail; memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr)); } prev_buf = msdu; msdu = msdu->next; } dest = skb_put(prev_buf, HAL_RX_FCS_LEN); if (!dest) goto err_merge_fail; ath11k_dbg(ab, ATH11K_DBG_DATA, "mpdu_buf %p mpdu_buf->len %u", prev_buf, prev_buf->len); } else { ath11k_dbg(ab, ATH11K_DBG_DATA, "decap format %d is not supported!\n", decap_format); goto err_merge_fail; } return head_msdu; err_merge_fail: return NULL; } static void ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status, u8 *rtap_buf) { u32 rtap_len = 0; put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]); } static void ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status, u8 *rtap_buf) { u32 rtap_len = 0; put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]); rtap_len += 2; put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]); rtap_len += 2; rtap_buf[rtap_len] = 
rx_status->he_RU[0]; rtap_len += 1; rtap_buf[rtap_len] = rx_status->he_RU[1]; rtap_len += 1; rtap_buf[rtap_len] = rx_status->he_RU[2]; rtap_len += 1; rtap_buf[rtap_len] = rx_status->he_RU[3]; } static void ath11k_update_radiotap(struct ath11k *ar, struct hal_rx_mon_ppdu_info *ppduinfo, struct sk_buff *mon_skb, struct ieee80211_rx_status *rxs) { struct ieee80211_supported_band *sband; u8 *ptr = NULL; rxs->flag |= RX_FLAG_MACTIME_START; rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR; if (ppduinfo->nss) rxs->nss = ppduinfo->nss; if (ppduinfo->he_mu_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr); } else if (ppduinfo->he_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he)); ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr); rxs->rate_idx = ppduinfo->rate; } else if (ppduinfo->vht_flags) { rxs->encoding = RX_ENC_VHT; rxs->rate_idx = ppduinfo->rate; } else if (ppduinfo->ht_flags) { rxs->encoding = RX_ENC_HT; rxs->rate_idx = ppduinfo->rate; } else { rxs->encoding = RX_ENC_LEGACY; sband = &ar->mac.sbands[rxs->band]; rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate, ppduinfo->cck_flag); } rxs->mactime = ppduinfo->tsft; } static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, struct sk_buff *head_msdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct sk_buff *tail_msdu, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; struct sk_buff *mon_skb, *skb_next, *header; struct ieee80211_rx_status *rxs = &dp->rx_status; bool fcs_err = false; mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; header = mon_skb; rxs->flag = 0; if (fcs_err) rxs->flag = RX_FLAG_FAILED_FCS_CRC; do { skb_next = mon_skb->next; if (!skb_next) rxs->flag &= ~RX_FLAG_AMSDU_MORE; else rxs->flag |= RX_FLAG_AMSDU_MORE; if (mon_skb == header) { header = NULL; rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN; } else { rxs->flag |= RX_FLAG_ALLOW_SAME_PN; } rxs->flag |= RX_FLAG_ONLY_MONITOR; ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs); ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs); mon_skb = skb_next; } while (mon_skb); rxs->flag = 0; return 0; mon_deliver_fail: mon_skb = head_msdu; while (mon_skb) { skb_next = mon_skb->next; dev_kfree_skb_any(mon_skb); mon_skb = skb_next; } return -EINVAL; } /* The destination ring processing is considered stuck if the destination * ring does not move while the status ring moves 16 PPDUs. As a workaround, * the destination ring processing skips this destination ring PPDU.
*/ #define MON_DEST_RING_STUCK_MAX_CNT 16 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, u32 quota, struct napi_struct *napi) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; const struct ath11k_hw_hal_params *hal_params; void *ring_entry; struct hal_srng *mon_dst_srng; u32 ppdu_id; u32 rx_bufs_used; u32 ring_id; struct ath11k_pdev_mon_stats *rx_mon_stats; u32 npackets = 0; u32 mpdu_rx_bufs_used; if (ar->ab->hw_params.rxdma1_enable) ring_id = dp->rxdma_mon_dst_ring.ring_id; else ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id; mon_dst_srng = &ar->ab->hal.srng_list[ring_id]; spin_lock_bh(&pmon->mon_lock); spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); ppdu_id = pmon->mon_ppdu_info.ppdu_id; rx_bufs_used = 0; rx_mon_stats = &pmon->rx_mon_stats; while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { struct sk_buff *head_msdu, *tail_msdu; head_msdu = NULL; tail_msdu = NULL; mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry, &head_msdu, &tail_msdu, &npackets, &ppdu_id); rx_bufs_used += mpdu_rx_bufs_used; if (mpdu_rx_bufs_used) { dp->mon_dest_ring_stuck_cnt = 0; } else { dp->mon_dest_ring_stuck_cnt++; rx_mon_stats->dest_mon_not_reaped++; } if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) { rx_mon_stats->dest_mon_stuck++; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n", pmon->mon_ppdu_info.ppdu_id, ppdu_id, dp->mon_dest_ring_stuck_cnt, rx_mon_stats->dest_mon_not_reaped, rx_mon_stats->dest_mon_stuck); pmon->mon_ppdu_info.ppdu_id = ppdu_id; continue; } if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { pmon->mon_ppdu_status = DP_PPDU_STATUS_START; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n", ppdu_id, pmon->mon_ppdu_info.ppdu_id, rx_mon_stats->dest_mon_not_reaped, rx_mon_stats->dest_mon_stuck); break; } if (head_msdu && tail_msdu) { ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, &pmon->mon_ppdu_info, tail_msdu, napi); rx_mon_stats->dest_mpdu_done++; } ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, mon_dst_srng); } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); if (rx_bufs_used) { rx_mon_stats->dest_ppdu_done++; hal_params = ar->ab->hw_params.hal_params; if (ar->ab->hw_params.rxdma1_enable) ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rxdma_mon_buf_ring, rx_bufs_used, hal_params->rx_buf_rbm); else ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rx_refill_buf_ring, rx_bufs_used, hal_params->rx_buf_rbm); } } int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); enum hal_rx_mon_status hal_status; struct sk_buff *skb; struct sk_buff_head skb_list; struct ath11k_peer *peer; struct ath11k_sta *arsta; int num_buffs_reaped = 0; u32 rx_buf_sz; u16 log_type; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data; struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats; struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; __skb_queue_head_init(&skb_list); num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, &skb_list); if (!num_buffs_reaped) goto exit; memset(ppdu_info, 0, sizeof(*ppdu_info)); 
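/* Each status buffer reaped above is parsed TLV by TLV below; a PPDU_DONE status flips the PPDU state to DONE and, when the monitor is started, kicks destination ring processing, after which the PPDU info is matched to its peer under base_lock to update rx statistics. */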
ppdu_info->peer_id = HAL_INVALID_PEERID; while ((skb = __skb_dequeue(&skb_list))) { if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) { log_type = ATH11K_PKTLOG_TYPE_LITE_RX; rx_buf_sz = DP_RX_BUFFER_SIZE_LITE; } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) { log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF; rx_buf_sz = DP_RX_BUFFER_SIZE; } else { log_type = ATH11K_PKTLOG_TYPE_INVALID; rx_buf_sz = 0; } if (log_type != ATH11K_PKTLOG_TYPE_INVALID) trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); memset(ppdu_info, 0, sizeof(*ppdu_info)); ppdu_info->peer_id = HAL_INVALID_PEERID; hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb); if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && pmon->mon_ppdu_status == DP_PPDU_STATUS_START && hal_status == HAL_TLV_STATUS_PPDU_DONE) { rx_mon_stats->status_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE; if (!ab->hw_params.full_monitor_mode) { ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi); pmon->mon_ppdu_status = DP_PPDU_STATUS_START; } } if (ppdu_info->peer_id == HAL_INVALID_PEERID || hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { dev_kfree_skb_any(skb); continue; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id); if (!peer || !peer->sta) { ath11k_dbg(ab, ATH11K_DBG_DATA, "failed to find the peer with peer_id %d\n", ppdu_info->peer_id); goto next_skb; } arsta = ath11k_sta_to_arsta(peer->sta); ath11k_dp_rx_update_peer_stats(arsta, ppdu_info); if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz); next_skb: spin_unlock_bh(&ab->base_lock); rcu_read_unlock(); dev_kfree_skb_any(skb); memset(ppdu_info, 0, sizeof(*ppdu_info)); ppdu_info->peer_id = HAL_INVALID_PEERID; } exit: return num_buffs_reaped; } static u32 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, void *ring_entry, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, struct hal_sw_mon_ring_entries *sw_mon_entries) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; struct sk_buff *msdu = NULL, *last = NULL; struct hal_sw_monitor_ring *sw_desc = ring_entry; struct hal_rx_msdu_list msdu_list; struct hal_rx_desc *rx_desc; struct ath11k_skb_rxcb *rxcb; void *rx_msdu_link_desc; void *p_buf_addr_info, *p_last_buf_addr_info; int buf_id, i = 0; u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset; u32 rx_bufs_used = 0, msdu_cnt = 0; u32 total_len = 0, frag_len = 0, sw_cookie; u16 num_msdus = 0; u8 rxdma_err, rbm; bool is_frag, is_first_msdu; bool drop_mpdu = false; ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries); sw_cookie = sw_mon_entries->mon_dst_sw_cookie; sw_mon_entries->end_of_ppdu = false; sw_mon_entries->drop_ppdu = false; p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info; msdu_cnt = sw_mon_entries->msdu_cnt; sw_mon_entries->end_of_ppdu = FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0); if (sw_mon_entries->end_of_ppdu) return rx_bufs_used; if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON, sw_desc->info0) == HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) { rxdma_err = FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE, sw_desc->info0); if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR || rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) { pmon->rx_mon_stats.dest_mpdu_drop++; drop_mpdu = true; } } is_frag = false; is_first_msdu 
= true; do { rx_msdu_link_desc = (u8 *)pmon->link_desc_banks[sw_cookie].vaddr + (sw_mon_entries->mon_dst_paddr - pmon->link_desc_banks[sw_cookie].paddr); ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, &num_msdus); for (i = 0; i < num_msdus; i++) { buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, msdu_list.sw_cookie[i]); spin_lock_bh(&rx_ring->idr_lock); msdu = idr_find(&rx_ring->bufs_idr, buf_id); if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon msdu_pop: invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); goto next_msdu; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { dma_unmap_single(ar->ab->dev, rxcb->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); rxcb->unmapped = 1; } if (drop_mpdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: i %d drop msdu %p *ppdu_id %x\n", i, msdu, sw_mon_entries->ppdu_id); dev_kfree_skb_any(msdu); msdu_cnt--; goto next_msdu; } rx_desc = (struct hal_rx_desc *)msdu->data; rx_pkt_offset = sizeof(struct hal_rx_desc); l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc); if (is_first_msdu) { if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) { drop_mpdu = true; dev_kfree_skb_any(msdu); msdu = NULL; goto next_msdu; } is_first_msdu = false; } ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], &is_frag, &total_len, &frag_len, &msdu_cnt); rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); if (!(*head_msdu)) *head_msdu = msdu; else if (last) last->next = msdu; last = msdu; next_msdu: rx_bufs_used++; } ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &sw_mon_entries->mon_dst_paddr, &sw_mon_entries->mon_dst_sw_cookie, &rbm, &p_buf_addr_info); if (ath11k_dp_rx_monitor_link_desc_return(ar, p_last_buf_addr_info, dp->mac_id)) ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: dp_rx_monitor_link_desc_return failed\n"); p_last_buf_addr_info = p_buf_addr_info; } while (sw_mon_entries->mon_dst_paddr && msdu_cnt); if (last) last->next = NULL; *tail_msdu = msdu; return rx_bufs_used; } static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp, struct dp_full_mon_mpdu *mon_mpdu, struct sk_buff *head, struct sk_buff *tail) { mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); if (!mon_mpdu) return -ENOMEM; list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list); mon_mpdu->head = head; mon_mpdu->tail = tail; return 0; } static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp, struct dp_full_mon_mpdu *mon_mpdu) { struct dp_full_mon_mpdu *tmp; struct sk_buff *tmp_msdu, *skb_next; if (list_empty(&dp->dp_full_mon_mpdu_list)) return; list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { list_del(&mon_mpdu->list); tmp_msdu = mon_mpdu->head; while (tmp_msdu) { skb_next = tmp_msdu->next; dev_kfree_skb_any(tmp_msdu); tmp_msdu = skb_next; } kfree(mon_mpdu); } } static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar, int mac_id, struct ath11k_mon_data *pmon, struct napi_struct *napi) { struct ath11k_pdev_mon_stats *rx_mon_stats; struct dp_full_mon_mpdu *tmp; struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu; struct sk_buff *head_msdu, *tail_msdu; struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; int ret = 0; rx_mon_stats = &pmon->rx_mon_stats; list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) { list_del(&mon_mpdu->list); head_msdu = mon_mpdu->head; tail_msdu = mon_mpdu->tail; if (head_msdu && tail_msdu) { ret =
ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu, &pmon->mon_ppdu_info, tail_msdu, napi); rx_mon_stats->dest_mpdu_done++; ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n"); } kfree(mon_mpdu); } return ret; } static int ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_sw_mon_ring_entries *sw_mon_entries; int quota = 0, work = 0, count; sw_mon_entries = &pmon->sw_mon_entries; while (pmon->hold_mon_dst_ring) { quota = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, 1); if (pmon->buf_state == DP_MON_STATUS_MATCH) { count = sw_mon_entries->status_buf_count; if (count > 1) { quota += ath11k_dp_rx_process_mon_status(ab, mac_id, napi, count); } ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id, pmon, napi); pmon->hold_mon_dst_ring = false; } else if (!pmon->mon_status_paddr || pmon->buf_state == DP_MON_STATUS_LEAD) { sw_mon_entries->drop_ppdu = true; pmon->hold_mon_dst_ring = false; } if (!quota) break; work += quota; } if (sw_mon_entries->drop_ppdu) ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu); return work; } static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ab->pdevs[mac_id].ar; struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_sw_mon_ring_entries *sw_mon_entries; struct ath11k_pdev_mon_stats *rx_mon_stats; struct sk_buff *head_msdu, *tail_msdu; struct hal_srng *mon_dst_srng; void *ring_entry; u32 rx_bufs_used = 0, mpdu_rx_bufs_used; int quota = 0, ret; bool break_dst_ring = false; spin_lock_bh(&pmon->mon_lock); sw_mon_entries = &pmon->sw_mon_entries; rx_mon_stats = &pmon->rx_mon_stats; if (pmon->hold_mon_dst_ring) { spin_unlock_bh(&pmon->mon_lock); goto reap_status_ring; } mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { head_msdu = NULL; tail_msdu = NULL; mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry, &head_msdu, &tail_msdu, sw_mon_entries); rx_bufs_used += mpdu_rx_bufs_used; if (!sw_mon_entries->end_of_ppdu) { if (head_msdu) { ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp, pmon->mon_mpdu, head_msdu, tail_msdu); if (ret) break_dst_ring = true; } goto next_entry; } else { if (!sw_mon_entries->ppdu_id && !sw_mon_entries->mon_status_paddr) { break_dst_ring = true; goto next_entry; } } rx_mon_stats->dest_ppdu_done++; pmon->mon_ppdu_status = DP_PPDU_STATUS_START; pmon->buf_state = DP_MON_STATUS_LAG; pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr; pmon->hold_mon_dst_ring = true; next_entry: ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, mon_dst_srng); if (break_dst_ring) break; } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); if (rx_bufs_used) { ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, &dp->rxdma_mon_buf_ring, rx_bufs_used, HAL_RX_BUF_RBM_SW3_BM); } reap_status_ring: quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id, napi, budget); return quota; } int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, struct napi_struct *napi, int budget) { struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id); int ret = 0; if 
(test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) && ab->hw_params.full_monitor_mode) ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget); else ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); return ret; } static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; pmon->mon_ppdu_status = DP_PPDU_STATUS_START; memset(&pmon->rx_mon_stats, 0, sizeof(pmon->rx_mon_stats)); return 0; } int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; struct hal_srng *mon_desc_srng = NULL; struct dp_srng *dp_srng; int ret = 0; u32 n_link_desc = 0; ret = ath11k_dp_rx_pdev_mon_status_attach(ar); if (ret) { ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); return ret; } /* if rxdma1_enable is false, no need to setup * rxdma_mon_desc_ring. */ if (!ar->ab->hw_params.rxdma1_enable) return 0; dp_srng = &dp->rxdma_mon_desc_ring; n_link_desc = dp_srng->size / ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC); mon_desc_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, HAL_RXDMA_MONITOR_DESC, mon_desc_srng, n_link_desc); if (ret) { ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); return ret; } pmon->mon_last_linkdesc_paddr = 0; pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; spin_lock_init(&pmon->mon_lock); return 0; } static int ath11k_dp_mon_link_free(struct ath11k *ar) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_mon_data *pmon = &dp->mon_data; ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, HAL_RXDMA_MONITOR_DESC, &dp->rxdma_mon_desc_ring); return 0; } int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) { ath11k_dp_mon_link_free(ar); return 0; } int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) { /* start reap timer */ mod_timer(&ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); return 0; } int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) { int ret; if (stop_timer) timer_delete_sync(&ab->mon_reap_timer); /* reap all the monitor related rings */ ret = ath11k_dp_purge_mon_ring(ab); if (ret) { ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); return ret; } return 0; } diff --git a/dp_tx.c b/dp_tx.c index 8522c67baabf..562aba66582f 100644 --- a/dp_tx.c +++ b/dp_tx.c @@ -1,1305 +1,1306 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include "core.h" #include "dp_tx.h" #include "debug.h" #include "debugfs_sta.h" #include "hw.h" #include "peer.h" #include "mac.h" static enum hal_tcl_encap_type ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath11k_base *ab = arvif->ar->ab; if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) return HAL_TCL_ENCAP_TYPE_RAW; if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) return HAL_TCL_ENCAP_TYPE_ETHERNET; return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI; } static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; u8 *qos_ctl; if (!ieee80211_is_data_qos(hdr->frame_control)) return; qos_ctl = ieee80211_get_qos_ctl(hdr); memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data, (void *)qos_ctl - (void *)skb->data); skb_pull(skb, IEEE80211_QOS_CTL_LEN); hdr = (void *)skb->data; hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); } static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb); if (cb->flags & ATH11K_SKB_HW_80211_ENCAP) return skb->priority & IEEE80211_QOS_CTL_TID_MASK; else if (!ieee80211_is_data_qos(hdr->frame_control)) return HAL_DESC_REO_NON_QOS_TID; else return skb->priority & IEEE80211_QOS_CTL_TID_MASK; } enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: return HAL_ENCRYPT_TYPE_WEP_40; case WLAN_CIPHER_SUITE_WEP104: return HAL_ENCRYPT_TYPE_WEP_104; case WLAN_CIPHER_SUITE_TKIP: return HAL_ENCRYPT_TYPE_TKIP_MIC; case WLAN_CIPHER_SUITE_CCMP: return HAL_ENCRYPT_TYPE_CCMP_128; case WLAN_CIPHER_SUITE_CCMP_256: return HAL_ENCRYPT_TYPE_CCMP_256; case WLAN_CIPHER_SUITE_GCMP: return HAL_ENCRYPT_TYPE_GCMP_128; case WLAN_CIPHER_SUITE_GCMP_256: return HAL_ENCRYPT_TYPE_AES_GCMP_256; default: return HAL_ENCRYPT_TYPE_OPEN; } } int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, struct ath11k_sta *arsta, struct sk_buff *skb) { struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; - struct hal_tx_info ti = {0}; + struct hal_tx_info ti = {}; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; struct dp_tx_ring *tx_ring; void *hal_tcl_desc; u8 pool_id; u8 hal_ring_id; int ret; u32 ring_selector = 0; u8 ring_map = 0; bool tcl_ring_retry; if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) return -ESHUTDOWN; if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && !ieee80211_is_data(hdr->frame_control))) return -EOPNOTSUPP; pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1); ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb); tcl_ring_sel: tcl_ring_retry = false; ti.ring_id = ring_selector % ab->hw_params.max_tx_ring; ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id; ring_map |= BIT(ti.ring_id); tx_ring = &dp->tx_ring[ti.ring_id]; spin_lock_bh(&tx_ring->tx_idr_lock); ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0, DP_TX_IDR_SIZE - 1, GFP_ATOMIC); spin_unlock_bh(&tx_ring->tx_idr_lock); if (unlikely(ret < 0)) { if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) || !ab->hw_params.tcl_ring_retry) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); return -ENOSPC; } /* Check if the next ring is available */ ring_selector++; goto tcl_ring_sel; } ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, 
ar->pdev_idx) | FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) | FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id); ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb); if (ieee80211_has_a4(hdr->frame_control) && is_multicast_ether_addr(hdr->addr3) && arsta && arsta->use_4addr_set) { ti.meta_data_flags = arsta->tcl_metadata; ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1); } else { ti.meta_data_flags = arvif->tcl_metadata; } if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) { if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { ti.encrypt_type = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); if (ieee80211_has_protected(hdr->frame_control)) skb_put(skb, IEEE80211_CCMP_MIC_LEN); } else { ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; } } ti.addr_search_flags = arvif->hal_addr_search_flags; ti.search_type = arvif->search_type; ti.type = HAL_TCL_DESC_TYPE_BUFFER; ti.pkt_offset = 0; ti.lmac_id = ar->lmac_id; ti.bss_ast_hash = arvif->ast_hash; ti.bss_ast_idx = arvif->ast_idx; ti.dscp_tid_tbl_idx = 0; if (likely(skb->ip_summed == CHECKSUM_PARTIAL && ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) { ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) | FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1); } if (ieee80211_vif_is_mesh(arvif->vif)) ti.enable_mesh = true; ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1); ti.tid = ath11k_dp_tx_get_tid(skb); switch (ti.encap_type) { case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI: ath11k_dp_tx_encap_nwifi(skb); break; case HAL_TCL_ENCAP_TYPE_RAW: if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) { ret = -EINVAL; goto fail_remove_idr; } break; case HAL_TCL_ENCAP_TYPE_ETHERNET: /* no need to encap */ break; case HAL_TCL_ENCAP_TYPE_802_3: default: /* TODO: Take care of other encap modes as well */ ret = -EINVAL; atomic_inc(&ab->soc_stats.tx_err.misc_fail); goto fail_remove_idr; } ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) { atomic_inc(&ab->soc_stats.tx_err.misc_fail); ath11k_warn(ab, "failed to DMA map data Tx buffer\n"); ret = -ENOMEM; goto fail_remove_idr; } ti.data_len = skb->len; skb_cb->paddr = ti.paddr; skb_cb->vif = arvif->vif; skb_cb->ar = ar; hal_ring_id = tx_ring->tcl_data_ring.ring_id; tcl_ring = &ab->hal.srng_list[hal_ring_id]; spin_lock_bh(&tcl_ring->lock); ath11k_hal_srng_access_begin(ab, tcl_ring); hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring); if (unlikely(!hal_tcl_desc)) { /* NOTE: It is highly unlikely we'll be running out of tcl_ring * desc because the desc is directly enqueued onto hw queue. */ ath11k_hal_srng_access_end(ab, tcl_ring); ab->soc_stats.tx_err.desc_na[ti.ring_id]++; spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; /* Checking for available tcl descriptors in another ring in * case of failure due to full tcl ring now, is better than * checking this ring earlier for each pkt tx. * Restart ring selection if some rings are not checked yet. 
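 * For example, with max_tx_ring = 3, each attempt sets BIT(ti.ring_id) in ring_map; the retry path keeps bumping ring_selector until ring_map covers all rings (0x7), after which the frame fails with -ENOMEM.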
*/ if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) && ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) { tcl_ring_retry = true; ring_selector++; } goto fail_unmap_dma; } ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc + sizeof(struct hal_tlv_hdr), &ti); ath11k_hal_srng_access_end(ab, tcl_ring); ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]); spin_unlock_bh(&tcl_ring->lock); ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ", skb->data, skb->len); atomic_inc(&ar->dp.num_tx_pending); return 0; fail_unmap_dma: dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); fail_remove_idr: spin_lock_bh(&tx_ring->tx_idr_lock); idr_remove(&tx_ring->txbuf_idr, FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id)); spin_unlock_bh(&tx_ring->tx_idr_lock); if (tcl_ring_retry) goto tcl_ring_sel; return ret; } static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id, int msdu_id, struct dp_tx_ring *tx_ring) { struct ath11k *ar; struct sk_buff *msdu; struct ath11k_skb_cb *skb_cb; spin_lock(&tx_ring->tx_idr_lock); msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); spin_unlock(&tx_ring->tx_idr_lock); if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); return; } skb_cb = ATH11K_SKB_CB(msdu); dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); dev_kfree_skb_any(msdu); ar = ab->pdevs[mac_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); } static void ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, struct dp_tx_ring *tx_ring, struct ath11k_dp_htt_wbm_tx_status *ts) { - struct ieee80211_tx_status status = { 0 }; + struct ieee80211_tx_status status = {}; struct sk_buff *msdu; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; struct ath11k *ar; struct ath11k_peer *peer; spin_lock(&tx_ring->tx_idr_lock); msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); spin_unlock(&tx_ring->tx_idr_lock); if (unlikely(!msdu)) { ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n", ts->msdu_id); return; } skb_cb = ATH11K_SKB_CB(msdu); info = IEEE80211_SKB_CB(msdu); ar = skb_cb->ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (!skb_cb->vif) { ieee80211_free_txskb(ar->hw, msdu); return; } memset(&info->status, 0, sizeof(info->status)); if (ts->acked) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; info->status.ack_signal = ts->ack_rssi; if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ab->wmi_ab.svc_map)) info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } else { info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; } } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, ts->peer_id); if (!peer || !peer->sta) { ath11k_dbg(ab, ATH11K_DBG_DATA, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); ieee80211_free_txskb(ar->hw, msdu); return; } spin_unlock_bh(&ab->base_lock); status.sta = peer->sta; status.info = info; status.skb = msdu; ieee80211_tx_status_ext(ar->hw, &status); } static void ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab, void *desc, u8 mac_id, u32 msdu_id, struct dp_tx_ring *tx_ring) { struct htt_tx_wbm_completion *status_desc; - struct ath11k_dp_htt_wbm_tx_status ts = {0}; + struct ath11k_dp_htt_wbm_tx_status ts = {}; enum 
hal_wbm_htt_tx_comp_status wbm_status; status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET; wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS, status_desc->info0); switch (wbm_status) { case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); ts.msdu_id = msdu_id; ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI, status_desc->info1); if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2)) ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID, status_desc->info2); else ts.peer_id = HTT_INVALID_PEER_ID; ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts); break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring); break; case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY: /* This event is to be handled only when the driver decides to * use WDS offload functionality. */ break; default: ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status); break; } } static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar, struct sk_buff *msdu, struct hal_tx_status *ts) { struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; if (ts->try_cnt > 1) { peer_stats->retry_pkts += ts->try_cnt - 1; peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len; if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) { peer_stats->failed_pkts += 1; peer_stats->failed_bytes += msdu->len; } } } void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts) { struct ath11k_base *ab = ar->ab; struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; enum hal_tx_rate_stats_pkt_type pkt_type; enum hal_tx_rate_stats_sgi sgi; enum hal_tx_rate_stats_bw bw; struct ath11k_peer *peer; struct ath11k_sta *arsta; struct ieee80211_sta *sta; u16 rate, ru_tones; u8 mcs, rate_idx = 0, ofdma; int ret; spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, ts->peer_id); if (!peer || !peer->sta) { ath11k_dbg(ab, ATH11K_DBG_DP_TX, "failed to find the peer by id %u\n", ts->peer_id); goto err_out; } sta = peer->sta; arsta = ath11k_sta_to_arsta(sta); memset(&arsta->txrate, 0, sizeof(arsta->txrate)); pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE, ts->rate_stats); mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, ts->rate_stats); sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI, ts->rate_stats); bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats); ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats); ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats); /* Prefer the real NSS value arsta->last_txrate.nss; if it is * invalid, fall back to the NSS value negotiated during assoc.
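 * (arsta->peer_nss is recorded from the peer's capabilities at association time, so it is a usable fallback until tx completions have populated last_txrate.)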
*/ if (arsta->last_txrate.nss) arsta->txrate.nss = arsta->last_txrate.nss; else arsta->txrate.nss = arsta->peer_nss; if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A || pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) { ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, pkt_type, &rate_idx, &rate); if (ret < 0) goto err_out; arsta->txrate.legacy = rate; } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) { if (mcs > 7) { ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs); goto err_out; } if (arsta->txrate.nss != 0) arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1); arsta->txrate.flags = RATE_INFO_FLAGS_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) { if (mcs > 9) { ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs); goto err_out; } arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { if (mcs > 11) { ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs); goto err_out; } arsta->txrate.mcs = mcs; arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS; arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi); } arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { arsta->txrate.bw = RATE_INFO_BW_HE_RU; arsta->txrate.he_ru_alloc = ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); } if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); err_out: spin_unlock_bh(&ab->base_lock); } static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, struct sk_buff *msdu, struct hal_tx_status *ts) { - struct ieee80211_tx_status status = { 0 }; - struct ieee80211_rate_status status_rate = { 0 }; + struct ieee80211_tx_status status = {}; + struct ieee80211_rate_status status_rate = {}; struct ath11k_base *ab = ar->ab; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; struct ath11k_peer *peer; struct ath11k_sta *arsta; struct rate_info rate; if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) { /* Must not happen */ return; } skb_cb = ATH11K_SKB_CB(msdu); dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) { ieee80211_free_txskb(ar->hw, msdu); return; } if (unlikely(!skb_cb->vif)) { ieee80211_free_txskb(ar->hw, msdu); return; } info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); /* skip tx rate update from ieee80211_status*/ info->status.rates[0].idx = -1; if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; info->status.ack_signal = ts->ack_rssi; if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ab->wmi_ab.svc_map)) info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX && (info->flags & IEEE80211_TX_CTL_NO_ACK)) info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) || ab->hw_params.single_pdev_only) { if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) { if (ar->last_ppdu_id == 0) { ar->last_ppdu_id = ts->ppdu_id; } else if (ar->last_ppdu_id == ts->ppdu_id || ar->cached_ppdu_id == ar->last_ppdu_id) { ar->cached_ppdu_id = ar->last_ppdu_id; ar->cached_stats.is_ampdu = true; ath11k_dp_tx_update_txcompl(ar, 
ts); memset(&ar->cached_stats, 0, sizeof(struct ath11k_per_peer_tx_stats)); } else { ar->cached_stats.is_ampdu = false; ath11k_dp_tx_update_txcompl(ar, ts); memset(&ar->cached_stats, 0, sizeof(struct ath11k_per_peer_tx_stats)); } ar->last_ppdu_id = ts->ppdu_id; } ath11k_dp_tx_cache_peer_stats(ar, msdu, ts); } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_id(ab, ts->peer_id); if (!peer || !peer->sta) { ath11k_dbg(ab, ATH11K_DBG_DATA, "dp_tx: failed to find the peer with peer_id %d\n", ts->peer_id); spin_unlock_bh(&ab->base_lock); ieee80211_free_txskb(ar->hw, msdu); return; } arsta = ath11k_sta_to_arsta(peer->sta); status.sta = peer->sta; status.skb = msdu; status.info = info; rate = arsta->last_txrate; status_rate.rate_idx = rate; status_rate.try_count = 1; status.rates = &status_rate; status.n_rates = 1; spin_unlock_bh(&ab->base_lock); ieee80211_tx_status_ext(ar->hw, &status); } static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab, struct hal_wbm_release_ring *desc, struct hal_tx_status *ts) { ts->buf_rel_source = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0); if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) return; if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) return; ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, desc->info0); ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER, desc->info1); ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT, desc->info1); ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI, desc->info2); if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU) ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU; ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3); ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3); if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID) ts->rate_stats = desc->rate_stats.info0; else ts->rate_stats = 0; } void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) { struct ath11k *ar; struct ath11k_dp *dp = &ab->dp; int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id; struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id]; struct sk_buff *msdu; - struct hal_tx_status ts = { 0 }; + struct hal_tx_status ts = {}; struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id]; u32 *desc; u32 msdu_id; u8 mac_id; spin_lock_bh(&status_ring->lock); ath11k_hal_srng_access_begin(ab, status_ring); while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) && (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) { memcpy(&tx_ring->tx_status[tx_ring->tx_status_head], desc, sizeof(struct hal_wbm_release_ring)); tx_ring->tx_status_head = ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head); } if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) && (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail))) { /* TODO: Process pending tx_status messages when kfifo_is_full() */ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n"); } ath11k_hal_srng_access_end(ab, status_ring); spin_unlock_bh(&status_ring->lock); while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) { struct hal_wbm_release_ring *tx_status; u32 desc_id; tx_ring->tx_status_tail = ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail); tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail]; ath11k_dp_tx_status_parse(ab, tx_status, &ts); desc_id = 
FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, tx_status->buf_addr_info.info1); mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id); msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id); if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) { ath11k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status, mac_id, msdu_id, tx_ring); continue; } spin_lock(&tx_ring->tx_idr_lock); msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id); if (unlikely(!msdu)) { ath11k_warn(ab, "tx completion for unknown msdu_id %d\n", msdu_id); spin_unlock(&tx_ring->tx_idr_lock); continue; } spin_unlock(&tx_ring->tx_idr_lock); ar = ab->pdevs[mac_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); ath11k_dp_tx_complete_msdu(ar, msdu, &ts); } } int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid, enum hal_reo_cmd_type type, struct ath11k_hal_reo_cmd *cmd, void (*cb)(struct ath11k_dp *, void *, enum hal_reo_cmd_status)) { struct ath11k_dp *dp = &ab->dp; struct dp_reo_cmd *dp_cmd; struct hal_srng *cmd_ring; int cmd_num; if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) return -ESHUTDOWN; cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd); /* cmd_num should start from 1; on failure return the error code */ if (cmd_num < 0) return cmd_num; /* reo cmd ring descriptors have cmd_num starting from 1 */ if (cmd_num == 0) return -EINVAL; if (!cb) return 0; /* Can this be optimized so that we keep the pending command list only * for tid delete command to free up the resource on the command status * indication? */ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC); if (!dp_cmd) return -ENOMEM; memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid)); dp_cmd->cmd_num = cmd_num; dp_cmd->handler = cb; spin_lock_bh(&dp->reo_cmd_lock); list_add_tail(&dp_cmd->list, &dp->reo_cmd_list); spin_unlock_bh(&dp->reo_cmd_lock); return 0; } static int ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab, int mac_id, u32 ring_id, enum hal_ring_type ring_type, enum htt_srng_ring_type *htt_ring_type, enum htt_srng_ring_id *htt_ring_id) { int lmac_ring_id_offset = 0; int ret = 0; switch (ring_type) { case HAL_RXDMA_BUF: lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC; /* for QCA6390, the host fills rx buffers to the fw and the fw fills * them into the rxbuf ring of each rxdma */ if (!ab->hw_params.rx_mac_buf_ring) { if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF + lmac_ring_id_offset) || ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF + lmac_ring_id_offset))) { ret = -EINVAL; } *htt_ring_id = HTT_RXDMA_HOST_BUF_RING; *htt_ring_type = HTT_SW_TO_HW_RING; } else { if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) { *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING; *htt_ring_type = HTT_SW_TO_SW_RING; } else { *htt_ring_id = HTT_RXDMA_HOST_BUF_RING; *htt_ring_type = HTT_SW_TO_HW_RING; } } break; case HAL_RXDMA_DST: *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING; *htt_ring_type = HTT_HW_TO_SW_RING; break; case HAL_RXDMA_MONITOR_BUF: *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; case HAL_RXDMA_MONITOR_STATUS: *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; case HAL_RXDMA_MONITOR_DST: *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; *htt_ring_type = HTT_HW_TO_SW_RING; break; case HAL_RXDMA_MONITOR_DESC: *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; default: ath11k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type); ret =
-EINVAL; } return ret; } int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, int mac_id, enum hal_ring_type ring_type) { struct htt_srng_setup_cmd *cmd; struct hal_srng *srng = &ab->hal.srng_list[ring_id]; struct hal_srng_params params; struct sk_buff *skb; u32 ring_entry_sz; int len = sizeof(*cmd); dma_addr_t hp_addr, tp_addr; enum htt_srng_ring_type htt_ring_type; enum htt_srng_ring_id htt_ring_id; int ret; skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; memset(&params, 0, sizeof(params)); ath11k_hal_srng_get_params(ab, srng, &params); hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng); tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng); ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, ring_type, &htt_ring_type, &htt_ring_id); if (ret) goto err_free; skb_put(skb, len); cmd = (struct htt_srng_setup_cmd *)skb->data; cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE, HTT_H2T_MSG_TYPE_SRING_SETUP); if (htt_ring_type == HTT_SW_TO_HW_RING || htt_ring_type == HTT_HW_TO_SW_RING) cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, DP_SW2HW_MACID(mac_id)); else cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, mac_id); cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE, htt_ring_type); cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id); cmd->ring_base_addr_lo = params.ring_base_paddr & HAL_ADDR_LSB_REG_MASK; cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT; ret = ath11k_hal_srng_get_entrysize(ab, ring_type); if (ret < 0) goto err_free; ring_entry_sz = ret; ring_entry_sz >>= 2; cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE, ring_entry_sz); cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE, params.num_entries * ring_entry_sz); cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP, !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP)); cmd->info1 |= FIELD_PREP( HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP, !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)); cmd->info1 |= FIELD_PREP( HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP, !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)); if (htt_ring_type == HTT_SW_TO_HW_RING) cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS; cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK; cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >> HAL_ADDR_MSB_REG_SHIFT; cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK; cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >> HAL_ADDR_MSB_REG_SHIFT; cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr); cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr); cmd->msi_data = params.msi_data; cmd->intr_info = FIELD_PREP( HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH, params.intr_batch_cntr_thres_entries * ring_entry_sz); cmd->intr_info |= FIELD_PREP( HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH, params.intr_timer_thres_us >> 3); cmd->info2 = 0; if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) { cmd->info2 = FIELD_PREP( HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH, params.low_threshold); } ath11k_dbg(ab, ATH11K_DBG_DP_TX, "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n", cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi, cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2); ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free; return 0; err_free: dev_kfree_skb_any(skb); return ret; } #define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ) int
ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab) { struct ath11k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ver_req_cmd *cmd; int len = sizeof(*cmd); int ret; init_completion(&dp->htt_tgt_version_received); skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_ver_req_cmd *)skb->data; cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID, HTT_H2T_MSG_TYPE_VERSION_REQ); ret = ath11k_htc_send(&ab->htc, dp->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } ret = wait_for_completion_timeout(&dp->htt_tgt_version_received, HTT_TARGET_VERSION_TIMEOUT_HZ); if (ret == 0) { ath11k_warn(ab, "htt target version request timed out\n"); return -ETIMEDOUT; } if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) { ath11k_err(ab, "unsupported htt major version %d, supported version is %d\n", dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); return -EOPNOTSUPP; } return 0; } int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) { struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ppdu_stats_cfg_cmd *cmd; int len = sizeof(*cmd); u8 pdev_mask; int ret; int i; for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data; cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); pdev_mask = 1 << (ar->pdev_idx + i); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); ret = ath11k_htc_send(&ab->htc, dp->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } } return 0; } int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id, int mac_id, enum hal_ring_type ring_type, int rx_buf_size, struct htt_rx_ring_tlv_filter *tlv_filter) { struct htt_rx_ring_selection_cfg_cmd *cmd; struct hal_srng *srng = &ab->hal.srng_list[ring_id]; struct hal_srng_params params; struct sk_buff *skb; int len = sizeof(*cmd); enum htt_srng_ring_type htt_ring_type; enum htt_srng_ring_id htt_ring_id; int ret; skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; memset(&params, 0, sizeof(params)); ath11k_hal_srng_get_params(ab, srng, &params); ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, ring_type, &htt_ring_type, &htt_ring_id); if (ret) goto err_free; skb_put(skb, len); cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data; cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG); if (htt_ring_type == HTT_SW_TO_HW_RING || htt_ring_type == HTT_HW_TO_SW_RING) cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID, DP_SW2HW_MACID(mac_id)); else cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID, mac_id); cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID, htt_ring_id); cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS, !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP)); cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS, !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)); cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE, rx_buf_size); cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0; cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1; cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2; cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3; cmd->rx_filter_tlv = tlv_filter->rx_filter; ret =
ath11k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free; return 0; err_free: dev_kfree_skb_any(skb); return ret; } int ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type, struct htt_ext_stats_cfg_params *cfg_params, u64 cookie) { struct ath11k_base *ab = ar->ab; struct ath11k_dp *dp = &ab->dp; struct sk_buff *skb; struct htt_ext_stats_cfg_cmd *cmd; u32 pdev_id; int len = sizeof(*cmd); int ret; skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_ext_stats_cfg_cmd *)skb->data; memset(cmd, 0, sizeof(*cmd)); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; if (ab->hw_params.single_pdev_only) pdev_id = ath11k_mac_get_target_pdev_id(ar); else pdev_id = ar->pdev->pdev_id; cmd->hdr.pdev_mask = 1 << pdev_id; cmd->hdr.stats_type = type; cmd->cfg_param0 = cfg_params->cfg0; cmd->cfg_param1 = cfg_params->cfg1; cmd->cfg_param2 = cfg_params->cfg2; cmd->cfg_param3 = cfg_params->cfg3; cmd->cookie_lsb = lower_32_bits(cookie); cmd->cookie_msb = upper_32_bits(cookie); ret = ath11k_htc_send(&ab->htc, dp->eid, skb); if (ret) { ath11k_warn(ab, "failed to send htt type stats request: %d", ret); dev_kfree_skb_any(skb); return ret; } return 0; } int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) { struct ath11k_pdev_dp *dp = &ar->dp; struct ath11k_base *ab = ar->ab; - struct htt_rx_ring_tlv_filter tlv_filter = {0}; + struct htt_rx_ring_tlv_filter tlv_filter = {}; int ret = 0, ring_id = 0, i; if (ab->hw_params.full_monitor_mode) { ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab, dp->mac_id, !reset); if (ret < 0) { ath11k_err(ab, "failed to setup full monitor %d\n", ret); return ret; } } ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; if (!reset) { tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING; tlv_filter.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 | HTT_RX_MON_MO_MGMT_FILTER_FLAGS0; tlv_filter.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 | HTT_RX_MON_MO_MGMT_FILTER_FLAGS1; tlv_filter.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2 | HTT_RX_MON_MO_CTRL_FILTER_FLASG2; tlv_filter.pkt_filter_flags3 = HTT_RX_MON_FP_CTRL_FILTER_FLASG3 | HTT_RX_MON_MO_CTRL_FILTER_FLASG3 | HTT_RX_MON_FP_DATA_FILTER_FLASG3 | HTT_RX_MON_MO_DATA_FILTER_FLASG3; } if (ab->hw_params.rxdma1_enable) { ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id, HAL_RXDMA_MONITOR_BUF, DP_RXDMA_REFILL_RING_SIZE, &tlv_filter); } else if (!reset) { /* set in monitor mode only */ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id + i, HAL_RXDMA_BUF, 1024, &tlv_filter); } } if (ret) return ret; for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; if (!reset) { tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING; } else { tlv_filter = ath11k_mac_mon_status_filter_default; if (ath11k_debugfs_is_extd_rx_stats_enabled(ar)) tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); } ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, dp->mac_id + i, HAL_RXDMA_MONITOR_STATUS, DP_RXDMA_REFILL_RING_SIZE, &tlv_filter); } if (!ar->ab->hw_params.rxdma1_enable) mod_timer(&ar->ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); return ret; } int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id, bool config) { struct htt_rx_full_monitor_mode_cfg_cmd *cmd; struct sk_buff *skb; int 
ret, len = sizeof(*cmd); skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data; memset(cmd, 0, sizeof(*cmd)); cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE); cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id); cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE | FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING, HTT_RX_MON_RING_SW); if (config) { cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END | HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END; } ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); if (ret) goto err_free; return 0; err_free: dev_kfree_skb_any(skb); return ret; } diff --git a/fw.c b/fw.c index cbbd8e57119f..07d775a7b528 100644 --- a/fw.c +++ b/fw.c @@ -1,169 +1,171 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include #include "core.h" #include "debug.h" static int ath11k_fw_request_firmware_api_n(struct ath11k_base *ab, const char *name) { size_t magic_len, len, ie_len; int ie_id, i, index, bit, ret; struct ath11k_fw_ie *hdr; const u8 *data; __le32 *timestamp; ab->fw.fw = ath11k_core_firmware_request(ab, name); if (IS_ERR(ab->fw.fw)) { ret = PTR_ERR(ab->fw.fw); ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to load %s: %d\n", name, ret); ab->fw.fw = NULL; return ret; } data = ab->fw.fw->data; len = ab->fw.fw->size; /* magic also includes the null byte, check that as well */ magic_len = strlen(ATH11K_FIRMWARE_MAGIC) + 1; if (len < magic_len) { ath11k_err(ab, "firmware image too small to contain magic: %zu\n", len); ret = -EINVAL; goto err; } if (memcmp(data, ATH11K_FIRMWARE_MAGIC, magic_len) != 0) { ath11k_err(ab, "Invalid firmware magic\n"); ret = -EINVAL; goto err; } /* jump over the padding */ magic_len = ALIGN(magic_len, 4); /* make sure there's space for padding */ if (magic_len > len) { ath11k_err(ab, "No space for padding after magic\n"); ret = -EINVAL; goto err; } len -= magic_len; data += magic_len; /* loop elements */ while (len > sizeof(struct ath11k_fw_ie)) { hdr = (struct ath11k_fw_ie *)data; ie_id = le32_to_cpu(hdr->id); ie_len = le32_to_cpu(hdr->len); len -= sizeof(*hdr); data += sizeof(*hdr); if (len < ie_len) { ath11k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); ret = -EINVAL; goto err; } switch (ie_id) { case ATH11K_FW_IE_TIMESTAMP: if (ie_len != sizeof(u32)) break; timestamp = (__le32 *)data; ath11k_dbg(ab, ATH11K_DBG_BOOT, "found fw timestamp %d\n", le32_to_cpup(timestamp)); break; case ATH11K_FW_IE_FEATURES: ath11k_dbg(ab, ATH11K_DBG_BOOT, "found firmware features ie (%zd B)\n", ie_len); for (i = 0; i < ATH11K_FW_FEATURE_COUNT; i++) { index = i / 8; bit = i % 8; if (index == ie_len) break; if (data[index] & (1 << bit)) __set_bit(i, ab->fw.fw_features); } ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "features", "", ab->fw.fw_features, sizeof(ab->fw.fw_features)); break; case ATH11K_FW_IE_AMSS_IMAGE: ath11k_dbg(ab, ATH11K_DBG_BOOT, "found fw image ie (%zd B)\n", ie_len); ab->fw.amss_data = data; ab->fw.amss_len = ie_len; break; case ATH11K_FW_IE_M3_IMAGE: ath11k_dbg(ab, ATH11K_DBG_BOOT, "found m3 image ie (%zd B)\n", ie_len); ab->fw.m3_data = data; ab->fw.m3_len = ie_len; break; default: ath11k_warn(ab, "Unknown FW IE: %u\n", ie_id); break; } /* jump over the padding */ ie_len = ALIGN(ie_len, 
4); /* make sure there's space for padding */ if (ie_len > len) break; len -= ie_len; data += ie_len; } return 0; err: release_firmware(ab->fw.fw); ab->fw.fw = NULL; return ret; } int ath11k_fw_pre_init(struct ath11k_base *ab) { int ret; ret = ath11k_fw_request_firmware_api_n(ab, ATH11K_FW_API2_FILE); if (ret == 0) { ab->fw.api_version = 2; goto out; } ab->fw.api_version = 1; out: ath11k_dbg(ab, ATH11K_DBG_BOOT, "using fw api %d\n", ab->fw.api_version); return 0; } void ath11k_fw_destroy(struct ath11k_base *ab) { release_firmware(ab->fw.fw); } EXPORT_SYMBOL(ath11k_fw_destroy); diff --git a/hal.c b/hal.c index 8cb1505a5a0c..0c3ce7509ab8 100644 --- a/hal.c +++ b/hal.c @@ -1,1408 +1,1439 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include +#include #include "hal_tx.h" #include "debug.h" #include "hal_desc.h" #include "hif.h" static const struct hal_srng_config hw_srng_config_template[] = { /* TODO: max_rings can be populated by querying HW capabilities */ { /* REO_DST */ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1, .max_rings = 4, .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE, }, { /* REO_EXCEPTION */ /* Designating REO2TCL ring as exception ring. This ring is * similar to other REO2SW rings though it is named as REO2TCL. * Any of the REO2SW rings can be used as exception ring. */ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL, .max_rings = 1, .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE, }, { /* REO_REINJECT */ .start_ring_id = HAL_SRNG_RING_ID_SW2REO, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE, }, { /* REO_CMD */ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_reo_get_queue_stats)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE, }, { /* REO_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* TCL_DATA */ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1, .max_rings = 3, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_data_cmd)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE, }, { /* TCL_CMD */ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_gse_cmd)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE, }, { /* TCL_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS, .max_rings = 1, .entry_size = (sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_status_ring)) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* CE_SRC */ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC, .max_rings = 12, .entry_size = sizeof(struct
hal_ce_srng_src_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST */ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST, .max_rings = 12, .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE, }, { /* CE_DST_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS, .max_rings = 12, .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE, }, { /* WBM_IDLE_LINK */ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE, }, { /* SW2WBM_RELEASE */ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* WBM2SW_RELEASE */ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE, .max_rings = 5, .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, .lmac_ring = false, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE, }, { /* RXDMA_BUF */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF, .max_rings = 2, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_DST */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_BUF */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_STATUS */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_DST */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, .max_rings = 1, .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_DST, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA_MONITOR_DESC */ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, .max_rings = 1, .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, { /* RXDMA DIR BUF */ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF, .max_rings = 1, .entry_size = 8 >> 2, /* TODO: Define the struct */ .lmac_ring = true, .ring_dir = HAL_SRNG_DIR_SRC, .max_size = HAL_RXDMA_RING_MAX_SIZE, }, }; static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr, GFP_KERNEL); if (!hal->rdp.vaddr) return -ENOMEM; return 0; } static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; if (!hal->rdp.vaddr) return; size = sizeof(u32) * HAL_SRNG_RING_ID_MAX; dma_free_coherent(ab->dev, size, 
hal->rdp.vaddr, hal->rdp.paddr); hal->rdp.vaddr = NULL; } static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS; hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr, GFP_KERNEL); if (!hal->wrp.vaddr) return -ENOMEM; return 0; } static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; size_t size; if (!hal->wrp.vaddr) return; size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS; dma_free_coherent(ab->dev, size, hal->wrp.vaddr, hal->wrp.paddr); hal->wrp.vaddr = NULL; } static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab, struct hal_srng *srng, int ring_num) { struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST]; u32 addr; u32 val; addr = HAL_CE_DST_RING_CTRL + srng_config->reg_start[HAL_SRNG_REG_GRP_R0] + ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0]; val = ath11k_hif_read32(ab, addr); val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN; val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN, srng->u.dst_ring.max_buffer_length); ath11k_hif_write32(ab, addr, val); } static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, struct hal_srng *srng) { struct ath11k_hal *hal = &ab->hal; u32 val; u64 hp_addr; u32 reg_base; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab), srng->msi_addr); val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab), srng->msi_data); } ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val); val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) | FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val); /* interrupt setup */ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD, (srng->intr_timer_thres_us >> 3)); val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab), val); hp_addr = hal->rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr - (unsigned long)hal->rdp.vaddr); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab), hp_addr & HAL_ADDR_LSB_REG_MASK); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab), hp_addr >> HAL_ADDR_MSB_REG_SHIFT); /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; ath11k_hif_write32(ab, reg_base, 0); ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0); *srng->u.dst_ring.hp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; val = 0; if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP; if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) val |= HAL_REO1_RING_MISC_HOST_FW_SWAP; if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) val |= HAL_REO1_RING_MISC_MSI_SWAP; val |= 
HAL_REO1_RING_MISC_SRNG_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val); } static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, struct hal_srng *srng) { struct ath11k_hal *hal = &ab->hal; u32 val; u64 tp_addr; u32 reg_base; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) { ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab), srng->msi_addr); val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR, ((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT)) | HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab), val); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MSI1_DATA_OFFSET(ab), srng->msi_data); } ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val); val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val); if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) { ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr); val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB, ((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE, (srng->entry_size * srng->num_entries)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val); } /* interrupt setup */ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the * unit of 8 usecs instead of 1 usec (as required by v1). */ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD, srng->intr_timer_thres_us); val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD, (srng->intr_batch_cntr_thres_entries * srng->entry_size)); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab), val); val = 0; if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) { val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD, srng->u.src_ring.low_threshold); } ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab), val); if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) { tp_addr = hal->rdp.paddr + ((unsigned long)srng->u.src_ring.tp_addr - (unsigned long)hal->rdp.vaddr); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab), tp_addr & HAL_ADDR_LSB_REG_MASK); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab), tp_addr >> HAL_ADDR_MSB_REG_SHIFT); } /* Initialize head and tail pointers to indicate ring is empty */ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; ath11k_hif_write32(ab, reg_base, 0); ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0); *srng->u.src_ring.tp_addr = 0; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0]; val = 0; if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP) val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP; if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP) val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP; if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP) val |= HAL_TCL1_RING_MISC_MSI_SWAP; /* Loop count is not used for SRC rings */ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE; val |= HAL_TCL1_RING_MISC_SRNG_ENABLE; ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val); } static void ath11k_hal_srng_hw_init(struct ath11k_base *ab, struct hal_srng 
*srng) { if (srng->ring_dir == HAL_SRNG_DIR_SRC) ath11k_hal_srng_src_hw_init(ab, srng); else ath11k_hal_srng_dst_hw_init(ab, srng); } static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab, enum hal_ring_type type, int ring_num, int mac_id) { struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; int ring_id; if (ring_num >= srng_config->max_rings) { ath11k_warn(ab, "invalid ring number :%d\n", ring_num); return -EINVAL; } ring_id = srng_config->start_ring_id + ring_num; if (srng_config->lmac_ring) ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC; if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX)) return -EINVAL; return ring_id; } int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type) { struct hal_srng_config *srng_config; if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) return -EINVAL; srng_config = &ab->hal.srng_config[ring_type]; return (srng_config->entry_size << 2); } int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type) { struct hal_srng_config *srng_config; if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES)) return -EINVAL; srng_config = &ab->hal.srng_config[ring_type]; return (srng_config->max_size / srng_config->entry_size); } void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, struct hal_srng_params *params) { params->ring_base_paddr = srng->ring_base_paddr; params->ring_base_vaddr = srng->ring_base_vaddr; params->num_entries = srng->num_entries; params->intr_timer_thres_us = srng->intr_timer_thres_us; params->intr_batch_cntr_thres_entries = srng->intr_batch_cntr_thres_entries; params->low_threshold = srng->u.src_ring.low_threshold; params->msi_addr = srng->msi_addr; params->msi_data = srng->msi_data; params->flags = srng->flags; } dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab, struct hal_srng *srng) { if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) return 0; if (srng->ring_dir == HAL_SRNG_DIR_SRC) return ab->hal.wrp.paddr + ((unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->hal.wrp.vaddr); else return ab->hal.rdp.paddr + ((unsigned long)srng->u.dst_ring.hp_addr - (unsigned long)ab->hal.rdp.vaddr); } dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab, struct hal_srng *srng) { if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING)) return 0; if (srng->ring_dir == HAL_SRNG_DIR_SRC) return ab->hal.rdp.paddr + ((unsigned long)srng->u.src_ring.tp_addr - (unsigned long)ab->hal.rdp.vaddr); else return ab->hal.wrp.paddr + ((unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->hal.wrp.vaddr); } u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type) { switch (type) { case HAL_CE_DESC_SRC: return sizeof(struct hal_ce_srng_src_desc); case HAL_CE_DESC_DST: return sizeof(struct hal_ce_srng_dest_desc); case HAL_CE_DESC_DST_STATUS: return sizeof(struct hal_ce_srng_dst_status_desc); } return 0; } void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data) { struct hal_ce_srng_src_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; desc->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP, byte_swap_data) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) | FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len); desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id); } void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) { struct hal_ce_srng_dest_desc *desc = buf; desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK; 
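/* Only the low 32 bits of the DMA address fit in buffer_addr_low; the upper bits ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT) are packed into buffer_addr_info below via HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI. */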
desc->buffer_addr_info = FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)); } u32 ath11k_hal_ce_dst_status_get_length(void *buf) { struct hal_ce_srng_dst_status_desc *desc = buf; u32 len; - len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, READ_ONCE(desc->flags)); + len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags); desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN; return len; } void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, dma_addr_t paddr) { desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, (paddr & HAL_ADDR_LSB_REG_MASK)); desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) | FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie); } u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) return (srng->ring_base_vaddr + srng->u.dst_ring.tp); return NULL; } static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab, struct hal_srng *srng, dma_addr_t *paddr) { lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) { *paddr = srng->ring_base_paddr + sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp; return srng->ring_base_vaddr + srng->u.dst_ring.tp; } return NULL; } static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab, struct hal_srng *srng) { dma_addr_t desc_paddr; u32 *desc; /* prefetch only if desc is available */ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr); if (likely(desc)) { dma_sync_single_for_cpu(ab->dev, desc_paddr, (srng->entry_size * sizeof(u32)), DMA_FROM_DEVICE); prefetch(desc); } } u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; lockdep_assert_held(&srng->lock); if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp) return NULL; desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; srng->u.dst_ring.tp += srng->entry_size; /* wrap around to start of ring*/ if (srng->u.dst_ring.tp == srng->ring_size) srng->u.dst_ring.tp = 0; /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) ath11k_hal_srng_prefetch_desc(ab, srng); return desc; } int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr) { u32 tp, hp; lockdep_assert_held(&srng->lock); tp = srng->u.dst_ring.tp; if (sync_hw_ptr) { hp = *srng->u.dst_ring.hp_addr; srng->u.dst_ring.cached_hp = hp; } else { hp = srng->u.dst_ring.cached_hp; } if (hp >= tp) return (hp - tp) / srng->entry_size; else return (srng->ring_size - tp + hp) / srng->entry_size; } /* Returns number of available entries in src ring */ int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr) { u32 tp, hp; lockdep_assert_held(&srng->lock); hp = srng->u.src_ring.hp; if (sync_hw_ptr) { tp = *srng->u.src_ring.tp_addr; srng->u.src_ring.cached_tp = tp; } else { tp = srng->u.src_ring.cached_tp; } if (tp > hp) return ((tp - hp) / srng->entry_size) - 1; else return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; } u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; u32 next_hp; lockdep_assert_held(&srng->lock); /* TODO: Using % is expensive, but we have to do this since size of some * SRNG rings is not power of 2 (due to descriptor sizes). 
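 * For example, a TCL data ring entry is sizeof(struct hal_tlv_hdr) + sizeof(struct hal_tcl_data_cmd) bytes, so entry_size * num_entries rarely works out to a power-of-2 ring size.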
Need to see * if separate function is defined for rings having power of 2 ring size * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the * overhead of % by using mask (with &). */ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; if (next_hp == srng->u.src_ring.cached_tp) return NULL; desc = srng->ring_base_vaddr + srng->u.src_ring.hp; srng->u.src_ring.hp = next_hp; /* TODO: Reap functionality is not used by all rings. If particular * ring does not use reap functionality, we need not update reap_hp * with next_hp pointer. Need to make sure a separate function is used * before doing any optimization by removing below code updating * reap_hp. */ srng->u.src_ring.reap_hp = next_hp; return desc; } u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; u32 next_reap_hp; lockdep_assert_held(&srng->lock); next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % srng->ring_size; if (next_reap_hp == srng->u.src_ring.cached_tp) return NULL; desc = srng->ring_base_vaddr + next_reap_hp; srng->u.src_ring.reap_hp = next_reap_hp; return desc; } u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, struct hal_srng *srng) { u32 *desc; lockdep_assert_held(&srng->lock); if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp) return NULL; desc = srng->ring_base_vaddr + srng->u.src_ring.hp; srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; return desc; } u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng) { u32 next_hp; lockdep_assert_held(&srng->lock); next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; if (next_hp != srng->u.src_ring.cached_tp) return srng->ring_base_vaddr + next_hp; return NULL; } u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) == srng->u.src_ring.cached_tp) return NULL; return srng->ring_base_vaddr + srng->u.src_ring.hp; } void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) { + u32 hp; + lockdep_assert_held(&srng->lock); if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.cached_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; } else { - srng->u.dst_ring.cached_hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + hp = READ_ONCE(*srng->u.dst_ring.hp_addr); + + if (hp != srng->u.dst_ring.cached_hp) { + srng->u.dst_ring.cached_hp = hp; + /* Make sure descriptor is read after the head + * pointer. + */ + dma_rmb(); + } /* Try to prefetch the next descriptor in the ring */ if (srng->flags & HAL_SRNG_FLAGS_CACHED) ath11k_hal_srng_prefetch_desc(ab, srng); } } /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin() * should have been called before this. */ void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); - /* TODO: See if we need a write memory barrier here */ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) { /* For LMAC rings, ring pointer updates are done through FW and * hence written to a shared memory location that is read by FW */ if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; - *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; + /* Make sure descriptor is written before updating the + * head pointer. 
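+ * (dma_wmb() suffices on this LMAC path because both the descriptor and the head pointer live in coherent DMA memory read by the firmware; no MMIO write is involved here.)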
+ */ + dma_wmb(); + WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; - *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + dma_mb(); + WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp); } } else { if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.last_tp = *(volatile u32 *)srng->u.src_ring.tp_addr; + /* Assume the implementation uses an MMIO write accessor + * which has the required wmb(), so that the descriptor + * is written before updating the head pointer. + */ ath11k_hif_write32(ab, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem, srng->u.src_ring.hp); } else { srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr; + /* Make sure descriptor is read before updating the + * tail pointer. + */ + mb(); ath11k_hif_write32(ab, (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem, srng->u.dst_ring.tp); } } srng->timestamp = jiffies; } void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, struct hal_wbm_idle_scatter_list *sbuf, u32 nsbufs, u32 tot_link_desc, u32 end_offset) { struct ath11k_buffer_addr *link_addr; int i; u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64; link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE; for (i = 1; i < nsbufs; i++) { link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK; link_addr->info1 = FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) | FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG, BASE_ADDR_MATCH_TAG_VAL); link_addr = (void *)sbuf[i].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE; } ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR, FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) | FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR, FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST, reg_scatter_buf_sz * nsbufs)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_LSB, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_RING_BASE_MSB, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) | FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG, BASE_ADDR_MATCH_TAG_VAL)); /* Setup head and tail pointers for the idle list */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[nsbufs - 1].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, ((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT)) | FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1, (end_offset >> 2))); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0, FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, sbuf[0].paddr)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1, FIELD_PREP( HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32, ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1, 0)); ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR, 2 * tot_link_desc); /* Enable the SRNG */ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40); } int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, int ring_num, int mac_id, struct hal_srng_params *params) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *srng_config = &ab->hal.srng_config[type]; struct hal_srng *srng; int ring_id; u32 lmac_idx; int i; u32 reg_base; ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id); if (ring_id < 0) return ring_id; srng = &hal->srng_list[ring_id]; srng->ring_id = ring_id; srng->ring_dir = srng_config->ring_dir; srng->ring_base_paddr = params->ring_base_paddr; srng->ring_base_vaddr = params->ring_base_vaddr; srng->entry_size = srng_config->entry_size; srng->num_entries = params->num_entries; srng->ring_size = srng->entry_size * srng->num_entries; srng->intr_batch_cntr_thres_entries = params->intr_batch_cntr_thres_entries; srng->intr_timer_thres_us = params->intr_timer_thres_us; srng->flags = params->flags; srng->msi_addr = params->msi_addr; srng->msi_data = params->msi_data; srng->initialized = 1; spin_lock_init(&srng->lock); lockdep_set_class(&srng->lock, hal->srng_key + ring_id); for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { srng->hwreg_base[i] = srng_config->reg_start[i] + (ring_num * srng_config->reg_size[i]); } memset(srng->ring_base_vaddr, 0, (srng->entry_size * srng->num_entries) << 2); /* TODO: Add comments on these swap configurations */ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP | HAL_SRNG_FLAGS_RING_PTR_SWAP; reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2]; if (srng->ring_dir == HAL_SRNG_DIR_SRC) { srng->u.src_ring.hp = 0; srng->u.src_ring.cached_tp = 0; srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size; srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id); srng->u.src_ring.low_threshold = params->low_threshold * srng->entry_size; if (srng_config->lmac_ring) { lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START; srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr + lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { if (!ab->hw_params.supports_shadow_regs) srng->u.src_ring.hp_addr = (u32 *)((unsigned long)ab->mem + reg_base); else ath11k_dbg(ab, ATH11K_DBG_HAL, "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n", type, ring_num, reg_base, (unsigned long)srng->u.src_ring.hp_addr - (unsigned long)ab->mem); } } else { /* During initialization loop count in all the descriptors * will be set to zero, and HW will set it to 1 on completing * descriptor update in first loop, and increments it by 1 on * subsequent loops (loop count wraps around after reaching * 0xffff). The 'loop_cnt' in SW ring state is the expected * loop count in descriptors updated by HW (to be processed * by SW). 
*/ srng->u.dst_ring.loop_cnt = 1; srng->u.dst_ring.tp = 0; srng->u.dst_ring.cached_hp = 0; srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id); if (srng_config->lmac_ring) { /* For LMAC rings, tail pointer updates will be done * through FW by writing to a shared memory location */ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START; srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr + lmac_idx); srng->flags |= HAL_SRNG_FLAGS_LMAC_RING; } else { if (!ab->hw_params.supports_shadow_regs) srng->u.dst_ring.tp_addr = (u32 *)((unsigned long)ab->mem + reg_base + (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))); else ath11k_dbg(ab, ATH11K_DBG_HAL, "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n", type, ring_num, reg_base + (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)), (unsigned long)srng->u.dst_ring.tp_addr - (unsigned long)ab->mem); } } if (srng_config->lmac_ring) return ring_id; ath11k_hal_srng_hw_init(ab, srng); if (type == HAL_CE_DST) { srng->u.dst_ring.max_buffer_length = params->max_buffer_len; ath11k_hal_ce_dst_setup(ab, srng, ring_num); } return ring_id; } static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab, int shadow_cfg_idx, enum hal_ring_type ring_type, int ring_num) { struct hal_srng *srng; struct ath11k_hal *hal = &ab->hal; int ring_id; struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0); if (ring_id < 0) return; srng = &hal->srng_list[ring_id]; if (srng_config->ring_dir == HAL_SRNG_DIR_DST) srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); else srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) + (unsigned long)ab->mem); } int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab, enum hal_ring_type ring_type, int ring_num) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; int shadow_cfg_idx = hal->num_shadow_reg_configured; u32 target_reg; if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS) return -EINVAL; hal->num_shadow_reg_configured++; target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START]; target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] * ring_num; /* For destination ring, shadow the TP */ if (srng_config->ring_dir == HAL_SRNG_DIR_DST) target_reg += HAL_OFFSET_FROM_HP_TO_TP; hal->shadow_reg_addr[shadow_cfg_idx] = target_reg; /* update hp/tp addr to hal structure*/ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type, ring_num); ath11k_dbg(ab, ATH11K_DBG_HAL, "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d", target_reg, HAL_SHADOW_REG(ab, shadow_cfg_idx), shadow_cfg_idx, ring_type, ring_num); return 0; } void ath11k_hal_srng_shadow_config(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; int ring_type, ring_num; /* update all the non-CE srngs. 
*/ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) { struct hal_srng_config *srng_config = &hal->srng_config[ring_type]; if (ring_type == HAL_CE_SRC || ring_type == HAL_CE_DST || ring_type == HAL_CE_DST_STATUS) continue; if (srng_config->lmac_ring) continue; for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++) ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num); } } void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab, u32 **cfg, u32 *len) { struct ath11k_hal *hal = &ab->hal; *len = hal->num_shadow_reg_configured; *cfg = hal->shadow_reg_addr; } void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); /* check whether the ring is empty. Update the shadow * HP only when the ring isn't empty. */ if (srng->ring_dir == HAL_SRNG_DIR_SRC && *srng->u.src_ring.tp_addr != srng->u.src_ring.hp) ath11k_hal_srng_access_end(ab, srng); } static int ath11k_hal_srng_create_config(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; struct hal_srng_config *s; hal->srng_config = kmemdup(hw_srng_config_template, sizeof(hw_srng_config_template), GFP_KERNEL); if (!hal->srng_config) return -ENOMEM; s = &hal->srng_config[HAL_REO_DST]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab); s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab); s = &hal->srng_config[HAL_REO_EXCEPTION]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab); s = &hal->srng_config[HAL_REO_REINJECT]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab); s = &hal->srng_config[HAL_REO_CMD]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab); s = &hal->srng_config[HAL_REO_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab); s = &hal->srng_config[HAL_TCL_DATA]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP; s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab); s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP; s = &hal->srng_config[HAL_TCL_CMD]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP; s = &hal->srng_config[HAL_TCL_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP; s = &hal->srng_config[HAL_CE_SRC]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab); s = &hal->srng_config[HAL_CE_DST]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
HAL_CE_DST_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_CE_DST_STATUS]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP + ATH11K_CE_OFFSET(ab); s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) - HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab); s = &hal->srng_config[HAL_WBM_IDLE_LINK]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP; s = &hal->srng_config[HAL_SW2WBM_RELEASE]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP; s = &hal->srng_config[HAL_WBM2SW_RELEASE]; s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab); s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP; s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) - HAL_WBM0_RELEASE_RING_BASE_LSB(ab); s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP; return 0; } static void ath11k_hal_register_srng_key(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; u32 ring_id; for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) lockdep_register_key(hal->srng_key + ring_id); } static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; u32 ring_id; for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) lockdep_unregister_key(hal->srng_key + ring_id); } int ath11k_hal_srng_init(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; int ret; memset(hal, 0, sizeof(*hal)); ret = ath11k_hal_srng_create_config(ab); if (ret) goto err_hal; ret = ath11k_hal_alloc_cont_rdp(ab); if (ret) goto err_hal; ret = ath11k_hal_alloc_cont_wrp(ab); if (ret) goto err_free_cont_rdp; ath11k_hal_register_srng_key(ab); return 0; err_free_cont_rdp: ath11k_hal_free_cont_rdp(ab); err_hal: return ret; } EXPORT_SYMBOL(ath11k_hal_srng_init); void ath11k_hal_srng_deinit(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; + int i; + + for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) + ab->hal.srng_list[i].initialized = 0; ath11k_hal_unregister_srng_key(ab); ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); hal->srng_config = NULL; } EXPORT_SYMBOL(ath11k_hal_srng_deinit); void ath11k_hal_dump_srng_stats(struct ath11k_base *ab) { struct hal_srng *srng; struct ath11k_ext_irq_grp *irq_grp; struct ath11k_ce_pipe *ce_pipe; int i; ath11k_err(ab, "Last interrupt received for each CE:\n"); for (i = 0; i < ab->hw_params.ce_count; i++) { ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n", i, ce_pipe->pipe_num, jiffies_to_msecs(jiffies - ce_pipe->timestamp)); } ath11k_err(ab, "\nLast interrupt received for each group:\n"); for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { irq_grp = &ab->ext_irq_grp[i]; ath11k_err(ab, "group_id %d %ums before\n", irq_grp->grp_id, jiffies_to_msecs(jiffies - irq_grp->timestamp)); } for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) { srng = &ab->hal.srng_list[i]; if (!srng->initialized) 
continue; if (srng->ring_dir == HAL_SRNG_DIR_SRC) ath11k_err(ab, "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n", srng->ring_id, srng->u.src_ring.hp, srng->u.src_ring.reap_hp, *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp, srng->u.src_ring.last_tp, jiffies_to_msecs(jiffies - srng->timestamp)); else if (srng->ring_dir == HAL_SRNG_DIR_DST) ath11k_err(ab, "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n", srng->ring_id, srng->u.dst_ring.tp, *srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp, srng->u.dst_ring.last_hp, jiffies_to_msecs(jiffies - srng->timestamp)); } } diff --git a/htc.c b/htc.c index 23054ab29a5e..4571d01cc33d 100644 --- a/htc.c +++ b/htc.c @@ -1,845 +1,845 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include #include "debug.h" #include "hif.h" struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size) { struct sk_buff *skb; skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr)); if (!skb) return NULL; skb_reserve(skb, sizeof(struct ath11k_htc_hdr)); /* FW/HTC requires 4-byte aligned streams */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) ath11k_warn(ab, "Unaligned HTC tx skb\n"); return skb; } static void ath11k_htc_control_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { kfree_skb(skb); } static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab) { struct sk_buff *skb; struct ath11k_skb_cb *skb_cb; skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE); if (!skb) return NULL; skb_reserve(skb, sizeof(struct ath11k_htc_hdr)); WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4)); skb_cb = ATH11K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); return skb; } static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep, struct sk_buff *skb) { struct ath11k_htc_hdr *hdr; hdr = (struct ath11k_htc_hdr *)skb->data; memset(hdr, 0, sizeof(*hdr)); hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) | FIELD_PREP(HTC_HDR_PAYLOADLEN, (skb->len - sizeof(*hdr))); if (ep->tx_credit_flow_enabled) hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS, ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE); spin_lock_bh(&ep->htc->tx_lock); hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++); spin_unlock_bh(&ep->htc->tx_lock); } int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid, struct sk_buff *skb) { struct ath11k_htc_ep *ep = &htc->endpoint[eid]; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct device *dev = htc->ab->dev; struct ath11k_base *ab = htc->ab; int credits = 0; int ret; bool credit_flow_enabled = (ab->hw_params.credit_flow && ep->tx_credit_flow_enabled); if (eid >= ATH11K_HTC_EP_COUNT) { ath11k_warn(ab, "Invalid endpoint id: %d\n", eid); return -ENOENT; } skb_push(skb, sizeof(struct ath11k_htc_hdr)); if (credit_flow_enabled) { credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); spin_lock_bh(&htc->tx_lock); if (ep->tx_credits < credits) { ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d insufficient credits required %d total %d\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); ret = -EAGAIN; goto err_pull; } ep->tx_credits -= credits; ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits consumed %d total %d\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); } ath11k_htc_prepare_tx_skb(ep, skb); skb_cb->eid = eid; skb_cb->paddr = dma_map_single(dev, 
skb->data, skb->len, DMA_TO_DEVICE); ret = dma_mapping_error(dev, skb_cb->paddr); if (ret) { ret = -EIO; goto err_credits; } ath11k_dbg(ab, ATH11K_DBG_HTC, "tx skb %p eid %d paddr %pad\n", skb, skb_cb->eid, &skb_cb->paddr); ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid); if (ret) goto err_unmap; return 0; err_unmap: dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: if (credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits reverted %d total %d\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); if (ep->ep_ops.ep_tx_credits) ep->ep_ops.ep_tx_credits(htc->ab); } err_pull: skb_pull(skb, sizeof(struct ath11k_htc_hdr)); return ret; } static void ath11k_htc_process_credit_report(struct ath11k_htc *htc, const struct ath11k_htc_credit_report *report, int len, enum ath11k_htc_ep_id eid) { struct ath11k_base *ab = htc->ab; struct ath11k_htc_ep *ep; int i, n_reports; if (len % sizeof(*report)) ath11k_warn(ab, "Uneven credit report len %d", len); n_reports = len / sizeof(*report); spin_lock_bh(&htc->tx_lock); for (i = 0; i < n_reports; i++, report++) { if (report->eid >= ATH11K_HTC_EP_COUNT) break; ep = &htc->endpoint[report->eid]; ep->tx_credits += report->credits; ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits got %d total %d\n", report->eid, report->credits, ep->tx_credits); if (ep->ep_ops.ep_tx_credits) { spin_unlock_bh(&htc->tx_lock); ep->ep_ops.ep_tx_credits(htc->ab); spin_lock_bh(&htc->tx_lock); } } spin_unlock_bh(&htc->tx_lock); } static int ath11k_htc_process_trailer(struct ath11k_htc *htc, u8 *buffer, int length, enum ath11k_htc_ep_id src_eid) { struct ath11k_base *ab = htc->ab; int status = 0; struct ath11k_htc_record *record; size_t len; while (length > 0) { record = (struct ath11k_htc_record *)buffer; if (length < sizeof(record->hdr)) { status = -EINVAL; break; } if (record->hdr.len > length) { /* no room left in buffer for record */ ath11k_warn(ab, "Invalid record length: %d\n", record->hdr.len); status = -EINVAL; break; } if (ab->hw_params.credit_flow) { switch (record->hdr.id) { case ATH11K_HTC_RECORD_CREDITS: len = sizeof(struct ath11k_htc_credit_report); if (record->hdr.len < len) { ath11k_warn(ab, "Credit report too long\n"); status = -EINVAL; break; } ath11k_htc_process_credit_report(htc, record->credit_report, record->hdr.len, src_eid); break; default: ath11k_warn(ab, "Unhandled record: id:%d length:%d\n", record->hdr.id, record->hdr.len); break; } } if (status) break; /* multiple records may be present in a trailer */ buffer += sizeof(record->hdr) + record->hdr.len; length -= sizeof(record->hdr) + record->hdr.len; } return status; } static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "suspend complete %d\n", ack); if (ack) set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); else clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); complete(&ab->htc_suspend); } void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_htc *htc = &ab->htc; struct ath11k_htc_ep *ep; void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *); u8 eid; eid = ATH11K_SKB_CB(skb)->eid; if (eid >= ATH11K_HTC_EP_COUNT) { dev_kfree_skb_any(skb); return; } ep = &htc->endpoint[eid]; spin_lock_bh(&htc->tx_lock); ep_tx_complete = ep->ep_ops.ep_tx_complete; spin_unlock_bh(&htc->tx_lock); if (!ep_tx_complete) { dev_kfree_skb_any(skb); return; } ep_tx_complete(htc->ab, skb); } 
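/* Credit accounting recap for the TX path above: ath11k_htc_send()
 * charges DIV_ROUND_UP(skb->len, htc->target_credit_size) credits up
 * front and fails with -EAGAIN when ep->tx_credits runs short; credits
 * are returned via ATH11K_HTC_RECORD_CREDITS trailer records handled in
 * ath11k_htc_process_credit_report(). Worked example (the credit size is
 * firmware-reported; 1792 bytes is assumed here purely for
 * illustration): a 2000-byte message costs DIV_ROUND_UP(2000, 1792) = 2
 * credits.
 */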
static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "wakeup from suspend is received\n"); } void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { int status = 0; struct ath11k_htc *htc = &ab->htc; struct ath11k_htc_hdr *hdr; struct ath11k_htc_ep *ep; u16 payload_len; u32 message_id, trailer_len = 0; size_t min_len; u8 eid; bool trailer_present; hdr = (struct ath11k_htc_hdr *)skb->data; skb_pull(skb, sizeof(*hdr)); eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info); if (eid >= ATH11K_HTC_EP_COUNT) { ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid); goto out; } ep = &htc->endpoint[eid]; payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info); if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) { ath11k_warn(ab, "HTC rx frame too long, len: %zu\n", payload_len + sizeof(*hdr)); goto out; } if (skb->len < payload_len) { ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n", skb->len, payload_len); goto out; } /* get flags to check for trailer */ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) & ATH11K_HTC_FLAG_TRAILER_PRESENT; ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p trailer_present %d\n", eid, skb, trailer_present); if (trailer_present) { u8 *trailer; trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info); min_len = sizeof(struct ath11k_htc_record_hdr); if ((trailer_len < min_len) || (trailer_len > payload_len)) { ath11k_warn(ab, "Invalid trailer length: %d\n", trailer_len); goto out; } trailer = (u8 *)hdr; trailer += sizeof(*hdr); trailer += payload_len; trailer -= trailer_len; status = ath11k_htc_process_trailer(htc, trailer, trailer_len, eid); if (status) goto out; skb_trim(skb, skb->len - trailer_len); } if (trailer_len >= payload_len) /* zero length packet with trailer data, just drop these */ goto out; if (eid == ATH11K_HTC_EP_0) { struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data; message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id); ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p message_id %d\n", eid, skb, message_id); switch (message_id) { case ATH11K_HTC_MSG_READY_ID: case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID: /* handle HTC control message */ if (completion_done(&htc->ctl_resp)) { /* this is a fatal error, target should not be * sending unsolicited messages on the ep 0 */ ath11k_warn(ab, "HTC rx ctrl still processing\n"); complete(&htc->ctl_resp); goto out; } htc->control_resp_len = min_t(int, skb->len, ATH11K_HTC_MAX_CTRL_MSG_LEN); memcpy(htc->control_resp_buffer, skb->data, htc->control_resp_len); complete(&htc->ctl_resp); break; case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE: ath11k_htc_suspend_complete(ab, true); break; case ATH11K_HTC_MSG_NACK_SUSPEND: ath11k_htc_suspend_complete(ab, false); break; case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: ath11k_htc_wakeup_from_suspend(ab); break; default: ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n", FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)); break; } goto out; } ep->ep_ops.ep_rx_complete(ab, skb); /* poll tx completion for interrupt disabled CE's */ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id); /* skb is now owned by the rx completion handler */ skb = NULL; out: kfree_skb(skb); } static void ath11k_htc_control_rx_complete(struct ath11k_base *ab, struct sk_buff *skb) { /* This is unexpected. FW is not supposed to send regular rx on this * endpoint. 
*/ ath11k_warn(ab, "unexpected htc rx\n"); kfree_skb(skb); } static const char *htc_service_name(enum ath11k_htc_svc_id id) { switch (id) { case ATH11K_HTC_SVC_ID_RESERVED: return "Reserved"; case ATH11K_HTC_SVC_ID_RSVD_CTRL: return "Control"; case ATH11K_HTC_SVC_ID_WMI_CONTROL: return "WMI"; case ATH11K_HTC_SVC_ID_WMI_DATA_BE: return "DATA BE"; case ATH11K_HTC_SVC_ID_WMI_DATA_BK: return "DATA BK"; case ATH11K_HTC_SVC_ID_WMI_DATA_VI: return "DATA VI"; case ATH11K_HTC_SVC_ID_WMI_DATA_VO: return "DATA VO"; case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1: return "WMI MAC1"; case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2: return "WMI MAC2"; case ATH11K_HTC_SVC_ID_NMI_CONTROL: return "NMI Control"; case ATH11K_HTC_SVC_ID_NMI_DATA: return "NMI Data"; case ATH11K_HTC_SVC_ID_HTT_DATA_MSG: return "HTT Data"; case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS: return "RAW"; case ATH11K_HTC_SVC_ID_IPA_TX: return "IPA TX"; case ATH11K_HTC_SVC_ID_PKT_LOG: return "PKT LOG"; } return "Unknown"; } static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc) { struct ath11k_htc_ep *ep; int i; for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) { ep = &htc->endpoint[i]; ep->service_id = ATH11K_HTC_SVC_ID_UNUSED; ep->max_ep_message_len = 0; ep->max_tx_queue_depth = 0; ep->eid = i; ep->htc = htc; ep->tx_credit_flow_enabled = true; } } static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc, u16 service_id) { u8 i, allocation = 0; for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) { if (htc->service_alloc_table[i].service_id == service_id) { allocation = htc->service_alloc_table[i].credit_allocation; } } return allocation; } static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc) { struct ath11k_htc_svc_tx_credits *serv_entry; - u32 svc_id[] = { + static const u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2, }; int i, credits; credits = htc->total_transmit_credits; serv_entry = htc->service_alloc_table; if ((htc->wmi_ep_count == 0) || (htc->wmi_ep_count > ARRAY_SIZE(svc_id))) return -EINVAL; /* Divide credits among number of endpoints for WMI */ credits = credits / htc->wmi_ep_count; for (i = 0; i < htc->wmi_ep_count; i++) { serv_entry[i].service_id = svc_id[i]; serv_entry[i].credit_allocation = credits; } return 0; } int ath11k_htc_wait_target(struct ath11k_htc *htc) { int i, status = 0; struct ath11k_base *ab = htc->ab; unsigned long time_left; struct ath11k_htc_ready *ready; u16 message_id; u16 credit_count; u16 credit_size; time_left = wait_for_completion_timeout(&htc->ctl_resp, ATH11K_HTC_WAIT_TIMEOUT_HZ); if (!time_left) { ath11k_warn(ab, "failed to receive control response completion, polling..\n"); for (i = 0; i < ab->hw_params.ce_count; i++) ath11k_ce_per_engine_service(htc->ab, i); time_left = wait_for_completion_timeout(&htc->ctl_resp, ATH11K_HTC_WAIT_TIMEOUT_HZ); if (!time_left) status = -ETIMEDOUT; } if (status < 0) { ath11k_warn(ab, "ctl_resp never came in (%d)\n", status); return status; } if (htc->control_resp_len < sizeof(*ready)) { ath11k_warn(ab, "Invalid HTC ready msg len:%d\n", htc->control_resp_len); return -ECOMM; } ready = (struct ath11k_htc_ready *)htc->control_resp_buffer; message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count); credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT, ready->id_credit_count); credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep); if (message_id != ATH11K_HTC_MSG_READY_ID) { ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id); 
return -ECOMM; } htc->total_transmit_credits = credit_count; htc->target_credit_size = credit_size; ath11k_dbg(ab, ATH11K_DBG_HTC, "target ready total_transmit_credits %d target_credit_size %d\n", htc->total_transmit_credits, htc->target_credit_size); if ((htc->total_transmit_credits == 0) || (htc->target_credit_size == 0)) { ath11k_warn(ab, "Invalid credit size received\n"); return -ECOMM; } /* For QCA6390, wmi endpoint uses 1 credit to avoid * back-to-back write. */ if (ab->hw_params.supports_shadow_regs) htc->total_transmit_credits = 1; ath11k_htc_setup_target_buffer_assignments(htc); return 0; } int ath11k_htc_connect_service(struct ath11k_htc *htc, struct ath11k_htc_svc_conn_req *conn_req, struct ath11k_htc_svc_conn_resp *conn_resp) { struct ath11k_base *ab = htc->ab; struct ath11k_htc_conn_svc *req_msg; struct ath11k_htc_conn_svc_resp resp_msg_dummy; struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy; enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT; struct ath11k_htc_ep *ep; struct sk_buff *skb; unsigned int max_msg_size = 0; int length, status; unsigned long time_left; bool disable_credit_flow_ctrl = false; u16 message_id, service_id, flags = 0; u8 tx_alloc = 0; /* special case for HTC pseudo control service */ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) { disable_credit_flow_ctrl = true; assigned_eid = ATH11K_HTC_EP_0; max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN; memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy)); goto setup; } tx_alloc = ath11k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) ath11k_dbg(ab, ATH11K_DBG_BOOT, "htc service %s does not allocate target credits\n", htc_service_name(conn_req->service_id)); skb = ath11k_htc_build_tx_ctrl_skb(htc->ab); if (!skb) { ath11k_warn(ab, "Failed to allocate HTC packet\n"); return -ENOMEM; } length = sizeof(*req_msg); skb_put(skb, length); memset(skb->data, 0, length); req_msg = (struct ath11k_htc_conn_svc *)skb->data; req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID, ATH11K_HTC_MSG_CONNECT_SERVICE_ID); flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc); /* Only enable credit flow control for WMI ctrl service */ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL || conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 || conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) { flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; disable_credit_flow_ctrl = true; } if (!ab->hw_params.credit_flow) { flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL; disable_credit_flow_ctrl = true; } req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags); req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID, conn_req->service_id); reinit_completion(&htc->ctl_resp); status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb); if (status) { kfree_skb(skb); return status; } /* wait for response */ time_left = wait_for_completion_timeout(&htc->ctl_resp, ATH11K_HTC_CONN_SVC_TIMEOUT_HZ); if (!time_left) { ath11k_err(ab, "Service connect timeout\n"); return -ETIMEDOUT; } /* we controlled the buffer creation, it's aligned */ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer; message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id); service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID, resp_msg->msg_svc_id); if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || (htc->control_resp_len < sizeof(*resp_msg))) { ath11k_err(ab, "Invalid resp message ID 0x%x", message_id); return -EPROTO; } ath11k_dbg(ab, ATH11K_DBG_HTC, "service %s 
connect response status 0x%lx assigned ep 0x%lx\n", htc_service_name(service_id), FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len), FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len)); conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len); /* check response status */ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) { ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n", htc_service_name(service_id), conn_resp->connect_resp_code); return -EPROTO; } assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET( HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len); max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE, resp_msg->flags_len); setup: if (assigned_eid >= ATH11K_HTC_EP_COUNT) return -EPROTO; if (max_msg_size == 0) return -EPROTO; ep = &htc->endpoint[assigned_eid]; ep->eid = assigned_eid; if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED) return -EPROTO; /* return assigned endpoint to caller */ conn_resp->eid = assigned_eid; conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE, resp_msg->flags_len); /* setup the endpoint */ ep->service_id = conn_req->service_id; ep->max_tx_queue_depth = conn_req->max_send_queue_depth; ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE, resp_msg->flags_len); ep->tx_credits = tx_alloc; /* copy all the callbacks */ ep->ep_ops = conn_req->ep_ops; status = ath11k_hif_map_service_to_pipe(htc->ab, ep->service_id, &ep->ul_pipe_id, &ep->dl_pipe_id); if (status) return status; ath11k_dbg(ab, ATH11K_DBG_BOOT, "htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; ath11k_dbg(ab, ATH11K_DBG_BOOT, "htc service '%s' eid %d tx flow control disabled\n", htc_service_name(ep->service_id), assigned_eid); } return status; } int ath11k_htc_start(struct ath11k_htc *htc) { struct sk_buff *skb; int status = 0; struct ath11k_base *ab = htc->ab; struct ath11k_htc_setup_complete_extended *msg; skb = ath11k_htc_build_tx_ctrl_skb(htc->ab); if (!skb) return -ENOMEM; skb_put(skb, sizeof(*msg)); memset(skb->data, 0, skb->len); msg = (struct ath11k_htc_setup_complete_extended *)skb->data; msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID, ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID); if (ab->hw_params.credit_flow) ath11k_dbg(ab, ATH11K_DBG_HTC, "using tx credit flow control\n"); else msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW; status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb); if (status) { kfree_skb(skb); return status; } return 0; } int ath11k_htc_init(struct ath11k_base *ab) { struct ath11k_htc *htc = &ab->htc; struct ath11k_htc_svc_conn_req conn_req; struct ath11k_htc_svc_conn_resp conn_resp; int ret; spin_lock_init(&htc->tx_lock); ath11k_htc_reset_endpoint_states(htc); htc->ab = ab; switch (ab->wmi_ab.preferred_hw_mode) { case WMI_HOST_HW_MODE_SINGLE: htc->wmi_ep_count = 1; break; case WMI_HOST_HW_MODE_DBS: case WMI_HOST_HW_MODE_DBS_OR_SBS: htc->wmi_ep_count = 2; break; case WMI_HOST_HW_MODE_DBS_SBS: htc->wmi_ep_count = 3; break; default: htc->wmi_ep_count = ab->hw_params.max_radios; break; } /* setup our pseudo HTC control endpoint connection */ memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete; conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete; conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS; 
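/* The pseudo control service connected below is never negotiated with
 * firmware: ath11k_htc_connect_service() short-circuits
 * ATH11K_HTC_SVC_ID_RSVD_CTRL, pinning it to ATH11K_HTC_EP_0 with
 * ATH11K_HTC_MAX_CTRL_MSG_LEN as the message limit and credit flow
 * disabled, so only local endpoint state is set up.
 */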
conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL; /* connect fake service */ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp); if (ret) { ath11k_err(ab, "could not connect to htc service (%d)\n", ret); return ret; } init_completion(&htc->ctl_resp); return 0; } diff --git a/mac.c b/mac.c index 13301ca317a5..106e2530b64e 100644 --- a/mac.c +++ b/mac.c @@ -1,10599 +1,10696 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include #include #include #include #include #include #include "mac.h" #include "core.h" #include "debug.h" #include "wmi.h" #include "hw.h" #include "dp_tx.h" #include "dp_rx.h" #include "testmode.h" #include "peer.h" #include "debugfs_sta.h" #include "hif.h" #include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _freq, _flags) { \ .band = NL80211_BAND_5GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN6G(_channel, _freq, _flags) { \ .band = NL80211_BAND_6GHZ, \ .hw_value = (_channel), \ .center_freq = (_freq), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static const struct ieee80211_channel ath11k_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static const struct ieee80211_channel ath11k_5ghz_channels[] = { CHAN5G(36, 5180, 0), CHAN5G(40, 5200, 0), CHAN5G(44, 5220, 0), CHAN5G(48, 5240, 0), CHAN5G(52, 5260, 0), CHAN5G(56, 5280, 0), CHAN5G(60, 5300, 0), CHAN5G(64, 5320, 0), CHAN5G(100, 5500, 0), CHAN5G(104, 5520, 0), CHAN5G(108, 5540, 0), CHAN5G(112, 5560, 0), CHAN5G(116, 5580, 0), CHAN5G(120, 5600, 0), CHAN5G(124, 5620, 0), CHAN5G(128, 5640, 0), CHAN5G(132, 5660, 0), CHAN5G(136, 5680, 0), CHAN5G(140, 5700, 0), CHAN5G(144, 5720, 0), CHAN5G(149, 5745, 0), CHAN5G(153, 5765, 0), CHAN5G(157, 5785, 0), CHAN5G(161, 5805, 0), CHAN5G(165, 5825, 0), CHAN5G(169, 5845, 0), CHAN5G(173, 5865, 0), CHAN5G(177, 5885, 0), }; static const struct ieee80211_channel ath11k_6ghz_channels[] = { CHAN6G(1, 5955, 0), CHAN6G(5, 5975, 0), CHAN6G(9, 5995, 0), CHAN6G(13, 6015, 0), CHAN6G(17, 6035, 0), CHAN6G(21, 6055, 0), CHAN6G(25, 6075, 0), CHAN6G(29, 6095, 0), CHAN6G(33, 6115, 0), CHAN6G(37, 6135, 0), CHAN6G(41, 6155, 0), CHAN6G(45, 6175, 0), CHAN6G(49, 6195, 0), CHAN6G(53, 6215, 0), CHAN6G(57, 6235, 0), CHAN6G(61, 6255, 0), CHAN6G(65, 6275, 0), CHAN6G(69, 6295, 0), CHAN6G(73, 6315, 0), CHAN6G(77, 6335, 0), CHAN6G(81, 6355, 0), CHAN6G(85, 6375, 0), CHAN6G(89, 6395, 0), CHAN6G(93, 6415, 0), CHAN6G(97, 6435, 0), CHAN6G(101, 6455, 0), CHAN6G(105, 6475, 0), CHAN6G(109, 6495, 0), CHAN6G(113, 6515, 0), CHAN6G(117, 6535, 0), CHAN6G(121, 6555, 0), CHAN6G(125, 6575, 0), CHAN6G(129, 6595, 0), CHAN6G(133, 6615, 0), CHAN6G(137, 6635, 0), CHAN6G(141, 6655, 0), CHAN6G(145, 6675, 0), CHAN6G(149, 6695, 0), CHAN6G(153, 6715, 0), CHAN6G(157, 6735, 0), CHAN6G(161, 6755, 0), CHAN6G(165, 6775, 0), CHAN6G(169, 6795, 0), CHAN6G(173, 6815, 0), CHAN6G(177, 6835, 0), CHAN6G(181, 6855, 0), CHAN6G(185, 6875, 0), 
CHAN6G(189, 6895, 0), CHAN6G(193, 6915, 0), CHAN6G(197, 6935, 0), CHAN6G(201, 6955, 0), CHAN6G(205, 6975, 0), CHAN6G(209, 6995, 0), CHAN6G(213, 7015, 0), CHAN6G(217, 7035, 0), CHAN6G(221, 7055, 0), CHAN6G(225, 7075, 0), CHAN6G(229, 7095, 0), CHAN6G(233, 7115, 0), /* new addition in IEEE Std 802.11ax-2021 */ CHAN6G(2, 5935, 0), }; static struct ieee80211_rate ath11k_legacy_rates[] = { { .bitrate = 10, .hw_value = ATH11K_HW_RATE_CCK_LP_1M }, { .bitrate = 20, .hw_value = ATH11K_HW_RATE_CCK_LP_2M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = ATH11K_HW_RATE_CCK_LP_11M, .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M }, { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M }, { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M }, { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M }, { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M }, { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M }, { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M }, { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M }, }; static const int ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = { [NL80211_BAND_2GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G, [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN, }, [NL80211_BAND_5GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, }, [NL80211_BAND_6GHZ] = { [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, }, }; const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = { .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START | HTT_RX_FILTER_TLV_FLAGS_PPDU_END | HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE, .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0, .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1, .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2, .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 | HTT_RX_FP_CTRL_FILTER_FLASG3 }; #define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4 #define ath11k_g_rates ath11k_legacy_rates #define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates)) #define ath11k_a_rates (ath11k_legacy_rates + 4) #define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4) #define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */ /* Overhead due to the processing of channel switch events from FW */ #define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */ static const u32 ath11k_smps_map[] = { [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, [WLAN_HT_CAP_SM_PS_DYNAMIC] = 
WMI_PEER_SMPS_DYNAMIC, [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, }; enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy) { enum nl80211_he_ru_alloc ret; switch (ru_phy) { case RU_26: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; case RU_52: ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; break; case RU_106: ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; break; case RU_242: ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; break; case RU_484: ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; break; case RU_996: ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; default: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; } return ret; } enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) { enum nl80211_he_ru_alloc ret; switch (ru_tones) { case 26: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; case 52: ret = NL80211_RATE_INFO_HE_RU_ALLOC_52; break; case 106: ret = NL80211_RATE_INFO_HE_RU_ALLOC_106; break; case 242: ret = NL80211_RATE_INFO_HE_RU_ALLOC_242; break; case 484: ret = NL80211_RATE_INFO_HE_RU_ALLOC_484; break; case 996: ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; case (996 * 2): ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; break; default: ret = NL80211_RATE_INFO_HE_RU_ALLOC_26; break; } return ret; } enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi) { enum nl80211_he_gi ret; switch (sgi) { case RX_MSDU_START_SGI_0_8_US: ret = NL80211_RATE_INFO_HE_GI_0_8; break; case RX_MSDU_START_SGI_1_6_US: ret = NL80211_RATE_INFO_HE_GI_1_6; break; case RX_MSDU_START_SGI_3_2_US: ret = NL80211_RATE_INFO_HE_GI_3_2; break; default: ret = NL80211_RATE_INFO_HE_GI_0_8; break; } return ret; } u8 ath11k_mac_bw_to_mac80211_bw(u8 bw) { u8 ret = 0; switch (bw) { case ATH11K_BW_20: ret = RATE_INFO_BW_20; break; case ATH11K_BW_40: ret = RATE_INFO_BW_40; break; case ATH11K_BW_80: ret = RATE_INFO_BW_80; break; case ATH11K_BW_160: ret = RATE_INFO_BW_160; break; } return ret; } enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw) { switch (bw) { case RATE_INFO_BW_20: return ATH11K_BW_20; case RATE_INFO_BW_40: return ATH11K_BW_40; case RATE_INFO_BW_80: return ATH11K_BW_80; case RATE_INFO_BW_160: return ATH11K_BW_160; default: return ATH11K_BW_20; } } int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, u16 *rate) { /* As default, it is OFDM rates */ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX; int max_rates_idx = ath11k_g_rates_size; if (preamble == WMI_RATE_PREAMBLE_CCK) { hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK; i = 0; max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX; } while (i < max_rates_idx) { if (hw_rc == ath11k_legacy_rates[i].hw_value) { *rateidx = i; *rate = ath11k_legacy_rates[i].bitrate; return 0; } i++; } return -EINVAL; } static int get_num_chains(u32 mask) { int num_chains = 0; while (mask) { if (mask & BIT(0)) num_chains++; mask >>= 1; } return num_chains; } u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, u32 bitrate) { int i; for (i = 0; i < sband->n_bitrates; i++) if (sband->bitrates[i].bitrate == bitrate) return i; return 0; } static u32 ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask) { int nss; for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) if (ht_mcs_mask[nss]) return nss + 1; return 1; } static u32 ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask) { int nss; for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) if (vht_mcs_mask[nss]) return nss + 1; return 1; } static u32 ath11k_mac_max_he_nss(const u16 
*he_mcs_mask) { int nss; for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--) if (he_mcs_mask[nss]) return nss + 1; return 1; } static u8 ath11k_parse_mpdudensity(u8 mpdudensity) { /* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": * 0 for no restriction * 1 for 1/4 us * 2 for 1/2 us * 3 for 1 us * 4 for 2 us * 5 for 4 us * 6 for 8 us * 7 for 16 us */ switch (mpdudensity) { case 0: return 0; case 1: case 2: case 3: /* Our lower layer calculations limit our precision to * 1 microsecond */ return 1; case 4: return 2; case 5: return 4; case 6: return 8; case 7: return 16; default: return 0; } } static int ath11k_mac_vif_chan(struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { struct ieee80211_chanctx_conf *conf; rcu_read_lock(); conf = rcu_dereference(vif->bss_conf.chanctx_conf); if (!conf) { rcu_read_unlock(); return -ENOENT; } *def = conf->def; rcu_read_unlock(); return 0; } static bool ath11k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { case 10: case 20: case 55: case 110: return true; } return false; } u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, u8 hw_rate, bool cck) { const struct ieee80211_rate *rate; int i; for (i = 0; i < sband->n_bitrates; i++) { rate = &sband->bitrates[i]; if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck) continue; if (rate->hw_value == hw_rate) return i; else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && rate->hw_value_short == hw_rate) return i; } return 0; } static u8 ath11k_mac_bitrate_to_rate(int bitrate) { return DIV_ROUND_UP(bitrate, 5) | (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); } static void ath11k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_vif_iter *arvif_iter = data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (arvif->vdev_id == arvif_iter->vdev_id) arvif_iter->arvif = arvif; } struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id) { struct ath11k_vif_iter arvif_iter; u32 flags; memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter)); arvif_iter.vdev_id = vdev_id; flags = IEEE80211_IFACE_ITER_RESUME_ALL; ieee80211_iterate_active_interfaces_atomic(ar->hw, flags, ath11k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id); return NULL; } return arvif_iter.arvif; } struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, u32 vdev_id) { int i; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar && (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) { arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id); if (arvif) return arvif; } } return NULL; } struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id) { int i; struct ath11k_pdev *pdev; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) { if (pdev->ar->allocated_vdev_map & (1LL << vdev_id)) return pdev->ar; } } return NULL; } struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id) { int i; struct ath11k_pdev *pdev; if (ab->hw_params.single_pdev_only) { pdev = rcu_dereference(ab->pdevs_active[0]); return pdev ? pdev->ar : NULL; } if (WARN_ON(pdev_id > ab->num_radios)) return NULL; for (i = 0; i < ab->num_radios; i++) { if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM) pdev = &ab->pdevs[i]; else pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->pdev_id == pdev_id) return (pdev->ar ? 
pdev->ar : NULL); } return NULL; } struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up) return arvif; } } return NULL; } static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) { return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) || (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) && (band2 & WMI_HOST_WLAN_5G_CAP))); } u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ieee80211_vif *vif = arvif->vif; struct cfg80211_chan_def def; enum nl80211_band band; u8 pdev_id = ab->target_pdev_ids[0].pdev_id; int i; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return pdev_id; band = def.chan->band; for (i = 0; i < ab->target_pdev_count; i++) { if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands)) return ab->target_pdev_ids[i].pdev_id; } return pdev_id; } u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar) { struct ath11k_vif *arvif; arvif = ath11k_mac_get_vif_up(ar->ab); if (arvif) return ath11k_mac_get_target_pdev_id_from_vif(arvif); else return ar->ab->target_pdev_ids[0].pdev_id; } static void ath11k_pdev_caps_update(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; ar->max_tx_power = ab->target_caps.hw_max_tx_power; /* FIXME Set min_tx_power to ab->target_caps.hw_min_tx_power. * But since the received value in svcrdy is same as hw_max_tx_power, * we can set ar->min_tx_power to 0 currently until * this is fixed in firmware */ ar->min_tx_power = 0; ar->txpower_limit_2g = ar->max_tx_power; ar->txpower_limit_5g = ar->max_tx_power; ar->txpower_scale = WMI_HOST_TP_SCALE_MAX; } static int ath11k_mac_txpower_recalc(struct ath11k *ar) { struct ath11k_pdev *pdev = ar->pdev; struct ath11k_vif *arvif; int ret, txpower = -1; u32 param; lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->txpower <= 0) continue; if (txpower == -1) txpower = arvif->txpower; else txpower = min(txpower, arvif->txpower); } if (txpower == -1) return 0; /* txpwr is set as 2 units per dBm in FW*/ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower), ar->max_tx_power) * 2; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n", txpower / 2); if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) && ar->txpower_limit_2g != txpower) { param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G; ret = ath11k_wmi_pdev_set_param(ar, param, txpower, ar->pdev->pdev_id); if (ret) goto fail; ar->txpower_limit_2g = txpower; } if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) && ar->txpower_limit_5g != txpower) { param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G; ret = ath11k_wmi_pdev_set_param(ar, param, txpower, ar->pdev->pdev_id); if (ret) goto fail; ar->txpower_limit_5g = txpower; } return 0; fail: ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n", txpower / 2, param, ret); return ret; } static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 vdev_param, rts_cts = 0; int ret; lockdep_assert_held(&ar->conf_mutex); vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS; /* Enable RTS/CTS protection for sw retries (when legacy stations * are in BSS) or by default only for second rate series. 
* TODO: Check if we need to enable CTS 2 Self in any case */ rts_cts = WMI_USE_RTS_CTS; if (arvif->num_legacy_stations > 0) rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4; else rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4; /* Need not send duplicate param value to firmware */ if (arvif->rtscts_prot_mode == rts_cts) return 0; arvif->rtscts_prot_mode = rts_cts; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d recalc rts/cts prot %d\n", arvif->vdev_id, rts_cts); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rts_cts); if (ret) ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } static int ath11k_mac_set_kickout(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 param; int ret; ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH, ATH11K_KICKOUT_THRESHOLD, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MIN_IDLE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MAX_IDLE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, ATH11K_KEEPALIVE_MAX_UNRESPONSIVE); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } void ath11k_mac_peer_cleanup_all(struct ath11k *ar) { struct ath11k_peer *peer, *tmp; struct ath11k_base *ab = ar->ab; lockdep_assert_held(&ar->conf_mutex); mutex_lock(&ab->tbl_mtx_lock); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { ath11k_peer_rx_tid_cleanup(ar, peer); ath11k_peer_rhash_delete(ab, peer); list_del(&peer->list); kfree(peer); } spin_unlock_bh(&ab->base_lock); mutex_unlock(&ab->tbl_mtx_lock); ar->num_peers = 0; ar->num_stations = 0; } static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar) { lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; if (!wait_for_completion_timeout(&ar->vdev_setup_done, ATH11K_VDEV_SETUP_TIMEOUT_HZ)) return -ETIMEDOUT; return ar->last_wmi_vdev_start_status ? 
-EINVAL : 0; } static void ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, void *data) { struct cfg80211_chan_def **def = data; *def = &conf->def; } static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id, struct cfg80211_chan_def *chandef) { struct ieee80211_channel *channel; struct wmi_vdev_start_req_arg arg = {}; int ret; lockdep_assert_held(&ar->conf_mutex); channel = chandef->chan; arg.vdev_id = vdev_id; arg.channel.freq = channel->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); arg.channel.min_power = 0; arg.channel.max_power = channel->max_power; arg.channel.max_reg_power = channel->max_reg_power; arg.channel.max_antenna_gain = channel->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); reinit_completion(&ar->vdev_setup_done); reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_start(ar, &arg, false); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr, NULL, 0, 0); if (ret) { ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); goto vdev_stop; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i started\n", vdev_id); return 0; vdev_stop: reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n", vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n", vdev_id, ret); return ret; } return -EIO; } static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); return ret; } ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i stopped\n", ar->monitor_vdev_id); return 0; } static int ath11k_mac_monitor_vdev_create(struct ath11k *ar) { struct ath11k_pdev *pdev = ar->pdev; struct vdev_create_params param = {}; int bit, ret; - u8 tmp_addr[6] = {0}; + u8 tmp_addr[6] = {}; u16 nss; lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) return 0; if (ar->ab->free_vdev_map == 0) { ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n"); return -ENOMEM; } bit = __ffs64(ar->ab->free_vdev_map); ar->monitor_vdev_id = bit; param.if_id = ar->monitor_vdev_id; param.type = WMI_VDEV_TYPE_MONITOR; param.subtype = WMI_VDEV_SUBTYPE_NONE; param.pdev_id = pdev->pdev_id; if 
(pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; } ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param); if (ret) { ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n", ar->monitor_vdev_id, ret); ar->monitor_vdev_id = -1; return ret; } nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, WMI_VDEV_PARAM_NSS, nss); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d: %d\n", ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); goto err_vdev_del; } ret = ath11k_mac_txpower_recalc(ar); if (ret) { ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n", ar->monitor_vdev_id, ret); goto err_vdev_del; } ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->num_created_vdevs++; set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d created\n", ar->monitor_vdev_id); return 0; err_vdev_del: ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); ar->monitor_vdev_id = -1; return ret; } static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar) { int ret; unsigned long time_left; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) return 0; reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n", ar->monitor_vdev_id, ret); return ret; } time_left = wait_for_completion_timeout(&ar->vdev_delete_done, ATH11K_VDEV_DELETE_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d deleted\n", ar->monitor_vdev_id); ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id); ar->num_created_vdevs--; ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); } return ret; } static int ath11k_mac_monitor_start(struct ath11k *ar) { struct cfg80211_chan_def *chandef = NULL; int ret; lockdep_assert_held(&ar->conf_mutex); if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) return 0; ieee80211_iter_chan_contexts_atomic(ar->hw, ath11k_mac_get_any_chandef_iter, &chandef); if (!chandef) return 0; ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef); if (ret) { ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret); ath11k_mac_monitor_vdev_delete(ar); return ret; } set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); ar->num_started_vdevs++; ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false); if (ret) { ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d\n", ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor started\n"); return 0; } static int ath11k_mac_monitor_stop(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) return 0; ret = ath11k_mac_monitor_vdev_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret); return ret; }
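/* Note (a reading of the surrounding code, not wording from the original patch): the rest of ath11k_mac_monitor_stop() below undoes each step of ath11k_mac_monitor_start() -- the MONITOR_STARTED flag is cleared, the started-vdev count is decremented, and the HTT monitor ring config is reverted, this time passing true (assumed to mean reset) where the start path passed false. */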
clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); ar->num_started_vdevs--; ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true); if (ret) { ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d\n", ret); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor stopped ret %d\n", ret); return 0; } static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; struct ieee80211_vif *vif = arvif->vif; struct ieee80211_conf *conf = &ar->hw->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; int ret; int timeout; bool enable_ps; lockdep_assert_held(&arvif->ar->conf_mutex); if (arvif->vif->type != NL80211_IFTYPE_STATION) return 0; enable_ps = arvif->ps; if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; timeout = conf->dynamic_ps_timeout; if (timeout == 0) { /* firmware doesn't like 0 */ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; } ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, timeout); if (ret) { ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", arvif->vdev_id, ret); return ret; } } else { psmode = WMI_STA_PS_MODE_DISABLED; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d psmode %s\n", arvif->vdev_id, psmode ? "enable" : "disable"); ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); if (ret) { ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", psmode, arvif->vdev_id, ret); return ret; } return 0; } static int ath11k_mac_config_ps(struct ath11k *ar) { struct ath11k_vif *arvif; int ret = 0; lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath11k_mac_vif_setup_ps(arvif); if (ret) { ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret); break; } } return ret; } -static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +static int ath11k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed) { struct ath11k *ar = hw->priv; struct ieee80211_conf *conf = &hw->conf; int ret = 0; mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) { set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) goto out; ret = ath11k_mac_monitor_vdev_create(ar); if (ret) { ath11k_warn(ar->ab, "failed to create monitor vdev: %d\n", ret); goto out; } ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor: %d\n", ret); goto err_mon_del; } } else { clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags); if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) goto out; ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor: %d\n", ret); goto out; } ret = ath11k_mac_monitor_vdev_delete(ar); if (ret) { ath11k_warn(ar->ab, "failed to delete monitor vdev: %d\n", ret); goto out; } } } out: mutex_unlock(&ar->conf_mutex); return ret; err_mon_del: ath11k_mac_monitor_vdev_delete(ar); mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_setup_nontx_vif_rsnie(struct ath11k_vif *arvif, bool tx_arvif_rsnie_present, const u8 *profile, u8 profile_len) { if (cfg80211_find_ie(WLAN_EID_RSN, profile, profile_len)) { arvif->rsnie_present = true; } else if (tx_arvif_rsnie_present) { int i; u8 nie_len; const u8 *nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE, profile, profile_len); if (!nie) return; nie_len =
nie[1]; nie += 2; for (i = 0; i < nie_len; i++) { if (nie[i] == WLAN_EID_RSN) { arvif->rsnie_present = false; break; } } } } static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif, struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ieee80211_mgmt *mgmt; const u8 *ies, *profile, *next_profile; int ies_len; ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); mgmt = (struct ieee80211_mgmt *)bcn->data; ies += sizeof(mgmt->u.beacon); ies_len = skb_tail_pointer(bcn) - ies; ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len); arvif->rsnie_present = tx_arvif->rsnie_present; while (ies) { u8 mbssid_len; ies_len -= (2 + ies[1]); mbssid_len = ies[1] - 1; profile = &ies[3]; while (mbssid_len) { u8 profile_len; profile_len = profile[1]; next_profile = profile + (2 + profile_len); mbssid_len -= (2 + profile_len); profile += 2; profile_len -= (2 + profile[1]); profile += (2 + profile[1]); /* nontx capabilities */ profile_len -= (2 + profile[1]); profile += (2 + profile[1]); /* SSID */ if (profile[2] == arvif->vif->bss_conf.bssid_index) { profile_len -= 5; profile = profile + 5; ath11k_mac_setup_nontx_vif_rsnie(arvif, tx_arvif->rsnie_present, profile, profile_len); return true; } profile = next_profile; } ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile, ies_len); } return false; } static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ath11k *ar = arvif->ar; struct ieee80211_mgmt *mgmt; const u8 *p2p_ie; int ret; mgmt = (void *)bcn->data; p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, mgmt->u.beacon.variable, bcn->len - (mgmt->u.beacon.variable - bcn->data)); if (!p2p_ie) return -ENOENT; ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); if (ret) { ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return ret; } static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, u8 oui_type, size_t ie_offset) { size_t len; const u8 *next, *end; u8 *ie; if (WARN_ON(skb->len < ie_offset)) return -EINVAL; ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, skb->data + ie_offset, skb->len - ie_offset); if (!ie) return -ENOENT; len = ie[1] + 2; end = skb->data + skb->len; next = ie + len; if (WARN_ON(next > end)) return -EINVAL; memmove(ie, next, end - next); skb_trim(skb, skb->len - len); return 0; } static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif, struct sk_buff *bcn) { struct ath11k_base *ab = arvif->ar->ab; struct ieee80211_mgmt *mgmt; int ret = 0; u8 *ies; ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); mgmt = (struct ieee80211_mgmt *)bcn->data; ies += sizeof(mgmt->u.beacon); if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; else arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; else arvif->wpaie_present = false; if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) return ret; ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn); if (ret) { ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n", ret); return ret; } /* P2P IE is inserted by firmware automatically (as * configured above) so remove it from the base beacon * template to avoid duplicate P2P IEs in beacon frames. 
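* (Background, an inference from the ath10k precedent rather than something stated here: keeping the P2P IE under firmware ownership lets it refresh the Notice of Absence attribute in beacons without host involvement.)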
*/ ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, offsetof(struct ieee80211_mgmt, u.beacon.variable)); if (ret) { ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n", ret); return ret; } return ret; } static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif) { struct ieee80211_bss_conf *link_conf, *tx_bss_conf; lockdep_assert_wiphy(arvif->ar->hw->wiphy); link_conf = &arvif->vif->bss_conf; tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf); if (tx_bss_conf) return ath11k_vif_to_arvif(tx_bss_conf->vif); return NULL; } static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif, struct ath11k_vif *tx_arvif) { struct ieee80211_ema_beacons *beacons; int ret = 0; bool nontx_vif_params_set = false; u32 params = 0; u8 i = 0; beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, tx_arvif->vif, 0); if (!beacons || !beacons->cnt) { ath11k_warn(arvif->ar->ab, "failed to get ema beacon templates from mac80211\n"); return -EPERM; } if (tx_arvif == arvif) { if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb)) return -EINVAL; } else { arvif->wpaie_present = tx_arvif->wpaie_present; } for (i = 0; i < beacons->cnt; i++) { if (tx_arvif != arvif && !nontx_vif_params_set) nontx_vif_params_set = ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, beacons->bcn[i].skb); params = beacons->cnt; params |= (i << WMI_EMA_TMPL_IDX_SHIFT); params |= ((!i ? 1 : 0) << WMI_EMA_FIRST_TMPL_SHIFT); params |= ((i + 1 == beacons->cnt ? 1 : 0) << WMI_EMA_LAST_TMPL_SHIFT); ret = ath11k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id, &beacons->bcn[i].offs, beacons->bcn[i].skb, params); if (ret) { ath11k_warn(tx_arvif->ar->ab, "failed to set ema beacon template id %i error %d\n", i, ret); break; } } ieee80211_beacon_free_ema_list(beacons); if (tx_arvif != arvif && !nontx_vif_params_set) return -EINVAL; /* Profile not found in the beacons */ return ret; } static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif, struct ath11k_vif *tx_arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ieee80211_hw *hw = ar->hw; struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn; int ret; if (tx_arvif != arvif) { ar = tx_arvif->ar; ab = ar->ab; hw = ar->hw; vif = tx_arvif->vif; } bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); if (!bcn) { ath11k_warn(ab, "failed to get beacon template from mac80211\n"); return -EPERM; } if (tx_arvif == arvif) { if (ath11k_mac_set_vif_params(tx_arvif, bcn)) return -EINVAL; } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) { return -EINVAL; } ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0); kfree_skb(bcn); if (ret) ath11k_warn(ab, "failed to submit beacon template command: %d\n", ret); return ret; } static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) { struct ieee80211_vif *vif = arvif->vif; struct ath11k_vif *tx_arvif; if (arvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; /* The target does not expect beacon templates for non-transmitting * interfaces that are already up; sending one causes a firmware crash.
*/ tx_arvif = ath11k_mac_get_tx_arvif(arvif); if (tx_arvif) { if (arvif != tx_arvif && arvif->is_up) return 0; if (vif->bss_conf.ema_ap) return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif); } else { tx_arvif = arvif; } return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif); } void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) { struct ieee80211_vif *vif = arvif->vif; if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent) return; if (vif->bss_conf.color_change_active && ieee80211_beacon_cntdwn_is_complete(vif, 0)) { arvif->bcca_zero_sent = true; ieee80211_color_change_finish(vif, 0); return; } arvif->bcca_zero_sent = false; if (vif->bss_conf.color_change_active) ieee80211_beacon_update_cntdwn(vif, 0); ath11k_mac_setup_bcn_tmpl(arvif); } static void ath11k_control_beaconing(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { struct ath11k *ar = arvif->ar; struct ath11k_vif *tx_arvif; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); if (!info->enable_beacon) { ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n", arvif->vdev_id, ret); arvif->is_up = false; return; } /* Install the beacon template to the FW */ ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) { ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n", ret); return; } arvif->aid = 0; ether_addr_copy(arvif->bssid, info->bssid); tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? tx_arvif->bssid : NULL, info->bssid_index, 1 << info->bssid_indicator); if (ret) { ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); return; } arvif->is_up = true; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up\n", arvif->vdev_id); } static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); if (vif->type != NL80211_IFTYPE_STATION) return; if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) return; cancel_delayed_work(&arvif->connection_loss_work); } void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb) { ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_handle_beacon_iter, skb); } static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = arvif->ar; struct ieee80211_hw *hw = ar->hw; if (arvif->vdev_id != *vdev_id) return; if (!arvif->is_up) return; ieee80211_beacon_loss(vif); /* Firmware doesn't report beacon loss events repeatedly. If AP probe * (done by mac80211) succeeds but beacons do not resume then it * doesn't make sense to continue operation. Queue connection loss work * which can be cancelled when beacon is received. 
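* (Concretely: ieee80211_beacon_loss() prompts mac80211 to probe the AP, and ath11k_mac_handle_beacon_iter() above cancels connection_loss_work as soon as a beacon is received, so ieee80211_connection_loss() in ath11k_mac_vif_sta_connection_loss_work() only fires if beacons really stay away.)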
*/ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, ATH11K_CONNECTION_LOSS_HZ); } void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id) { ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_handle_beacon_miss_iter, &vdev_id); } static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work) { struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, connection_loss_work.work); struct ieee80211_vif *vif = arvif->vif; if (!arvif->is_up) return; ieee80211_connection_loss(vif); } static void ath11k_peer_assoc_h_basic(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 aid; lockdep_assert_held(&ar->conf_mutex); if (vif->type == NL80211_IFTYPE_STATION) aid = vif->cfg.aid; else aid = sta->aid; ether_addr_copy(arg->peer_mac, sta->addr); arg->vdev_id = arvif->vdev_id; arg->peer_associd = aid; arg->auth_flag = true; /* TODO: STA WAR in ath10k for listen interval required? */ arg->peer_listen_intval = ar->hw->conf.listen_interval; arg->peer_nss = 1; arg->peer_caps = vif->bss_conf.assoc_capability; } static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const u8 *rsnie = NULL; const u8 *wpaie = NULL; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); if (arvif->rsnie_present || arvif->wpaie_present) { arg->need_ptk_4_way = true; if (arvif->wpaie_present) arg->need_gtk_2_way = true; } else if (bss) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); ies = rcu_dereference(bss->ies); wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies->data, ies->len); rcu_read_unlock(); cfg80211_put_bss(ar->hw->wiphy, bss); } /* FIXME: is keying this off the RSN IE/WPA IE the correct approach? */ if (rsnie || wpaie) { ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "%s: rsn ie found\n", __func__); arg->need_ptk_4_way = true; } if (wpaie) { ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "%s: wpa ie found\n", __func__); arg->need_gtk_2_way = true; } if (sta->mfp) { /* TODO: need to check whether FW supports PMF? */ arg->is_pmf_enabled = true; } /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req?
*/ } static void ath11k_peer_assoc_h_rates(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; const struct ieee80211_rate *rates; enum nl80211_band band; u32 ratemask; u8 rate; int i; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; sband = ar->hw->wiphy->bands[band]; ratemask = sta->deflink.supp_rates[band]; ratemask &= arvif->bitrate_mask.control[band].legacy; rates = sband->bitrates; rateset->num_rates = 0; for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { if (!(ratemask & 1)) continue; rate = ath11k_mac_bitrate_to_rate(rates->bitrate); rateset->rates[rateset->num_rates] = rate; rateset->num_rates++; } } static bool ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask) { int nss; for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) if (ht_mcs_mask[nss]) return false; return true; } static bool ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask) { int nss; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) if (vht_mcs_mask[nss]) return false; return true; } static void ath11k_peer_assoc_h_ht(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; int i, n; u8 max_nss; u32 stbc; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!ht_cap->ht_supported) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) return; arg->ht_flag = true; arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ht_cap->ampdu_factor)) - 1; arg->peer_mpdu_density = ath11k_parse_mpdudensity(ht_cap->ampdu_density); arg->peer_ht_caps = ht_cap->cap; arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG; if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) arg->ldpc_flag = true; if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { arg->bw_40 = true; arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; } /* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20 * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, we reset * both flags if the guard interval is the default GI */ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI) arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40); if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG; } if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG; arg->stbc_flag = true; } if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S; arg->peer_rate_caps |= stbc; arg->stbc_flag = true; } if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG; else if (ht_cap->mcs.rx_mask[1]) arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG; for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && (ht_mcs_mask[i / 8] &
BIT(i % 8))) { max_nss = (i / 8) + 1; arg->peer_ht_rates.rates[n++] = i; } /* This is a workaround for HT-enabled STAs which break the spec * and have no HT capabilities RX mask (no HT RX MCS map). * * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. * * Firmware asserts if such situation occurs. */ if (n == 0) { arg->peer_ht_rates.num_rates = 8; for (i = 0; i < arg->peer_ht_rates.num_rates; i++) arg->peer_ht_rates.rates[i] = i; } else { arg->peer_ht_rates.num_rates = n; arg->peer_nss = min(sta->deflink.rx_nss, max_nss); } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ht peer %pM mcs cnt %d nss %d\n", arg->peer_mac, arg->peer_ht_rates.num_rates, arg->peer_nss); } static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) { switch ((mcs_map >> (2 * nss)) & 0x3) { case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; } return 0; } static u16 ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set, const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) { int idx_limit; int nss; u16 mcs_map; u16 mcs; for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & vht_mcs_limit[nss]; if (mcs_map) idx_limit = fls(mcs_map) - 1; else idx_limit = -1; switch (idx_limit) { case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; break; case 8: mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; break; case 9: mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; break; default: WARN_ON(1); fallthrough; case -1: mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; break; } tx_mcs_set &= ~(0x3 << (nss * 2)); tx_mcs_set |= mcs << (nss * 2); } return tx_mcs_set; } static u8 ath11k_get_nss_160mhz(struct ath11k *ar, u8 max_nss) { u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info; u8 max_sup_nss = 0; switch (nss_ratio_info) { case WMI_NSS_RATIO_1BY2_NSS: max_sup_nss = max_nss >> 1; break; case WMI_NSS_RATIO_3BY4_NSS: ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n"); break; case WMI_NSS_RATIO_1_NSS: max_sup_nss = max_nss; break; case WMI_NSS_RATIO_2_NSS: ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n"); break; default: ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n", nss_ratio_info); break; } return max_sup_nss; } static void ath11k_peer_assoc_h_vht(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; u16 *vht_mcs_mask; u8 ampdu_factor; u8 max_nss, vht_mcs; int i, vht_nss, nss_idx; bool user_rate_valid = true; u32 rx_nss, tx_nss, nss_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!vht_cap->vht_supported) return; band = def.chan->band; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) return; arg->vht_flag = true; /* TODO: similar flags required? */ arg->vht_capable = true; if (def.chan->band == NL80211_BAND_2GHZ) arg->vht_ng_flag = true; arg->peer_vht_caps = vht_cap->cap; ampdu_factor = (vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to * zero in VHT IE. Using it would result in degraded throughput. 
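* Illustration (assuming mac80211's IEEE80211_HT_MAX_AMPDU_FACTOR of 13): taking a factor of 0 at face value would cap the peer at (1 << 13) - 1 = 8191 octets per A-MPDU, which is why the max() below keeps the larger HT-derived value.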
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep * it if VHT max_mpdu is smaller. */ arg->peer_max_mpdu = max(arg->peer_max_mpdu, (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1); if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); if (vht_nss > sta->deflink.rx_nss) { user_rate_valid = false; for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (vht_mcs_mask[nss_idx]) { user_rate_valid = true; break; } } } if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting vht range mcs value to peer supported nss %d for peer %pM\n", sta->deflink.rx_nss, sta->addr); vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1]; } /* Calculate peer NSS capability from VHT capabilities if STA * supports VHT. */ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) { vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> (2 * i) & 3; if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED && vht_mcs_mask[i]) max_nss = i + 1; } arg->peer_nss = min(sta->deflink.rx_nss, max_nss); arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit( __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); /* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by default. * MCS rates 10 and 11 are not part of the 11ac standard, * so explicitly disable them in 11ac mode. */ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK; arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11; if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) == IEEE80211_VHT_MCS_NOT_SUPPORTED) arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; /* TODO: Check */ arg->tx_max_mcs_nss = 0xFF; if (arg->peer_phymode == MODE_11AC_VHT160 || arg->peer_phymode == MODE_11AC_VHT80_80) { tx_nss = ath11k_get_nss_160mhz(ar, max_nss); rx_nss = min(arg->peer_nss, tx_nss); arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; if (!rx_nss) { ath11k_warn(ar->ab, "invalid max_nss\n"); return; } if (arg->peer_phymode == MODE_11AC_VHT160) nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); else nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); arg->peer_bw_rxnss_override |= nss_160; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n", sta->addr, arg->peer_max_mpdu, arg->peer_flags, arg->peer_bw_rxnss_override); } static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss) { switch ((mcs_map >> (2 * nss)) & 0x3) { case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1; case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1; case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1; } return 0; } static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set, const u16 he_mcs_limit[NL80211_HE_NSS_MAX]) { int idx_limit; int nss; u16 mcs_map; u16 mcs; for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) { mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) & he_mcs_limit[nss]; if (mcs_map) idx_limit = fls(mcs_map) - 1; else idx_limit = -1; switch (idx_limit) { case 0 ...
7: mcs = IEEE80211_HE_MCS_SUPPORT_0_7; break; case 8: case 9: mcs = IEEE80211_HE_MCS_SUPPORT_0_9; break; case 10: case 11: mcs = IEEE80211_HE_MCS_SUPPORT_0_11; break; default: WARN_ON(1); fallthrough; case -1: mcs = IEEE80211_HE_MCS_NOT_SUPPORTED; break; } tx_mcs_set &= ~(0x3 << (nss * 2)); tx_mcs_set |= mcs << (nss * 2); } return tx_mcs_set; } static bool ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask) { int nss; for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) if (he_mcs_mask[nss]) return false; return true; } static void ath11k_peer_assoc_h_he(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; enum nl80211_band band; u16 he_mcs_mask[NL80211_HE_NSS_MAX]; u8 max_nss, he_mcs; u16 he_tx_mcs = 0, v = 0; int i, he_nss, nss_idx; bool user_rate_valid = true; u32 rx_nss, tx_nss, nss_160; u8 ampdu_factor, rx_mcs_80, rx_mcs_160; u16 mcs_160_map, mcs_80_map; bool support_160; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; if (!he_cap->has_he) return; band = def.chan->band; memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs, sizeof(he_mcs_mask)); if (ath11k_peer_assoc_h_he_masked(he_mcs_mask)) return; arg->he_flag = true; support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G); /* The supported HE-MCS and NSS set of the peer's he_cap is the intersection with our own he_cap */ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); /* Initialize rx_mcs_160 to 9 which is an invalid value */ rx_mcs_160 = 9; if (support_160) { for (i = 7; i >= 0; i--) { u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3; if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { rx_mcs_160 = i + 1; break; } } } /* Initialize rx_mcs_80 to 9 which is an invalid value */ rx_mcs_80 = 9; for (i = 7; i >= 0; i--) { u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3; if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) { rx_mcs_80 = i + 1; break; } } if (support_160) max_nss = min(rx_mcs_80, rx_mcs_160); else max_nss = rx_mcs_80; arg->peer_nss = min(sta->deflink.rx_nss, max_nss); memcpy_and_pad(&arg->peer_he_cap_macinfo, sizeof(arg->peer_he_cap_macinfo), he_cap->he_cap_elem.mac_cap_info, sizeof(he_cap->he_cap_elem.mac_cap_info), 0); memcpy_and_pad(&arg->peer_he_cap_phyinfo, sizeof(arg->peer_he_cap_phyinfo), he_cap->he_cap_elem.phy_cap_info, sizeof(he_cap->he_cap_elem.phy_cap_info), 0); arg->peer_he_ops = vif->bss_conf.he_oper.params; /* the topmost byte is used to indicate BSS color info */ arg->peer_he_ops &= 0xffffff; /* As per section 26.6.1 of 11ax Draft 5.0, if the Max AMPDU Exponent Extension * in the HE cap is zero, use arg->peer_max_mpdu as calculated while parsing * VHT caps (if VHT caps are present) or HT caps (if VHT caps are not present). * * For a non-zero Max AMPDU Exponent Extension in HE MAC caps, * if an HE STA sends both VHT and HE cap IEs in the assoc request, use * MAX_AMPDU_LEN_FACTOR as 20 to calculate the max_ampdu length. * If an HE STA sends HE and HT caps but no VHT cap in the assoc * request, use MAX_AMPDU_LEN_FACTOR as 16 to calculate the max_ampdu * length.
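* Worked example (illustrative; assumes mac80211's IEEE80211_HE_VHT_MAX_AMPDU_FACTOR = 20 and IEEE80211_HE_HT_MAX_AMPDU_FACTOR = 16): with ampdu_factor = 2, a VHT-capable HE STA gets peer_max_mpdu = (1 << (20 + 2)) - 1 = 4194303 octets, whereas an HT-only HE STA gets (1 << (16 + 2)) - 1 = 262143 octets.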
*/ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); if (ampdu_factor) { if (sta->deflink.vht_cap.vht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; else if (sta->deflink.ht_cap.ht_supported) arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } if (he_cap->he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { int bit = 7; int nss, ru; arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK; arg->peer_ppet.ru_bit_mask = (he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) { for (ru = 0; ru < 4; ru++) { u32 val = 0; int i; if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0) continue; for (i = 0; i < 6; i++) { val >>= 1; val |= ((he_cap->ppe_thres[bit / 8] >> (bit % 8)) & 0x1) << 5; bit++; } arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |= val << (ru * 6); } } } if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES) arg->twt_responder = true; if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) arg->twt_requester = true; he_nss = ath11k_mac_max_he_nss(he_mcs_mask); if (he_nss > sta->deflink.rx_nss) { user_rate_valid = false; for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) { if (he_mcs_mask[nss_idx]) { user_rate_valid = true; break; } } } if (!user_rate_valid) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting he range mcs value to peer supported nss %d for peer %pM\n", sta->deflink.rx_nss, sta->addr); he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1]; } switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_160: if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v; arg->peer_he_mcs_count++; he_tx_mcs = v; } v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v; arg->peer_he_mcs_count++; if (!he_tx_mcs) he_tx_mcs = v; fallthrough; default: v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask); arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v; arg->peer_he_mcs_count++; if (!he_tx_mcs) he_tx_mcs = v; break; } /* Calculate peer NSS capability from HE capabilities if STA * supports HE. */ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) { he_mcs = he_tx_mcs >> (2 * i) & 3; /* In case of fixed rates, MCS Range in he_tx_mcs might have * unsupported range, with he_mcs_mask set, so check either of them * to find nss. 
*/ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED || he_mcs_mask[i]) max_nss = i + 1; } arg->peer_nss = min(sta->deflink.rx_nss, max_nss); if (arg->peer_phymode == MODE_11AX_HE160 || arg->peer_phymode == MODE_11AX_HE80_80) { tx_nss = ath11k_get_nss_160mhz(ar, max_nss); rx_nss = min(arg->peer_nss, tx_nss); arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE; if (!rx_nss) { ath11k_warn(ar->ab, "invalid max_nss\n"); return; } if (arg->peer_phymode == MODE_11AX_HE160) nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1); else nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1); arg->peer_bw_rxnss_override |= nss_160; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "he peer %pM nss %d mcs cnt %d nss_override 0x%x\n", sta->addr, arg->peer_nss, arg->peer_he_mcs_count, arg->peer_bw_rxnss_override); } static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; struct cfg80211_chan_def def; enum nl80211_band band; u8 ampdu_factor; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa) return; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) arg->bw_40 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) arg->bw_80 = true; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) arg->bw_160 = true; arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa); arg->peer_mpdu_density = ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START, arg->peer_he_caps_6ghz)); /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz * Band Capabilities element in the 6 GHz band. * * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability. 
ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK, he_cap->he_cap_elem.mac_cap_info[3]) + FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, arg->peer_he_caps_6ghz); arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + ampdu_factor)) - 1; } static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, struct peer_assoc_params *arg) { const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa) return; if (ht_cap->ht_supported) { smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; } else { smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_SM_PS); } switch (smps) { case WLAN_HT_CAP_SM_PS_STATIC: arg->static_mimops_flag = true; break; case WLAN_HT_CAP_SM_PS_DYNAMIC: arg->dynamic_mimops_flag = true; break; case WLAN_HT_CAP_SM_PS_DISABLED: arg->spatial_mux_flag = true; break; default: break; } } static void ath11k_peer_assoc_h_qos(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) { /* TODO: Check WME vs QoS */ arg->is_wme_set = true; arg->qos_flag = true; } if (sta->wme && sta->uapsd_queues) { /* TODO: Check WME vs QoS */ arg->is_wme_set = true; arg->apsd_flag = true; arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG; } break; case WMI_VDEV_TYPE_STA: if (sta->wme) { arg->is_wme_set = true; arg->qos_flag = true; } break; default: break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM qos %d\n", sta->addr, arg->qos_flag); } static int ath11k_peer_assoc_qos_ap(struct ath11k *ar, struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ap_ps_params params; u32 max_sp; u32 uapsd; int ret; lockdep_assert_held(&ar->conf_mutex); params.vdev_id = arvif->vdev_id; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); uapsd = 0; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; max_sp = 0; if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) max_sp = sta->max_sp; params.param = WMI_AP_PS_PEER_PARAM_UAPSD; params.value = uapsd; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params); if (ret) goto err; params.param = WMI_AP_PS_PEER_PARAM_MAX_SP; params.value = max_sp; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params); if (ret) goto err; /* TODO revisit during testing */ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; params.value = DISABLE_SIFS_RESPONSE_TRIGGER; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params); if (ret) goto err; params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; params.value = DISABLE_SIFS_RESPONSE_TRIGGER; ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params); if (ret) goto err; return 0; err: ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n", params.param, arvif->vdev_id, ret); return ret; } static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta
*sta) { return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> ATH11K_MAC_FIRST_OFDM_RATE_IDX; } static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, struct ieee80211_sta *sta) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { switch (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: return MODE_11AC_VHT160; case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: return MODE_11AC_VHT80_80; default: /* not sure if this is a valid case? */ return MODE_11AC_VHT160; } } if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AC_VHT80; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AC_VHT40; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AC_VHT20; return MODE_UNKNOWN; } static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar, struct ieee80211_sta *sta) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return MODE_11AX_HE160; else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return MODE_11AX_HE80_80; /* not sure if this is a valid case? */ return MODE_11AX_HE160; } if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) return MODE_11AX_HE80; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) return MODE_11AX_HE40; if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) return MODE_11AX_HE20; return MODE_UNKNOWN; } static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; switch (band) { case NL80211_BAND_2GHZ: if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) phymode = MODE_11AX_HE80_2G; else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AX_HE40_2G; else phymode = MODE_11AX_HE20_2G; } else if (sta->deflink.vht_cap.vht_supported && !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11AC_VHT40; else phymode = MODE_11AC_VHT20; } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) phymode = MODE_11NG_HT40; else phymode = MODE_11NG_HT20; } else if (ath11k_mac_sta_has_ofdm_only(sta)) { phymode = MODE_11G; } else { phymode = MODE_11B; } break; case NL80211_BAND_5GHZ: case NL80211_BAND_6GHZ: /* Check HE first */ if (sta->deflink.he_cap.has_he && !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) { phymode = ath11k_mac_get_phymode_he(ar, sta); } else if (sta->deflink.vht_cap.vht_supported && !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { phymode = ath11k_mac_get_phymode_vht(ar, sta); } else if (sta->deflink.ht_cap.ht_supported && !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) phymode = MODE_11NA_HT40; else 
phymode = MODE_11NA_HT20; } else { phymode = MODE_11A; } break; default: break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM phymode %s\n", sta->addr, ath11k_wmi_phymode_str(phymode)); arg->peer_phymode = phymode; WARN_ON(phymode == MODE_UNKNOWN); } static void ath11k_peer_assoc_prepare(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct peer_assoc_params *arg, bool reassoc) { struct ath11k_sta *arsta; lockdep_assert_held(&ar->conf_mutex); arsta = ath11k_sta_to_arsta(sta); memset(arg, 0, sizeof(*arg)); reinit_completion(&ar->peer_assoc_done); arg->peer_new_assoc = !reassoc; ath11k_peer_assoc_h_basic(ar, vif, sta, arg); ath11k_peer_assoc_h_crypto(ar, vif, sta, arg); ath11k_peer_assoc_h_rates(ar, vif, sta, arg); ath11k_peer_assoc_h_phymode(ar, vif, sta, arg); ath11k_peer_assoc_h_ht(ar, vif, sta, arg); ath11k_peer_assoc_h_vht(ar, vif, sta, arg); ath11k_peer_assoc_h_he(ar, vif, sta, arg); ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg); ath11k_peer_assoc_h_qos(ar, vif, sta, arg); ath11k_peer_assoc_h_smps(sta, arg); arsta->peer_nss = arg->peer_nss; /* TODO: amsdu_disable req? */ } static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif, const u8 *addr, const struct ieee80211_sta_ht_cap *ht_cap, u16 he_6ghz_capa) { int smps; if (!ht_cap->ht_supported && !he_6ghz_capa) return 0; if (ht_cap->ht_supported) { smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; } else { smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa); } if (smps >= ARRAY_SIZE(ath11k_smps_map)) return -EINVAL; return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, ath11k_smps_map[smps]); } static bool ath11k_mac_set_he_txbf_conf(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; u32 param, value; int ret; if (!arvif->vif->bss_conf.he_support) return true; param = WMI_VDEV_PARAM_SET_HEMU_MODE; value = 0; if (arvif->vif->bss_conf.he_su_beamformer) { value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE); if (arvif->vif->bss_conf.he_mu_beamformer && arvif->vdev_type == WMI_VDEV_TYPE_AP) value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE); } if (arvif->vif->type != NL80211_IFTYPE_MESH_POINT) { value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) | FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE); if (arvif->vif->bss_conf.he_full_ul_mumimo) value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE); if (arvif->vif->bss_conf.he_su_beamformee) value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); } ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n", arvif->vdev_id, ret); return false; } param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE; value = FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) | FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE, HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n", arvif->vdev_id, ret); return false; } return true; } static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta_he_cap *he_cap) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct ieee80211_he_cap_elem he_cap_elem = {0}; + struct ieee80211_he_cap_elem he_cap_elem = {}; struct ieee80211_sta_he_cap *cap_band = NULL; struct cfg80211_chan_def def; u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE; u32 hemode = 0; int ret; 
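/* Note (a summary of the code below, not wording from the original patch): for a station vif this intersects our own per-band iftype HE capability (cap_band) with the AP's he_cap and encodes the result as a WMI_VDEV_PARAM_SET_HEMU_MODE bitmap, so SU/MU beamformee and UL MU-MIMO are only enabled when both ends advertise support. */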
if (!vif->bss_conf.he_support) return true; if (vif->type != NL80211_IFTYPE_STATION) return false; if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return false; if (def.chan->band == NL80211_BAND_2GHZ) cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap; else cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap; memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem)); if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) { if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE); } if (vif->type != NL80211_IFTYPE_MESH_POINT) { hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) | FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE); if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info)) if (HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info)) hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE); if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE); if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode)) hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE); } ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode); if (ret) { ath11k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n", hemode, ret); return false; } return true; } static void ath11k_bss_assoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; struct ieee80211_sta *ap_sta; struct ath11k_peer *peer; bool is_auth = false; struct ieee80211_sta_he_cap he_cap; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i assoc bssid %pM aid %d\n", arvif->vdev_id, arvif->bssid, arvif->aid); rcu_read_lock(); ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!ap_sta) { ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); rcu_read_unlock(); return; } /* he_cap here is updated at assoc success for sta mode only */ he_cap = ap_sta->deflink.he_cap; ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); rcu_read_unlock(); if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) { ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n", arvif->vdev_id, bss_conf->bssid); return; } peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); return; } if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); return; } ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ap_sta->deflink.ht_cap, le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return; } WARN_ON(arvif->is_up); arvif->aid = vif->cfg.aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid, NULL, 0, 0); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); return; } arvif->is_up = true; arvif->rekey_data.enable_offload = false; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d 
up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); if (peer && peer->is_authorized) is_auth = true; spin_unlock_bh(&ar->ab->base_lock); if (is_auth) { ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, arvif->vdev_id, WMI_PEER_AUTHORIZE, 1); if (ret) ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); } ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, &bss_conf->he_obss_pd); if (ret) ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", arvif->vdev_id, ret); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_DTIM_POLICY, WMI_DTIM_POLICY_STICK); if (ret) ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n", arvif->vdev_id, ret); ath11k_mac_11d_scan_stop_all(ar->ab); } static void ath11k_bss_disassoc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i disassoc bssid %pM\n", arvif->vdev_id, arvif->bssid); ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "failed to down vdev %i: %d\n", arvif->vdev_id, ret); arvif->is_up = false; memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); cancel_delayed_work_sync(&arvif->connection_loss_work); } static u32 ath11k_mac_get_rate_hw_value(int bitrate) { u32 preamble; u16 hw_value; int rate; size_t i; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) { if (ath11k_legacy_rates[i].bitrate != bitrate) continue; hw_value = ath11k_legacy_rates[i].hw_value; rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble); return rate; } return -EINVAL; } static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, struct ieee80211_vif *vif, struct cfg80211_chan_def *def) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; int hw_rate_code; u32 vdev_param; u16 bitrate; int ret; lockdep_assert_held(&ar->conf_mutex); sband = ar->hw->wiphy->bands[def->chan->band]; basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; bitrate = sband->bitrates[basic_rate_idx].bitrate; hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate); if (hw_rate_code < 0) { ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate); return; } vdev_param = WMI_VDEV_PARAM_MGMT_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, hw_rate_code); if (ret) ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); /* For WCN6855, firmware will clear this param when vdev starts, hence * cache it here so that we can reconfigure it once vdev starts. 
*/ ar->hw_rate_code = hw_rate_code; vdev_param = WMI_VDEV_PARAM_BEACON_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, hw_rate_code); if (ret) ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { struct ath11k *ar = arvif->ar; struct sk_buff *tmpl; int ret; u32 interval; bool unsol_bcast_probe_resp_enabled = false; if (info->fils_discovery.max_interval) { interval = info->fils_discovery.max_interval; tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif); if (tmpl) ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, tmpl); } else if (info->unsol_bcast_probe_resp_interval) { unsol_bcast_probe_resp_enabled = true; interval = info->unsol_bcast_probe_resp_interval; tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw, arvif->vif); if (tmpl) ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, tmpl); } else { /* Disable */ return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false); } if (!tmpl) { ath11k_warn(ar->ab, "mac vdev %i failed to retrieve %s template\n", arvif->vdev_id, (unsol_bcast_probe_resp_enabled ? "unsolicited broadcast probe response" : "FILS discovery")); return -EPERM; } kfree_skb(tmpl); if (!ret) ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval, unsol_bcast_probe_resp_enabled); return ret; } static int ath11k_mac_config_obss_pd(struct ath11k *ar, struct ieee80211_he_obss_pd *he_obss_pd) { u32 bitmap[2], param_id, param_val, pdev_id; int ret; s8 non_srg_th = 0, srg_th = 0; pdev_id = ar->pdev->pdev_id; /* Set and enable SRG/non-SRG OBSS PD Threshold */ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD; if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id); if (ret) ath11k_warn(ar->ab, "failed to set obss_pd_threshold for pdev: %u\n", pdev_id); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n", he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset, he_obss_pd->max_offset); param_val = 0; if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) { non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD; } else { if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->non_srg_max_offset); else non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD; param_val |= ATH11K_OBSS_PD_NON_SRG_EN; } if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) { srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset; param_val |= ATH11K_OBSS_PD_SRG_EN; } if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT, ar->ab->wmi_ab.svc_map)) { param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM; param_val |= FIELD_PREP(GENMASK(15, 8), srg_th); } else { non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR; /* SRG not supported and threshold in dB */ param_val &= ~(ATH11K_OBSS_PD_SRG_EN | ATH11K_OBSS_PD_THRESHOLD_IN_DBM); } param_val |= (non_srg_th & GENMASK(7, 0)); ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set obss_pd_threshold for pdev: %u\n", pdev_id); return ret; } /* Enable OBSS PD for all access categories */ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC; param_val = 0xf; ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set obss_pd_per_ac for pdev: %u\n", pdev_id); return ret; } /* Set SR Prohibit */ param_id =
WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT; param_val = !!(he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED); ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n", pdev_id); return ret; } if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT, ar->ab->wmi_ab.svc_map)) return 0; /* Set SRG BSS Color Bitmap */ memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap)); ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set bss_color_bitmap for pdev: %u\n", pdev_id); return ret; } /* Set SRG Partial BSSID Bitmap */ memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap)); ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set partial_bssid_bitmap for pdev: %u\n", pdev_id); return ret; } memset(bitmap, 0xff, sizeof(bitmap)); /* Enable all BSS Colors for SRG */ ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set srg_color_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all partial BSSID mask for SRG */ ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set srg_bssid_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all BSS Colors for non-SRG */ ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set non_srg_color_en_bitmap pdev: %u\n", pdev_id); return ret; } /* Enable all partial BSSID mask for non-SRG */ ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap); if (ret) { ath11k_warn(ar->ab, "failed to set non_srg_bssid_en_bitmap pdev: %u\n", pdev_id); return ret; } return 0; } static bool ath11k_mac_supports_station_tpc(struct ath11k *ar, struct ath11k_vif *arvif, const struct cfg80211_chan_def *chandef) { return ath11k_wmi_supports_6ghz_cc_ext(ar) && test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) && arvif->vdev_type == WMI_VDEV_TYPE_STA && arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE && chandef->chan && chandef->chan->band == NL80211_BAND_6GHZ; } static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; u32 param_id, param_value; enum nl80211_band band; u32 vdev_param; int mcast_rate; u32 preamble; u16 hw_value; u16 bitrate; int ret = 0; u8 rateidx; u32 rate, param; u32 ipv4_cnt; mutex_lock(&ar->conf_mutex); if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, arvif->beacon_interval); if (ret) ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Beacon interval: %d set for VDEV: %d\n", arvif->beacon_interval, arvif->vdev_id); } if (changed & BSS_CHANGED_BEACON) { param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; param_value = WMI_BEACON_STAGGERED_MODE; ret = ath11k_wmi_pdev_set_param(ar, param_id, param_value, ar->pdev->pdev_id); if (ret) ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set staggered beacon mode for VDEV: %d\n", arvif->vdev_id); if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) 
{ ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) ath11k_warn(ar->ab, "failed to update bcn template: %d\n", ret); } if (arvif->bcca_zero_sent) arvif->do_not_send_tmpl = true; else arvif->do_not_send_tmpl = false; if (vif->bss_conf.he_support) { ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_BA_MODE, WMI_BA_MODE_BUFFER_SIZE_256); if (ret) ath11k_warn(ar->ab, "failed to set BA BUFFER SIZE 256 for vdev: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set BA BUFFER SIZE 256 for VDEV: %d\n", arvif->vdev_id); } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { arvif->dtim_period = info->dtim_period; param_id = WMI_VDEV_PARAM_DTIM_PERIOD; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, arvif->dtim_period); if (ret) ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n", arvif->vdev_id, ret); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "DTIM period: %d set for VDEV: %d\n", arvif->dtim_period, arvif->vdev_id); } if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { arvif->u.ap.ssid_len = vif->cfg.ssid_len; if (vif->cfg.ssid_len) memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); arvif->u.ap.hidden_ssid = info->hidden_ssid; } if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ether_addr_copy(arvif->bssid, info->bssid); if (changed & BSS_CHANGED_BEACON_ENABLED) { if (info->enable_beacon) ath11k_mac_set_he_txbf_conf(arvif); ath11k_control_beaconing(arvif, info); if (arvif->is_up && vif->bss_conf.he_support && vif->bss_conf.he_oper.params) { param_id = WMI_VDEV_PARAM_HEOPS_0_31; param_value = vif->bss_conf.he_oper.params; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "he oper param: %x set for VDEV: %d\n", param_value, arvif->vdev_id); if (ret) ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", param_value, arvif->vdev_id, ret); } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { u32 cts_prot; cts_prot = !!(info->use_cts_prot); param_id = WMI_VDEV_PARAM_PROTECTION_MODE; if (arvif->is_started) { ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, cts_prot); if (ret) ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n", cts_prot, arvif->vdev_id); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n"); } } if (changed & BSS_CHANGED_ERP_SLOT) { u32 slottime; if (info->use_short_slot) slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ param_id = WMI_VDEV_PARAM_SLOT_TIME; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, slottime); if (ret) ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set slottime: %d for VDEV: %d\n", slottime, arvif->vdev_id); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { u32 preamble; if (info->use_short_preamble) preamble = WMI_VDEV_PREAMBLE_SHORT; else preamble = WMI_VDEV_PREAMBLE_LONG; param_id = WMI_VDEV_PARAM_PREAMBLE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, preamble); if (ret) ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n", arvif->vdev_id); else ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set preamble: %d for VDEV: %d\n", preamble, arvif->vdev_id); } if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) ath11k_bss_assoc(hw, vif, info); else 
ath11k_bss_disassoc(hw, vif); } if (changed & BSS_CHANGED_TXPOWER) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n", arvif->vdev_id, info->txpower); arvif->txpower = info->txpower; ath11k_mac_txpower_recalc(ar); } if (changed & BSS_CHANGED_PS && ar->ab->hw_params.supports_sta_ps) { arvif->ps = vif->cfg.ps; ret = ath11k_mac_config_ps(ar); if (ret) ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_MCAST_RATE && !ath11k_mac_vif_chan(arvif->vif, &def)) { band = def.chan->band; mcast_rate = vif->bss_conf.mcast_rate[band]; if (mcast_rate > 0) rateidx = mcast_rate - 1; else rateidx = ffs(vif->bss_conf.basic_rates) - 1; if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX; bitrate = ath11k_legacy_rates[rateidx].bitrate; hw_value = ath11k_legacy_rates[rateidx].hw_value; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d mcast_rate %x\n", arvif->vdev_id, rate); vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) ath11k_warn(ar->ab, "failed to set mcast rate on vdev %i: %d\n", arvif->vdev_id, ret); vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) ath11k_warn(ar->ab, "failed to set bcast rate on vdev %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BASIC_RATES && !ath11k_mac_vif_chan(arvif->vif, &def)) ath11k_recalculate_mgmt_rate(ar, vif, &def); if (changed & BSS_CHANGED_TWT) { - struct wmi_twt_enable_params twt_params = {0}; + struct wmi_twt_enable_params twt_params = {}; if (info->twt_requester || info->twt_responder) { ath11k_wmi_fill_default_twt_params(&twt_params); ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params); } else { ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); } } if (changed & BSS_CHANGED_HE_OBSS_PD) ath11k_mac_config_obss_pd(ar, &info->he_obss_pd); if (changed & BSS_CHANGED_HE_BSS_COLOR) { if (vif->type == NL80211_IFTYPE_AP) { ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, info->he_bss_color.color, ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS, info->he_bss_color.enabled); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); param_id = WMI_VDEV_PARAM_BSS_COLOR; if (info->he_bss_color.enabled) param_value = info->he_bss_color.color << IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET; else param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) ath11k_warn(ar->ab, "failed to set bss color param on vdev %i: %d\n", arvif->vdev_id, ret); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "bss color param 0x%x set on vdev %i\n", param_value, arvif->vdev_id); } else if (vif->type == NL80211_IFTYPE_STATION) { ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar, arvif->vdev_id, 1); if (ret) ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n", arvif->vdev_id, ret); ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, 0, ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); } } if (changed & BSS_CHANGED_FTM_RESPONDER && arvif->ftm_responder 
!= info->ftm_responder && test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map) && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT)) { arvif->ftm_responder = info->ftm_responder; param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, arvif->ftm_responder); if (ret) ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_FILS_DISCOVERY || changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) ath11k_mac_fils_discovery(arvif, info); if (changed & BSS_CHANGED_ARP_FILTER) { ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT); memcpy(arvif->arp_ns_offload.ipv4_addr, vif->cfg.arp_addr_list, ipv4_cnt * sizeof(u32)); memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN); arvif->arp_ns_offload.ipv4_count = ipv4_cnt; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n", vif->cfg.arp_addr_cnt, vif->addr, arvif->arp_ns_offload.ipv4_addr); } mutex_unlock(&ar->conf_mutex); } void __ath11k_mac_scan_finish(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: if (ar->scan.is_roc && ar->scan.roc_notify) ieee80211_remain_on_channel_expired(ar->hw); fallthrough; case ATH11K_SCAN_STARTING: if (!ar->scan.is_roc) { struct cfg80211_scan_info info = { .aborted = ((ar->scan.state == ATH11K_SCAN_ABORTING) || (ar->scan.state == ATH11K_SCAN_STARTING)), }; ieee80211_scan_completed(ar->hw, &info); } ar->scan.state = ATH11K_SCAN_IDLE; ar->scan_channel = NULL; ar->scan.roc_freq = 0; cancel_delayed_work(&ar->scan.timeout); complete_all(&ar->scan.completed); break; } } void ath11k_mac_scan_finish(struct ath11k *ar) { spin_lock_bh(&ar->data_lock); __ath11k_mac_scan_finish(ar); spin_unlock_bh(&ar->data_lock); } static int ath11k_scan_stop(struct ath11k *ar) { struct scan_cancel_param arg = { .req_type = WLAN_SCAN_CANCEL_SINGLE, .scan_id = ATH11K_SCAN_ID, }; int ret; lockdep_assert_held(&ar->conf_mutex); /* TODO: Fill other STOP Params */ arg.pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg); if (ret) { ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret); goto out; } ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); if (ret == 0) { ath11k_warn(ar->ab, "failed to receive scan abort completion: timed out\n"); ret = -ETIMEDOUT; } else if (ret > 0) { ret = 0; } out: /* Scan state should be updated upon scan completion but in case * firmware fails to deliver the event (for whatever reason) it is * desired to clean up scan state anyway. Firmware may have just * dropped the scan completion event delivery because the transport * pipe was overflowing with data, and/or it can recover on its own * before the next scan request is submitted. */ spin_lock_bh(&ar->data_lock); if (ar->scan.state != ATH11K_SCAN_IDLE) __ath11k_mac_scan_finish(ar); spin_unlock_bh(&ar->data_lock); return ret; } static void ath11k_scan_abort(struct ath11k *ar) { int ret; lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: /* This can happen if timeout worker kicked in and called * abortion while scan completion was being processed.
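 *
 * One such interleaving (timing hypothetical):
 *
 *	WMI completion path		scan timeout worker
 *	-------------------		-------------------
 *	__ath11k_mac_scan_finish()
 *	  ar->scan.state = ATH11K_SCAN_IDLE
 *					ath11k_scan_abort()
 *					  sees ATH11K_SCAN_IDLE, does nothing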
*/ break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n", ar->scan.state); break; case ATH11K_SCAN_RUNNING: ar->scan.state = ATH11K_SCAN_ABORTING; spin_unlock_bh(&ar->data_lock); ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret); spin_lock_bh(&ar->data_lock); break; } spin_unlock_bh(&ar->data_lock); } static void ath11k_scan_timeout_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, scan.timeout.work); mutex_lock(&ar->conf_mutex); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); } static int ath11k_start_scan(struct ath11k *ar, struct scan_req_params *arg) { int ret; unsigned long timeout = 1 * HZ; lockdep_assert_held(&ar->conf_mutex); if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND) ath11k_spectral_reset_buffer(ar); ret = ath11k_wmi_send_scan_start_cmd(ar, arg); if (ret) return ret; if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) { timeout = 5 * HZ; if (ar->supports_6ghz) timeout += 5 * HZ; } ret = wait_for_completion_timeout(&ar->scan.started, timeout); if (ret == 0) { ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret); return -ETIMEDOUT; } /* If we failed to start the scan, return error code at * this point. This is probably due to some issue in the * firmware, but no need to wedge the driver due to that... */ spin_lock_bh(&ar->data_lock); if (ar->scan.state == ATH11K_SCAN_IDLE) { spin_unlock_bh(&ar->data_lock); return -EINVAL; } spin_unlock_bh(&ar->data_lock); return 0; } static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; struct scan_req_params *arg = NULL; int ret = 0; int i; u32 scan_timeout; /* Firmware advertising support for triggering the 11D algorithm * on the results of a regular scan expects the driver to send * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID. * With this feature, a separate 11D scan can be avoided, since the * regdomain can be determined from the results of the * regular scan.
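 *
 * As a sketch, the resulting WMI command order for such firmware is
 *
 *	WMI_11D_SCAN_START_CMDID	(ath11k_mac_11d_scan_start())
 *	WMI_START_SCAN_CMDID		(ath11k_start_scan())
 *
 * with the 11D algorithm then run by firmware on the regular scan
 * results.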
*/ if (ar->state_11d == ATH11K_11D_PREPARING && test_bit(WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN, ar->ab->wmi_ab.svc_map)) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: reinit_completion(&ar->scan.started); reinit_completion(&ar->scan.completed); ar->scan.state = ATH11K_SCAN_STARTING; ar->scan.is_roc = false; ar->scan.vdev_id = arvif->vdev_id; ret = 0; break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ret = -EBUSY; break; } spin_unlock_bh(&ar->data_lock); if (ret) goto exit; arg = kzalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { ret = -ENOMEM; goto exit; } ath11k_wmi_start_scan_init(ar, arg); arg->vdev_id = arvif->vdev_id; arg->scan_id = ATH11K_SCAN_ID; if (ar->ab->hw_params.single_pdev_only) arg->scan_f_filter_prb_req = 1; if (req->ie_len) { arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); if (!arg->extraie.ptr) { ret = -ENOMEM; goto exit; } arg->extraie.len = req->ie_len; } if (req->n_ssids) { arg->num_ssids = req->n_ssids; for (i = 0; i < arg->num_ssids; i++) { arg->ssid[i].length = req->ssids[i].ssid_len; memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid, req->ssids[i].ssid_len); } } else { arg->scan_f_passive = 1; } if (req->n_channels) { arg->num_chan = req->n_channels; arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), GFP_KERNEL); if (!arg->chan_list) { ret = -ENOMEM; goto exit; } for (i = 0; i < arg->num_chan; i++) { if (test_bit(WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL, ar->ab->wmi_ab.svc_map)) { arg->chan_list[i] = u32_encode_bits(req->channels[i]->center_freq, WMI_SCAN_CONFIG_PER_CHANNEL_MASK); /* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan * flags, then scan all PSC channels in 6 GHz band and * those non-PSC channels where RNR IE is found during * the legacy 2.4/5 GHz scan. * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set, * then all channels in 6 GHz will be scanned. 
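 *
 * Worked example (channel picked for illustration only): a non-PSC
 * 6 GHz channel at 5955 MHz scanned with
 * NL80211_SCAN_FLAG_COLOCATED_6GHZ set is encoded below as
 *
 *	arg->chan_list[i] =
 *		u32_encode_bits(5955, WMI_SCAN_CONFIG_PER_CHANNEL_MASK) |
 *		WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;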
*/ if (req->channels[i]->band == NL80211_BAND_6GHZ && req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ && !cfg80211_channel_is_psc(req->channels[i])) arg->chan_list[i] |= WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND; } else { arg->chan_list[i] = req->channels[i]->center_freq; } } } if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { arg->scan_f_add_spoofed_mac_in_probe = 1; ether_addr_copy(arg->mac_addr.addr, req->mac_addr); ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask); } /* if duration is set, default dwell times will be overwritten */ if (req->duration) { arg->dwell_time_active = req->duration; arg->dwell_time_active_2g = req->duration; arg->dwell_time_active_6g = req->duration; arg->dwell_time_passive = req->duration; arg->dwell_time_passive_6g = req->duration; arg->burst_duration = req->duration; scan_timeout = min_t(u32, arg->max_rest_time * (arg->num_chan - 1) + (req->duration + ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * arg->num_chan, arg->max_scan_time); } else { scan_timeout = arg->max_scan_time; } /* Add a margin to account for event/command processing */ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD; ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); ar->scan.state = ATH11K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); } ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, msecs_to_jiffies(scan_timeout)); exit: if (arg) { kfree(arg->chan_list); kfree(arg->extraie.ptr); kfree(arg); } mutex_unlock(&ar->conf_mutex); if (ar->state_11d == ATH11K_11D_PREPARING) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); return ret; } static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); } static int ath11k_install_key(struct ath11k_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, const u8 *macaddr, u32 flags) { int ret; struct ath11k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { .vdev_id = arvif->vdev_id, .key_idx = key->keyidx, .key_len = key->keylen, .key_data = key->key, .key_flags = flags, .macaddr = macaddr, }; lockdep_assert_held(&arvif->ar->conf_mutex); reinit_completion(&ar->install_key_done); if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) return 0; if (cmd == DISABLE_KEY) { arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: arg.key_cipher = WMI_CIPHER_TKIP; arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; } if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags)) key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; install: ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg); if (ret) return ret; if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) return -ETIMEDOUT; return ar->install_key_status ? 
-EINVAL : 0; } static int ath11k_clear_peer_keys(struct ath11k_vif *arvif, const u8 *addr) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; int first_errno = 0; int ret; int i; u32 flags = 0; lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, addr); spin_unlock_bh(&ab->base_lock); if (!peer) return -ENOENT; for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { if (!peer->keys[i]) continue; /* key flags are not required to delete the key */ ret = ath11k_install_key(arvif, peer->keys[i], DISABLE_KEY, addr, flags); if (ret < 0 && first_errno == 0) first_errno = ret; if (ret < 0) ath11k_warn(ab, "failed to remove peer key %d: %d\n", i, ret); spin_lock_bh(&ab->base_lock); peer->keys[i] = NULL; spin_unlock_bh(&ab->base_lock); } return first_errno; } +static int ath11k_set_group_keys(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_base *ab = ar->ab; + const u8 *addr = arvif->bssid; + int i, ret, first_errno = 0; + struct ath11k_peer *peer; + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, arvif->vdev_id, addr); + spin_unlock_bh(&ab->base_lock); + + if (!peer) + return -ENOENT; + + for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { + struct ieee80211_key_conf *key = peer->keys[i]; + + if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + continue; + + ret = ath11k_install_key(arvif, key, SET_KEY, addr, + WMI_KEY_GROUP); + if (ret < 0 && first_errno == 0) + first_errno = ret; + + if (ret < 0) + ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n", + i, arvif->vdev_id, ret); + } + + return first_errno; +} + static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; struct ath11k_sta *arsta; + bool is_ap_with_no_sta; const u8 *peer_addr; int ret = 0; u32 flags = 0; /* BIP needs to be done in software */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) return 1; if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) return 1; if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; mutex_lock(&ar->conf_mutex); if (sta) peer_addr = sta->addr; else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) peer_addr = vif->bss_conf.bssid; else peer_addr = vif->addr; key->hw_key_idx = key->keyidx; /* The peer should not disappear midway (unless FW goes awry) since * we already hold conf_mutex. We just make sure it's there now. */ spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); /* flush the fragments cache during key (re)install to * ensure all frags in the new frag list belong to the same key.
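 * Otherwise a frame could be reassembled from fragments that were
 * protected by different keys, which must never be accepted.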
*/ if (peer && sta && cmd == SET_KEY) ath11k_peer_frags_flush(ar, peer); spin_unlock_bh(&ab->base_lock); if (!peer) { if (cmd == SET_KEY) { ath11k_warn(ab, "cannot install key for non-existent peer %pM\n", peer_addr); ret = -EOPNOTSUPP; goto exit; } else { /* if the peer doesn't exist there is no key to disable * anymore */ goto exit; } } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) flags |= WMI_KEY_PAIRWISE; else flags |= WMI_KEY_GROUP; + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n", + cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id, + flags, arvif->vdev_type, arvif->num_stations); + + /* Allow group key clearing only in AP mode when no stations are + * associated. There is a known race condition in firmware where + * group addressed packets may be dropped if the key is cleared + * and immediately set again during rekey. + * + * During GTK rekey, mac80211 issues a clear key (if the old key + * exists) followed by an install key operation for the same key + * index. This causes ath11k to send two WMI commands in quick + * succession: one to clear the old key and another to install the + * new key in the same slot. + * + * Under certain conditions, especially under high load or in + * time-sensitive scenarios, firmware may process these commands + * asynchronously in a way that it assumes the key is + * cleared whereas hardware still has a valid key. This inconsistency + * between hardware and firmware leads to group addressed packet + * drops after rekey. + * Only setting the same key again can restore a valid key in + * firmware and allow packets to be transmitted. + * + * There is a use case where an AP can transition from secure mode + * to open mode without a vdev restart by just deleting all + * associated peers and clearing the key; hence allow clearing the + * key in that case alone. Mark arvif->reinstall_group_keys in such + * cases and reinstall the same key when the first peer is added, + * allowing firmware to recover from the race if it had occurred.
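+ *
+ * A minimal sketch of the reinstall side (the consuming hunk is not
+ * part of this section; its shape is assumed from the description
+ * above):
+ *
+ *	if (arvif->reinstall_group_keys) {
+ *		ret = ath11k_set_group_keys(arvif);
+ *		if (ret)
+ *			ath11k_warn(ab, "failed to set group keys: %d\n",
+ *				    ret);
+ *		arvif->reinstall_group_keys = false;
+ *	}
+ *
+ * run when the first station is added, so firmware ends up holding
+ * the same group keys the host believes are installed.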
+ */ - ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key); - if (ret) { - ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret); - goto exit; + is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP && + !arvif->num_stations); + if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) { + ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags); + if (ret) { + ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret); + goto exit; + } + + ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key); + if (ret) { + ath11k_warn(ab, "failed to offload PN replay detection %d\n", + ret); + goto exit; + } + + if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta) + arvif->reinstall_group_keys = true; } spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); if (peer && cmd == SET_KEY) { peer->keys[key->keyidx] = key; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { peer->ucast_keyidx = key->keyidx; peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher); } else { peer->mcast_keyidx = key->keyidx; peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher); } } else if (peer && cmd == DISABLE_KEY) { peer->keys[key->keyidx] = NULL; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) peer->ucast_keyidx = 0; else peer->mcast_keyidx = 0; } else if (!peer) /* impossible unless FW goes crazy */ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { arsta = ath11k_sta_to_arsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (cmd == SET_KEY) arsta->pn_type = HAL_PN_TYPE_WPA; else arsta->pn_type = HAL_PN_TYPE_NONE; break; default: arsta->pn_type = HAL_PN_TYPE_NONE; break; } } spin_unlock_bh(&ab->base_lock); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) num_rates += hweight8(mask->control[band].ht_mcs[i]); return num_rates; } static int ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) num_rates += hweight16(mask->control[band].vht_mcs[i]); return num_rates; } static int ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; int i; for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) num_rates += hweight16(mask->control[band].he_mcs[i]); return num_rates; } static int ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 vht_rate, nss; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); nss = 0; for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { if (hweight16(mask->control[band].vht_mcs[i]) == 1) { nss = i + 1; vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate*/ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Setting Fixed VHT Rate for 
peer %pM. Device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1, WMI_RATE_PREAMBLE_VHT); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update STA %pM Fixed Rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 he_rate, nss; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); nss = 0; for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (hweight16(mask->control[band].he_mcs[i]) == 1) { nss = i + 1; he_rate = ffs(mask->control[band].he_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate */ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting fixed he rate for peer %pM, device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1, WMI_RATE_PREAMBLE_HE); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update sta %pM fixed rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_mac_set_peer_ht_fixed_rate(struct ath11k_vif *arvif, struct ieee80211_sta *sta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { struct ath11k *ar = arvif->ar; u8 ht_rate, nss = 0; u32 rate_code; int ret, i; lockdep_assert_held(&ar->conf_mutex); for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { if (hweight8(mask->control[band].ht_mcs[i]) == 1) { nss = i + 1; ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1; } } if (!nss) { ath11k_warn(ar->ab, "No single HT Fixed rate found to set for %pM", sta->addr); return -EINVAL; } /* Avoid updating invalid nss as fixed rate*/ if (nss > sta->deflink.rx_nss) return -EINVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Setting Fixed HT Rate for peer %pM. 
Device will not switch to any other selected rates", sta->addr); rate_code = ATH11K_HW_RATE_CODE(ht_rate, nss - 1, WMI_RATE_PREAMBLE_HT); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, rate_code); if (ret) ath11k_warn(ar->ab, "failed to update STA %pM HT Fixed Rate %d: %d\n", sta->addr, rate_code, ret); return ret; } static int ath11k_station_assoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool reassoc) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct peer_assoc_params peer_arg; int ret = 0; struct cfg80211_chan_def def; enum nl80211_band band; struct cfg80211_bitrate_mask *mask; u8 num_ht_rates, num_vht_rates, num_he_rates; lockdep_assert_held(&ar->conf_mutex); if (WARN_ON(ath11k_mac_vif_chan(vif, &def))) return -EPERM; band = def.chan->band; mask = &arvif->bitrate_mask; ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); peer_arg.is_assoc = true; ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", sta->addr, arvif->vdev_id); return -ETIMEDOUT; } num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask); /* If single VHT/HE rate is configured (by set_bitrate_mask()), * peer_assoc will disable VHT/HE. This is now enabled by a peer specific * fixed param. * Note that all other rates and NSS will be disabled for this peer. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) { ret = ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask, band); if (ret) return ret; } /* Re-assoc is run only to update supported rates for given station. It * doesn't make much sense to reconfigure the peer completely. 
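 *
 * (Worked example for the fixed-rate branches above, mask value
 * chosen only for illustration: a bitrate mask with just
 * vht_mcs[1] = 0x0010 set selects NSS 2 / MCS 4, so
 * ath11k_mac_set_peer_vht_fixed_rate() programs
 * ATH11K_HW_RATE_CODE(4, 1, WMI_RATE_PREAMBLE_VHT) through
 * WMI_PEER_PARAM_FIXED_RATE.)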
*/ if (reassoc) return 0; ret = ath11k_setup_peer_smps(ar, arvif, sta->addr, &sta->deflink.ht_cap, le16_to_cpu(sta->deflink.he_6ghz_capa.capa)); if (ret) { ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } if (!sta->wme) { arvif->num_legacy_stations++; ret = ath11k_recalc_rtscts_prot(arvif); if (ret) return ret; } if (sta->wme && sta->uapsd_queues) { ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta); if (ret) { ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } } return 0; } static int ath11k_station_disassoc(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; lockdep_assert_held(&ar->conf_mutex); if (!sta->wme) { arvif->num_legacy_stations--; ret = ath11k_recalc_rtscts_prot(arvif); if (ret) return ret; } ret = ath11k_clear_peer_keys(arvif, sta->addr); if (ret) { ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } static u32 ath11k_mac_max_nss(const u8 *ht_mcs_mask, const u16 *vht_mcs_mask, const u16 *he_mcs_mask) { return max3(ath11k_mac_max_ht_nss(ht_mcs_mask), ath11k_mac_max_vht_nss(vht_mcs_mask), ath11k_mac_max_he_nss(he_mcs_mask)); } static void ath11k_sta_rc_update_wk(struct work_struct *wk) { struct ath11k *ar; struct ath11k_vif *arvif; struct ath11k_sta *arsta; struct ieee80211_sta *sta; struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; u32 changed, bw, nss, smps, bw_prev; int err, num_ht_rates, num_vht_rates, num_he_rates; const struct cfg80211_bitrate_mask *mask; struct peer_assoc_params peer_arg; enum wmi_phy_mode peer_phymode; arsta = container_of(wk, struct ath11k_sta, update_wk); sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); arvif = arsta->arvif; ar = arvif->ar; if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def))) return; band = def.chan->band; ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs; spin_lock_bh(&ar->data_lock); changed = arsta->changed; arsta->changed = 0; bw = arsta->bw; bw_prev = arsta->bw_prev; nss = arsta->nss; smps = arsta->smps; spin_unlock_bh(&ar->data_lock); mutex_lock(&ar->conf_mutex); nss = max_t(u32, 1, nss); nss = min(nss, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask)); if (changed & IEEE80211_RC_BW_CHANGED) { /* Get the peer phymode */ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); peer_phymode = peer_arg.peer_phymode; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM peer bw %d phymode %d\n", sta->addr, bw, peer_phymode); if (bw > bw_prev) { /* BW is upgraded. In this case we send WMI_PEER_PHYMODE * followed by WMI_PEER_CHWIDTH */ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW upgrade for sta %pM new BW %d, old BW %d\n", sta->addr, bw, bw_prev); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) { ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n", sta->addr, peer_phymode, err); goto err_rc_bw_changed; } err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); } else { /* BW is downgraded. 
In this case we send WMI_PEER_CHWIDTH * followed by WMI_PEER_PHYMODE */ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW downgrade for sta %pM new BW %d, old BW %d\n", sta->addr, bw, bw_prev); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, bw); if (err) { ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); goto err_rc_bw_changed; } err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, peer_phymode); if (err) ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n", sta->addr, peer_phymode, err); } } if (changed & IEEE80211_RC_NSS_CHANGED) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM nss %d\n", sta->addr, nss); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_NSS, nss); if (err) ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n", sta->addr, nss, err); } if (changed & IEEE80211_RC_SMPS_CHANGED) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM smps %d\n", sta->addr, smps); err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, smps); if (err) ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n", sta->addr, smps, err); } if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { mask = &arvif->bitrate_mask; num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask); num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); /* Peer_assoc_prepare will reject vht rates in * bitrate_mask if it is not available in range format and * sets vht tx_rateset as unsupported. So multiple VHT MCS * setting (e.g. MCS 4,5,6) per peer is not supported here. * But a single rate in the VHT mask can be set as a per-peer * fixed rate. But even if any HT rates are configured in * the bitrate mask, the device will not switch to those rates * when a per-peer fixed rate is set. * TODO: Check RATEMASK_CMDID to support auto rates selection * across HT/VHT and for multiple VHT MCS support. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, band); } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) { ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask, band); } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) { ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask, band); } else { /* If the peer is non-VHT/HE or no fixed VHT/HE rate * is provided in the new bitrate mask we set the * other rates using peer_assoc command.
Also clear * the peer fixed rate settings as it has higher priority * than peer assoc */ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, WMI_FIXED_RATE_NONE); if (err) ath11k_warn(ar->ab, "failed to disable peer fixed rate for sta %pM: %d\n", sta->addr, err); ath11k_peer_assoc_prepare(ar, arvif->vif, sta, &peer_arg, true); peer_arg.is_assoc = false; err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (err) ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, err); if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", sta->addr, arvif->vdev_id); } } err_rc_bw_changed: mutex_unlock(&ar->conf_mutex); } static void ath11k_sta_set_4addr_wk(struct work_struct *wk) { struct ath11k *ar; struct ath11k_vif *arvif; struct ath11k_sta *arsta; struct ieee80211_sta *sta; int ret = 0; arsta = container_of(wk, struct ath11k_sta, set_4addr_wk); sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); arvif = arsta->arvif; ar = arvif->ar; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting USE_4ADDR for peer %pM\n", sta->addr); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n", sta->addr, ret); } static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ath11k *ar = arvif->ar; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return 0; if (ar->num_stations >= ar->max_num_stations) return -ENOBUFS; ar->num_stations++; + arvif->num_stations++; return 0; } static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif, struct ieee80211_sta *sta) { struct ath11k *ar = arvif->ar; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return; ar->num_stations--; + arvif->num_stations--; } static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar, struct ieee80211_sta *sta) { u32 bw = WMI_PEER_CHWIDTH_20MHZ; switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_20: bw = WMI_PEER_CHWIDTH_20MHZ; break; case IEEE80211_STA_RX_BW_40: bw = WMI_PEER_CHWIDTH_40MHZ; break; case IEEE80211_STA_RX_BW_80: bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: bw = WMI_PEER_CHWIDTH_160MHZ; break; default: ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n", sta->deflink.bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; } return bw; } static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret = 0; s16 txpwr; if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { txpwr = sta->deflink.txpwr.power; if (!txpwr) return -EINVAL; } if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL) return -EINVAL; mutex_lock(&ar->conf_mutex); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_FIXED_PWR, txpwr); if (ret) { ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n", ret); goto out; } out: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enabled) { struct ath11k *ar = hw->priv; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); if (enabled &&
!arsta->use_4addr_set) { ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk); arsta->use_4addr_set = true; } } static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta, u32 changed) { struct ieee80211_sta *sta = link_sta->sta; struct ath11k *ar = hw->priv; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; u32 bw, smps; spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (!peer) { spin_unlock_bh(&ar->ab->base_lock); ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", sta->addr, arvif->vdev_id); return; } spin_unlock_bh(&ar->ab->base_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta rc update for %pM changed %08x bw %d nss %d smps %d\n", sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, sta->deflink.smps_mode); spin_lock_bh(&ar->data_lock); if (changed & IEEE80211_RC_BW_CHANGED) { bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); arsta->bw_prev = arsta->bw; arsta->bw = bw; } if (changed & IEEE80211_RC_NSS_CHANGED) arsta->nss = sta->deflink.rx_nss; if (changed & IEEE80211_RC_SMPS_CHANGED) { smps = WMI_PEER_SMPS_PS_NONE; switch (sta->deflink.smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_OFF: smps = WMI_PEER_SMPS_PS_NONE; break; case IEEE80211_SMPS_STATIC: smps = WMI_PEER_SMPS_STATIC; break; case IEEE80211_SMPS_DYNAMIC: smps = WMI_PEER_SMPS_DYNAMIC; break; default: ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", sta->deflink.smps_mode, sta->addr); smps = WMI_PEER_SMPS_PS_NONE; break; } arsta->smps = smps; } arsta->changed |= changed; spin_unlock_bh(&ar->data_lock); ieee80211_queue_work(hw, &arsta->update_wk); } static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif, u16 ac, bool enable) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 value = 0; int ret = 0; if (arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; switch (ac) { case IEEE80211_AC_VO: value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; break; case IEEE80211_AC_VI: value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; break; case IEEE80211_AC_BE: value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; break; case IEEE80211_AC_BK: value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; break; } if (enable) arvif->u.sta.uapsd |= value; else arvif->u.sta.uapsd &= ~value; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_UAPSD, arvif->u.sta.uapsd); if (ret) { ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret); goto exit; } if (arvif->u.sta.uapsd) value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; else value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_RX_WAKE_POLICY, value); if (ret) ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret); exit: return ret; } static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k *ar = hw->priv; struct wmi_wmm_params_arg *p; int ret; switch (ac) { case IEEE80211_AC_VO: p = &arvif->muedca_params.ac_vo; break; case IEEE80211_AC_VI: p = &arvif->muedca_params.ac_vi; break; case IEEE80211_AC_BE: p = &arvif->muedca_params.ac_be; break; case IEEE80211_AC_BK: p 
= &arvif->muedca_params.ac_bk; break; default: ath11k_warn(ar->ab, "error ac: %d", ac); return -EINVAL; } p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0)); p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4)); p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0)); p->txop = params->mu_edca_param_rec.mu_edca_timer; ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, &arvif->muedca_params, WMI_WMM_PARAM_TYPE_11AX_MU_EDCA); return ret; } static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct wmi_wmm_params_arg *p = NULL; int ret; mutex_lock(&ar->conf_mutex); switch (ac) { case IEEE80211_AC_VO: p = &arvif->wmm_params.ac_vo; break; case IEEE80211_AC_VI: p = &arvif->wmm_params.ac_vi; break; case IEEE80211_AC_BE: p = &arvif->wmm_params.ac_be; break; case IEEE80211_AC_BK: p = &arvif->wmm_params.ac_bk; break; } if (WARN_ON(!p)) { ret = -EINVAL; goto exit; } p->cwmin = params->cw_min; p->cwmax = params->cw_max; p->aifs = params->aifs; p->txop = params->txop; ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, &arvif->wmm_params, WMI_WMM_PARAM_TYPE_LEGACY); if (ret) { ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret); goto exit; } if (params->mu_edca) { ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac, params); if (ret) { ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret); goto exit; } } ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret); exit: mutex_unlock(&ar->conf_mutex); return ret; } static struct ieee80211_sta_ht_cap ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) { int i; - struct ieee80211_sta_ht_cap ht_cap = {0}; + struct ieee80211_sta_ht_cap ht_cap = {}; u32 ar_vht_cap = ar->pdev->cap.vht_cap; if (!(ar_ht_cap & WMI_HT_CAP_ENABLED)) return ht_cap; ht_cap.ht_supported = 1; ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; if (ar_ht_cap & WMI_HT_CAP_HT20_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; if (ar_ht_cap & WMI_HT_CAP_HT40_SGI) ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) { u32 smps; smps = WLAN_HT_CAP_SM_PS_DYNAMIC; smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; ht_cap.cap |= smps; } if (ar_ht_cap & WMI_HT_CAP_TX_STBC) ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; if (ar_ht_cap & WMI_HT_CAP_RX_STBC) { u32 stbc; stbc = ar_ht_cap; stbc &= WMI_HT_CAP_RX_STBC; stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; stbc &= IEEE80211_HT_CAP_RX_STBC; ht_cap.cap |= stbc; } if (ar_ht_cap & WMI_HT_CAP_RX_LDPC) ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT) ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; for (i = 0; i < ar->num_rx_chains; i++) { if (rate_cap_rx_chainmask & BIT(i)) ht_cap.mcs.rx_mask[i] = 0xFF; } ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; return ht_cap; } static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) { u32 value = 0; struct ath11k *ar = arvif->ar; int nsts; 
int sound_dim; u32 vht_cap = ar->pdev->cap.vht_cap; u32 vdev_param = WMI_VDEV_PARAM_TXBF; if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); } if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { sound_dim = vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; if (sound_dim > (ar->num_tx_chains - 1)) sound_dim = ar->num_tx_chains - 1; value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); } if (!value) return 0; if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) { value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) && arvif->vdev_type == WMI_VDEV_TYPE_AP) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; } /* TODO: SUBFEE not validated in HK, disable here until validated? */ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) { value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && arvif->vdev_type == WMI_VDEV_TYPE_STA) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; } return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, value); } static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) { bool subfer, subfee; int sound_dim = 0, nsts = 0; subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)); subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)); if (ar->num_tx_chains < 2) { *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); subfer = false; } if (ar->num_rx_chains < 2) { *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); subfee = false; } /* If SU Beamformer is not set, then disable MU Beamformer Capability */ if (!subfer) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); /* If SU Beamformee is not set, then disable MU Beamformee Capability */ if (!subfee) *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; /* Enable Sounding Dimension Field only if SU BF is enabled */ if (subfer) { if (sound_dim > (ar->num_tx_chains - 1)) sound_dim = ar->num_tx_chains - 1; sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; *vht_cap |= sound_dim; } /* Enable Beamformee STS Field only if SU BF is enabled */ if (subfee) { nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; *vht_cap |= nsts; } } static struct ieee80211_sta_vht_cap ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, u32 rate_cap_rx_chainmask) { - struct ieee80211_sta_vht_cap vht_cap = {0}; + struct ieee80211_sta_vht_cap vht_cap = {}; u16 txmcs_map, rxmcs_map; int i; vht_cap.vht_supported = 1; vht_cap.cap = ar->pdev->cap.vht_cap; if (ar->pdev->cap.nss_ratio_enabled) vht_cap.vht_mcs.tx_highest |= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); rxmcs_map = 0; txmcs_map = 0; for (i = 0; i < 8; i++) { if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i)) txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); else txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); if (i <
ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i)) rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); else rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } if (rate_cap_tx_chainmask <= 1) vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map); vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map); return vht_cap; } static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap, u32 *ht_cap_info) { struct ieee80211_supported_band *band; u32 rate_cap_tx_chainmask; u32 rate_cap_rx_chainmask; u32 ht_cap; rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift; rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { band = &ar->mac.sbands[NL80211_BAND_2GHZ]; ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info; if (ht_cap_info) *ht_cap_info = ht_cap; band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, rate_cap_rx_chainmask); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && (ar->ab->hw_params.single_pdev_only || !ar->supports_6ghz)) { band = &ar->mac.sbands[NL80211_BAND_5GHZ]; ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; if (ht_cap_info) *ht_cap_info = ht_cap; band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, rate_cap_rx_chainmask); band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask, rate_cap_rx_chainmask); } } static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant) { /* TODO: Check the requested chainmask against the supported * chainmask table which is advertised in extended_service_ready event */ return 0; } static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet, u8 *he_ppet) { int nss, ru; u8 bit = 7; he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK; he_ppet[0] |= (fw_ppet->ru_bit_mask << IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { for (ru = 0; ru < 4; ru++) { u8 val; int i; if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) continue; val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & 0x3f; val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); for (i = 5; i >= 0; i--) { he_ppet[bit / 8] |= ((val >> i) & 0x1) << ((bit % 8)); bit++; } } } } static void ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) { u8 m; m = IEEE80211_HE_MAC_CAP0_TWT_RES | IEEE80211_HE_MAC_CAP0_TWT_REQ; he_cap_elem->mac_cap_info[0] &= ~m; m = IEEE80211_HE_MAC_CAP2_TRS | IEEE80211_HE_MAC_CAP2_BCAST_TWT | IEEE80211_HE_MAC_CAP2_MU_CASCADING; he_cap_elem->mac_cap_info[2] &= ~m; m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED | IEEE80211_HE_MAC_CAP2_BCAST_TWT | IEEE80211_HE_MAC_CAP2_MU_CASCADING; he_cap_elem->mac_cap_info[3] &= ~m; m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG | IEEE80211_HE_MAC_CAP4_BQR; he_cap_elem->mac_cap_info[4] &= ~m; m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; he_cap_elem->mac_cap_info[5] &= ~m; m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; he_cap_elem->phy_cap_info[2] &= ~m; m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; he_cap_elem->phy_cap_info[3] &= ~m; m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; he_cap_elem->phy_cap_info[4] &= ~m; m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; 
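/* As with the masks built above, NG16 MU feedback is stripped next: these HE features are not advertised for mesh interfaces. */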
he_cap_elem->phy_cap_info[5] &= ~m; m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; he_cap_elem->phy_cap_info[6] &= ~m; m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; he_cap_elem->phy_cap_info[7] &= ~m; m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; he_cap_elem->phy_cap_info[8] &= ~m; m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; he_cap_elem->phy_cap_info[9] &= ~m; } static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap, struct ath11k_band_cap *bcap) { u8 val; bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, WLAN_HT_CAP_SM_PS_DYNAMIC); else bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, WLAN_HT_CAP_SM_PS_DISABLED); val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, pcap->vht_cap); bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val); val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap); bcap->he_6ghz_capa |= FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val); if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; return cpu_to_le16(bcap->he_6ghz_capa); } static void ath11k_mac_set_hemcsmap(struct ath11k *ar, struct ath11k_pdev_cap *cap, struct ieee80211_sta_he_cap *he_cap, int band) { u16 txmcs_map, rxmcs_map; u32 i; rxmcs_map = 0; txmcs_map = 0; for (i = 0; i < 8; i++) { if (i < ar->num_tx_chains && (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i)) txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); else txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); if (i < ar->num_rx_chains && (ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift) & BIT(i)) rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2); else rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2); } he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(txmcs_map & 0xffff); he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(txmcs_map & 0xffff); he_cap->he_mcs_nss_supp.rx_mcs_80p80 = cpu_to_le16(rxmcs_map & 0xffff); he_cap->he_mcs_nss_supp.tx_mcs_80p80 = cpu_to_le16(txmcs_map & 0xffff); } static int ath11k_mac_copy_he_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap, struct ieee80211_sband_iftype_data *data, int band) { int i, idx = 0; for (i = 0; i < NUM_NL80211_IFTYPES; i++) { struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; struct ath11k_band_cap *band_cap = &cap->band[band]; struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; switch (i) { case 
NL80211_IFTYPE_STATION: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: break; default: continue; } data[idx].types_mask = BIT(i); he_cap->has_he = true; memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, sizeof(he_cap_elem->mac_cap_info)); memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, sizeof(he_cap_elem->phy_cap_info)); he_cap_elem->mac_cap_info[1] &= IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; he_cap_elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; switch (i) { case NL80211_IFTYPE_AP: he_cap_elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES; he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_MESH_POINT: ath11k_mac_filter_he_cap_mesh(he_cap_elem); break; } ath11k_mac_set_hemcsmap(ar, cap, he_cap, band); memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) ath11k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); if (band == NL80211_BAND_6GHZ) { data[idx].he_6ghz_capa.capa = ath11k_mac_setup_he_6ghz_cap(cap, band_cap); } idx++; } return idx; } static void ath11k_mac_setup_he_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap) { struct ieee80211_supported_band *band; int count; if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_2GHZ], NL80211_BAND_2GHZ); band = &ar->mac.sbands[NL80211_BAND_2GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_2GHZ], count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_5GHZ], NL80211_BAND_5GHZ); band = &ar->mac.sbands[NL80211_BAND_5GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_5GHZ], count); } if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) { count = ath11k_mac_copy_he_cap(ar, cap, ar->mac.iftype[NL80211_BAND_6GHZ], NL80211_BAND_6GHZ); band = &ar->mac.sbands[NL80211_BAND_6GHZ]; _ieee80211_set_sband_iftype_data(band, ar->mac.iftype[NL80211_BAND_6GHZ], count); } } static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) { int ret; lockdep_assert_held(&ar->conf_mutex); if (ath11k_check_chain_mask(ar, tx_ant, true)) return -EINVAL; if (ath11k_check_chain_mask(ar, rx_ant, false)) return -EINVAL; ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; if (ar->state != ATH11K_STATE_ON && ar->state != ATH11K_STATE_RESTARTED) return 0; ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, tx_ant, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n", ret, tx_ant); return ret; } ar->num_tx_chains = get_num_chains(tx_ant); ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK, rx_ant, ar->pdev->pdev_id); if (ret) { ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n", ret, rx_ant); return ret; } ar->num_rx_chains = get_num_chains(rx_ant); /* Reload HT/VHT/HE capability */ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); ath11k_mac_setup_he_cap(ar, &ar->pdev->cap); return 0; } static void 
ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb) { int num_mgmt; ieee80211_free_txskb(ar->hw, skb); num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); if (num_mgmt < 0) WARN_ON_ONCE(1); if (!num_mgmt) wake_up(&ar->txmgmt_empty_waitq); } static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id) { struct sk_buff *msdu; struct ieee80211_tx_info *info; spin_lock_bh(&ar->txmgmt_idr_lock); msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); if (!msdu) return; dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); ath11k_mgmt_over_wmi_tx_drop(ar, msdu); } int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) { struct ath11k *ar = ctx; ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); struct ath11k *ar = skb_cb->ar; if (skb_cb->vif == vif) ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, struct sk_buff *skb) { struct ath11k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ieee80211_tx_info *info; enum hal_encrypt_type enctype; unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; ATH11K_SKB_CB(skb)->ar = ar; spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); spin_unlock_bh(&ar->txmgmt_idr_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "tx mgmt frame, buf id %d\n", buf_id); if (buf_id < 0) return -ENOSPC; info = IEEE80211_SKB_CB(skb); if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET)) ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET"); enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype); skb_put(skb, mic_len); } } paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, paddr)) { ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n"); ret = -EIO; goto err_free_idr; } ATH11K_SKB_CB(skb)->paddr = paddr; ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb); if (ret) { ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret); goto err_unmap_buf; } return 0; err_unmap_buf: dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); err_free_idr: spin_lock_bh(&ar->txmgmt_idr_lock); idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); return ret; } static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) { struct sk_buff *skb; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) ath11k_mgmt_over_wmi_tx_drop(ar, skb); } static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); struct ath11k_skb_cb *skb_cb; struct ath11k_vif *arvif; struct sk_buff *skb; int ret; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { skb_cb = ATH11K_SKB_CB(skb); if (!skb_cb->vif) { ath11k_warn(ar->ab, "no vif found 
for mgmt frame\n"); ath11k_mgmt_over_wmi_tx_drop(ar, skb); continue; } arvif = ath11k_vif_to_arvif(skb_cb->vif); mutex_lock(&ar->conf_mutex); if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", arvif->vdev_id, ret); ath11k_mgmt_over_wmi_tx_drop(ar, skb); } else { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "tx mgmt frame, vdev_id %d\n", arvif->vdev_id); } } else { ath11k_warn(ar->ab, "dropping mgmt frame for vdev %d, is_started %d\n", arvif->vdev_id, arvif->is_started); ath11k_mgmt_over_wmi_tx_drop(ar, skb); } mutex_unlock(&ar->conf_mutex); } } static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, bool is_prb_rsp) { struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; /* Drop probe response packets when the pending management tx * count has reached a certain threshold, so as to prioritize * other mgmt packets like auth and assoc to be sent on time * for establishing successful connections. */ if (is_prb_rsp && atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) { ath11k_warn(ar->ab, "dropping probe response as pending queue is almost full\n"); return -ENOSPC; } if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) { ath11k_warn(ar->ab, "mgmt tx queue is full\n"); return -ENOSPC; } skb_queue_tail(q, skb); atomic_inc(&ar->num_pending_mgmt_tx); queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work); return 0; } static void ath11k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ath11k *ar = hw->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; struct ath11k_sta *arsta = NULL; u32 info_flags = info->flags; bool is_prb_rsp; int ret; memset(skb_cb, 0, sizeof(*skb_cb)); skb_cb->vif = vif; if (key) { skb_cb->cipher = key->cipher; skb_cb->flags |= ATH11K_SKB_CIPHER_SET; } if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP; } else if (ieee80211_is_mgmt(hdr->frame_control)) { is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp); if (ret) { ath11k_warn(ar->ab, "failed to queue management frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } return; } if (control->sta) arsta = ath11k_sta_to_arsta(control->sta); ret = ath11k_dp_tx(ar, arvif, arsta, skb); if (unlikely(ret)) { ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } } void ath11k_mac_drain_tx(struct ath11k *ar) { /* make sure rcu-protected mac80211 tx path itself is drained */ synchronize_net(); cancel_work_sync(&ar->wmi_mgmt_tx_work); ath11k_mgmt_over_wmi_tx_purge(ar); } static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) { - struct htt_rx_ring_tlv_filter tlv_filter = {0}; + struct htt_rx_ring_tlv_filter tlv_filter = {}; struct ath11k_base *ab = ar->ab; int i, ret = 0; u32 ring_id; if (enable) { tlv_filter = ath11k_mac_mon_status_filter_default; if (ath11k_debugfs_rx_filter(ar)) tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); } for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = 
ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i, HAL_RXDMA_MONITOR_STATUS, DP_RX_BUFFER_SIZE, &tlv_filter); } if (enable && !ar->ab->hw_params.rxdma1_enable) mod_timer(&ar->ab->mon_reap_timer, jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); return ret; } static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab) { int recovery_start_count; if (!ab->is_reset) return; recovery_start_count = atomic_inc_return(&ab->recovery_start_count); ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count); if (recovery_start_count == ab->num_radios) { complete(&ab->recovery_start); ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started successfully\n"); } ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting for reconfigure...\n"); wait_for_completion_timeout(&ab->reconfigure_complete, ATH11K_RECONFIGURE_TIMEOUT_HZ); } static int ath11k_mac_op_start(struct ieee80211_hw *hw) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_pdev *pdev = ar->pdev; int ret; if (ath11k_ftm_mode) { ath11k_warn(ab, "mac operations not supported in factory test mode\n"); return -EOPNOTSUPP; } ath11k_mac_drain_tx(ar); mutex_lock(&ar->conf_mutex); switch (ar->state) { case ATH11K_STATE_OFF: ar->state = ATH11K_STATE_ON; break; case ATH11K_STATE_RESTARTING: ar->state = ATH11K_STATE_RESTARTED; ath11k_mac_wait_reconfigure(ab); break; case ATH11K_STATE_RESTARTED: case ATH11K_STATE_WEDGED: case ATH11K_STATE_ON: case ATH11K_STATE_FTM: WARN_ON(1); ret = -EINVAL; goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable PMF QOS: %d\n", ret); goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret); goto err; } if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr); if (ret) { ath11k_err(ab, "failed to set prob req oui: %i\n", ret); goto err; } } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE, 0, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to set ac override for ARP: %d\n", ret); goto err; } ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to offload radar detection: %d\n", ret); goto err; } ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, HTT_PPDU_STATS_TAG_DEFAULT); if (ret) { ath11k_err(ab, "failed to req ppdu stats: %d\n", ret); goto err; } ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE, 1, pdev->pdev_id); if (ret) { ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: %d\n", ret); goto err; } __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); /* TODO: Do we need to enable ANI? */ ath11k_reg_update_chan_list(ar, false); ar->num_started_vdevs = 0; ar->num_created_vdevs = 0; ar->num_peers = 0; ar->allocated_vdev_map = 0; /* Configure monitor status ring with default rx_filter to get rx status * such as rssi, rx_duration. 
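* The same helper is called with enable == false from ath11k_mac_op_stop() below to clear the filter again.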
*/ ret = ath11k_mac_config_mon_status_default(ar, true); if (ret) { ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", ret); goto err; } /* Configure the hash seed for hash based reo dest ring selection */ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id); /* allow device to enter IMPS */ if (ab->hw_params.idle_ps) { ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG, 1, pdev->pdev_id); if (ret) { ath11k_err(ab, "failed to enable idle ps: %d\n", ret); goto err; } } mutex_unlock(&ar->conf_mutex); rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], &ab->pdevs[ar->pdev_idx]); return 0; err: ar->state = ATH11K_STATE_OFF; mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ath11k *ar = hw->priv; struct htt_ppdu_stats_info *ppdu_stats, *tmp; struct scan_chan_list_params *params; int ret; ath11k_mac_drain_tx(ar); mutex_lock(&ar->conf_mutex); ret = ath11k_mac_config_mon_status_default(ar, false); if (ret) ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", ret); clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); ar->state = ATH11K_STATE_OFF; mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ar->ab->update_11d_work); if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } spin_lock_bh(&ar->data_lock); list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { list_del(&ppdu_stats->list); kfree(ppdu_stats); } while ((params = list_first_entry_or_null(&ar->channel_update_queue, struct scan_chan_list_params, list))) { list_del(&params->list); kfree(params); } spin_unlock_bh(&ar->data_lock); rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); synchronize_rcu(); atomic_set(&ar->num_pending_mgmt_tx, 0); } static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif, u32 *flags, u32 *tx_vdev_id) { struct ath11k *ar = arvif->ar; struct ath11k_vif *tx_arvif; *tx_vdev_id = 0; tx_arvif = ath11k_mac_get_tx_arvif(arvif); if (!tx_arvif) { *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP; return 0; } if (arvif->vif->bss_conf.nontransmitted) { if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy) return -EINVAL; *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP; *tx_vdev_id = tx_arvif->vdev_id; } else if (tx_arvif == arvif) { *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP; } else { return -EINVAL; } if (arvif->vif->bss_conf.ema_ap) *flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE; return 0; } static int ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif, struct vdev_create_params *params) { struct ath11k *ar = arvif->ar; struct ath11k_pdev *pdev = ar->pdev; int ret; params->if_id = arvif->vdev_id; params->type = arvif->vdev_type; params->subtype = arvif->vdev_subtype; params->pdev_id = pdev->pdev_id; params->mbssid_flags = 0; params->mbssid_tx_vdev_id = 0; if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, ar->ab->wmi_ab.svc_map)) { ret = ath11k_mac_setup_vdev_params_mbssid(arvif, &params->mbssid_flags, &params->mbssid_tx_vdev_id); if (ret) return ret; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_5GHZ].rx = 
ar->num_rx_chains; } if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP && ar->supports_6ghz) { params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains; } return 0; } static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); u32 param_id, param_value; int ret; param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE; if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET || (vif->type != NL80211_IFTYPE_STATION && vif->type != NL80211_IFTYPE_AP)) vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED | IEEE80211_OFFLOAD_DECAP_ENABLED); if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) param_value = ATH11K_HW_TXRX_ETHERNET; else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) param_value = ATH11K_HW_TXRX_RAW; else param_value = ATH11K_HW_TXRX_NATIVE_WIFI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", arvif->vdev_id, ret); vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; } param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE; if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED) param_value = ATH11K_HW_TXRX_ETHERNET; else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) param_value = ATH11K_HW_TXRX_RAW; else param_value = ATH11K_HW_TXRX_NATIVE_WIFI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n", arvif->vdev_id, ret); vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; } } static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; struct ath11k_vif *arvif; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP) return true; } } return false; } void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id) { struct wmi_11d_scan_start_params param; int ret; mutex_lock(&ar->ab->vdev_id_11d_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev id for 11d scan %d\n", ar->vdev_id_11d_scan); if (ar->regdom_set_by_user) goto fin; if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) goto fin; if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) goto fin; if (ath11k_mac_vif_ap_active_any(ar->ab)) goto fin; param.vdev_id = vdev_id; param.start_interval_msec = 0; param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "start 11d scan\n"); ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param); if (ret) { ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n", vdev_id, ret); } else { ar->vdev_id_11d_scan = vdev_id; if (ar->state_11d == ATH11K_11D_PREPARING) ar->state_11d = ATH11K_11D_RUNNING; } fin: if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } mutex_unlock(&ar->ab->vdev_id_11d_lock); } void ath11k_mac_11d_scan_stop(struct ath11k *ar) { int ret; u32 vdev_id; if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) return; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d scan\n"); mutex_lock(&ar->ab->vdev_id_11d_lock); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d vdev id %d\n", ar->vdev_id_11d_scan); if (ar->state_11d == ATH11K_11D_PREPARING) { ar->state_11d = ATH11K_11D_IDLE; 
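/* Leaving the PREPARING state: complete completed_11d_scan so nothing stays blocked waiting for the 11d scan to begin. */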
complete(&ar->completed_11d_scan); } if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) { vdev_id = ar->vdev_id_11d_scan; ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop 11d scan vdev %d ret: %d\n", vdev_id, ret); } else { ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } } mutex_unlock(&ar->ab->vdev_id_11d_lock); } void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; ath11k_dbg(ab, ATH11K_DBG_MAC, "stop soc 11d scan\n"); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; ath11k_mac_11d_scan_stop(ar); } } static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif) { unsigned long time_left; struct ieee80211_vif *vif = arvif->vif; int ret = 0; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_delete_done); ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n", arvif->vdev_id, ret); return ret; } time_left = wait_for_completion_timeout(&ar->vdev_delete_done, ATH11K_VDEV_DELETE_TIMEOUT_HZ); if (time_left == 0) { ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); return -ETIMEDOUT; } ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id); ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ar->num_created_vdevs--; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); return ret; } static void ath11k_mac_bcn_tx_work(struct work_struct *work) { struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, bcn_tx_work); mutex_lock(&arvif->ar->conf_mutex); ath11k_mac_bcn_tx_event(arvif); mutex_unlock(&arvif->ar->conf_mutex); } static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); - struct vdev_create_params vdev_param = {0}; + struct vdev_create_params vdev_param = {}; struct peer_create_params peer_param; u32 param_id, param_value; u16 nss; int i; int ret, fbret; int bit; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&ar->conf_mutex); if (vif->type == NL80211_IFTYPE_AP && ar->num_peers > (ar->max_num_peers - 1)) { ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); ret = -ENOBUFS; goto err; } if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) { ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n", ar->num_created_vdevs, TARGET_NUM_VDEVS(ab)); ret = -EBUSY; goto err; } memset(arvif, 0, sizeof(*arvif)); arvif->ar = ar; arvif->vif = vif; INIT_LIST_HEAD(&arvif->list); INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work); INIT_DELAYED_WORK(&arvif->connection_loss_work, ath11k_mac_vif_sta_connection_loss_work); for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { arvif->bitrate_mask.control[i].legacy = 0xffffffff; arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI; memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].ht_mcs)); memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].vht_mcs)); memset(arvif->bitrate_mask.control[i].he_mcs, 0xff, sizeof(arvif->bitrate_mask.control[i].he_mcs)); } bit = __ffs64(ab->free_vdev_map); arvif->vdev_id = bit; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { 
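/* Map the mac80211 interface type onto a WMI vdev type/subtype pair; the p2p flag only refines the subtype. */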
case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; break; case NL80211_IFTYPE_MESH_POINT: arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; fallthrough; case NL80211_IFTYPE_AP: arvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; ar->monitor_vdev_id = bit; break; case NL80211_IFTYPE_P2P_DEVICE: arvif->vdev_type = WMI_VDEV_TYPE_STA; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; break; default: WARN_ON(1); break; } ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "add interface id %d type %d subtype %d map %llx\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, ab->free_vdev_map); vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1); for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1); ret = ath11k_mac_setup_vdev_create_params(arvif, &vdev_param); if (ret) { ath11k_warn(ab, "failed to create vdev parameters %d: %d\n", arvif->vdev_id, ret); goto err; } ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param); if (ret) { ath11k_warn(ab, "failed to create WMI vdev %d: %d\n", arvif->vdev_id, ret); goto err; } ar->num_created_vdevs++; ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n", vif->addr, arvif->vdev_id); ar->allocated_vdev_map |= 1LL << arvif->vdev_id; ab->free_vdev_map &= ~(1LL << arvif->vdev_id); spin_lock_bh(&ar->data_lock); list_add(&arvif->list, &ar->arvifs); spin_unlock_bh(&ar->data_lock); ath11k_mac_op_update_vif_offload(hw, vif); nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_NSS, nss); if (ret) { ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret); goto err_vdev_del; } switch (arvif->vdev_type) { case WMI_VDEV_TYPE_AP: peer_param.vdev_id = arvif->vdev_id; peer_param.peer_addr = vif->addr; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath11k_peer_create(ar, arvif, NULL, &peer_param); if (ret) { ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n", arvif->vdev_id, ret); goto err_vdev_del; } ret = ath11k_mac_set_kickout(arvif); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n", arvif->vdev_id, ret); goto err_peer_del; } ath11k_mac_11d_scan_stop_all(ar->ab); break; case WMI_VDEV_TYPE_STA: param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY; param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n", arvif->vdev_id, ret); goto err_peer_del; } param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n", arvif->vdev_id, ret); goto err_peer_del; } param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT; param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n", arvif->vdev_id, ret); goto err_peer_del; } ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, WMI_STA_PS_MODE_DISABLED); if (ret) { ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", 
arvif->vdev_id, ret); goto err_peer_del; } if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) { reinit_completion(&ar->completed_11d_scan); ar->state_11d = ATH11K_11D_PREPARING; } break; case WMI_VDEV_TYPE_MONITOR: set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); break; default: break; } arvif->txpower = vif->bss_conf.txpower; ret = ath11k_mac_txpower_recalc(ar); if (ret) goto err_peer_del; param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; param_value = ar->hw->wiphy->rts_threshold; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); } ath11k_dp_vdev_tx_attach(ar, arvif); if (vif->type != NL80211_IFTYPE_MONITOR && test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_vdev_create(ar); if (ret) ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d", ret); } if (ath11k_wmi_supports_6ghz_cc_ext(ar)) { struct cur_regulatory_info *reg_info; reg_info = &ab->reg_info_store[ar->pdev_idx]; ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n"); ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP); } mutex_unlock(&ar->conf_mutex); return 0; err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (fbret) { ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n", vif->addr, arvif->vdev_id, fbret); goto err; } } err_vdev_del: ath11k_mac_vdev_delete(ar, arvif); spin_lock_bh(&ar->data_lock); list_del(&arvif->list); spin_unlock_bh(&ar->data_lock); err: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); if (skb_cb->vif == vif) skb_cb->vif = NULL; return 0; } static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_base *ab = ar->ab; int ret; int i; cancel_delayed_work_sync(&arvif->connection_loss_work); cancel_work_sync(&arvif->bcn_tx_work); mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "remove interface (vdev %d)\n", arvif->vdev_id); ret = ath11k_spectral_vif_stop(arvif); if (ret) ath11k_warn(ab, "failed to stop spectral for vdev %i: %d\n", arvif->vdev_id, ret); if (arvif->vdev_type == WMI_VDEV_TYPE_STA) ath11k_mac_11d_scan_stop(ar); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (ret) ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", arvif->vdev_id, ret); } ret = ath11k_mac_vdev_delete(ar, arvif); if (ret) { ath11k_warn(ab, "failed to delete vdev %d: %d\n", arvif->vdev_id, ret); goto err_vdev_del; } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ar->monitor_vdev_id = -1; } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) && !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_vdev_delete(ar); if (ret) /* continue even if there's an error */ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d", ret); } err_vdev_del: spin_lock_bh(&ar->data_lock); list_del(&arvif->list); spin_unlock_bh(&ar->data_lock); ath11k_peer_cleanup(ar, arvif->vdev_id); idr_for_each(&ar->txmgmt_idr, 
ath11k_mac_vif_txmgmt_idr_remove, vif); for (i = 0; i < ab->hw_params.max_tx_ring; i++) { spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock); idr_for_each(&ab->dp.tx_ring[i].txbuf_idr, ath11k_mac_vif_unref, vif); spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock); } /* Recalc txpower for remaining vdev */ ath11k_mac_txpower_recalc(ar); /* TODO: recalc traffic pause state based on the available vdevs */ mutex_unlock(&ar->conf_mutex); } /* FIXME: Has to be verified. */ #define SUPPORTED_FILTERS \ (FIF_ALLMULTI | \ FIF_CONTROL | \ FIF_PSPOLL | \ FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC | \ FIF_PROBE_REQ | \ FIF_FCSFAIL) static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); *total_flags &= SUPPORTED_FILTERS; ar->filter_flags = *total_flags; mutex_unlock(&ar->conf_mutex); } -static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 *tx_ant, u32 *rx_ant) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); *tx_ant = ar->cfg_tx_chainmask; *rx_ant = ar->cfg_rx_chainmask; mutex_unlock(&ar->conf_mutex); return 0; } -static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx, + u32 tx_ant, u32 rx_ant) { struct ath11k *ar = hw->priv; int ret; mutex_lock(&ar->conf_mutex); ret = __ath11k_set_antenna(ar, tx_ant, rx_ant); mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct ath11k *ar = hw->priv; int ret = -EINVAL; mutex_lock(&ar->conf_mutex); switch (params->action) { case IEEE80211_AMPDU_RX_START: ret = ath11k_dp_rx_ampdu_start(ar, params); break; case IEEE80211_AMPDU_RX_STOP: ret = ath11k_dp_rx_ampdu_stop(ar, params); break; case IEEE80211_AMPDU_TX_START: case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: case IEEE80211_AMPDU_TX_OPERATIONAL: /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211 * Tx aggregation requests. */ ret = -EOPNOTSUPP; break; } mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx add freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); /* TODO: In case of multiple channel contexts, populate rx_channel from * Rx PPDU desc information. */ ar->rx_channel = ctx->def.chan; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); return 0; } static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx remove freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); /* TODO: In case there is one more channel context left, populate * rx_channel with the channel of that remaining channel context. 
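* Until that is implemented, rx_channel is simply cleared below.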
*/ ar->rx_channel = NULL; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx, bool restart) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; const struct cfg80211_chan_def *chandef = &ctx->def; int ret = 0; unsigned int dfs_cac_time; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); arg.vdev_id = arvif->vdev_id; arg.dtim_period = arvif->dtim_period; arg.bcn_intval = arvif->beacon_interval; arg.channel.freq = chandef->chan->center_freq; arg.channel.band_center_freq1 = chandef->center_freq1; arg.channel.band_center_freq2 = chandef->center_freq2; arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width]; arg.channel.min_power = 0; arg.channel.max_power = chandef->chan->max_power; arg.channel.max_reg_power = chandef->chan->max_reg_power; arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain; arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; arg.mbssid_flags = 0; arg.mbssid_tx_vdev_id = 0; if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, ar->ab->wmi_ab.svc_map)) { ret = ath11k_mac_setup_vdev_params_mbssid(arvif, &arg.mbssid_flags, &arg.mbssid_tx_vdev_id); if (ret) return ret; } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { arg.ssid = arvif->u.ap.ssid; arg.ssid_len = arvif->u.ap.ssid_len; arg.hidden_ssid = arvif->u.ap.hidden_ssid; /* For now allow DFS for AP mode */ arg.channel.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); arg.channel.freq2_radar = ctx->radar_enabled; arg.channel.passive = arg.channel.chan_radar; spin_lock_bh(&ab->base_lock); arg.regdomain = ar->ab->dfs_region; spin_unlock_bh(&ab->base_lock); } arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %d start center_freq %d phymode %s\n", arg.vdev_id, arg.channel.freq, ath11k_wmi_phymode_str(arg.channel.mode)); ret = ath11k_wmi_vdev_start(ar, &arg, restart); if (ret) { ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n", restart ? "restart" : "start", arg.vdev_id); return ret; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n", arg.vdev_id, restart ? "restart" : "start", ret); return ret; } /* TODO: For now we only set TPC power here. However when * channel changes, say CSA, it should be updated again. */ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) { ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx); ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, &arvif->reg_tpc_info); } if (!restart) ar->num_started_vdevs++; ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); /* Enable the CAC flag in the driver by checking that all sub-channels' DFS * state is NL80211_DFS_USABLE, which indicates CAC needs to be * done before channel usage. This flag is used to drop rx packets * during CAC. 
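* cfg80211_chandef_dfs_cac_time() supplies the required CAC duration (typically 60 s, longer for ETSI weather-radar channels).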
*/ /* TODO Set the flag for other interface types as required */ if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) { set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy, chandef); ath11k_dbg(ab, ATH11K_DBG_MAC, "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", dfs_cac_time, arg.channel.freq, chandef->center_freq1, arg.vdev_id); } ret = ath11k_mac_set_txbf_conf(arvif); if (ret) ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n", arvif->vdev_id, ret); return 0; } static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif) { struct ath11k *ar = arvif->ar; int ret; lockdep_assert_held(&ar->conf_mutex); reinit_completion(&ar->vdev_setup_done); ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", arvif->vdev_id, ret); goto err; } ret = ath11k_mac_vdev_setup_sync(ar); if (ret) { ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", arvif->vdev_id, ret); goto err; } WARN_ON(ar->num_started_vdevs == 0); ar->num_started_vdevs--; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", arvif->vif->addr, arvif->vdev_id); if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n", arvif->vdev_id); } return 0; err: return ret; } static int ath11k_mac_vdev_start(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath11k_mac_vdev_start_restart(arvif, ctx, false); } static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath11k_mac_vdev_start_restart(arvif, ctx, true); } struct ath11k_mac_change_chanctx_arg { struct ieee80211_chanctx_conf *ctx; struct ieee80211_vif_chanctx_switch *vifs; int n_vifs; int next_vif; }; static void ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_mac_change_chanctx_arg *arg = data; if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) return; arg->n_vifs++; } static void ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath11k_mac_change_chanctx_arg *arg = data; struct ieee80211_chanctx_conf *ctx; ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); if (ctx != arg->ctx) return; if (WARN_ON(arg->next_vif == arg->n_vifs)) return; arg->vifs[arg->next_vif].vif = vif; arg->vifs[arg->next_vif].old_ctx = ctx; arg->vifs[arg->next_vif].new_ctx = ctx; arg->next_vif++; } static void ath11k_mac_update_vif_chan(struct ath11k *ar, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif, *tx_arvif; int ret; int i; bool monitor_vif = false; lockdep_assert_held(&ar->conf_mutex); /* Associated channel resources of all relevant vdevs * should be available for the channel switch now. */ /* TODO: Update ar->rx_channel */ for (i = 0; i < n_vifs; i++) { arvif = ath11k_vif_to_arvif(vifs[i].vif); if (WARN_ON(!arvif->is_started)) continue; /* change_chanctx can be called even before vdev_up from * ieee80211_start_ap->ieee80211_vif_use_channel-> * ieee80211_recalc_radar_chanctx. * * Firmware expects vdev_restart only if vdev is up. * If vdev is down then it expects vdev_stop->vdev_start. 
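* Hence the is_up check below: a running vdev gets vdev_restart, anything else goes through the stop + start sequence.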
*/ if (arvif->is_up) { ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx); if (ret) { ath11k_warn(ab, "failed to restart vdev %d: %d\n", arvif->vdev_id, ret); continue; } } else { ret = ath11k_mac_vdev_stop(arvif); if (ret) { ath11k_warn(ab, "failed to stop vdev %d: %d\n", arvif->vdev_id, ret); continue; } ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx); if (ret) ath11k_warn(ab, "failed to start vdev %d: %d\n", arvif->vdev_id, ret); continue; } ret = ath11k_mac_setup_bcn_tmpl(arvif); if (ret) ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n", ret); tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? tx_arvif->bssid : NULL, arvif->vif->bss_conf.bssid_index, 1 << arvif->vif->bss_conf.bssid_indicator); if (ret) { ath11k_warn(ab, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); continue; } } /* Restart the internal monitor vdev on new channel */ if (!monitor_vif && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d", ret); return; } ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d", ret); return; } } } static void ath11k_mac_update_active_vif_chan(struct ath11k *ar, struct ieee80211_chanctx_conf *ctx) { struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx }; lockdep_assert_held(&ar->conf_mutex); ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_change_chanctx_cnt_iter, &arg); if (arg.n_vifs == 0) return; arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL); if (!arg.vifs) return; ieee80211_iterate_active_interfaces_atomic(ar->hw, IEEE80211_IFACE_ITER_NORMAL, ath11k_mac_change_chanctx_fill_iter, &arg); ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); kfree(arg.vifs); } static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx change freq %u width %d ptr %p changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); /* This shouldn't really happen because channel switching should use * switch_vif_chanctx(). */ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) goto unlock; if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || changed & IEEE80211_CHANCTX_CHANGE_RADAR) ath11k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ unlock: mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(arvif->is_started)) return -EBUSY; ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, arvif->chanctx.def.chan->center_freq, ret); return ret; } /* Reconfigure hardware rate code since it is cleared by firmware. 
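* A vdev (re)start clears WMI_VDEV_PARAM_MGMT_RATE in firmware, so the cached ar->hw_rate_code must be written back whenever it has been set.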
*/ if (ar->hw_rate_code > 0) { u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, ar->hw_rate_code); if (ret) { ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); return ret; } } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr, NULL, 0, 0); if (ret) { ath11k_warn(ab, "failed to put monitor up: %d\n", ret); return ret; } } arvif->is_started = true; /* TODO: Setup ps and cts/rts protection */ return 0; } static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; if (WARN_ON(!arvif->is_started)) return -EBUSY; ret = ath11k_mac_vdev_stop(arvif); if (ret) { ath11k_warn(ab, "failed to stop vdev %i: %d\n", arvif->vdev_id, ret); return ret; } arvif->is_started = false; /* TODO: Setup ps and cts/rts protection */ return 0; } static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def) { if (chan_def->chan->flags & IEEE80211_CHAN_PSD) { switch (chan_def->width) { case NL80211_CHAN_WIDTH_20: return 1; case NL80211_CHAN_WIDTH_40: return 2; case NL80211_CHAN_WIDTH_80: return 4; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 8; default: return 1; } } else { switch (chan_def->width) { case NL80211_CHAN_WIDTH_20: return 1; case NL80211_CHAN_WIDTH_40: return 2; case NL80211_CHAN_WIDTH_80: return 3; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 4; default: return 1; } } } static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def) { u16 diff_seq; /* Get the center frequency of the lowest 20 MHz channel in the chandef, * i.e. diff_seq = bandwidth / 2 - 10. For example, * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1 * with center frequency 5955, its diff is 5965 - 5955 = 10. * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1 * with center frequency 5955, its diff is 5985 - 5955 = 30. * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1 * with center frequency 5955, its diff is 6025 - 5955 = 70. */ switch (chan_def->width) { case NL80211_CHAN_WIDTH_160: diff_seq = 70; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: diff_seq = 30; break; case NL80211_CHAN_WIDTH_40: diff_seq = 10; break; default: diff_seq = 0; } return chan_def->center_freq1 - diff_seq; } static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def, u16 start_seq, u8 seq) { u16 seg_seq; /* Get the center frequency of the specific bandwidth segment. * start_seq means the lowest channel number's center frequency. * seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz&80P80. * For example, * lowest channel is 1, its center frequency 5955, * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0. * lowest channel is 1, its center frequency 5955, * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10. * lowest channel is 1, its center frequency 5955, * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30. * lowest channel is 1, its center frequency 5955, * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70. 
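* In general the segment offset from start_seq is 10 * (2^seq - 1) MHz, which is what the BIT() arithmetic below computes.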
*/ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3) return chan_def->center_freq2; seg_seq = 10 * (BIT(seq) - 1); return seg_seq + start_seq; } static void ath11k_mac_get_psd_channel(struct ath11k *ar, u16 step_freq, u16 *start_freq, u16 *center_freq, u8 i, struct ieee80211_channel **temp_chan, s8 *tx_power) { /* Get the center frequency for each 20 MHz sub-channel. * For example, if the chan is 160 MHz and center frequency is 6025, * then it includes 8 channels, they are 1/5/9/13/17/21/25/29, * channel number 1's center frequency is 5955, which is parameter start_freq. * parameter i is the step index of the 8 channels, 0~7. * channels 1/5/9/13/17/21/25/29 map to i=0/1/2/3/4/5/6/7, * and their center frequencies are 5955/5975/5995/6015/6035/6055/6075/6095, * the gap is 20 MHz for each channel, parameter step_freq means the gap. * after getting the center frequency of each channel, it is easy to find its * struct ieee80211_channel and get the max_reg_power. */ *center_freq = *start_freq + i * step_freq; *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); *tx_power = (*temp_chan)->max_reg_power; } static void ath11k_mac_get_eirp_power(struct ath11k *ar, u16 *start_freq, u16 *center_freq, u8 i, struct ieee80211_channel **temp_chan, struct cfg80211_chan_def *def, s8 *tx_power) { /* Get the center frequency for 20 MHz/40 MHz/80 MHz/ * 160 MHz&80P80 bandwidth, and then add 10 to the center frequency; * the result is the center frequency of a channel number. * For example, when the configured channel number is 1: * center frequency is 5965 when bandwidth=40 MHz, after adding 10 it is 5975, * which is channel number 5. * center frequency is 5985 when bandwidth=80 MHz, after adding 10 it is 5995, * which is channel number 9. * center frequency is 6025 when bandwidth=160 MHz, after adding 10 it is 6035, * which is channel number 17. * after getting the center frequency of each channel, it is easy to find its * struct ieee80211_channel and get the max_reg_power. 
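* i.e. take the segment center frequency from ath11k_mac_get_seg_freq() and, except for 20 MHz, add 10 MHz to land on a valid channel center whose max_reg_power can be looked up.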
*/ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i); /* For 20 MHz the segment center frequency is already the channel's center frequency */ if (i != 0) *center_freq += 10; *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); *tx_power = (*temp_chan)->max_reg_power; } void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info; struct ieee80211_channel *chan, *temp_chan; u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction; bool is_psd_power = false, is_tpe_present = false; s8 max_tx_power[ATH11K_NUM_PWR_LEVELS], psd_power, tx_power; s8 eirp_power = 0; u16 start_freq, center_freq; chan = ctx->def.chan; start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def); pwr_reduction = bss_conf->pwr_reduction; if (arvif->reg_tpc_info.num_pwr_levels) { is_tpe_present = true; num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels; } else { num_pwr_levels = ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper); } for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) { /* STA received TPE IE */ if (is_tpe_present) { /* local power is PSD power */ if (chan->flags & IEEE80211_CHAN_PSD) { /* Connecting AP is psd power */ if (reg_tpc_info->is_psd_power) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &tx_power); psd_power = temp_chan->psd; eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = min_t(s8, psd_power, reg_tpc_info->tpe[pwr_lvl_idx]); /* Connecting AP is not psd power */ } else { ath11k_mac_get_eirp_power(ar, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); psd_power = temp_chan->psd; /* convert psd power to EIRP power based on channel * width: EIRP = PSD + 10*log10(BW/MHz), i.e. 13 dB * at 20 MHz plus about 3 dB per doubling, hence * 13 + pwr_lvl_idx * 3 */ tx_power = min_t(s8, tx_power, psd_power + 13 + pwr_lvl_idx * 3); max_tx_power[pwr_lvl_idx] = min_t(s8, tx_power, reg_tpc_info->tpe[pwr_lvl_idx]); } /* local power is not PSD power */ } else { /* Connecting AP is psd power */ if (reg_tpc_info->is_psd_power) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &tx_power); eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = reg_tpc_info->tpe[pwr_lvl_idx]; /* Connecting AP is not psd power */ } else { ath11k_mac_get_eirp_power(ar, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); max_tx_power[pwr_lvl_idx] = min_t(s8, tx_power, reg_tpc_info->tpe[pwr_lvl_idx]); } } /* STA not received TPE IE */ } else { /* local power is PSD power */ if (chan->flags & IEEE80211_CHAN_PSD) { is_psd_power = true; ath11k_mac_get_psd_channel(ar, 20, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &tx_power); psd_power = temp_chan->psd; eirp_power = tx_power; max_tx_power[pwr_lvl_idx] = psd_power; } else { ath11k_mac_get_eirp_power(ar, &start_freq, &center_freq, pwr_lvl_idx, &temp_chan, &ctx->def, &tx_power); max_tx_power[pwr_lvl_idx] = tx_power; } } if (is_psd_power) { /* If AP local power constraint is present */ if (pwr_reduction) eirp_power = eirp_power - pwr_reduction; /* If firmware updated max tx power is non-zero, then take * the min of firmware updated ap tx power * and max power derived from above mentioned parameters.
*/ ath11k_dbg(ab, ATH11K_DBG_MAC, "eirp power : %d firmware report power : %d\n", eirp_power, ar->max_allowed_tx_power); /* Firmware reports lower max_allowed_tx_power during vdev * start response. In case of 6 GHz, firmware is not aware * of EIRP power unless driver sets EIRP power through WMI * TPC command. So a radio which does not support idle power * save can set the maximum calculated EIRP power directly to * firmware through the TPC command, without a min comparison * against the vdev start response's max_allowed_tx_power. */ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) eirp_power = min_t(s8, eirp_power, ar->max_allowed_tx_power); } else { /* If AP local power constraint is present */ if (pwr_reduction) max_tx_power[pwr_lvl_idx] = max_tx_power[pwr_lvl_idx] - pwr_reduction; /* If firmware updated max tx power is non-zero, then take * the min of firmware updated ap tx power * and max power derived from above mentioned parameters. */ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) max_tx_power[pwr_lvl_idx] = min_t(s8, max_tx_power[pwr_lvl_idx], ar->max_allowed_tx_power); } reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq; reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power = max_tx_power[pwr_lvl_idx]; } reg_tpc_info->num_pwr_levels = num_pwr_levels; reg_tpc_info->is_psd_power = is_psd_power; reg_tpc_info->eirp_power = eirp_power; reg_tpc_info->ap_power_type = ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type); } static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_chanctx_conf *ctx) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; struct ieee80211_parsed_tpe_eirp *non_psd = NULL; struct ieee80211_parsed_tpe_psd *psd = NULL; enum wmi_reg_6ghz_client_type client_type; struct cur_regulatory_info *reg_info; u8 local_tpe_count, reg_tpe_count; bool use_local_tpe; int i; reg_info = &ab->reg_info_store[ar->pdev_idx]; client_type = reg_info->client_type; local_tpe_count = bss_conf->tpe.max_local[client_type].valid + bss_conf->tpe.psd_local[client_type].valid; reg_tpe_count = bss_conf->tpe.max_reg_client[client_type].valid + bss_conf->tpe.psd_reg_client[client_type].valid; if (!reg_tpe_count && !local_tpe_count) { ath11k_warn(ab, "no transmit power envelope matches client power type %d\n", client_type); return; } else if (!reg_tpe_count) { use_local_tpe = true; } else { use_local_tpe = false; } if (use_local_tpe) { psd = &bss_conf->tpe.psd_local[client_type]; if (!psd->valid) psd = NULL; non_psd = &bss_conf->tpe.max_local[client_type]; if (!non_psd->valid) non_psd = NULL; } else { psd = &bss_conf->tpe.psd_reg_client[client_type]; if (!psd->valid) psd = NULL; non_psd = &bss_conf->tpe.max_reg_client[client_type]; if (!non_psd->valid) non_psd = NULL; } if (non_psd && !psd) { arvif->reg_tpc_info.is_psd_power = false; arvif->reg_tpc_info.eirp_power = 0; arvif->reg_tpc_info.num_pwr_levels = non_psd->count; for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, "non PSD power[%d] : %d\n", i, non_psd->power[i]); arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2; } } if (psd) { arvif->reg_tpc_info.is_psd_power = true; arvif->reg_tpc_info.num_pwr_levels = psd->count; for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, "TPE PSD power[%d] : %d\n", i, psd->power[i]); arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2; } } } static int ath11k_mac_op_assign_vif_chanctx(struct
ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); if (ath11k_wmi_supports_6ghz_cc_ext(ar) && ctx->def.chan->band == NL80211_BAND_6GHZ && arvif->vdev_type == WMI_VDEV_TYPE_STA) { arvif->chanctx = *ctx; ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); } /* for QCA6390 bss peer must be created before vdev_start */ if (ab->hw_params.vdev_start_delay && arvif->vdev_type != WMI_VDEV_TYPE_AP && arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); ret = 0; goto out; } if (WARN_ON(arvif->is_started)) { ret = -EBUSY; goto out; } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", ret); goto out; } arvif->is_started = true; goto out; } if (!arvif->is_started) { ret = ath11k_mac_vdev_start(arvif, ctx); if (ret) { ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, ctx->def.chan->center_freq, ret); goto out; } arvif->is_started = true; } if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_start(ar); if (ret) { ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d", ret); goto out; } } /* TODO: Setup ps and cts/rts protection */ ret = 0; out: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_peer *peer; int ret; mutex_lock(&ar->conf_mutex); ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx unassign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, ar->mac_addr); spin_unlock_bh(&ab->base_lock); if (peer) ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_mac_monitor_stop(ar); if (ret) { ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", ret); mutex_unlock(&ar->conf_mutex); return; } arvif->is_started = false; mutex_unlock(&ar->conf_mutex); return; } if (arvif->is_started) { ret = ath11k_mac_vdev_stop(arvif); if (ret) ath11k_warn(ab, "failed to stop vdev %i: %d\n", arvif->vdev_id, ret); arvif->is_started = false; } if (ab->hw_params.vdev_start_delay && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) ath11k_wmi_vdev_down(ar, arvif->vdev_id); if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->num_started_vdevs == 1 && test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ret = ath11k_mac_monitor_stop(ar); if (ret) /* continue even if there's an error */ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d", ret); } if (arvif->vdev_type == WMI_VDEV_TYPE_STA) ath11k_mac_11d_scan_start(ar, arvif->vdev_id); mutex_unlock(&ar->conf_mutex); } static int 
ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "chanctx switch n_vifs %d mode %d\n", n_vifs, mode); ath11k_mac_update_vif_chan(ar, vifs, n_vifs); mutex_unlock(&ar->conf_mutex); return 0; } static int ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value) { struct ath11k_vif *arvif; int ret = 0; mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n", arvif->vdev_id, param, value); ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value); if (ret) { ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n", param, arvif->vdev_id, ret); break; } } mutex_unlock(&ar->conf_mutex); return ret; } /* mac80211 stores a device-specific RTS/fragmentation threshold value; * the ath11k driver pushes it to firmware per interface */ -static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, + int radio_idx, u32 value) { struct ath11k *ar = hw->priv; int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value); } -static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) +static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, + int radio_idx, u32 value) { /* Even though there's a WMI vdev param for fragmentation threshold, no * known firmware actually implements it. Moreover it is not possible to * leave frame fragmentation to mac80211 because firmware clears the * "more fragments" bit in frame control making it impossible for remote * devices to reassemble frames. * * Hence implement a dummy callback just to say fragmentation isn't * supported. This effectively prevents mac80211 from doing frame * fragmentation in software.
*/ return -EOPNOTSUPP; } static int ath11k_mac_flush_tx_complete(struct ath11k *ar) { long time_left; int ret = 0; time_left = wait_event_timeout(ar->dp.tx_empty_waitq, (atomic_read(&ar->dp.num_tx_pending) == 0), ATH11K_FLUSH_TIMEOUT); if (time_left == 0) { ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n", atomic_read(&ar->dp.num_tx_pending)); ret = -ETIMEDOUT; } time_left = wait_event_timeout(ar->txmgmt_empty_waitq, (atomic_read(&ar->num_pending_mgmt_tx) == 0), ATH11K_FLUSH_TIMEOUT); if (time_left == 0) { ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", atomic_read(&ar->num_pending_mgmt_tx)); ret = -ETIMEDOUT; } return ret; } int ath11k_mac_wait_tx_complete(struct ath11k *ar) { ath11k_mac_drain_tx(ar); return ath11k_mac_flush_tx_complete(ar); } static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct ath11k *ar = hw->priv; if (drop) return; ath11k_mac_flush_tx_complete(ar); } static bool ath11k_mac_has_single_legacy_rate(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int num_rates = 0; num_rates = hweight32(mask->control[band].legacy); if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) return false; if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) return false; if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask)) return false; return num_rates == 1; } static __le16 ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap) { if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) return he_cap->he_mcs_nss_supp.tx_mcs_80p80; if (he_cap->he_cap_elem.phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) return he_cap->he_mcs_nss_supp.tx_mcs_160; return he_cap->he_mcs_nss_supp.tx_mcs_80; } static bool ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, struct ath11k_vif *arvif, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, int *nss) { struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); const struct ieee80211_sta_he_cap *he_cap; u16 he_mcs_map = 0; u8 ht_nss_mask = 0; u8 vht_nss_mask = 0; u8 he_nss_mask = 0; int i; /* No need to consider legacy here. 
Basic rates are always present * in bitrate mask */ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { if (mask->control[band].ht_mcs[i] == 0) continue; else if (mask->control[band].ht_mcs[i] == sband->ht_cap.mcs.rx_mask[i]) ht_nss_mask |= BIT(i); else return false; } for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { if (mask->control[band].vht_mcs[i] == 0) continue; else if (mask->control[band].vht_mcs[i] == ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) vht_nss_mask |= BIT(i); else return false; } he_cap = ieee80211_get_he_iftype_cap_vif(sband, arvif->vif); if (!he_cap) return false; he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap)); for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) { if (mask->control[band].he_mcs[i] == 0) continue; if (mask->control[band].he_mcs[i] == ath11k_mac_get_max_he_mcs_map(he_mcs_map, i)) he_nss_mask |= BIT(i); else return false; } if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask) return false; if (ht_nss_mask == 0) return false; if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) return false; *nss = fls(ht_nss_mask); return true; } static int ath11k_mac_get_single_legacy_rate(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask, u32 *rate, u8 *nss) { int rate_idx; u16 bitrate; u8 preamble; u8 hw_rate; if (hweight32(mask->control[band].legacy) != 1) return -EINVAL; rate_idx = ffs(mask->control[band].legacy) - 1; if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX; hw_rate = ath11k_legacy_rates[rate_idx].hw_value; bitrate = ath11k_legacy_rates[rate_idx].bitrate; if (ath11k_mac_bitrate_is_cck(bitrate)) preamble = WMI_RATE_PREAMBLE_CCK; else preamble = WMI_RATE_PREAMBLE_OFDM; *nss = 1; *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble); return 0; } static int ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf) { struct ath11k *ar = arvif->ar; int ret; /* 0.8 = 0, 1.6 = 2 and 3.2 = 3. 
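 * NL80211 enumerates these GI values as 0/1/2 (NL80211_RATE_INFO_HE_GI_0_8/1_6/3_2), so any value other than 0 or the 0xFF wildcard is bumped by one below to match the firmware encoding.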
*/ if (he_gi && he_gi != 0xFF) he_gi += 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_SGI, he_gi); if (ret) { ath11k_warn(ar->ab, "failed to set he gi %d: %d\n", he_gi, ret); return ret; } /* start from 1 */ if (he_ltf != 0xFF) he_ltf += 1; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_HE_LTF, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n", he_ltf, ret); return ret; } return 0; } static int ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf) { struct ath11k *ar = arvif->ar; int ret; u32 he_ar_gi_ltf; if (he_gi != 0xFF) { switch (he_gi) { case NL80211_RATE_INFO_HE_GI_0_8: he_gi = WMI_AUTORATE_800NS_GI; break; case NL80211_RATE_INFO_HE_GI_1_6: he_gi = WMI_AUTORATE_1600NS_GI; break; case NL80211_RATE_INFO_HE_GI_3_2: he_gi = WMI_AUTORATE_3200NS_GI; break; default: ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi); return -EINVAL; } } if (he_ltf != 0xFF) { switch (he_ltf) { case NL80211_RATE_INFO_HE_1XLTF: he_ltf = WMI_HE_AUTORATE_LTF_1X; break; case NL80211_RATE_INFO_HE_2XLTF: he_ltf = WMI_HE_AUTORATE_LTF_2X; break; case NL80211_RATE_INFO_HE_4XLTF: he_ltf = WMI_HE_AUTORATE_LTF_4X; break; default: ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf); return -EINVAL; } } he_ar_gi_ltf = he_gi | he_ltf; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, WMI_VDEV_PARAM_AUTORATE_MISC_CFG, he_ar_gi_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set he autorate gi %u ltf %u: %d\n", he_gi, he_ltf, ret); return ret; } return 0; } static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif, u32 rate, u8 nss, u8 sgi, u8 ldpc, u8 he_gi, u8 he_ltf, bool he_fixed_rate) { struct ath11k *ar = arvif->ar; u32 vdev_param; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", arvif->vdev_id, rate, nss, sgi, ldpc, he_gi, he_ltf, he_fixed_rate); if (!arvif->vif->bss_conf.he_support) { vdev_param = WMI_VDEV_PARAM_FIXED_RATE; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, rate); if (ret) { ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", rate, ret); return ret; } } vdev_param = WMI_VDEV_PARAM_NSS; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, nss); if (ret) { ath11k_warn(ar->ab, "failed to set nss param %d: %d\n", nss, ret); return ret; } vdev_param = WMI_VDEV_PARAM_LDPC; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, ldpc); if (ret) { ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n", ldpc, ret); return ret; } if (arvif->vif->bss_conf.he_support) { if (he_fixed_rate) { ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n", ret); return ret; } } else { ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi, he_ltf); if (ret) { ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n", ret); return ret; } } } else { vdev_param = WMI_VDEV_PARAM_SGI; ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, sgi); if (ret) { ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n", sgi, ret); return ret; } } return 0; } static bool ath11k_mac_vht_mcs_range_present(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; u16 vht_mcs; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { vht_mcs = mask->control[band].vht_mcs[i]; switch (vht_mcs) { case 0: case BIT(8) - 1: 
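/* a per-stream mask is accepted only when empty or a full contiguous range: MCS 0-7 (0xff), 0-8 (0x1ff) or 0-9 (0x3ff), matching what peer_assoc can encode */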
case BIT(9) - 1: case BIT(10) - 1: break; default: return false; } } return true; } static bool ath11k_mac_he_mcs_range_present(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { int i; u16 he_mcs; for (i = 0; i < NL80211_HE_NSS_MAX; i++) { he_mcs = mask->control[band].he_mcs[i]; switch (he_mcs) { case 0: case BIT(8) - 1: case BIT(10) - 1: case BIT(12) - 1: break; default: return false; } } return true; } static void ath11k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arvif->ar; spin_lock_bh(&ar->data_lock); arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; spin_unlock_bh(&ar->data_lock); ieee80211_queue_work(ar->hw, &arsta->update_wk); } static void ath11k_mac_disable_peer_fixed_rate(void *data, struct ieee80211_sta *sta) { struct ath11k_vif *arvif = data; struct ath11k *ar = arvif->ar; int ret; ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PARAM_FIXED_RATE, WMI_FIXED_RATE_NONE); if (ret) ath11k_warn(ar->ab, "failed to disable peer fixed rate for STA %pM ret %d\n", sta->addr, ret); } static bool ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band, const struct cfg80211_bitrate_mask *mask) { bool he_fixed_rate = false, vht_fixed_rate = false; struct ath11k_peer *peer; const u16 *vht_mcs_mask, *he_mcs_mask; struct ieee80211_link_sta *deflink; u8 vht_nss, he_nss; bool ret = true; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1) vht_fixed_rate = true; if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1) he_fixed_rate = true; if (!vht_fixed_rate && !he_fixed_rate) return true; vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask); he_nss = ath11k_mac_max_he_nss(he_mcs_mask); rcu_read_lock(); spin_lock_bh(&ar->ab->base_lock); list_for_each_entry(peer, &ar->ab->peers, list) { if (peer->sta) { deflink = &peer->sta->deflink; if (vht_fixed_rate && (!deflink->vht_cap.vht_supported || deflink->rx_nss < vht_nss)) { ret = false; goto out; } if (he_fixed_rate && (!deflink->he_cap.has_he || deflink->rx_nss < he_nss)) { ret = false; goto out; } } } out: spin_unlock_bh(&ar->ab->base_lock); rcu_read_unlock(); return ret; } static int ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct cfg80211_chan_def def; struct ath11k_pdev_cap *cap; struct ath11k *ar = arvif->ar; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; const u16 *he_mcs_mask; u8 he_ltf = 0; u8 he_gi = 0; u32 rate; u8 nss; u8 sgi; u8 ldpc; int single_nss; int ret; int num_rates; bool he_fixed_rate = false; if (ath11k_mac_vif_chan(vif, &def)) return -EPERM; band = def.chan->band; cap = &ar->pdev->cap; ht_mcs_mask = mask->control[band].ht_mcs; vht_mcs_mask = mask->control[band].vht_mcs; he_mcs_mask = mask->control[band].he_mcs; ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC); sgi = mask->control[band].gi; if (sgi == NL80211_TXRATE_FORCE_LGI) return -EINVAL; he_gi = mask->control[band].he_gi; he_ltf = mask->control[band].he_ltf; /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it * requires passing at least one of used basic rates along with them. 
* Fixed rate setting across different preambles (legacy, HT, VHT) is * not supported by the FW. Hence use of FIXED_RATE vdev param is not * suitable for setting single HT/VHT rates. * But, there could be a single basic rate passed from userspace which * can be done through the FIXED_RATE param. */ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) { ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate, &nss); if (ret) { ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } - ieee80211_iterate_stations_atomic(ar->hw, - ath11k_mac_disable_peer_fixed_rate, - arvif); + ieee80211_iterate_stations_mtx(ar->hw, + ath11k_mac_disable_peer_fixed_rate, + arvif); } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; nss = single_nss; mutex_lock(&ar->conf_mutex); arvif->bitrate_mask = *mask; ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_set_bitrate_mask_iter, arvif); mutex_unlock(&ar->conf_mutex); } else { rate = WMI_FIXED_RATE_NONE; if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask)) ath11k_warn(ar->ab, "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n"); nss = min_t(u32, ar->num_tx_chains, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask)); /* If multiple rates across different preambles are given * we can reconfigure this info with all peers using PEER_ASSOC * command with the below exception cases. * - Single VHT Rate : peer_assoc command accommodates only MCS * range values i.e. 0-7, 0-8, 0-9 for VHT. Though mac80211 * mandates passing basic rates along with HT/VHT rates, FW * doesn't allow switching from VHT to Legacy. Hence instead of * setting legacy and VHT rates using RATEMASK_CMD vdev cmd, * we could set this VHT rate as peer fixed rate param, which * will override FIXED rate and FW rate control algorithm. * If single VHT rate is passed along with HT rates, we select * the VHT rate as fixed rate for vht peers. * - Multiple VHT Rates : When multiple VHT rates are given, this * can be set using RATEMASK CMD which uses FW rate-ctl alg. * TODO: Setting multiple VHT MCS and replacing peer_assoc with * RATEMASK_CMDID can cover all use cases of setting rates * across multiple preambles and rates within same type. * But requires more validation of the command at this point.
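 * The checks below therefore reject masks that select more than one MCS unless the mask is a full supported range.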
*/ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) && num_rates > 1) { /* TODO: Handle multiple VHT MCS values setting using * RATEMASK CMD */ ath11k_warn(ar->ab, "setting %d mcs values in bitrate mask not supported\n", num_rates); return -EINVAL; } num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask); if (num_rates == 1) he_fixed_rate = true; if (!ath11k_mac_he_mcs_range_present(ar, band, mask) && num_rates > 1) { ath11k_warn(ar->ab, "Setting more than one HE MCS Value in bitrate mask not supported\n"); return -EINVAL; } mutex_lock(&ar->conf_mutex); - ieee80211_iterate_stations_atomic(ar->hw, - ath11k_mac_disable_peer_fixed_rate, - arvif); + ieee80211_iterate_stations_mtx(ar->hw, + ath11k_mac_disable_peer_fixed_rate, + arvif); arvif->bitrate_mask = *mask; ieee80211_iterate_stations_atomic(ar->hw, ath11k_mac_set_bitrate_mask_iter, arvif); mutex_unlock(&ar->conf_mutex); } mutex_lock(&ar->conf_mutex); ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi, he_ltf, he_fixed_rate); if (ret) { ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n", arvif->vdev_id, ret); } mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; int recovery_count; struct ath11k_vif *arvif; if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; mutex_lock(&ar->conf_mutex); if (ar->state == ATH11K_STATE_RESTARTED) { ath11k_warn(ar->ab, "pdev %d successfully recovered\n", ar->pdev->pdev_id); ar->state = ATH11K_STATE_ON; ieee80211_wake_queues(ar->hw); if (ar->ab->hw_params.current_cc_support && ar->alpha2[0] != 0 && ar->alpha2[1] != 0) ath11k_reg_set_cc(ar); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); ath11k_dbg(ab, ATH11K_DBG_BOOT, "recovery count %d\n", recovery_count); /* When there are multiple radios in an SOC, * the recovery has to be done for each radio */ if (recovery_count == ab->num_radios) { atomic_dec(&ab->reset_count); complete(&ab->reset_complete); ab->is_reset = false; atomic_set(&ab->fail_cont_count, 0); ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n"); } } if (ar->ab->hw_params.support_fw_mac_sequence) { list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA) ieee80211_hw_restart_disconnect(arvif->vif); } } } mutex_unlock(&ar->conf_mutex); } static void ath11k_mac_update_bss_chan_survey(struct ath11k *ar, struct ieee80211_channel *channel) { int ret; enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; lockdep_assert_held(&ar->conf_mutex); if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || ar->rx_channel != channel) return; if (ar->scan.state != ATH11K_SCAN_IDLE) { ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ignoring bss chan info req while scanning..\n"); return; } reinit_completion(&ar->bss_survey_done); ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type); if (ret) { ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n"); return; } ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); if (ret == 0) ath11k_warn(ar->ab, "bss channel survey timed out\n"); } static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct ath11k *ar = hw->priv; struct ieee80211_supported_band *sband; struct survey_info *ar_survey; int ret = 0; if (idx >= 
ATH11K_NUM_CHANS) return -ENOENT; ar_survey = &ar->survey[idx]; mutex_lock(&ar->conf_mutex); sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; sband = NULL; } if (!sband) sband = hw->wiphy->bands[NL80211_BAND_6GHZ]; if (!sband || idx >= sband->n_channels) { ret = -ENOENT; goto exit; } ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); spin_lock_bh(&ar->data_lock); memcpy(survey, ar_survey, sizeof(*survey)); spin_unlock_bh(&ar->data_lock); survey->channel = &sband->channels[idx]; if (ar->rx_channel == survey->channel) survey->filled |= SURVEY_INFO_IN_USE; exit: mutex_unlock(&ar->conf_mutex); return ret; } static void ath11k_mac_put_chain_rssi(struct station_info *sinfo, struct ath11k_sta *arsta, char *pre, bool clear) { struct ath11k *ar = arsta->arvif->ar; int i; s8 rssi; for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { sinfo->chains &= ~BIT(i); rssi = arsta->chain_signal[i]; if (clear) arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta statistics %s rssi[%d] %d\n", pre, i, rssi); if (rssi != ATH11K_DEFAULT_NOISE_FLOOR && rssi != ATH11K_INVALID_RSSI_FULL && rssi != ATH11K_INVALID_RSSI_EMPTY && rssi != 0) { sinfo->chain_signal[i] = rssi; sinfo->chains |= BIT(i); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); } } } static void ath11k_mac_fw_stats_reset(struct ath11k *ar) { spin_lock_bh(&ar->data_lock); ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); ar->fw_stats.num_vdev_recvd = 0; ar->fw_stats.num_bcn_recvd = 0; spin_unlock_bh(&ar->data_lock); } int ath11k_mac_fw_stats_request(struct ath11k *ar, struct stats_request_params *req_param) { struct ath11k_base *ab = ar->ab; unsigned long time_left; int ret; lockdep_assert_held(&ar->conf_mutex); ath11k_mac_fw_stats_reset(ar); reinit_completion(&ar->fw_stats_complete); reinit_completion(&ar->fw_stats_done); ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); if (ret) { ath11k_warn(ab, "could not request fw stats (%d)\n", ret); return ret; } time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); if (!time_left) return -ETIMEDOUT; /* FW stats can get split when exceeding the stats data buffer limit. 
* In that case, since there is no end marking for the back-to-back * received 'update stats' event, we keep a 3 second timeout in case * fw_stats_done is not marked yet */ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ); if (!time_left) return -ETIMEDOUT; return 0; } static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id, u32 vdev_id, u32 stats_id) { struct ath11k_base *ab = ar->ab; struct stats_request_params req_param; int ret; lockdep_assert_held(&ar->conf_mutex); if (ar->state != ATH11K_STATE_ON) return -ENETDOWN; req_param.pdev_id = pdev_id; req_param.vdev_id = vdev_id; req_param.stats_id = stats_id; ret = ath11k_mac_fw_stats_request(ar, &req_param); if (ret) ath11k_warn(ab, "failed to request fw stats: %d\n", ret); ath11k_dbg(ab, ATH11K_DBG_WMI, "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n", pdev_id, vdev_id, stats_id); return ret; } static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct station_info *sinfo) { struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct ath11k *ar = arsta->arvif->ar; s8 signal; bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ar->ab->wmi_ab.svc_map); sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); sinfo->tx_duration = arsta->tx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); if (arsta->txrate.legacy || arsta->txrate.nss) { if (arsta->txrate.legacy) { sinfo->txrate.legacy = arsta->txrate.legacy; } else { sinfo->txrate.mcs = arsta->txrate.mcs; sinfo->txrate.nss = arsta->txrate.nss; sinfo->txrate.bw = arsta->txrate.bw; sinfo->txrate.he_gi = arsta->txrate.he_gi; sinfo->txrate.he_dcm = arsta->txrate.he_dcm; sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; } sinfo->txrate.flags = arsta->txrate.flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false); mutex_lock(&ar->conf_mutex); if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_RSSI_PER_CHAIN_STAT)) { ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true); } signal = arsta->rssi_comb; if (!signal && arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA && ar->ab->hw_params.supports_rssi_stats && !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_VDEV_STAT))) signal = arsta->rssi_beacon; mutex_unlock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sta statistics db2dbm %u rssi comb %d rssi beacon %d\n", db2dbm, arsta->rssi_comb, arsta->rssi_beacon); if (signal) { sinfo->signal = db2dbm ?
signal : signal + ATH11K_DEFAULT_NOISE_FLOOR; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi); if (!db2dbm) sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } #if IS_ENABLED(CONFIG_IPV6) static void ath11k_generate_ns_mc_addr(struct ath11k *ar, struct ath11k_arp_ns_offload *offload) { int i; for (i = 0; i < offload->ipv6_count; i++) { offload->self_ipv6_addr[i][0] = 0xff; offload->self_ipv6_addr[i][1] = 0x02; offload->self_ipv6_addr[i][11] = 0x01; offload->self_ipv6_addr[i][12] = 0xff; offload->self_ipv6_addr[i][13] = offload->ipv6_addr[i][13]; offload->self_ipv6_addr[i][14] = offload->ipv6_addr[i][14]; offload->self_ipv6_addr[i][15] = offload->ipv6_addr[i][15]; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n", offload->self_ipv6_addr[i]); } } static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct inet6_dev *idev) { struct ath11k *ar = hw->priv; struct ath11k_arp_ns_offload *offload; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct inet6_ifaddr *ifa6; struct ifacaddr6 *ifaca6; u32 count, scope; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n"); offload = &arvif->arp_ns_offload; count = 0; /* The _ipv6_changed() is called with RCU lock already held in * atomic_notifier_call_chain(), so we don't need to call * rcu_read_lock() again here. But note that with CONFIG_PREEMPT_RT * enabled, read_lock_bh() also calls rcu_read_lock(). This is OK * because RCU read critical sections are allowed to nest. */ read_lock_bh(&idev->lock); memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr)); memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr)); memcpy(offload->mac_addr, vif->addr, ETH_ALEN); /* get unicast address */ list_for_each_entry(ifa6, &idev->addr_list, if_list) { if (count >= ATH11K_IPV6_MAX_COUNT) goto generate; if (ifa6->flags & IFA_F_DADFAILED) continue; scope = ipv6_addr_src_scope(&ifa6->addr); if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || scope == IPV6_ADDR_SCOPE_GLOBAL) { memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, sizeof(ifa6->addr.s6_addr)); offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 uc %pI6 scope %d\n", count, offload->ipv6_addr[count], scope); count++; } else { ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope); } } /* get anycast address */ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6; ifaca6 = rcu_dereference(ifaca6->aca_next)) { if (count >= ATH11K_IPV6_MAX_COUNT) goto generate; scope = ipv6_addr_src_scope(&ifaca6->aca_addr); if (scope == IPV6_ADDR_SCOPE_LINKLOCAL || scope == IPV6_ADDR_SCOPE_GLOBAL) { memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr, sizeof(ifaca6->aca_addr)); offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 ac %pI6 scope %d\n", count, offload->ipv6_addr[count], scope); count++; } else { ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope); } } generate: offload->ipv6_count = count; read_unlock_bh(&idev->lock); /* generate ns multicast address */ ath11k_generate_ns_mc_addr(ar, offload); } #endif static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rekey data vdev
%d\n", arvif->vdev_id); mutex_lock(&ar->conf_mutex); memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); /* The supplicant works on big-endian, the firmware expects it on * little endian. */ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); arvif->rekey_data.enable_offload = true; ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL, rekey_data->kck, NL80211_KCK_LEN); ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL, rekey_data->kck, NL80211_KEK_LEN); ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL, &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); mutex_unlock(&ar->conf_mutex); } static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar) { struct ath11k *ar = hw->priv; const struct cfg80211_sar_sub_specs *sspec; int ret, index; u8 *sar_tbl; u32 i; if (!sar || sar->type != NL80211_SAR_TYPE_POWER || sar->num_sub_specs == 0) return -EINVAL; mutex_lock(&ar->conf_mutex); if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) || !ar->ab->hw_params.bios_sar_capa) { ret = -EOPNOTSUPP; goto exit; } ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar); if (ret) { ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret); goto exit; } sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL); if (!sar_tbl) { ret = -ENOMEM; goto exit; } sspec = sar->sub_specs; for (i = 0; i < sar->num_sub_specs; i++) { if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) { ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n", sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1); continue; } /* chain0 and chain1 share same power setting */ sar_tbl[sspec->freq_range_index] = sspec->power; index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1); sar_tbl[index] = sspec->power; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n", sspec->freq_range_index, sar_tbl[sspec->freq_range_index]); sspec++; } ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl); if (ret) ath11k_warn(ar->ab, "failed to set sar power: %d", ret); kfree(sar_tbl); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath11k *ar = hw->priv; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); ar->scan.roc_notify = false; spin_unlock_bh(&ar->data_lock); ath11k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); return 0; } static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct scan_req_params *arg; int ret; u32 scan_time_msec; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: reinit_completion(&ar->scan.started); reinit_completion(&ar->scan.completed); reinit_completion(&ar->scan.on_channel); ar->scan.state = ATH11K_SCAN_STARTING; ar->scan.is_roc = true; ar->scan.vdev_id = arvif->vdev_id; ar->scan.roc_freq = chan->center_freq; ar->scan.roc_notify = true; ret = 0; break; case ATH11K_SCAN_STARTING: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ret = -EBUSY; break; } spin_unlock_bh(&ar->data_lock); if (ret) goto exit; scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; arg = kzalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { ret = -ENOMEM; goto exit; } 
ath11k_wmi_start_scan_init(ar, arg); arg->num_chan = 1; arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list), GFP_KERNEL); if (!arg->chan_list) { ret = -ENOMEM; goto free_arg; } arg->vdev_id = arvif->vdev_id; arg->scan_id = ATH11K_SCAN_ID; arg->chan_list[0] = chan->center_freq; arg->dwell_time_active = scan_time_msec; arg->dwell_time_passive = scan_time_msec; arg->max_scan_time = scan_time_msec; arg->scan_f_passive = 1; arg->burst_duration = duration; if (!ar->ab->hw_params.single_pdev_only) arg->scan_f_filter_prb_req = 1; ret = ath11k_start_scan(ar, arg); if (ret) { ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); ar->scan.state = ATH11K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); goto free_chan_list; } ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); if (ret == 0) { ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n"); ret = ath11k_scan_stop(ar); if (ret) ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret); ret = -ETIMEDOUT; goto free_chan_list; } ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, msecs_to_jiffies(duration)); ret = 0; free_chan_list: kfree(arg->chan_list); free_arg: kfree(arg); exit: mutex_unlock(&ar->conf_mutex); return ret; } static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, int *dbm) { struct ath11k *ar = hw->priv; struct ath11k_base *ab = ar->ab; struct ath11k_fw_stats_pdev *pdev; int ret; /* Final Tx power is minimum of Target Power, CTL power, Regulatory * Power, PSD EIRP Power. We just know the Regulatory power from the * regulatory rules obtained. FW knows all these power and sets the min * of these. Hence, we request the FW pdev stats in which FW reports * the minimum of all vdev's channel Tx power. */ mutex_lock(&ar->conf_mutex); /* Firmware doesn't provide Tx power during CAC hence no need to fetch * the stats. */ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) { mutex_unlock(&ar->conf_mutex); return -EAGAIN; } ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, WMI_REQUEST_PDEV_STAT); if (ret) { ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); goto err_fallback; } spin_lock_bh(&ar->data_lock); pdev = list_first_entry_or_null(&ar->fw_stats.pdevs, struct ath11k_fw_stats_pdev, list); if (!pdev) { spin_unlock_bh(&ar->data_lock); goto err_fallback; } /* tx power is set as 2 units per dBm in FW. */ *dbm = pdev->chan_tx_power / 2; spin_unlock_bh(&ar->data_lock); mutex_unlock(&ar->conf_mutex); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n", pdev->chan_tx_power, *dbm); return 0; err_fallback: mutex_unlock(&ar->conf_mutex); /* We didn't get txpower from FW. 
Hence, relying on vif->bss_conf.txpower */ *dbm = vif->bss_conf.txpower; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n", *dbm); return 0; } static int ath11k_mac_station_add(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); struct peer_create_params peer_param; int ret; lockdep_assert_held(&ar->conf_mutex); ret = ath11k_mac_inc_num_stations(arvif, sta); if (ret) { ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n", ar->max_num_stations); goto exit; } + /* The driver allows the DEL KEY followed by SET KEY sequence for + * group keys only when no clients are associated. If the firmware + * has entered the race during that window, reinstalling the same + * key when the first sta connects will allow the firmware to + * recover from the race. + */ + if (arvif->num_stations == 1 && arvif->reinstall_group_keys) { + ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n", + arvif->vdev_id); + ret = ath11k_set_group_keys(arvif); + if (ret) + goto dec_num_station; + arvif->reinstall_group_keys = false; + } + arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); if (!arsta->rx_stats) { ret = -ENOMEM; goto dec_num_station; } peer_param.vdev_id = arvif->vdev_id; peer_param.peer_addr = sta->addr; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath11k_peer_create(ar, arvif, sta, &peer_param); if (ret) { ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); goto free_rx_stats; } ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL); if (!arsta->tx_stats) { ret = -ENOMEM; goto free_peer; } } if (ieee80211_vif_is_mesh(vif)) { ath11k_dbg(ab, ATH11K_DBG_MAC, "setting USE_4ADDR for mesh STA %pM\n", sta->addr); ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_4ADDR, 1); if (ret) { ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", sta->addr, ret); goto free_tx_stats; } } ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); if (ret) { ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", sta->addr, arvif->vdev_id, ret); goto free_tx_stats; } if (ab->hw_params.vdev_start_delay && !arvif->is_started && arvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath11k_mac_start_vdev_delay(ar->hw, vif); if (ret) { ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); goto free_tx_stats; } } ewma_avg_rssi_init(&arsta->avg_rssi); return 0; free_tx_stats: kfree(arsta->tx_stats); arsta->tx_stats = NULL; free_peer: ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); free_rx_stats: kfree(arsta->rx_stats); arsta->rx_stats = NULL; dec_num_station: ath11k_mac_dec_num_stations(arvif, sta); exit: return ret; } static int ath11k_mac_station_remove(struct ath11k *ar, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); int ret; if (ab->hw_params.vdev_start_delay && arvif->is_started && arvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath11k_mac_stop_vdev_early(ar->hw, vif); if (ret) { ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret); return ret; } } ath11k_dp_peer_cleanup(ar,
arvif->vdev_id, sta->addr); ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); else ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); ath11k_mac_dec_num_stations(arvif, sta); kfree(arsta->tx_stats); arsta->tx_stats = NULL; kfree(arsta->rx_stats); arsta->rx_stats = NULL; return ret; } static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); enum ieee80211_ap_reg_power power_type; struct cur_regulatory_info *reg_info; struct ath11k_peer *peer; int ret = 0; /* cancel must be done outside the mutex to avoid deadlock */ if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { cancel_work_sync(&arsta->update_wk); cancel_work_sync(&arsta->set_4addr_wk); } mutex_lock(&ar->conf_mutex); if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { memset(arsta, 0, sizeof(*arsta)); arsta->arvif = arvif; arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); ret = ath11k_mac_station_add(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { ret = ath11k_mac_station_remove(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); mutex_lock(&ar->ab->tbl_mtx_lock); spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer && peer->sta == sta) { ath11k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n", vif->addr, arvif->vdev_id); ath11k_peer_rhash_delete(ar->ab, peer); peer->sta = NULL; list_del(&peer->list); kfree(peer); ar->num_peers--; } spin_unlock_bh(&ar->ab->base_lock); mutex_unlock(&ar->ab->tbl_mtx_lock); } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath11k_station_assoc(ar, vif, sta, false); if (ret) ath11k_warn(ar->ab, "Failed to associate station: %pM\n", sta->addr); spin_lock_bh(&ar->data_lock); /* Set arsta bw and prev bw */ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); arsta->bw_prev = arsta->bw; spin_unlock_bh(&ar->data_lock); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer) peer->is_authorized = true; spin_unlock_bh(&ar->ab->base_lock); if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_AUTHORIZE, 1); if (ret) ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", sta->addr, arvif->vdev_id, ret); } if (!ret && ath11k_wmi_supports_6ghz_cc_ext(ar) && arvif->vdev_type == WMI_VDEV_TYPE_STA && arvif->chanctx.def.chan && arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) { reg_info = &ar->ab->reg_info_store[ar->pdev_idx]; power_type = vif->bss_conf.power_type; if
(power_type == IEEE80211_REG_UNSET_AP) { ath11k_warn(ar->ab, "invalid power type %d\n", power_type); ret = -EINVAL; } else { ret = ath11k_reg_handle_chan_list(ar->ab, reg_info, power_type); if (ret) ath11k_warn(ar->ab, "failed to handle chan list with power type %d\n", power_type); } } } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { spin_lock_bh(&ar->ab->base_lock); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer) peer->is_authorized = false; spin_unlock_bh(&ar->ab->base_lock); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { ret = ath11k_station_disassoc(ar, vif, sta); if (ret) ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", sta->addr); } mutex_unlock(&ar->conf_mutex); return ret; } static const struct ieee80211_ops ath11k_ops = { .tx = ath11k_mac_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = ath11k_mac_op_start, .stop = ath11k_mac_op_stop, .reconfig_complete = ath11k_mac_op_reconfig_complete, .add_interface = ath11k_mac_op_add_interface, .remove_interface = ath11k_mac_op_remove_interface, .update_vif_offload = ath11k_mac_op_update_vif_offload, .config = ath11k_mac_op_config, .bss_info_changed = ath11k_mac_op_bss_info_changed, .configure_filter = ath11k_mac_op_configure_filter, .hw_scan = ath11k_mac_op_hw_scan, .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, .set_key = ath11k_mac_op_set_key, .set_rekey_data = ath11k_mac_op_set_rekey_data, .sta_state = ath11k_mac_op_sta_state, .sta_set_4addr = ath11k_mac_op_sta_set_4addr, .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, .link_sta_rc_update = ath11k_mac_op_sta_rc_update, .conf_tx = ath11k_mac_op_conf_tx, .set_antenna = ath11k_mac_op_set_antenna, .get_antenna = ath11k_mac_op_get_antenna, .ampdu_action = ath11k_mac_op_ampdu_action, .add_chanctx = ath11k_mac_op_add_chanctx, .remove_chanctx = ath11k_mac_op_remove_chanctx, .change_chanctx = ath11k_mac_op_change_chanctx, .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx, .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx, .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx, .set_rts_threshold = ath11k_mac_op_set_rts_threshold, .set_frag_threshold = ath11k_mac_op_set_frag_threshold, .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask, .get_survey = ath11k_mac_op_get_survey, .flush = ath11k_mac_op_flush, .sta_statistics = ath11k_mac_op_sta_statistics, CFG80211_TESTMODE_CMD(ath11k_tm_cmd) #ifdef CONFIG_PM .suspend = ath11k_wow_op_suspend, .resume = ath11k_wow_op_resume, .set_wakeup = ath11k_wow_op_set_wakeup, #endif #ifdef CONFIG_ATH11K_DEBUGFS .vif_add_debugfs = ath11k_debugfs_op_vif_add, .sta_add_debugfs = ath11k_debugfs_sta_op_add, #endif #if IS_ENABLED(CONFIG_IPV6) .ipv6_addr_change = ath11k_mac_op_ipv6_changed, #endif .get_txpower = ath11k_mac_op_get_txpower, .set_sar_specs = ath11k_mac_op_set_bios_sar_specs, .remain_on_channel = ath11k_mac_op_remain_on_channel, .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel, }; static void ath11k_mac_update_ch_list(struct ath11k *ar, struct ieee80211_supported_band *band, u32 freq_low, u32 freq_high) { int i; if (!(freq_low && freq_high)) return; for (i = 0; i < band->n_channels; i++) { if (band->channels[i].center_freq < freq_low || band->channels[i].center_freq > freq_high) band->channels[i].flags |= IEEE80211_CHAN_DISABLED; } } static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band) { 
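/* map a WMI band capability to the owning phy id; on single-pdev chips one pdev serves multiple bands, so callers use this to index hal_reg_cap */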
struct ath11k_pdev *pdev = ar->pdev; struct ath11k_pdev_cap *pdev_cap = &pdev->cap; if (band == WMI_HOST_WLAN_2G_CAP) return pdev_cap->band[NL80211_BAND_2GHZ].phy_id; if (band == WMI_HOST_WLAN_5G_CAP) return pdev_cap->band[NL80211_BAND_5GHZ].phy_id; ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band); return 0; } static int ath11k_mac_setup_channels_rates(struct ath11k *ar, u32 supported_bands) { struct ieee80211_supported_band *band; struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap; void *channels; u32 phy_id; BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) + ARRAY_SIZE(ath11k_5ghz_channels) + ARRAY_SIZE(ath11k_6ghz_channels)) != ATH11K_NUM_CHANS); reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; temp_reg_cap = reg_cap; if (supported_bands & WMI_HOST_WLAN_2G_CAP) { channels = kmemdup(ath11k_2ghz_channels, sizeof(ath11k_2ghz_channels), GFP_KERNEL); if (!channels) return -ENOMEM; band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->band = NL80211_BAND_2GHZ; band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels); band->channels = channels; band->n_bitrates = ath11k_g_rates_size; band->bitrates = ath11k_g_rates; ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_2ghz_chan, temp_reg_cap->high_2ghz_chan); } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) { channels = kmemdup(ath11k_6ghz_channels, sizeof(ath11k_6ghz_channels), GFP_KERNEL); if (!channels) { kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); return -ENOMEM; } ar->supports_6ghz = true; band = &ar->mac.sbands[NL80211_BAND_6GHZ]; band->band = NL80211_BAND_6GHZ; band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels); band->channels = channels; band->n_bitrates = ath11k_a_rates_size; band->bitrates = ath11k_a_rates; ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_5ghz_chan, temp_reg_cap->high_5ghz_chan); } if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) { channels = kmemdup(ath11k_5ghz_channels, sizeof(ath11k_5ghz_channels), GFP_KERNEL); if (!channels) { kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); return -ENOMEM; } band = &ar->mac.sbands[NL80211_BAND_5GHZ]; band->band = NL80211_BAND_5GHZ; band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels); band->channels = channels; band->n_bitrates = ath11k_a_rates_size; band->bitrates = ath11k_a_rates; ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; if (ar->ab->hw_params.single_pdev_only) { phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); temp_reg_cap = &ar->ab->hal_reg_cap[phy_id]; } ath11k_mac_update_ch_list(ar, band, temp_reg_cap->low_5ghz_chan, temp_reg_cap->high_5ghz_chan); } } return 0; } static void ath11k_mac_setup_mac_address_list(struct ath11k *ar) { struct mac_address *addresses; u16 n_addresses; int i; if (!ar->ab->hw_params.support_dual_stations) return; n_addresses = ar->ab->hw_params.num_vdevs; addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL); if (!addresses) return; memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN); for (i = 1; i < n_addresses; i++) { memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN); /* set Local Administered Address bit */ addresses[i].addr[0] |= 0x2; 
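/* differentiate the remaining derived addresses by encoding the index into the upper nibble of the first octet */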
addresses[i].addr[0] += (i - 1) << 4; } ar->hw->wiphy->addresses = addresses; ar->hw->wiphy->n_addresses = n_addresses; } static int ath11k_mac_setup_iface_combinations(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct ieee80211_iface_combination *combinations; struct ieee80211_iface_limit *limits; int n_limits, n_combos; bool p2p; p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); if (ab->hw_params.support_dual_stations) n_combos = 2; else n_combos = 1; combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL); if (!combinations) return -ENOMEM; if (p2p) n_limits = 3; else n_limits = 2; limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); if (!limits) { kfree(combinations); return -ENOMEM; } limits[0].max = 1; limits[0].types |= BIT(NL80211_IFTYPE_STATION); limits[1].max = 16; limits[1].types |= BIT(NL80211_IFTYPE_AP); if (IS_ENABLED(CONFIG_MAC80211_MESH) && ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); combinations[0].limits = limits; combinations[0].n_limits = n_limits; combinations[0].beacon_int_infra_match = true; combinations[0].beacon_int_min_gcd = 100; combinations[0].max_interfaces = 16; combinations[0].num_different_channels = 1; combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_80P80) | BIT(NL80211_CHAN_WIDTH_160); if (ab->hw_params.support_dual_stations) { limits[0].max = 2; combinations[1].limits = limits; combinations[1].n_limits = n_limits; combinations[1].beacon_int_infra_match = true; combinations[1].beacon_int_min_gcd = 100; combinations[1].max_interfaces = ab->hw_params.num_vdevs; combinations[1].num_different_channels = 2; } if (p2p) { limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); limits[2].max = 1; limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); } ar->hw->wiphy->iface_combinations = combinations; ar->hw->wiphy->n_iface_combinations = n_combos; return 0; } static const u8 ath11k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const u8 ath11k_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const u8 ath11k_if_types_ext_capa_ap[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, }; static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = { { .extended_capabilities = ath11k_if_types_ext_capa, .extended_capabilities_mask = ath11k_if_types_ext_capa, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa), }, { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = ath11k_if_types_ext_capa_sta, .extended_capabilities_mask = ath11k_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa_sta), }, { .iftype = NL80211_IFTYPE_AP, .extended_capabilities = ath11k_if_types_ext_capa_ap, .extended_capabilities_mask = ath11k_if_types_ext_capa_ap, .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa_ap), }, }; static void __ath11k_mac_unregister(struct ath11k *ar) { cancel_work_sync(&ar->channel_update_work); 
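/* flush deferred regd/channel work first so no work item touches ar->hw once it is unregistered below */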
cancel_work_sync(&ar->regd_update_work); ieee80211_unregister_hw(ar->hw); idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); idr_destroy(&ar->txmgmt_idr); kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); kfree(ar->hw->wiphy->iface_combinations[0].limits); kfree(ar->hw->wiphy->iface_combinations); kfree(ar->hw->wiphy->addresses); SET_IEEE80211_DEV(ar->hw, NULL); } void ath11k_mac_unregister(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; __ath11k_mac_unregister(ar); } ath11k_peer_rhash_tbl_destroy(ab); } static int __ath11k_mac_register(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; struct ath11k_pdev_cap *cap = &ar->pdev->cap; static const u32 cipher_suites[] = { WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_CCMP_256, }; int ret; u32 ht_cap = 0; ath11k_pdev_caps_update(ar); SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); ath11k_mac_setup_mac_address_list(ar); SET_IEEE80211_DEV(ar->hw, ab->dev); ret = ath11k_mac_setup_channels_rates(ar, cap->supported_bands); if (ret) goto err; wiphy_read_of_freq_limits(ar->hw->wiphy); ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); ath11k_mac_setup_he_cap(ar, cap); ret = ath11k_mac_setup_iface_combinations(ar); if (ret) { ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); goto err_free_channels; } ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes; if (ab->hw_params.single_pdev_only && ar->supports_6ghz) ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS); if (ab->hw_params.supports_multi_bssid) { ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } ieee80211_hw_set(ar->hw, SIGNAL_DBM); ieee80211_hw_set(ar->hw, SUPPORTS_PS); ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(ar->hw, MFP_CAPABLE); ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); ieee80211_hw_set(ar->hw, AP_LINK_PS); ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); ieee80211_hw_set(ar->hw, QUEUE_CONTROL); ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG); ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK); if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) { ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD); ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD); } if (cap->nss_ratio_enabled) ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW); if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) { ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER); ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(ar->hw, USES_RSS); } ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; /* TODO: Check if HT capability advertised from firmware is different * for each band for a dual band capable radio. 
It will be tricky to * handle it when the HT capability differs for each band. */ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz)) ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; ar->hw->wiphy->max_remain_on_channel_duration = 5000; ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_AP_SCAN; ar->max_num_stations = TARGET_NUM_STATIONS(ab); ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab); ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) { ar->hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; } if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; ar->hw->wiphy->max_sched_scan_plan_interval = WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; ar->hw->wiphy->max_sched_scan_plan_iterations = WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; } ret = ath11k_wow_init(ar); if (ret) { ath11k_warn(ar->ab, "failed to init wow: %d\n", ret); goto err_free_if_combs; } if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, ar->ab->wmi_ab.svc_map)) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; ar->hw->vif_data_size = sizeof(struct ath11k_vif); ar->hw->sta_data_size = sizeof(struct ath11k_sta); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD, ar->ab->wmi_ab.svc_map)) { wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_BSS_COLOR); ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION); } ar->hw->wiphy->cipher_suites = cipher_suites; ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa; ar->hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath11k_iftypes_ext_capa); if (ar->supports_6ghz) { wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); } wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL); if (test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map)) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); ar->hw->wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS(ab); ar->hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; ath11k_reg_init(ar); if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) { ar->hw->netdev_features = NETIF_F_HW_CSUM; ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); } if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT,
ar->ab->wmi_ab.svc_map) && ab->hw_params.bios_sar_capa) ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa; ret = ieee80211_register_hw(ar->hw); if (ret) { ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); goto err_free_if_combs; } if (!ab->hw_params.supports_monitor) /* There's a race between calling ieee80211_register_hw() * and here where the monitor mode is enabled for a little * while. But that time is so short and in practice it makes * no difference in real life. */ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR); /* Apply the regd received during initialization */ ret = ath11k_regd_update(ar); if (ret) { ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret); goto err_unregister_hw; } if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) { memcpy(&ar->alpha2, ab->new_alpha2, 2); ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed to set cc code for mac register: %d\n", ret); } ret = ath11k_debugfs_register(ar); if (ret) { ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret); goto err_unregister_hw; } return 0; err_unregister_hw: ieee80211_unregister_hw(ar->hw); err_free_if_combs: kfree(ar->hw->wiphy->iface_combinations[0].limits); kfree(ar->hw->wiphy->iface_combinations); err_free_channels: kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); err: SET_IEEE80211_DEV(ar->hw, NULL); return ret; } int ath11k_mac_register(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; int ret; - u8 mac_addr[ETH_ALEN] = {0}; + u8 mac_addr[ETH_ALEN] = {}; if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; /* Initialize channel counters frequency value in hertz */ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; ret = ath11k_peer_rhash_tbl_init(ab); if (ret) return ret; device_get_mac_address(ab->dev, mac_addr); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (ab->pdevs_macaddr_valid) { ether_addr_copy(ar->mac_addr, pdev->mac_addr); } else { if (is_zero_ether_addr(mac_addr)) ether_addr_copy(ar->mac_addr, ab->mac_addr); else ether_addr_copy(ar->mac_addr, mac_addr); ar->mac_addr[4] += i; } idr_init(&ar->txmgmt_idr); spin_lock_init(&ar->txmgmt_idr_lock); ret = __ath11k_mac_register(ar); if (ret) goto err_cleanup; init_waitqueue_head(&ar->txmgmt_empty_waitq); } return 0; err_cleanup: for (i = i - 1; i >= 0; i--) { pdev = &ab->pdevs[i]; ar = pdev->ar; __ath11k_mac_unregister(ar); } ath11k_peer_rhash_tbl_destroy(ab); return ret; } int ath11k_mac_allocate(struct ath11k_base *ab) { struct ieee80211_hw *hw; struct ath11k *ar; struct ath11k_pdev *pdev; int ret; int i; if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) return 0; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops); if (!hw) { ath11k_warn(ab, "failed to allocate mac80211 hw device\n"); ret = -ENOMEM; goto err_free_mac; } ar = hw->priv; ar->hw = hw; ar->ab = ab; ar->pdev = pdev; ar->pdev_idx = i; ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i); ar->wmi = &ab->wmi_ab.wmi[i]; /* FIXME wmi[0] is already initialized during attach, * Should we do this again?
*/ ath11k_wmi_pdev_attach(ab, i); ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask); ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask); pdev->ar = ar; spin_lock_init(&ar->data_lock); INIT_LIST_HEAD(&ar->arvifs); INIT_LIST_HEAD(&ar->ppdu_stats_info); mutex_init(&ar->conf_mutex); init_completion(&ar->vdev_setup_done); init_completion(&ar->vdev_delete_done); init_completion(&ar->peer_assoc_done); init_completion(&ar->peer_delete_done); init_completion(&ar->install_key_done); init_completion(&ar->bss_survey_done); init_completion(&ar->scan.started); init_completion(&ar->scan.completed); init_completion(&ar->scan.on_channel); init_completion(&ar->thermal.wmi_sync); INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work); INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work); INIT_LIST_HEAD(&ar->channel_update_queue); INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work); INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work); skb_queue_head_init(&ar->wmi_mgmt_tx_queue); clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags); ar->monitor_vdev_id = -1; clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; init_completion(&ar->completed_11d_scan); ath11k_fw_stats_init(ar); } return 0; err_free_mac: ath11k_mac_destroy(ab); return ret; } void ath11k_mac_destroy(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_pdev *pdev; int i; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; if (!ar) continue; ath11k_fw_stats_free(&ar->fw_stats); ieee80211_free_hw(ar->hw); pdev->ar = NULL; } } int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, enum wmi_sta_keepalive_method method, u32 interval) { struct ath11k *ar = arvif->ar; struct wmi_sta_keepalive_arg arg = {}; int ret; lockdep_assert_held(&ar->conf_mutex); if (arvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) return 0; arg.vdev_id = arvif->vdev_id; arg.enabled = 1; arg.method = method; arg.interval = interval; ret = ath11k_wmi_sta_keepalive(ar, &arg); if (ret) { ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } return 0; } diff --git a/pci.c b/pci.c index 78444f8ea153..d8655badd96d 100644 --- a/pci.c +++ b/pci.c @@ -1,1304 +1,1304 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include #include #include #include #include #include "pci.h" #include "core.h" #include "hif.h" #include "mhi.h" #include "debug.h" #include "pcic.h" #include "qmi.h" #define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_DMA_MASK 36 #define ATH11K_PCI_COHERENT_DMA_MASK 32 #define TCSR_SOC_HW_VERSION 0x0224 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) #define QCA6390_DEVICE_ID 0x1101 #define QCN9074_DEVICE_ID 0x1104 #define WCN6855_DEVICE_ID 0x1103 #define TCSR_SOC_HW_SUB_VER 0x1910010 static const struct pci_device_id ath11k_pci_id_table[] = { { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) }, { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) }, { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) }, - {0} + {} }; MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table); static int ath11k_pci_bus_wake_up(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); } static void ath11k_pci_bus_release(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); } static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset) { if (!ab->hw_params.static_window_map) return ATH11K_PCI_WINDOW_START; if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) /* if offset lies within DP register range, use 3rd window */ return 3 * ATH11K_PCI_WINDOW_START; else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) < ATH11K_PCI_WINDOW_RANGE_MASK) /* if offset lies within CE register range, use 2nd window */ return 2 * ATH11K_PCI_WINDOW_START; else return ATH11K_PCI_WINDOW_START; } static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) { struct ath11k_base *ab = ab_pci->ab; u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset); lockdep_assert_held(&ab_pci->window_lock); if (window != ab_pci->register_window) { iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); ab_pci->register_window = window; } } static void ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 window_start; window_start = ath11k_pci_get_window_start(ab, offset); if (window_start == ATH11K_PCI_WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath11k_pci_select_window(ab_pci, offset); iowrite32(value, ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); spin_unlock_bh(&ab_pci->window_lock); } else { iowrite32(value, ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } } static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); u32 window_start, val; window_start = ath11k_pci_get_window_start(ab, offset); if (window_start == ATH11K_PCI_WINDOW_START) { spin_lock_bh(&ab_pci->window_lock); ath11k_pci_select_window(ab_pci, offset); val = ioread32(ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); spin_unlock_bh(&ab_pci->window_lock); } else { val = ioread32(ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); } return val; } int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector) { struct pci_dev *pci_dev = to_pci_dev(ab->dev); return pci_irq_vector(pci_dev, vector); } static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = { .wakeup = ath11k_pci_bus_wake_up, .release = ath11k_pci_bus_release, .get_msi_irq = 
ath11k_pci_get_msi_irq, .window_write32 = ath11k_pci_window_write32, .window_read32 = ath11k_pci_window_read32, }; static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = { .wakeup = NULL, .release = NULL, .get_msi_irq = ath11k_pci_get_msi_irq, .window_write32 = ath11k_pci_window_write32, .window_read32 = ath11k_pci_window_read32, }; static const struct ath11k_msi_config msi_config_one_msi = { .total_vectors = 1, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 1, .base_vector = 0 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, { .name = "DP", .num_vectors = 1, .base_vector = 0 }, }, }; static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) { u32 umac_window; u32 ce_window; u32 window; umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); window = (umac_window << 12) | (ce_window << 6); iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS); } static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) { u32 val, delay; val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET); val |= PCIE_SOC_GLOBAL_RESET_V; ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val); /* TODO: exact time to sleep is uncertain */ delay = 10; mdelay(delay); /* Need to toggle V bit back otherwise stuck in reset status */ val &= ~PCIE_SOC_GLOBAL_RESET_V; ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val); mdelay(delay); val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET); if (val == 0xffffffff) ath11k_warn(ab, "link down error during global reset\n"); } static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) { u32 val; /* read cookie */ val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR); ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val); val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY); ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val); /* TODO: exact time to sleep is uncertain */ mdelay(10); /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from * continuing warm path and entering dead loop. */ ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0); mdelay(10); val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY); ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val); /* A read-clear register; clear it to prevent * Q6 from entering the wrong code path.
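* (the read of WLAON_SOC_RESET_CAUSE_REG below clears it as a side effect)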
*/ val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG); ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val); } static int ath11k_pci_set_link_reg(struct ath11k_base *ab, u32 offset, u32 value, u32 mask) { u32 v; int i; v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0; for (i = 0; i < 10; i++) { ath11k_pcic_write32(ab, offset, (v & ~mask) | value); v = ath11k_pcic_read32(ab, offset); if ((v & mask) == value) return 0; mdelay(2); } ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n", offset, v & mask, value); return -ETIMEDOUT; } static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) { int ret; ret = ath11k_pci_set_link_reg(ab, PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab), PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); if (ret) { ath11k_warn(ab, "failed to set sysclk: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab), PCIE_PCS_OSC_DTCT_CONFIG1_VAL, PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config1: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab), PCIE_PCS_OSC_DTCT_CONFIG2_VAL, PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config2: %d\n", ret); return ret; } ret = ath11k_pci_set_link_reg(ab, PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab), PCIE_PCS_OSC_DTCT_CONFIG4_VAL, PCIE_PCS_OSC_DTCT_CONFIG_MSK); if (ret) { ath11k_warn(ab, "failed to set dtct config4: %d\n", ret); return ret; } return 0; } static void ath11k_pci_enable_ltssm(struct ath11k_base *ab) { u32 val; int i; val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM); /* The PCIe link seems very unstable after the hot reset */ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { if (val == 0xffffffff) mdelay(5); ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM); } ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val); val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST); val |= GCC_GCC_PCIE_HOT_RST_VAL; ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val); val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST); ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val); mdelay(5); } static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab) { /* This is a WAR (workaround) for PCIe hot reset: the target still * sets the interrupt when it receives a hot reset, so when SBL is * downloaded again, SBL unmasks the interrupt, receives the stale * one, and crashes immediately.
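* Clearing all pending interrupts here, before the SBL download, avoids that.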
*/ ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); } static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab) { u32 val; val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); } static void ath11k_pci_force_wake(struct ath11k_base *ab) { ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); mdelay(5); } static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) { mdelay(100); if (power_on) { ath11k_pci_enable_ltssm(ab); ath11k_pci_clear_all_intrs(ab); ath11k_pci_set_wlaon_pwr_ctrl(ab); if (ab->hw_params.fix_l1ss) ath11k_pci_fix_l1ss(ab); } ath11k_mhi_clear_vector(ab); ath11k_pci_clear_dbg_registers(ab); ath11k_pci_soc_global_reset(ab); ath11k_mhi_set_mhictrl_reset(ab); } static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; cfg->tgt_ce = ab->hw_params.target_ce_config; cfg->tgt_ce_len = ab->hw_params.target_ce_count; cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2, &cfg->shadow_reg_v2_len); } static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable) { struct pci_dev *dev = ab_pci->pdev; u16 control; pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); if (enable) control |= PCI_MSI_FLAGS_ENABLE; else control &= ~PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); } static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci) { ath11k_pci_msi_config(ab_pci, true); } static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci) { ath11k_pci_msi_config(ab_pci, false); } static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; const struct ath11k_msi_config *msi_config = ab->pci.msi.config; struct pci_dev *pci_dev = ab_pci->pdev; struct msi_desc *msi_desc; int num_vectors; int ret; num_vectors = pci_alloc_irq_vectors(pci_dev, msi_config->total_vectors, msi_config->total_vectors, PCI_IRQ_MSI); if (num_vectors == msi_config->total_vectors) { set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); } else { num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 1, 1, PCI_IRQ_MSI); if (num_vectors < 0) { ret = -EINVAL; goto reset_msi_config; } clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); ab->pci.msi.config = &msi_config_one_msi; ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n"); } ath11k_info(ab, "MSI vectors: %d\n", num_vectors); ath11k_pci_msi_disable(ab_pci); msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); if (!msi_desc) { ath11k_err(ab, "msi_desc is NULL!\n"); ret = -EINVAL; goto free_msi_vector; } ab->pci.msi.ep_base_data = msi_desc->msg.data; pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, &ab->pci.msi.addr_lo); if (msi_desc->pci.msi_attrib.is_64) { pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, &ab->pci.msi.addr_hi); } else { ab->pci.msi.addr_hi = 0; } ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data); return 0; free_msi_vector: pci_free_irq_vectors(ab_pci->pdev); reset_msi_config: return ret; } static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci) { pci_free_irq_vectors(ab_pci->pdev); } static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci) { struct msi_desc *msi_desc; msi_desc = 
irq_get_msi_desc(ab_pci->pdev->irq); if (!msi_desc) { ath11k_err(ab_pci->ab, "msi_desc is NULL!\n"); pci_free_irq_vectors(ab_pci->pdev); return -EINVAL; } ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data; ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n", ab_pci->ab->pci.msi.ep_base_data); return 0; } static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) { struct ath11k_base *ab = ab_pci->ab; u16 device_id; int ret = 0; pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); if (device_id != ab_pci->dev_id) { ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", device_id, ab_pci->dev_id); ret = -EIO; goto out; } ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM); if (ret) { ath11k_err(ab, "failed to assign pci resource: %d\n", ret); goto out; } ret = pci_enable_device(pdev); if (ret) { ath11k_err(ab, "failed to enable pci device: %d\n", ret); goto out; } ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci"); if (ret) { ath11k_err(ab, "failed to request pci region: %d\n", ret); goto disable_device; } ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); if (ret) { ath11k_err(ab, "failed to set pci dma mask to %d: %d\n", ATH11K_PCI_DMA_MASK, ret); goto release_region; } ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK); ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK)); if (ret) { ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n", ATH11K_PCI_COHERENT_DMA_MASK, ret); goto release_region; } pci_set_master(pdev); ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM); ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0); if (!ab->mem) { ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM); ret = -EIO; goto release_region; } ab->mem_ce = ab->mem; ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem); return 0; release_region: pci_release_region(pdev, ATH11K_PCI_BAR_NUM); disable_device: pci_disable_device(pdev); out: return ret; } static void ath11k_pci_free_region(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; struct pci_dev *pci_dev = ab_pci->pdev; pci_iounmap(pci_dev, ab->mem); ab->mem = NULL; pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM); if (pci_is_enabled(pci_dev)) pci_disable_device(pci_dev); } static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) { struct ath11k_base *ab = ab_pci->ab; pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, &ab_pci->link_ctl); ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n", ab_pci->link_ctl, u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); /* disable L0s and L1 */ pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC); set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); } static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) { if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC, ab_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC); } #ifdef CONFIG_DEV_COREDUMP static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; struct image_info *rddm_img, *fw_img; struct ath11k_tlv_dump_data *dump_tlv; enum ath11k_fw_crash_dump_type mem_type; u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0; struct ath11k_dump_file_data *file_data; int i; rddm_img = 
mhi_ctrl->rddm_image; if (!rddm_img) { ath11k_err(ab, "No RDDM dump found\n"); return 0; } fw_img = mhi_ctrl->fbc_image; for (i = 0; i < fw_img->entries ; i++) { if (!fw_img->mhi_buf[i].buf) continue; paging_tlv_sz += fw_img->mhi_buf[i].len; } dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz; for (i = 0; i < rddm_img->entries; i++) { if (!rddm_img->mhi_buf[i].buf) continue; rddm_tlv_sz += rddm_img->mhi_buf[i].len; } dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz; for (i = 0; i < ab->qmi.mem_seg_count; i++) { mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type); if (mem_type == FW_CRASH_DUMP_NONE) continue; if (mem_type == FW_CRASH_DUMP_TYPE_MAX) { ath11k_dbg(ab, ATH11K_DBG_PCI, "target mem region type %d not supported", ab->qmi.target_mem[i].type); continue; } if (!ab->qmi.target_mem[i].anyaddr) continue; dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size; } for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) { if (!dump_seg_sz[i]) continue; len += sizeof(*dump_tlv) + dump_seg_sz[i]; } if (len) len += sizeof(*file_data); return len; } static void ath11k_pci_coredump_download(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; struct image_info *rddm_img, *fw_img; struct timespec64 timestamp; int i, len, mem_idx; enum ath11k_fw_crash_dump_type mem_type; struct ath11k_dump_file_data *file_data; struct ath11k_tlv_dump_data *dump_tlv; size_t hdr_len = sizeof(*file_data); void *buf; - u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 }; + u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {}; ath11k_mhi_coredump(mhi_ctrl, false); len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz); if (!len) { ath11k_warn(ab, "No crash dump data found for devcoredump"); return; } rddm_img = mhi_ctrl->rddm_image; fw_img = mhi_ctrl->fbc_image; /* dev_coredumpv() requires vmalloc data */ buf = vzalloc(len); if (!buf) return; ab->dump_data = buf; ab->ath11k_coredump_len = len; file_data = ab->dump_data; strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic)); file_data->len = cpu_to_le32(len); file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2); file_data->chip_id = cpu_to_le32(ab_pci->dev_id); file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id); file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus)); guid_gen(&file_data->guid); ktime_get_real_ts64(×tamp); file_data->tv_sec = cpu_to_le64(timestamp.tv_sec); file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec); buf += hdr_len; dump_tlv = buf; dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA); dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]); buf += COREDUMP_TLV_HDR_SIZE; /* append all segments together as they are all part of a single contiguous * block of memory */ for (i = 0; i < fw_img->entries ; i++) { if (!fw_img->mhi_buf[i].buf) continue; memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf, fw_img->mhi_buf[i].len); buf += fw_img->mhi_buf[i].len; } dump_tlv = buf; dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA); dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]); buf += COREDUMP_TLV_HDR_SIZE; /* append all segments together as they are all part of a single contiguous * block of memory */ for (i = 0; i < rddm_img->entries; i++) { if (!rddm_img->mhi_buf[i].buf) continue; memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf, rddm_img->mhi_buf[i].len); buf += rddm_img->mhi_buf[i].len; } mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA; for (; mem_idx < 
FW_CRASH_DUMP_TYPE_MAX; mem_idx++) { if (mem_idx == FW_CRASH_DUMP_NONE) continue; for (i = 0; i < ab->qmi.mem_seg_count; i++) { mem_type = ath11k_coredump_get_dump_type (ab->qmi.target_mem[i].type); if (mem_type != mem_idx) continue; if (!ab->qmi.target_mem[i].anyaddr) { ath11k_dbg(ab, ATH11K_DBG_PCI, "Skipping mem region type %d", ab->qmi.target_mem[i].type); continue; } dump_tlv = buf; dump_tlv->type = cpu_to_le32(mem_idx); dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]); buf += COREDUMP_TLV_HDR_SIZE; memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr, ab->qmi.target_mem[i].size); buf += ab->qmi.target_mem[i].size; } } queue_work(ab->workqueue, &ab->dump_work); } #endif static int ath11k_pci_power_up(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); int ret; ab_pci->register_window = 0; clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, true); /* Disable ASPM during firmware download due to problems switching * to AMSS state. */ ath11k_pci_aspm_disable(ab_pci); ath11k_pci_msi_enable(ab_pci); ret = ath11k_mhi_start(ab_pci); if (ret) { ath11k_err(ab, "failed to start mhi: %d\n", ret); return ret; } if (ab->hw_params.static_window_map) ath11k_pci_select_static_window(ab_pci); return 0; } static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); /* restore aspm in case firmware bootup fails */ ath11k_pci_aspm_restore(ab_pci); ath11k_pci_force_wake(ab_pci->ab); ath11k_pci_msi_disable(ab_pci); ath11k_mhi_stop(ab_pci, is_suspend); clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pci_sw_reset(ab_pci->ab, false); } static int ath11k_pci_hif_suspend(struct ath11k_base *ab) { struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); return ath11k_mhi_suspend(ar_pci); } static int ath11k_pci_hif_resume(struct ath11k_base *ab) { struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); return ath11k_mhi_resume(ar_pci); } static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab) { ath11k_pcic_ce_irqs_enable(ab); } static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) { ath11k_pcic_ce_irq_disable_sync(ab); } static int ath11k_pci_start(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); /* TODO: for now don't restore ASPM in case of single MSI * vector as MHI register reading in M2 causes system hang. 
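* With a single MSI vector we therefore leave ASPM disabled and log it below.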
*/ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) ath11k_pci_aspm_restore(ab_pci); else ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); ath11k_pcic_start(ab); return 0; } static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .start = ath11k_pci_start, .stop = ath11k_pcic_stop, .read32 = ath11k_pcic_read32, .write32 = ath11k_pcic_write32, .read = ath11k_pcic_read, .power_down = ath11k_pci_power_down, .power_up = ath11k_pci_power_up, .suspend = ath11k_pci_hif_suspend, .resume = ath11k_pci_hif_resume, .irq_enable = ath11k_pcic_ext_irq_enable, .irq_disable = ath11k_pcic_ext_irq_disable, .get_msi_address = ath11k_pcic_get_msi_address, .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx, #ifdef CONFIG_DEV_COREDUMP .coredump_download = ath11k_pci_coredump_download, #endif }; static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) { u32 soc_hw_version; soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION); *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, soc_hw_version); *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, soc_hw_version); ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n", *major, *minor); } static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, const struct cpumask *m) { if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags)) return 0; return irq_set_affinity_and_hint(ab_pci->pdev->irq, m); } static int ath11k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { struct ath11k_base *ab; struct ath11k_pci *ab_pci; u32 soc_hw_version_major, soc_hw_version_minor; int ret; u32 sub_version; ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI); if (!ab) { dev_err(&pdev->dev, "failed to allocate ath11k base\n"); return -ENOMEM; } ab->dev = &pdev->dev; pci_set_drvdata(pdev, ab); ab_pci = ath11k_pci_priv(ab); ab_pci->dev_id = pci_dev->device; ab_pci->ab = ab; ab_pci->pdev = pdev; ab->hif.ops = &ath11k_pci_hif_ops; ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL; pci_set_drvdata(pdev, ab); spin_lock_init(&ab_pci->window_lock); /* Set fixed_mem_region to true for platforms that support reserved memory * from DT. If memory is reserved from DT for the FW, the ath11k driver need not * allocate memory.
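* The reservation is advertised via the "memory-region" DT property checked below.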
*/ if (of_property_present(ab->dev->of_node, "memory-region")) set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags); ret = ath11k_pci_claim(ab_pci, pdev); if (ret) { ath11k_err(ab, "failed to claim device: %d\n", ret); goto err_free_core; } ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); ab->id.vendor = pdev->vendor; ab->id.device = pdev->device; ab->id.subsystem_vendor = pdev->subsystem_vendor; ab->id.subsystem_device = pdev->subsystem_device; switch (pci_dev->device) { case QCA6390_DEVICE_ID: ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390); if (ret) { ath11k_err(ab, "failed to register PCI ops: %d\n", ret); goto err_pci_free_region; } ath11k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { case 2: ab->hw_rev = ATH11K_HW_QCA6390_HW20; break; default: dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n", soc_hw_version_major, soc_hw_version_minor); ret = -EOPNOTSUPP; goto err_pci_free_region; } break; case QCN9074_DEVICE_ID: ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074); if (ret) { ath11k_err(ab, "failed to register PCI ops: %d\n", ret); goto err_pci_free_region; } ab->hw_rev = ATH11K_HW_QCN9074_HW10; break; case WCN6855_DEVICE_ID: ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390); if (ret) { ath11k_err(ab, "failed to register PCI ops: %d\n", ret); goto err_pci_free_region; } ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD; ath11k_pci_read_hw_version(ab, &soc_hw_version_major, &soc_hw_version_minor); switch (soc_hw_version_major) { case 2: switch (soc_hw_version_minor) { case 0x00: case 0x01: ab->hw_rev = ATH11K_HW_WCN6855_HW20; break; case 0x10: case 0x11: sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER); ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n", sub_version); switch (sub_version) { case 0x1019A0E1: case 0x1019B0E1: case 0x1019C0E1: case 0x1019D0E1: ab->hw_rev = ATH11K_HW_QCA2066_HW21; break; case 0x001e60e1: ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21; break; default: ab->hw_rev = ATH11K_HW_WCN6855_HW21; } break; default: goto unsupported_wcn6855_soc; } break; default: unsupported_wcn6855_soc: dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n", soc_hw_version_major, soc_hw_version_minor); ret = -EOPNOTSUPP; goto err_pci_free_region; } break; default: dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", pci_dev->device); ret = -EOPNOTSUPP; goto err_pci_free_region; } ret = ath11k_pcic_init_msi_config(ab); if (ret) { ath11k_err(ab, "failed to init msi config: %d\n", ret); goto err_pci_free_region; } ret = ath11k_pci_alloc_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to enable msi: %d\n", ret); goto err_pci_free_region; } ret = ath11k_core_pre_init(ab); if (ret) goto err_pci_disable_msi; ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); if (ret) { ath11k_err(ab, "failed to set irq affinity %d\n", ret); goto err_pci_disable_msi; } ret = ath11k_mhi_register(ab_pci); if (ret) { ath11k_err(ab, "failed to register mhi: %d\n", ret); goto err_irq_affinity_cleanup; } ret = ath11k_hal_srng_init(ab); if (ret) goto err_mhi_unregister; ret = ath11k_ce_alloc_pipes(ab); if (ret) { ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); goto err_hal_srng_deinit; } ath11k_pci_init_qmi_ce_config(ab); ret = ath11k_pcic_config_irq(ab); if (ret) { ath11k_err(ab, "failed to config irq: %d\n", ret); goto err_ce_free; } /* kernel may allocate a 
dummy vector before request_irq and * then allocate a real vector when request_irq is called. * So get msi_data here again to avoid spurious interrupt * as msi_data will be configured for the srngs. */ ret = ath11k_pci_config_msi_data(ab_pci); if (ret) { ath11k_err(ab, "failed to config msi_data: %d\n", ret); goto err_free_irq; } ret = ath11k_core_init(ab); if (ret) { ath11k_err(ab, "failed to init core: %d\n", ret); goto err_free_irq; } ath11k_qmi_fwreset_from_cold_boot(ab); return 0; err_free_irq: /* __free_irq() expects the caller to have cleared the affinity hint */ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); ath11k_pcic_free_irq(ab); err_ce_free: ath11k_ce_free_pipes(ab); err_hal_srng_deinit: ath11k_hal_srng_deinit(ab); err_mhi_unregister: ath11k_mhi_unregister(ab_pci); err_irq_affinity_cleanup: ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); err_pci_disable_msi: ath11k_pci_free_msi(ab_pci); err_pci_free_region: ath11k_pci_free_region(ab_pci); err_free_core: ath11k_core_free(ab); return ret; } static void ath11k_pci_remove(struct pci_dev *pdev) { struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_pci_power_down(ab, false); ath11k_debugfs_soc_destroy(ab); ath11k_qmi_deinit_service(ab); ath11k_core_pm_notifier_unregister(ab); goto qmi_fail; } set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); cancel_work_sync(&ab->reset_work); cancel_work_sync(&ab->dump_work); ath11k_core_deinit(ab); qmi_fail: ath11k_fw_destroy(ab); ath11k_mhi_unregister(ab_pci); ath11k_pcic_free_irq(ab); ath11k_pci_free_msi(ab_pci); ath11k_pci_free_region(ab_pci); ath11k_hal_srng_deinit(ab); ath11k_ce_free_pipes(ab); ath11k_core_free(ab); } static void ath11k_pci_shutdown(struct pci_dev *pdev) { struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); ath11k_pci_power_down(ab, false); } static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) { struct ath11k_base *ab = dev_get_drvdata(dev); int ret; if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n"); return 0; } ret = ath11k_core_suspend(ab); if (ret) ath11k_warn(ab, "failed to suspend core: %d\n", ret); return 0; } static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) { struct ath11k_base *ab = dev_get_drvdata(dev); int ret; if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n"); return 0; } ret = ath11k_core_resume(ab); if (ret) ath11k_warn(ab, "failed to resume core: %d\n", ret); return ret; } static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev) { struct ath11k_base *ab = dev_get_drvdata(dev); int ret; ret = ath11k_core_suspend_late(ab); if (ret) ath11k_warn(ab, "failed to late suspend core: %d\n", ret); /* Similar to ath11k_pci_pm_suspend(), we return success here * even if an error happens, to allow system suspend/hibernation to survive.
*/ return 0; } static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev) { struct ath11k_base *ab = dev_get_drvdata(dev); int ret; ret = ath11k_core_resume_early(ab); if (ret) ath11k_warn(ab, "failed to early resume core: %d\n", ret); return ret; } static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend, ath11k_pci_pm_resume) SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late, ath11k_pci_pm_resume_early) }; static struct pci_driver ath11k_pci_driver = { .name = "ath11k_pci", .id_table = ath11k_pci_id_table, .probe = ath11k_pci_probe, .remove = ath11k_pci_remove, .shutdown = ath11k_pci_shutdown, #ifdef CONFIG_PM .driver.pm = &ath11k_pci_pm_ops, #endif }; static int ath11k_pci_init(void) { int ret; ret = pci_register_driver(&ath11k_pci_driver); if (ret) pr_err("failed to register ath11k pci driver: %d\n", ret); return ret; } module_init(ath11k_pci_init); static void ath11k_pci_exit(void) { pci_unregister_driver(&ath11k_pci_driver); } module_exit(ath11k_pci_exit); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); /* firmware files */ MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*"); MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*"); MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*"); MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*"); diff --git a/pcic.c b/pcic.c index 3fe77310c71f..fc6e7da05c60 100644 --- a/pcic.c +++ b/pcic.c @@ -1,863 +1,865 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ +#include #include "core.h" #include "pcic.h" #include "debug.h" static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { "bhi", "mhi-er0", "mhi-er1", "ce0", "ce1", "ce2", "ce3", "ce4", "ce5", "ce6", "ce7", "ce8", "ce9", "ce10", "ce11", "host2wbm-desc-feed", "host2reo-re-injection", "host2reo-command", "host2rxdma-monitor-ring3", "host2rxdma-monitor-ring2", "host2rxdma-monitor-ring1", "reo2ost-exception", "wbm2host-rx-release", "reo2host-status", "reo2host-destination-ring4", "reo2host-destination-ring3", "reo2host-destination-ring2", "reo2host-destination-ring1", "rxdma2host-monitor-destination-mac3", "rxdma2host-monitor-destination-mac2", "rxdma2host-monitor-destination-mac1", "ppdu-end-interrupts-mac3", "ppdu-end-interrupts-mac2", "ppdu-end-interrupts-mac1", "rxdma2host-monitor-status-ring-mac3", "rxdma2host-monitor-status-ring-mac2", "rxdma2host-monitor-status-ring-mac1", "host2rxdma-host-buf-ring-mac3", "host2rxdma-host-buf-ring-mac2", "host2rxdma-host-buf-ring-mac1", "rxdma2host-destination-ring-mac3", "rxdma2host-destination-ring-mac2", "rxdma2host-destination-ring-mac1", "host2tcl-input-ring4", "host2tcl-input-ring3", "host2tcl-input-ring2", "host2tcl-input-ring1", "wbm2host-tx-completions-ring3", "wbm2host-tx-completions-ring2", "wbm2host-tx-completions-ring1", "tcl2host-status-ring", }; static const struct ath11k_msi_config ath11k_msi_config[] = { { .total_vectors = 32, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 10, .base_vector = 3 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, { .name = "DP", .num_vectors = 18, .base_vector = 14 }, }, .hw_rev = ATH11K_HW_QCA6390_HW20, }, { .total_vectors = 16, .total_users = 3, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 5, .base_vector = 3 }, { .name = "DP", .num_vectors = 8, .base_vector = 8 }, }, .hw_rev = ATH11K_HW_QCN9074_HW10, }, { .total_vectors = 32, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 10, .base_vector = 3 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, { .name = "DP", .num_vectors = 18, .base_vector = 14 }, }, .hw_rev = ATH11K_HW_WCN6855_HW20, }, { .total_vectors = 32, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 10, .base_vector = 3 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, { .name = "DP", .num_vectors = 18, .base_vector = 14 }, }, .hw_rev = ATH11K_HW_WCN6855_HW21, }, { .total_vectors = 28, .total_users = 2, .users = (struct ath11k_msi_user[]) { { .name = "CE", .num_vectors = 10, .base_vector = 0 }, { .name = "DP", .num_vectors = 18, .base_vector = 10 }, }, .hw_rev = ATH11K_HW_WCN6750_HW10, }, { .total_vectors = 32, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 10, .base_vector = 3 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, { .name = "DP", .num_vectors = 18, .base_vector = 14 }, }, .hw_rev = ATH11K_HW_QCA2066_HW21, }, { .total_vectors = 32, .total_users = 4, .users = (struct ath11k_msi_user[]) { { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, { .name = "CE", .num_vectors = 10, .base_vector = 3 }, { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, { .name = "DP", .num_vectors = 18, .base_vector = 
14 }, }, .hw_rev = ATH11K_HW_QCA6698AQ_HW21, }, }; int ath11k_pcic_init_msi_config(struct ath11k_base *ab) { const struct ath11k_msi_config *msi_config; int i; for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) { msi_config = &ath11k_msi_config[i]; if (msi_config->hw_rev == ab->hw_rev) break; } if (i == ARRAY_SIZE(ath11k_msi_config)) { ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n", ab->hw_rev); return -EINVAL; } ab->pci.msi.config = msi_config; return 0; } EXPORT_SYMBOL(ath11k_pcic_init_msi_config); static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) { if (offset < ATH11K_PCI_WINDOW_START) iowrite32(value, ab->mem + offset); else ab->pci.ops->window_write32(ab, offset, value); } void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) { int ret = 0; bool wakeup_required; /* for offsets beyond BAR + 4K - 32 we may * need to wake up the device for access. */ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; if (wakeup_required && ab->pci.ops->wakeup) ret = ab->pci.ops->wakeup(ab); __ath11k_pcic_write32(ab, offset, value); if (wakeup_required && !ret && ab->pci.ops->release) ab->pci.ops->release(ab); } EXPORT_SYMBOL(ath11k_pcic_write32); static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) { u32 val; if (offset < ATH11K_PCI_WINDOW_START) val = ioread32(ab->mem + offset); else val = ab->pci.ops->window_read32(ab, offset); return val; } u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) { int ret = 0; u32 val; bool wakeup_required; /* for offsets beyond BAR + 4K - 32 we may * need to wake up the device for access. */ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; if (wakeup_required && ab->pci.ops->wakeup) ret = ab->pci.ops->wakeup(ab); val = __ath11k_pcic_read32(ab, offset); if (wakeup_required && !ret && ab->pci.ops->release) ab->pci.ops->release(ab); return val; } EXPORT_SYMBOL(ath11k_pcic_read32); int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end) { int ret = 0; bool wakeup_required; u32 *data = buf; u32 i; /* for offsets beyond BAR + 4K - 32 we may * need to wake up the device for access. */ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && end >= ATH11K_PCI_ACCESS_ALWAYS_OFF; if (wakeup_required && ab->pci.ops->wakeup) { ret = ab->pci.ops->wakeup(ab); if (ret) { ath11k_warn(ab, "wakeup failed, data may be invalid: %d", ret); /* Even though wakeup() failed, continue processing rather * than returning because some parts of the data may still * be valid and useful in some cases, e.g. they could give us * some clues about a firmware crash. * Being misled by invalid data can be avoided because we * are aware of the wakeup failure.
*/ } } for (i = start; i < end + 1; i += 4) *data++ = __ath11k_pcic_read32(ab, i); if (wakeup_required && ab->pci.ops->release) ab->pci.ops->release(ab); return 0; } EXPORT_SYMBOL(ath11k_pcic_read); void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, u32 *msi_addr_hi) { *msi_addr_lo = ab->pci.msi.addr_lo; *msi_addr_hi = ab->pci.msi.addr_hi; } EXPORT_SYMBOL(ath11k_pcic_get_msi_address); int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, int *num_vectors, u32 *user_base_data, u32 *base_vector) { const struct ath11k_msi_config *msi_config = ab->pci.msi.config; int idx; for (idx = 0; idx < msi_config->total_users; idx++) { if (strcmp(user_name, msi_config->users[idx].name) == 0) { *num_vectors = msi_config->users[idx].num_vectors; *base_vector = msi_config->users[idx].base_vector; *user_base_data = *base_vector + ab->pci.msi.ep_base_data; ath11k_dbg(ab, ATH11K_DBG_PCI, "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n", user_name, *num_vectors, *user_base_data, *base_vector); return 0; } } ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); return -EINVAL; } EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment); void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx) { u32 i, msi_data_idx; for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; if (ce_id == i) break; msi_data_idx++; } *msi_idx = msi_data_idx; } EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx); static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab) { int i, j; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); free_netdev(irq_grp->napi_ndev); } } void ath11k_pcic_free_irq(struct ath11k_base *ab) { int i, irq_idx; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); } ath11k_pcic_free_ext_irq(ab); } EXPORT_SYMBOL(ath11k_pcic_free_irq); static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { u32 irq_idx; /* In case of one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; enable_irq(ab->irq_num[irq_idx]); } static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { u32 irq_idx; /* In case of one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; disable_irq_nosync(ab->irq_num[irq_idx]); } static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab) { int i; clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_pcic_ce_irq_disable(ab, i); } } static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab) { int i; int irq_idx; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; synchronize_irq(ab->irq_num[irq_idx]); } } static void ath11k_pcic_ce_tasklet(struct 
tasklet_struct *t) { struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); enable_irq(ce_pipe->ab->irq_num[irq_idx]); } static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg) { struct ath11k_ce_pipe *ce_pipe = arg; struct ath11k_base *ab = ce_pipe->ab; int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) return IRQ_HANDLED; /* last interrupt received for this CE */ ce_pipe->timestamp = jiffies; disable_irq_nosync(ab->irq_num[irq_idx]); tasklet_schedule(&ce_pipe->intr_tq); return IRQ_HANDLED; } static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) { struct ath11k_base *ab = irq_grp->ab; int i; /* In case of one MSI vector, we handle irq enable/disable * in a uniform way since we only have one irq */ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) { int i; clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; ath11k_pcic_ext_grp_disable(irq_grp); if (irq_grp->napi_enabled) { napi_synchronize(&irq_grp->napi); napi_disable(&irq_grp->napi); irq_grp->napi_enabled = false; } } } static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) { struct ath11k_base *ab = irq_grp->ab; int i; /* In case of one MSI vector, we handle irq enable/disable in a * uniform way since we only have one irq */ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) return; for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab) { int i; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; if (!irq_grp->napi_enabled) { napi_enable(&irq_grp->napi); irq_grp->napi_enabled = true; } ath11k_pcic_ext_grp_enable(irq_grp); } set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); } EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable); static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab) { int i, j, irq_idx; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; for (j = 0; j < irq_grp->num_irq; j++) { irq_idx = irq_grp->irqs[j]; synchronize_irq(ab->irq_num[irq_idx]); } } } void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab) { __ath11k_pcic_ext_irq_disable(ab); ath11k_pcic_sync_ext_irqs(ab); } EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable); static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget) { struct ath11k_ext_irq_grp *irq_grp = container_of(napi, struct ath11k_ext_irq_grp, napi); struct ath11k_base *ab = irq_grp->ab; int work_done; int i; work_done = ath11k_dp_service_srng(ab, irq_grp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); for (i = 0; i < irq_grp->num_irq; i++) enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); } if (work_done > budget) work_done = budget; return work_done; } static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg) { struct ath11k_ext_irq_grp *irq_grp = arg; struct ath11k_base *ab = irq_grp->ab; int i; if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) return IRQ_HANDLED; 
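/* One-shot interrupt handling from here on: every line in this group is * masked with disable_irq_nosync() and processing is deferred to NAPI; * ath11k_pcic_ext_grp_napi_poll() re-enables the lines once it completes * within budget. */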
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq); /* last interrupt received for this group */ irq_grp->timestamp = jiffies; for (i = 0; i < irq_grp->num_irq; i++) disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); napi_schedule(&irq_grp->napi); return IRQ_HANDLED; } static int ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector) { return ab->pci.ops->get_msi_irq(ab, vector); } static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) { int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; struct ath11k_ext_irq_grp *irq_grp; unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, &user_base_data, &base_vector); if (ret < 0) return ret; irq_flags = IRQF_SHARED; if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) irq_flags |= IRQF_NOBALANCING; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; irq_grp->napi_ndev = alloc_netdev_dummy(0); if (!irq_grp->napi_ndev) { ret = -ENOMEM; goto fail_allocate; } netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_pcic_ext_grp_napi_poll); if (ab->hw_params.ring_mask->tx[i] || ab->hw_params.ring_mask->rx[i] || ab->hw_params.ring_mask->rx_err[i] || ab->hw_params.ring_mask->rx_wbm_rel[i] || ab->hw_params.ring_mask->reo_status[i] || ab->hw_params.ring_mask->rxdma2host[i] || ab->hw_params.ring_mask->host2rxdma[i] || ab->hw_params.ring_mask->rx_mon_status[i]) { num_irq = 1; } irq_grp->num_irq = num_irq; irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i; for (j = 0; j < irq_grp->num_irq; j++) { int irq_idx = irq_grp->irqs[j]; int vector = (i % num_vectors) + base_vector; int irq = ath11k_pcic_get_msi_irq(ab, vector); if (irq < 0) { ret = irq; goto fail_irq; } ab->irq_num[irq_idx] = irq; ath11k_dbg(ab, ATH11K_DBG_PCI, "irq %d group %d\n", irq, i); irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler, irq_flags, "DP_EXT_IRQ", irq_grp); if (ret) { ath11k_err(ab, "failed to request irq %d: %d\n", vector, ret); for (n = 0; n <= i; n++) { irq_grp = &ab->ext_irq_grp[n]; free_netdev(irq_grp->napi_ndev); } return ret; } } ath11k_pcic_ext_grp_disable(irq_grp); } return 0; fail_irq: /* index i's ->napi_ndev was properly allocated.
Free it also */ i += 1; fail_allocate: for (n = 0; n < i; n++) { irq_grp = &ab->ext_irq_grp[n]; free_netdev(irq_grp->napi_ndev); } return ret; } int ath11k_pcic_config_irq(struct ath11k_base *ab) { struct ath11k_ce_pipe *ce_pipe; u32 msi_data_start; u32 msi_data_count, msi_data_idx; u32 msi_irq_start; unsigned int msi_data; int irq, i, ret, irq_idx; unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count, &msi_data_start, &msi_irq_start); if (ret) return ret; irq_flags = IRQF_SHARED; if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags)) irq_flags |= IRQF_NOBALANCING; /* Configure CE irqs */ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; irq = ath11k_pcic_get_msi_irq(ab, msi_data); if (irq < 0) return irq; ce_pipe = &ab->ce.ce_pipe[i]; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet); ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler, irq_flags, irq_name[irq_idx], ce_pipe); if (ret) { ath11k_err(ab, "failed to request irq %d: %d\n", irq_idx, ret); return ret; } ab->irq_num[irq_idx] = irq; msi_data_idx++; ath11k_pcic_ce_irq_disable(ab, i); } ret = ath11k_pcic_ext_irq_config(ab); if (ret) return ret; return 0; } EXPORT_SYMBOL(ath11k_pcic_config_irq); void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab) { int i; set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; ath11k_pcic_ce_irq_enable(ab, i); } } EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable); static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; tasklet_kill(&ce_pipe->intr_tq); } } void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab) { ath11k_pcic_ce_irqs_disable(ab); ath11k_pcic_sync_ce_irqs(ab); ath11k_pcic_kill_tasklets(ab); } EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync); void ath11k_pcic_stop(struct ath11k_base *ab) { ath11k_pcic_ce_irq_disable_sync(ab); ath11k_ce_cleanup_pipes(ab); } EXPORT_SYMBOL(ath11k_pcic_stop); int ath11k_pcic_start(struct ath11k_base *ab) { set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); ath11k_pcic_ce_irqs_enable(ab); ath11k_ce_rx_post_buf(ab); return 0; } EXPORT_SYMBOL(ath11k_pcic_start); int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; int i; for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { entry = &ab->hw_params.svc_to_ce_map[i]; if (__le32_to_cpu(entry->service_id) != service_id) continue; switch (__le32_to_cpu(entry->pipedir)) { case PIPEDIR_NONE: break; case PIPEDIR_IN: WARN_ON(dl_set); *dl_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; break; case PIPEDIR_OUT: WARN_ON(ul_set); *ul_pipe = __le32_to_cpu(entry->pipenum); ul_set = true; break; case PIPEDIR_INOUT: WARN_ON(dl_set); WARN_ON(ul_set); *dl_pipe = __le32_to_cpu(entry->pipenum); *ul_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; ul_set = true; break; } } if (WARN_ON(!ul_set || !dl_set)) return -ENOENT; return 0; } EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe); int ath11k_pcic_register_pci_ops(struct ath11k_base *ab, const struct ath11k_pci_ops *pci_ops) { 
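/* A rough sketch of the ops table a bus driver is expected to hand in * (all names hypothetical); .wakeup and .release are optional and may * be NULL, while the remaining three callbacks are mandatory, as * checked below: * * static const struct ath11k_pci_ops my_bus_pci_ops = { * .wakeup = NULL, * .release = NULL, * .get_msi_irq = my_bus_get_msi_irq, * .window_write32 = my_bus_window_write32, * .window_read32 = my_bus_window_read32, * }; * * ret = ath11k_pcic_register_pci_ops(ab, &my_bus_pci_ops); */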
if (!pci_ops) return 0; /* Return error if mandatory pci_ops callbacks are missing */ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 || !pci_ops->window_read32) return -EINVAL; ab->pci.ops = pci_ops; return 0; } EXPORT_SYMBOL(ath11k_pcic_register_pci_ops); void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab) { int i; for (i = 0; i < ab->hw_params.ce_count; i++) { if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR || i == ATH11K_PCI_CE_WAKE_IRQ) continue; ath11k_pcic_ce_irq_enable(ab, i); } } EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq); void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab) { int i; int irq_idx; struct ath11k_ce_pipe *ce_pipe; for (i = 0; i < ab->hw_params.ce_count; i++) { ce_pipe = &ab->ce.ce_pipe[i]; irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR || i == ATH11K_PCI_CE_WAKE_IRQ) continue; disable_irq_nosync(ab->irq_num[irq_idx]); synchronize_irq(ab->irq_num[irq_idx]); tasklet_kill(&ce_pipe->intr_tq); } } EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq); diff --git a/qmi.c b/qmi.c index 2782f4723e41..378ac96b861b 100644 --- a/qmi.c +++ b/qmi.c @@ -1,3373 +1,3375 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include +#include #include "qmi.h" #include "core.h" #include "debug.h" #include "hif.h" #include #include #include #include #include #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 #define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 #define PLATFORM_CAP_PCIE_PME_D3COLD 0x10 #define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING=" bool ath11k_cold_boot_cal = 1; EXPORT_SYMBOL(ath11k_cold_boot_cal); module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644); MODULE_PARM_DESC(cold_boot_cal, "Decrease the channel switch time but increase the driver load time (Default: true)"); static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, num_clients_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, num_clients), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, wake_msi_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, wake_msi), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios_len), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, .elem_size = sizeof(u32), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, gpios), }, { .data_type = QMI_OPT_FLAG, 
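/* Optional QMI TLVs are encoded as pairs sharing one tlv_type: a * QMI_OPT_FLAG "*_valid" element followed by the payload element, a * pattern repeated throughout the tables below. */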
.elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, nm_modem_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, nm_modem), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, bdf_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, m3_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_filesys_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_filesys_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_cache_support_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_cache_support), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_done_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, cal_done), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_bucket_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_bucket), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = 
sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1C, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_cfg_mode_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1C, .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, mem_cfg_mode), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_download_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_download_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_update_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, initiate_cal_update_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, msa_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, msa_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, pin_connect_result_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, pin_connect_result_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, client_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, client_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct 
qmi_wlanfw_ind_register_req_msg_v01, request_mem_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, request_mem_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_mem_ready_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_mem_ready_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_init_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, fw_init_done_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, rejuvenate_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, rejuvenate_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, xo_cal_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, xo_cal_enable), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, cal_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, cal_done_enable), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, fw_status_valid), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, fw_status), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, 
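/* A tlv_type of 0 in these nested tables is effectively a placeholder: * members of a QMI_STRUCT are serialized positionally through ei_array * rather than with TLV headers of their own. */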
.tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, size), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg), .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, mem_seg_len), }, { .data_type = QMI_STRUCT, .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, mem_seg), .ei_array = qmi_wlanfw_mem_seg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, mem_seg_len), }, { .data_type = QMI_STRUCT, .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01, .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, mem_seg), .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, 
.tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_addr_valid), }, { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_addr), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_size_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01, bar_size), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, chip_id), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, chip_family), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01, board_id), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, fw_version), }, { .data_type = QMI_STRING, .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1, .elem_size 
= sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, fw_build_timestamp), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, chip_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, chip_info), .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, board_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, board_info), .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, soc_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, soc_info), .ei_array = qmi_wlanfw_soc_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_version_info_valid), }, { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_version_info), .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_build_id_valid), }, { .data_type = QMI_STRING, .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1, .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_build_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, num_macs_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, num_macs), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, voltage_mv_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, voltage_mv), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = 
sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, time_freq_hz_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, time_freq_hz), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, otp_version_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, otp_version), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, eeprom_read_timeout_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, eeprom_read_timeout), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, valid), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, file_id_valid), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, file_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, total_size_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, total_size), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, seg_id_valid), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, seg_id), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data_len), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01, .elem_size = sizeof(u8), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, data), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, end_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, 
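/* The BDF is pushed in segments of up to QMI_WLANFW_MAX_DATA_SIZE_V01 * bytes: total_size and seg_id above track progress, and the 'end' flag * below marks the final segment. */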
.tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, end), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, bdf_type_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, bdf_type), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(u64), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, pipe_num), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, pipe_dir), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, nentries), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, nbytes_max), }, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, flags), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, service_id), }, { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, pipe_dir), 
}, { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, pipe_num), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id), }, { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(u16), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, offset), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0, .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01, addr), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, mode), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, hw_debug_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, hw_debug), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, host_version_valid), }, { .data_type = QMI_STRING, .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1, .elem_size = sizeof(char), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, host_version), }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_CE_V01, .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x11, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, tgt_cfg), .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 
0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01, .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, svc_cfg), .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01, .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg), .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei, }, { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2_valid), }, { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2_len), }, { .data_type = QMI_STRUCT, .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01, .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01), .array_type = VAR_LEN_ARRAY, .tlv_type = 0x14, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, shadow_reg_v2), .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, enablefwlog_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(u8), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, enablefwlog), }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct 
qmi_response_type_v01), .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01, resp), .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .array_type = NO_ARRAY, }, }; /* clang stack usage explodes if this is inlined */ static noinline_for_stack int ath11k_qmi_host_cap_send(struct ath11k_base *ab) { struct qmi_wlanfw_host_cap_req_msg_v01 req; struct qmi_wlanfw_host_cap_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.num_clients_valid = 1; req.num_clients = 1; req.mem_cfg_mode = ab->qmi.target_mem_mode; req.mem_cfg_mode_valid = 1; req.bdf_support_valid = 1; req.bdf_support = 1; if (ab->hw_params.m3_fw_support) { req.m3_support_valid = 1; req.m3_support = 1; req.m3_cache_support_valid = 1; req.m3_cache_support = 1; } else { req.m3_support_valid = 0; req.m3_support = 0; req.m3_cache_support_valid = 0; req.m3_cache_support = 0; } req.cal_done_valid = 1; req.cal_done = ab->qmi.cal_done; if (ab->hw_params.internal_sleep_clock) { req.nm_modem_valid = 1; /* Notify firmware that this is a non-Qualcomm platform. */ req.nm_modem |= HOST_CSTATE_BIT; /* Notify firmware about the sleep clock selection; * nm_modem_bit[1] is used for this purpose. The host driver on * non-Qualcomm platforms should select the internal sleep * clock. */ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; } if (ab->hw_params.global_reset) req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD; ath11k_dbg(ab, ATH11K_DBG_QMI, "host cap request\n"); ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_HOST_CAP_REQ_V01, QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_host_cap_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send host capability request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) goto out; if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "host capability request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: return ret; } static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) { struct qmi_wlanfw_ind_register_req_msg_v01 *req; struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; struct qmi_handle *handle = &ab->qmi.handle; struct qmi_txn txn; int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) { ret = -ENOMEM; goto resp_out; } req->client_id_valid = 1; req->client_id = QMI_WLANFW_CLIENT_ID; req->fw_ready_enable_valid = 1; req->fw_ready_enable = 1; req->cal_done_enable_valid = 1; req->cal_done_enable = 1; req->fw_init_done_enable_valid = 1; req->fw_init_done_enable = 1; req->pin_connect_result_enable_valid = 0; req->pin_connect_result_enable = 0; /* WCN6750 doesn't request DDR memory via QMI; * instead it uses a fixed 12MB reserved memory * region in DDR.
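* Hosts with hw_params.fixed_fw_mem set therefore leave * request_mem_enable and fw_mem_ready_enable unset below.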
*/ if (!ab->hw_params.fixed_fw_mem) { req->request_mem_enable_valid = 1; req->request_mem_enable = 1; req->fw_mem_ready_enable_valid = 1; req->fw_mem_ready_enable = 1; } ret = qmi_txn_init(handle, &txn, qmi_wlanfw_ind_register_resp_msg_v01_ei, resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "indication register request\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_IND_REGISTER_REQ_V01, QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_ind_register_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send indication register request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait for indication register response: %d\n", ret); goto out; } if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "firmware indication register request failed: %d %d\n", resp->resp.result, resp->resp.error); ret = -EINVAL; goto out; } out: kfree(resp); resp_out: kfree(req); return ret; } static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) { struct qmi_wlanfw_respond_mem_req_msg_v01 *req; struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0, i; bool delayed; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); /* For QCA6390, by default the FW requests a block of ~4M contiguous * DMA memory, which is hard to allocate from the OS. So the host * returns failure to the FW, and the FW then requests multiple * blocks of small chunk size memory. */ if (!(ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && ab->qmi.target_mem_delayed) { delayed = true; ath11k_dbg(ab, ATH11K_DBG_QMI, "delaying mem_request (%d segments)\n", ab->qmi.mem_seg_count); memset(req, 0, sizeof(*req)); } else { delayed = false; req->mem_seg_len = ab->qmi.mem_seg_count; for (i = 0; i < req->mem_seg_len; i++) { req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr; req->mem_seg[i].size = ab->qmi.target_mem[i].size; req->mem_seg[i].type = ab->qmi.target_mem[i].type; ath11k_dbg(ab, ATH11K_DBG_QMI, "req mem_seg[%d] %pad %u %u\n", i, &ab->qmi.target_mem[i].paddr, ab->qmi.target_mem[i].size, ab->qmi.target_mem[i].type); } } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "respond memory request delayed %i\n", delayed); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_RESPOND_MEM_REQ_V01, QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_respond_mem_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to respond to qmi memory request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait for qmi memory request response: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { /* the error response is expected when * target_mem_delayed is true.
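* Firmware rejects the empty mem_seg list sent above with a * non-success result but an error code of 0, and then follows up * with a fresh request for smaller chunks; only that specific * combination is tolerated here.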
*/ if (delayed && resp.resp.error == 0) goto out; ath11k_warn(ab, "qmi respond memory request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: kfree(req); return ret; } static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab) { int i; for (i = 0; i < ab->qmi.mem_seg_count; i++) { if (!ab->qmi.target_mem[i].anyaddr) continue; if (ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { iounmap(ab->qmi.target_mem[i].iaddr); ab->qmi.target_mem[i].iaddr = NULL; continue; } dma_free_coherent(ab->dev, ab->qmi.target_mem[i].prev_size, ab->qmi.target_mem[i].vaddr, ab->qmi.target_mem[i].paddr); ab->qmi.target_mem[i].vaddr = NULL; } } static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) { int i; struct target_mem_chunk *chunk; ab->qmi.target_mem_delayed = false; for (i = 0; i < ab->qmi.mem_seg_count; i++) { chunk = &ab->qmi.target_mem[i]; /* Firmware reloads in coldboot/firmware recovery. * In that case, there is no need to allocate memory for the FW again. */ if (chunk->vaddr) { if (chunk->prev_type == chunk->type && chunk->prev_size == chunk->size) continue; if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "size/type mismatch (current %d %u) (prev %d %u), will try again later with a smaller size\n", chunk->size, chunk->type, chunk->prev_size, chunk->prev_type); ab->qmi.target_mem_delayed = true; return 0; } /* cannot reuse the existing chunk */ dma_free_coherent(ab->dev, chunk->prev_size, chunk->vaddr, chunk->paddr); chunk->vaddr = NULL; } chunk->vaddr = dma_alloc_coherent(ab->dev, chunk->size, &chunk->paddr, GFP_KERNEL | __GFP_NOWARN); if (!chunk->vaddr) { if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) { ath11k_dbg(ab, ATH11K_DBG_QMI, "dma allocation failed (%d B type %u), will try again later with a smaller size\n", chunk->size, chunk->type); ath11k_qmi_free_target_mem_chunk(ab); ab->qmi.target_mem_delayed = true; return 0; } ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n", chunk->size, chunk->type); return -EINVAL; } chunk->prev_type = chunk->type; chunk->prev_size = chunk->size; } return 0; } static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) { struct device *dev = ab->dev; struct device_node *hremote_node = NULL; struct resource res; u32 host_ddr_sz; int i, idx, ret; for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) { switch (ab->qmi.target_mem[i].type) { case HOST_DDR_REGION_TYPE: hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0); if (!hremote_node) { ath11k_dbg(ab, ATH11K_DBG_QMI, "failed to get hremote_node\n"); return -ENODEV; } ret = of_address_to_resource(hremote_node, 0, &res); of_node_put(hremote_node); if (ret) { ath11k_dbg(ab, ATH11K_DBG_QMI, "failed to get reg from hremote_node\n"); return ret; } if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) { ath11k_dbg(ab, ATH11K_DBG_QMI, "failed to assign memory, requested size exceeds the region\n"); return -EINVAL; } ab->qmi.target_mem[idx].paddr = res.start; ab->qmi.target_mem[idx].iaddr = ioremap(ab->qmi.target_mem[idx].paddr, ab->qmi.target_mem[i].size); if (!ab->qmi.target_mem[idx].iaddr) return -EIO; ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; host_ddr_sz = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; break; case BDF_MEM_REGION_TYPE: ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr; ab->qmi.target_mem[idx].iaddr = NULL; ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type =
ab->qmi.target_mem[i].type; idx++; break; case CALDB_MEM_REGION_TYPE: if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) { ath11k_warn(ab, "qmi mem size is low to load caldata\n"); return -EINVAL; } if (ath11k_core_coldboot_cal_support(ab)) { if (hremote_node) { ab->qmi.target_mem[idx].paddr = res.start + host_ddr_sz; ab->qmi.target_mem[idx].iaddr = ioremap(ab->qmi.target_mem[idx].paddr, ab->qmi.target_mem[i].size); if (!ab->qmi.target_mem[idx].iaddr) return -EIO; } else { ab->qmi.target_mem[idx].paddr = ATH11K_QMI_CALDB_ADDRESS; ab->qmi.target_mem[idx].iaddr = NULL; } } else { ab->qmi.target_mem[idx].paddr = 0; ab->qmi.target_mem[idx].iaddr = NULL; } ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; break; default: ath11k_warn(ab, "qmi ignore invalid mem req type %d\n", ab->qmi.target_mem[i].type); break; } } ab->qmi.mem_seg_count = idx; return 0; } static int ath11k_qmi_request_device_info(struct ath11k_base *ab) { struct qmi_wlanfw_device_info_req_msg_v01 req = {}; struct qmi_wlanfw_device_info_resp_msg_v01 resp = {}; struct qmi_txn txn; void __iomem *bar_addr_va; int ret; /* device info message req is only sent for hybrid bus devices */ if (!ab->hw_params.hybrid_bus_type) return 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlfw_device_info_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_DEVICE_INFO_REQ_V01, QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_device_info_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi target device info request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait qmi target device info request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi device info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (!resp.bar_addr_valid || !resp.bar_size_valid) { ath11k_warn(ab, "qmi device info response invalid: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (!resp.bar_addr || resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) { ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n", resp.bar_addr, resp.bar_size); ret = -EINVAL; goto out; } bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size); if (!bar_addr_va) { ath11k_warn(ab, "qmi device info ioremap failed\n"); ab->mem_len = 0; ret = -EIO; goto out; } ab->mem = bar_addr_va; ab->mem_len = resp.bar_size; if (!ab->hw_params.ce_remap) ab->mem_ce = ab->mem; return 0; out: return ret; } static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) { struct qmi_wlanfw_cap_req_msg_v01 req; struct qmi_wlanfw_cap_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; int r; char *fw_build_id; int fw_build_id_mask_len; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "target cap request\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_CAP_REQ_V01, QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_cap_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send qmi cap request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, 
"failed to wait qmi cap request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi cap request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } if (resp.chip_info_valid) { ab->qmi.target.chip_id = resp.chip_info.chip_id; ab->qmi.target.chip_family = resp.chip_info.chip_family; } if (resp.board_info_valid) ab->qmi.target.board_id = resp.board_info.board_id; else ab->qmi.target.board_id = 0xFF; if (resp.soc_info_valid) ab->qmi.target.soc_id = resp.soc_info.soc_id; if (resp.fw_version_info_valid) { ab->qmi.target.fw_version = resp.fw_version_info.fw_version; strscpy(ab->qmi.target.fw_build_timestamp, resp.fw_version_info.fw_build_timestamp, sizeof(ab->qmi.target.fw_build_timestamp)); } if (resp.fw_build_id_valid) strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id, sizeof(ab->qmi.target.fw_build_id)); if (resp.eeprom_read_timeout_valid) { ab->qmi.target.eeprom_caldata = resp.eeprom_read_timeout; ath11k_dbg(ab, ATH11K_DBG_QMI, "cal data supported from eeprom\n"); } fw_build_id = ab->qmi.target.fw_build_id; fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK); if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len)) fw_build_id = fw_build_id + fw_build_id_mask_len; ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", ab->qmi.target.chip_id, ab->qmi.target.chip_family, ab->qmi.target.board_id, ab->qmi.target.soc_id); ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s", ab->qmi.target.fw_version, ab->qmi.target.fw_build_timestamp, fw_build_id); r = ath11k_core_check_smbios(ab); if (r) ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); r = ath11k_core_check_dt(ab); if (r) ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n"); out: return ret; } static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, const u8 *data, u32 len, u8 type) { struct qmi_wlanfw_bdf_download_req_msg_v01 *req; struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; struct qmi_txn txn; const u8 *temp = data; void __iomem *bdf_addr = NULL; int ret = 0; u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); if (ab->hw_params.fixed_bdf_addr) { bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size); if (!bdf_addr) { ath11k_warn(ab, "qmi ioremap error for bdf_addr\n"); ret = -EIO; goto err_free_req; } } while (remaining) { req->valid = 1; req->file_id_valid = 1; req->file_id = ab->qmi.target.board_id; req->total_size_valid = 1; req->total_size = remaining; req->seg_id_valid = 1; req->data_valid = 1; req->bdf_type = type; req->bdf_type_valid = 1; req->end_valid = 1; req->end = 0; if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) { req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01; } else { req->data_len = remaining; req->end = 1; } if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { req->data_valid = 0; req->end = 1; req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; } else { memcpy(req->data, temp, req->data_len); } if (ab->hw_params.fixed_bdf_addr) { if (type == ATH11K_QMI_FILE_TYPE_CALDATA) bdf_addr += ab->hw_params.fw.cal_offset; memcpy_toio(bdf_addr, temp, len); } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_bdf_download_resp_msg_v01_ei, &resp); if (ret < 0) goto err_iounmap; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download req fixed addr type %d\n", type); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_BDF_DOWNLOAD_REQ_V01, 
QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_bdf_download_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); goto err_iounmap; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait board file download request: %d\n", ret); goto err_iounmap; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "board file download request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto err_iounmap; } if (ab->hw_params.fixed_bdf_addr || type == ATH11K_QMI_FILE_TYPE_EEPROM) { remaining = 0; } else { remaining -= req->data_len; temp += req->data_len; req->seg_id++; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download request remaining %i\n", remaining); } } err_iounmap: if (ab->hw_params.fixed_bdf_addr) iounmap(bdf_addr); err_free_req: kfree(req); return ret; } static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab, bool regdb) { struct device *dev = ab->dev; char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE]; const struct firmware *fw_entry; struct ath11k_board_data bd; u32 fw_size, file_type; int ret = 0, bdf_type; const u8 *tmp; memset(&bd, 0, sizeof(bd)); if (regdb) { ret = ath11k_core_fetch_regdb(ab, &bd); } else { ret = ath11k_core_fetch_bdf(ab, &bd); if (ret) ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret); } if (ret) goto out; if (regdb) bdf_type = ATH11K_QMI_BDF_TYPE_REGDB; else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0) bdf_type = ATH11K_QMI_BDF_TYPE_ELF; else bdf_type = ATH11K_QMI_BDF_TYPE_BIN; ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf_type %d\n", bdf_type); fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len); ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type); if (ret < 0) { ath11k_warn(ab, "qmi failed to load bdf file\n"); goto out; } /* QCA6390/WCN6855 do not support cal data, skip it */ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB) goto out; if (ab->qmi.target.eeprom_caldata) { file_type = ATH11K_QMI_FILE_TYPE_EEPROM; tmp = filename; fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE; } else { file_type = ATH11K_QMI_FILE_TYPE_CALDATA; /* cal-<bus>-<id>.bin */ snprintf(filename, sizeof(filename), "cal-%s-%s.bin", ath11k_bus_str(ab->hif.bus), dev_name(dev)); fw_entry = ath11k_core_firmware_request(ab, filename); if (!IS_ERR(fw_entry)) goto success; fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE); if (IS_ERR(fw_entry)) { /* Cal data may not be present during first-time calibration in the * factory, hence allow booting without it in FTM mode */ if (ath11k_ftm_mode) { ath11k_info(ab, "Booting without cal data file in factory test mode\n"); ret = 0; goto out; } ret = PTR_ERR(fw_entry); ath11k_warn(ab, "qmi failed to load CAL data file: %s\n", filename); goto out; } success: fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size); tmp = fw_entry->data; } ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type); if (ret < 0) { ath11k_warn(ab, "qmi failed to load caldata\n"); goto out_qmi_cal; } ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type); out_qmi_cal: if (!ab->qmi.target.eeprom_caldata) release_firmware(fw_entry); out: ath11k_core_free_bdf(ab, &bd); ath11k_dbg(ab, ATH11K_DBG_QMI, "BDF download sequence completed\n"); return ret; } static int ath11k_qmi_m3_load(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; const struct firmware *fw = NULL; const void *m3_data; char path[100]; size_t m3_len; int ret;
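/*
 * Illustrative sketch (not part of the driver source): ath11k_qmi_m3_load()
 * below takes the M3 image either from the combined firmware-N.bin or from a
 * standalone m3.bin, then stages it in a DMA-coherent buffer whose bus
 * address is later handed to the target in the M3 info request. The staging
 * step reduces to this pattern; stage_blob_for_device() is a hypothetical
 * name, not a driver or kernel function:
 *
 *	static int stage_blob_for_device(struct device *dev, const void *blob,
 *					 size_t len, void **vaddr,
 *					 dma_addr_t *paddr)
 *	{
 *		// coherent mapping: no cache maintenance needed before the
 *		// device reads the image through *paddr
 *		*vaddr = dma_alloc_coherent(dev, len, paddr, GFP_KERNEL);
 *		if (!*vaddr)
 *			return -ENOMEM;
 *		memcpy(*vaddr, blob, len);
 *		return 0;
 *	}
 *
 * The driver keeps this buffer across firmware restarts (hence the early
 * return below when m3_mem->vaddr is already set) and frees it only in
 * ath11k_qmi_m3_free().
 */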
if (m3_mem->vaddr) /* m3 firmware buffer is already available in the DMA buffer */ return 0; if (ab->fw.m3_data && ab->fw.m3_len > 0) { /* firmware-N.bin had an m3 firmware file so use that */ m3_data = ab->fw.m3_data; m3_len = ab->fw.m3_len; } else { /* No m3 file in firmware-N.bin so try to request the old * separate m3.bin. */ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE); if (IS_ERR(fw)) { ret = PTR_ERR(fw); ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE, path, sizeof(path)); ath11k_err(ab, "failed to load %s: %d\n", path, ret); return ret; } m3_data = fw->data; m3_len = fw->size; } m3_mem->vaddr = dma_alloc_coherent(ab->dev, m3_len, &m3_mem->paddr, GFP_KERNEL); if (!m3_mem->vaddr) { /* report m3_len here: fw is NULL when the image came from firmware-N.bin */ ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n", m3_len); ret = -ENOMEM; goto out; } memcpy(m3_mem->vaddr, m3_data, m3_len); m3_mem->size = m3_len; ret = 0; out: release_firmware(fw); return ret; } static void ath11k_qmi_m3_free(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr) return; dma_free_coherent(ab->dev, m3_mem->size, m3_mem->vaddr, m3_mem->paddr); m3_mem->vaddr = NULL; m3_mem->size = 0; } /* clang stack usage explodes if this is inlined */ static noinline_for_stack int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) { struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; struct qmi_wlanfw_m3_info_req_msg_v01 req; struct qmi_wlanfw_m3_info_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); if (ab->hw_params.m3_fw_support) { ret = ath11k_qmi_m3_load(ab); if (ret) { ath11k_err(ab, "failed to load m3 firmware: %d\n", ret); return ret; } req.addr = m3_mem->paddr; req.size = m3_mem->size; } else { req.addr = 0; req.size = 0; } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "m3 info req\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_M3_INFO_REQ_V01, QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, qmi_wlanfw_m3_info_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send m3 information request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "m3 info request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: return ret; } static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, u32 mode) { struct qmi_wlanfw_wlan_mode_req_msg_v01 req; struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; struct qmi_txn txn; int ret = 0; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); req.mode = mode; req.hw_debug_valid = 1; req.hw_debug = 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan mode req mode %d\n", mode); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_MODE_REQ_V01, QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n", mode, ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) { ath11k_warn(ab, "WLFW service is
dis-connected\n"); return 0; } ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n", mode, ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n", mode, resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: return ret; } static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) { struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; struct ce_pipe_config *ce_cfg; struct service_to_pipe *svc_cfg; struct qmi_txn txn; int ret = 0, pipe_num; ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; memset(&resp, 0, sizeof(resp)); req->host_version_valid = 1; strscpy(req->host_version, ATH11K_HOST_VERSION_STRING, sizeof(req->host_version)); req->tgt_cfg_valid = 1; /* This is number of CE configs */ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len; for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) { req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum; req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir; req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries; req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max; req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags; } req->svc_cfg_valid = 1; /* This is number of Service/CE configs */ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len; for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) { req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id; req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir; req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum; } req->shadow_reg_valid = 0; /* set shadow v2 configuration */ if (ab->hw_params.supports_shadow_regs) { req->shadow_reg_v2_valid = 1; req->shadow_reg_v2_len = min_t(u32, ab->qmi.ce_cfg.shadow_reg_v2_len, QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01); memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2, sizeof(u32) * req->shadow_reg_v2_len); } else { req->shadow_reg_v2_valid = 0; } ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan cfg req\n"); ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_CFG_REQ_V01, QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); if (ret < 0) { qmi_txn_cancel(&txn); ath11k_warn(ab, "failed to send wlan config request: %d\n", ret); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "wlan config request failed: %d %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; goto out; } out: kfree(req); return ret; } static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable) { int ret; struct qmi_txn txn; struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {}; req.enablefwlog_valid = true; req.enablefwlog = enable ? 
1 : 0; ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp); if (ret < 0) goto out; ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, QMI_WLANFW_WLAN_INI_REQ_V01, QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN, qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n", ret); qmi_txn_cancel(&txn); goto out; } ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS)); if (ret < 0) { ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret); goto out; } if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n", resp.resp.result, resp.resp.error); ret = -EINVAL; } out: return ret; } void ath11k_qmi_firmware_stop(struct ath11k_base *ab) { int ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware stop\n"); ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret); return; } } int ath11k_qmi_firmware_start(struct ath11k_base *ab, u32 mode) { int ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware start\n"); if (ab->hw_params.fw_wmi_diag_event) { ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret); return ret; } } ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret); return ret; } ret = ath11k_qmi_wlanfw_mode_send(ab, mode); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } return 0; } int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) { long time_left; if (!ath11k_core_coldboot_cal_support(ab) || ab->hw_params.cbcal_restart_fw == 0) return 0; ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n"); time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (time_left <= 0) { ath11k_warn(ab, "Coldboot Calibration timed out\n"); return -ETIMEDOUT; } /* reset the firmware */ ath11k_hif_power_down(ab, false); ath11k_hif_power_up(ab); ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n"); return 0; } EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot); static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) { long time_left; int ret; ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); if (ret < 0) { ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n"); time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, (ab->qmi.cal_done == 1), ATH11K_COLD_BOOT_FW_RESET_DELAY); if (time_left <= 0) { ath11k_warn(ab, "coldboot calibration timed out\n"); return 0; } ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n"); return 0; } static int ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, enum ath11k_qmi_event_type type, void *data) { struct ath11k_qmi_driver_event *event; event = kzalloc(sizeof(*event), GFP_ATOMIC); if (!event) return -ENOMEM; event->type = type; event->data = data; spin_lock(&qmi->event_lock); list_add_tail(&event->list, &qmi->event_list); spin_unlock(&qmi->event_lock); queue_work(qmi->event_wq, &qmi->event_work); return 0; } static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_respond_fw_mem_request(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret); 
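/*
 * Illustrative sketch (not part of the driver source): every QMI sender in
 * this file (indication register, respond_mem, target cap, BDF download,
 * M3 info, wlan mode/cfg/ini) follows the same four-step transaction shape
 * from <linux/soc/qcom/qmi.h>. Condensed, with req/resp, msg_id, max_len and
 * the ei tables standing in for the message-specific values:
 *
 *	ret = qmi_txn_init(&ab->qmi.handle, &txn, resp_ei, &resp);
 *	if (ret < 0)
 *		goto out;
 *	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, msg_id, max_len,
 *			       req_ei, &req);
 *	if (ret < 0) {
 *		qmi_txn_cancel(&txn);	// release the txn on send failure
 *		goto out;
 *	}
 *	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
 *	if (ret < 0)
 *		goto out;		// timed out or transport went away
 *	if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
 *		ret = -EINVAL;		// firmware rejected the request
 *
 * qmi_txn_cancel() is only needed when the send itself fails;
 * qmi_txn_wait() removes the transaction whether it completes or times out.
 */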
return ret; } return ret; } static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_request_target_cap(ab); if (ret < 0) { ath11k_warn(ab, "failed to request qmi target capabilities: %d\n", ret); return ret; } ret = ath11k_qmi_request_device_info(ab); if (ret < 0) { ath11k_warn(ab, "failed to request qmi device info: %d\n", ret); return ret; } if (ab->hw_params.supports_regdb) ath11k_qmi_load_bdf_qmi(ab, true); ret = ath11k_qmi_load_bdf_qmi(ab, false); if (ret < 0) { ath11k_warn(ab, "failed to load board data file: %d\n", ret); return ret; } return 0; } static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; ret = ath11k_qmi_fw_ind_register_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi firmware indication: %d\n", ret); return ret; } ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret); return ret; } if (!ab->hw_params.fixed_fw_mem) return ret; ret = ath11k_qmi_event_load_bdf(qmi); if (ret < 0) { ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret); return ret; } return ret; } static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *data) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data; int i, ret; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware request memory request\n"); if (msg->mem_seg_len == 0 || msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) ath11k_warn(ab, "invalid memory segment length: %u\n", msg->mem_seg_len); ab->qmi.mem_seg_count = msg->mem_seg_len; for (i = 0; i < qmi->mem_seg_count ; i++) { ab->qmi.target_mem[i].type = msg->mem_seg[i].type; ab->qmi.target_mem[i].size = msg->mem_seg[i].size; ath11k_dbg(ab, ATH11K_DBG_QMI, "mem seg type %d size %d\n", msg->mem_seg[i].type, msg->mem_seg[i].size); } if (ab->hw_params.fixed_mem_region || test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { ret = ath11k_qmi_assign_target_mem_chunk(ab); if (ret) { ath11k_warn(ab, "failed to assign qmi target memory: %d\n", ret); return; } } else { ret = ath11k_qmi_alloc_target_mem_chunk(ab); if (ret) { ath11k_warn(ab, "failed to allocate qmi target memory: %d\n", ret); return; } } ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL); } static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware memory ready indication\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL); } static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware ready\n"); if (!ab->qmi.cal_done) { ab->qmi.cal_done = 1; wake_up(&ab->qmi.cold_boot_waitq); } ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL); } static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct 
ath11k_base *ab = qmi->ab; ab->qmi.cal_done = 1; wake_up(&ab->qmi.cold_boot_waitq); ath11k_dbg(ab, ATH11K_DBG_QMI, "cold boot calibration done\n"); } static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL); ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n"); } static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = { { .type = QMI_INDICATION, .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01, .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01), .fn = ath11k_qmi_msg_mem_request_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01, .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01), .fn = ath11k_qmi_msg_mem_ready_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_READY_IND_V01, .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01), .fn = ath11k_qmi_msg_fw_ready_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01, .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01), .fn = ath11k_qmi_msg_cold_boot_cal_done_cb, }, { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01, .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei, .decoded_size = sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01), .fn = ath11k_qmi_msg_fw_init_done_cb, }, /* end of list */ {}, }; static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, struct qmi_service *service) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; struct sockaddr_qrtr *sq = &qmi->sq; int ret; sq->sq_family = AF_QIPCRTR; sq->sq_node = service->node; sq->sq_port = service->port; ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, sizeof(*sq), 0); if (ret) { ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw qmi service connected\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL); return ret; } static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl, struct qmi_service *service) { struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); struct ath11k_base *ab = qmi->ab; ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw del server\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL); } static const struct qmi_ops ath11k_qmi_ops = { .new_server = ath11k_qmi_ops_new_server, .del_server = ath11k_qmi_ops_del_server, }; static void ath11k_qmi_driver_event_work(struct work_struct *work) { struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi, event_work); struct ath11k_qmi_driver_event *event; struct ath11k_base *ab = qmi->ab; int ret; spin_lock(&qmi->event_lock); while (!list_empty(&qmi->event_list)) { event = list_first_entry(&qmi->event_list, struct ath11k_qmi_driver_event, list); list_del(&event->list); spin_unlock(&qmi->event_lock); if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) { kfree(event); return; } switch (event->type) { case ATH11K_QMI_EVENT_SERVER_ARRIVE: ret = ath11k_qmi_event_server_arrive(qmi); if (ret < 0) set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case 
ATH11K_QMI_EVENT_SERVER_EXIT: set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); if (!ab->is_reset) ath11k_core_pre_reconfigure_recovery(ab); break; case ATH11K_QMI_EVENT_REQUEST_MEM: ret = ath11k_qmi_event_mem_request(qmi); if (ret < 0) set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case ATH11K_QMI_EVENT_FW_MEM_READY: ret = ath11k_qmi_event_load_bdf(qmi); if (ret < 0) { set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; } ret = ath11k_qmi_wlanfw_m3_info_send(ab); if (ret < 0) { ath11k_warn(ab, "failed to send qmi m3 info req: %d\n", ret); set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); } break; case ATH11K_QMI_EVENT_FW_INIT_DONE: clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) { if (ab->is_reset) ath11k_hal_dump_srng_stats(ab); queue_work(ab->workqueue, &ab->restart_work); break; } if (ab->qmi.cal_done == 0 && ath11k_core_coldboot_cal_support(ab)) { ath11k_qmi_process_coldboot_calibration(ab); } else { clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); ret = ath11k_core_qmi_firmware_ready(ab); if (ret) { set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; } set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); } break; case ATH11K_QMI_EVENT_FW_READY: /* For targets requiring a FW restart upon cold * boot completion, there is no need to process * FW ready; such targets will receive FW init * done message after FW restart. */ if (ab->hw_params.cbcal_restart_fw) break; clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); ath11k_core_qmi_firmware_ready(ab); set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); break; case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE: break; default: ath11k_warn(ab, "invalid qmi event type: %d", event->type); break; } kfree(event); spin_lock(&qmi->event_lock); } spin_unlock(&qmi->event_lock); } int ath11k_qmi_init_service(struct ath11k_base *ab) { int ret; memset(&ab->qmi.target, 0, sizeof(struct target_info)); memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk)); ab->qmi.ab = ab; ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode; ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX, &ath11k_qmi_ops, ath11k_qmi_msg_handlers); if (ret < 0) { ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret); return ret; } ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0); if (!ab->qmi.event_wq) { ath11k_err(ab, "failed to allocate workqueue\n"); return -EFAULT; } INIT_LIST_HEAD(&ab->qmi.event_list); spin_lock_init(&ab->qmi.event_lock); INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work); ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01, ATH11K_QMI_WLFW_SERVICE_VERS_V01, ab->qmi.service_ins_id); if (ret < 0) { ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret); destroy_workqueue(ab->qmi.event_wq); return ret; } return ret; } void ath11k_qmi_deinit_service(struct ath11k_base *ab) { qmi_handle_release(&ab->qmi.handle); cancel_work_sync(&ab->qmi.event_work); destroy_workqueue(ab->qmi.event_wq); ath11k_qmi_m3_free(ab); ath11k_qmi_free_target_mem_chunk(ab); } EXPORT_SYMBOL(ath11k_qmi_deinit_service); void ath11k_qmi_free_resource(struct ath11k_base *ab) { ath11k_qmi_free_target_mem_chunk(ab); ath11k_qmi_m3_free(ab); } diff --git a/spectral.c b/spectral.c index 79e091134515..b6b0516819a6 100644 --- a/spectral.c +++ b/spectral.c @@ -1,1055 +1,1056 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * 
Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ #include #include "core.h" #include "debug.h" #define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT 2 #define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1 #define ATH11K_SPECTRAL_DWORD_SIZE 4 #define ATH11K_SPECTRAL_MIN_BINS 32 #define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1) #define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1) #define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095 /* Max channel computed by sum of 2g and 5g band channels */ #define ATH11K_SPECTRAL_TOTAL_CHANNEL 41 #define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70 #define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \ ATH11K_SPECTRAL_MAX_IB_BINS(x)) #define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \ ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL) #define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) #define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE #define ATH11K_SPECTRAL_20MHZ 20 #define ATH11K_SPECTRAL_40MHZ 40 #define ATH11K_SPECTRAL_80MHZ 80 #define ATH11K_SPECTRAL_160MHZ 160 #define ATH11K_SPECTRAL_SIGNATURE 0xFA #define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY 0x0 #define ATH11K_SPECTRAL_TAG_RADAR_FFT 0x1 #define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY 0x2 #define ATH11K_SPECTRAL_TAG_SCAN_SEARCH 0x3 #define SPECTRAL_TLV_HDR_LEN GENMASK(15, 0) #define SPECTRAL_TLV_HDR_TAG GENMASK(23, 16) #define SPECTRAL_TLV_HDR_SIGN GENMASK(31, 24) #define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN GENMASK(7, 0) #define SPECTRAL_SUMMARY_INFO0_OB_FLAG BIT(8) #define SPECTRAL_SUMMARY_INFO0_GRP_IDX GENMASK(16, 9) #define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT BIT(17) #define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB GENMASK(27, 18) #define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN BIT(28) #define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID GENMASK(30, 29) #define SPECTRAL_SUMMARY_INFO0_PRI80 BIT(31) #define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX GENMASK(11, 0) #define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE GENMASK(21, 12) #define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK GENMASK(29, 22) #define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE BIT(30) struct spectral_tlv { __le32 timestamp; __le32 header; } __packed; struct spectral_summary_fft_report { __le32 timestamp; __le32 tlv_header; __le32 info0; __le32 reserve0; __le32 info2; __le32 reserve1; } __packed; struct ath11k_spectral_summary_report { struct wmi_dma_buf_release_meta_data meta; u32 timestamp; u8 agc_total_gain; u8 grp_idx; u16 inb_pwr_db; s16 peak_idx; u16 peak_mag; u8 detector_id; bool out_of_band_flag; bool rf_saturation; bool primary80; bool gain_change; bool false_scan; }; #define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID GENMASK(1, 0) #define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM GENMASK(4, 2) #define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK GENMASK(16, 5) #define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX GENMASK(27, 17) #define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX GENMASK(30, 28) #define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB GENMASK(8, 0) #define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB GENMASK(16, 9) #define SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS GENMASK(7, 0) #define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE GENMASK(17, 8) #define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB GENMASK(24, 18) #define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB GENMASK(31, 25) struct spectral_search_fft_report { __le32 timestamp; __le32 tlv_header; __le32 info0; 
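/*
 * Note (added for clarity): info0/info1/info2 are packed little-endian
 * words; the SPECTRAL_FFT_REPORT_INFO* GENMASK()s above name the bit ranges,
 * and ath11k_spectral_pull_search() below recovers each field with
 * FIELD_GET() from <linux/bitfield.h> after byte-order conversion, e.g.:
 *
 *	u32 info0 = __le32_to_cpu(search->info0);
 *	u8 detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID,
 *				   info0);	// bits 1:0
 *	u8 chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX,
 *				 info0);	// bits 30:28
 *
 * FIELD_GET() masks and then shifts by the mask's lowest set bit, so no
 * separate shift constants are needed.
 */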
__le32 info1; __le32 info2; __le32 reserve0; u8 bins[]; } __packed; struct ath11k_spectral_search_report { u32 timestamp; u8 detector_id; u8 fft_count; u16 radar_check; s16 peak_idx; u8 chain_idx; u16 base_pwr_db; u8 total_gain_db; u8 strong_bin_count; u16 peak_mag; u8 avg_pwr_db; u8 rel_pwr_db; }; static struct dentry *create_buf_file_handler(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global) { struct dentry *buf_file; buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); *is_global = 1; return buf_file; } static int remove_buf_file_handler(struct dentry *dentry) { debugfs_remove(dentry); return 0; } static const struct rchan_callbacks rfs_scan_cb = { .create_buf_file = create_buf_file_handler, .remove_buf_file = remove_buf_file_handler, }; static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar) { struct ath11k_vif *arvif; lockdep_assert_held(&ar->conf_mutex); if (list_empty(&ar->arvifs)) return NULL; /* if there already is a vif doing spectral, return that. */ list_for_each_entry(arvif, &ar->arvifs, list) if (arvif->spectral_enabled) return arvif; /* otherwise, return the first vif. */ return list_first_entry(&ar->arvifs, typeof(*arvif), list); } static int ath11k_spectral_scan_trigger(struct ath11k *ar) { struct ath11k_vif *arvif; int ret; lockdep_assert_held(&ar->conf_mutex); arvif = ath11k_spectral_get_vdev(ar); if (!arvif) return -ENODEV; if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED) return 0; ar->spectral.is_primary = true; ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE); if (ret) return ret; ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER, ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE); if (ret) return ret; return 0; } static int ath11k_spectral_scan_config(struct ath11k *ar, enum ath11k_spectral_mode mode) { - struct ath11k_wmi_vdev_spectral_conf_param param = { 0 }; + struct ath11k_wmi_vdev_spectral_conf_param param = {}; struct ath11k_vif *arvif; int ret, count; lockdep_assert_held(&ar->conf_mutex); arvif = ath11k_spectral_get_vdev(ar); if (!arvif) return -ENODEV; arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED); spin_lock_bh(&ar->spectral.lock); ar->spectral.mode = mode; spin_unlock_bh(&ar->spectral.lock); ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE); if (ret) { ath11k_warn(ar->ab, "failed to enable spectral scan: %d\n", ret); return ret; } if (mode == ATH11K_SPECTRAL_DISABLED) return 0; if (mode == ATH11K_SPECTRAL_BACKGROUND) count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT; else count = max_t(u16, 1, ar->spectral.count); param.vdev_id = arvif->vdev_id; param.scan_count = count; param.scan_fft_size = ar->spectral.fft_size; param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT; param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT; param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT; param.scan_restart_ena = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT; param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT; param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT; param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT; param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT; param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT; 
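/*
 * Note (added for clarity): apart from vdev_id, scan_count and
 * scan_fft_size, which come from the spectral_count and spectral_bins
 * debugfs knobs below, every field of the WMI spectral configuration is
 * filled with its ATH11K_WMI_SPECTRAL_*_DEFAULT value, and the whole set is
 * sent to firmware in a single ath11k_wmi_vdev_spectral_conf() call.
 */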
param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT; param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT; param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT; param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT; param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT; param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT; ret = ath11k_wmi_vdev_spectral_conf(ar, &param); if (ret) { ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret); return ret; } return 0; } static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char *mode = ""; size_t len; enum ath11k_spectral_mode spectral_mode; mutex_lock(&ar->conf_mutex); spectral_mode = ar->spectral.mode; mutex_unlock(&ar->conf_mutex); switch (spectral_mode) { case ATH11K_SPECTRAL_DISABLED: mode = "disable"; break; case ATH11K_SPECTRAL_BACKGROUND: mode = "background"; break; case ATH11K_SPECTRAL_MANUAL: mode = "manual"; break; } len = strlen(mode); return simple_read_from_buffer(user_buf, count, ppos, mode, len); } static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; ssize_t len; int ret; len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) return -EFAULT; buf[len] = '\0'; mutex_lock(&ar->conf_mutex); if (strncmp("trigger", buf, 7) == 0) { if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL || ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) { /* reset the configuration to adopt possibly changed * debugfs parameters */ ret = ath11k_spectral_scan_config(ar, ar->spectral.mode); if (ret) { ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n", ret); goto unlock; } ret = ath11k_spectral_scan_trigger(ar); if (ret) { ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n", ret); } } else { ret = -EINVAL; } } else if (strncmp("background", buf, 10) == 0) { ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND); } else if (strncmp("manual", buf, 6) == 0) { ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL); } else if (strncmp("disable", buf, 7) == 0) { ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED); } else { ret = -EINVAL; } unlock: mutex_unlock(&ar->conf_mutex); if (ret) return ret; return count; } static const struct file_operations fops_scan_ctl = { .read = ath11k_read_file_spec_scan_ctl, .write = ath11k_write_file_spec_scan_ctl, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_read_file_spectral_count(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; size_t len; u16 spectral_count; mutex_lock(&ar->conf_mutex); spectral_count = ar->spectral.count; mutex_unlock(&ar->conf_mutex); len = sprintf(buf, "%d\n", spectral_count); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath11k_write_file_spectral_count(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; unsigned long val; ssize_t ret; ret = kstrtoul_from_user(user_buf, count, 0, &val); if (ret) return ret; if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->spectral.count = val; mutex_unlock(&ar->conf_mutex); return count; } static const struct file_operations fops_scan_count = { .read =
ath11k_read_file_spectral_count, .write = ath11k_write_file_spectral_count, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static ssize_t ath11k_read_file_spectral_bins(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; char buf[32]; unsigned int bins, fft_size; size_t len; mutex_lock(&ar->conf_mutex); fft_size = ar->spectral.fft_size; bins = 1 << fft_size; mutex_unlock(&ar->conf_mutex); len = sprintf(buf, "%d\n", bins); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t ath11k_write_file_spectral_bins(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k *ar = file->private_data; unsigned long val; ssize_t ret; ret = kstrtoul_from_user(user_buf, count, 0, &val); if (ret) return ret; if (val < ATH11K_SPECTRAL_MIN_BINS || val > ar->ab->hw_params.spectral.max_fft_bins) return -EINVAL; if (!is_power_of_2(val)) return -EINVAL; mutex_lock(&ar->conf_mutex); ar->spectral.fft_size = ilog2(val); mutex_unlock(&ar->conf_mutex); return count; } static const struct file_operations fops_scan_bins = { .read = ath11k_read_file_spectral_bins, .write = ath11k_write_file_spectral_bins, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, }; static int ath11k_spectral_pull_summary(struct ath11k *ar, struct wmi_dma_buf_release_meta_data *meta, struct spectral_summary_fft_report *summary, struct ath11k_spectral_summary_report *report) { report->timestamp = __le32_to_cpu(summary->timestamp); report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN, __le32_to_cpu(summary->info0)); report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG, __le32_to_cpu(summary->info0)); report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX, __le32_to_cpu(summary->info0)); report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT, __le32_to_cpu(summary->info0)); report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB, __le32_to_cpu(summary->info0)); report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN, __le32_to_cpu(summary->info0)); report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID, __le32_to_cpu(summary->info0)); report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80, __le32_to_cpu(summary->info0)); report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX, __le32_to_cpu(summary->info2)); report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE, __le32_to_cpu(summary->info2)); report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE, __le32_to_cpu(summary->info2)); memcpy(&report->meta, meta, sizeof(*meta)); return 0; } static int ath11k_spectral_pull_search(struct ath11k *ar, struct spectral_search_fft_report *search, struct ath11k_spectral_search_report *report) { report->timestamp = __le32_to_cpu(search->timestamp); report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID, __le32_to_cpu(search->info0)); report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM, __le32_to_cpu(search->info0)); report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK, __le32_to_cpu(search->info0)); report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX, __le32_to_cpu(search->info0)); report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX, __le32_to_cpu(search->info0)); report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB, __le32_to_cpu(search->info1)); report->total_gain_db = 
FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB, __le32_to_cpu(search->info1)); report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS, __le32_to_cpu(search->info2)); report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE, __le32_to_cpu(search->info2)); report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB, __le32_to_cpu(search->info2)); report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB, __le32_to_cpu(search->info2)); return 0; } static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude, int bin_len, u8 *bins) { int dc_pos; u8 max_exp; dc_pos = bin_len / 2; /* peak index outside of bins */ if (dc_pos <= max_index || -dc_pos >= max_index) return 0; for (max_exp = 0; max_exp < 8; max_exp++) { if (bins[dc_pos + max_index] == (max_magnitude >> max_exp)) break; } /* max_exp not found */ if (bins[dc_pos + max_index] != (max_magnitude >> max_exp)) return 0; return max_exp; } static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz) { int i, j; i = 0; j = 0; while (i < num_bins) { outbins[i] = inbins[j]; i++; j += fft_sz; } } static int ath11k_spectral_process_fft(struct ath11k *ar, struct ath11k_spectral_summary_report *summary, void *data, struct fft_sample_ath11k *fft_sample, u32 data_len) { struct ath11k_base *ab = ar->ab; struct spectral_search_fft_report *fft_report = data; struct ath11k_spectral_search_report search; struct spectral_tlv *tlv; int tlv_len, bin_len, num_bins; u16 length, freq; u8 chan_width_mhz, bin_sz; int ret; u32 check_length; bool fragment_sample = false; lockdep_assert_held(&ar->spectral.lock); if (!ab->hw_params.spectral.fft_sz) { ath11k_warn(ab, "invalid bin size type for hw rev %d\n", ab->hw_rev); return -EINVAL; } tlv = data; tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); /* convert Dword into bytes */ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len; if (data_len < (bin_len + sizeof(*fft_report))) { ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n", bin_len, data_len); return -EINVAL; } bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz; num_bins = bin_len / bin_sz; /* Only In-band bins are useful to user for visualize */ num_bins >>= 1; if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS || num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) || !is_power_of_2(num_bins)) { ath11k_warn(ab, "Invalid num of bins %d\n", num_bins); return -EINVAL; } check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz); ret = ath11k_dbring_validate_buffer(ar, data, check_length); if (ret) { ath11k_warn(ar->ab, "found magic value in fft data, dropping\n"); return ret; } ret = ath11k_spectral_pull_search(ar, data, &search); if (ret) { ath11k_warn(ab, "failed to pull search report %d\n", ret); return ret; } chan_width_mhz = summary->meta.ch_width; switch (chan_width_mhz) { case ATH11K_SPECTRAL_20MHZ: case ATH11K_SPECTRAL_40MHZ: case ATH11K_SPECTRAL_80MHZ: fft_sample->chan_width_mhz = chan_width_mhz; break; case ATH11K_SPECTRAL_160MHZ: if (ab->hw_params.spectral.fragment_160mhz) { chan_width_mhz /= 2; fragment_sample = true; } fft_sample->chan_width_mhz = chan_width_mhz; break; default: ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz); return -EINVAL; } length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins; fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K; fft_sample->tlv.length = __cpu_to_be16(length); fft_sample->tsf = 
__cpu_to_be32(search.timestamp); fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag); fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX, __le32_to_cpu(fft_report->info0)); summary->inb_pwr_db >>= 1; fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db); fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]); freq = summary->meta.freq1; fft_sample->freq1 = __cpu_to_be16(freq); freq = summary->meta.freq2; fft_sample->freq2 = __cpu_to_be16(freq); /* If freq2 is available then the spectral scan results are fragmented * as primary and secondary */ if (fragment_sample && freq) { if (!ar->spectral.is_primary) fft_sample->freq1 = cpu_to_be16(freq); /* We have to toggle the is_primary to handle the next report */ ar->spectral.is_primary = !ar->spectral.is_primary; } ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins, ab->hw_params.spectral.fft_sz); fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index, search.peak_mag, num_bins, fft_sample->data); if (ar->spectral.rfs_scan) relay_write(ar->spectral.rfs_scan, fft_sample, length + sizeof(struct fft_sample_tlv)); return 0; } static int ath11k_spectral_process_data(struct ath11k *ar, struct ath11k_dbring_data *param) { struct ath11k_base *ab = ar->ab; struct spectral_tlv *tlv; struct spectral_summary_fft_report *summary = NULL; struct ath11k_spectral_summary_report summ_rpt; struct fft_sample_ath11k *fft_sample = NULL; u8 *data; u32 data_len, i; u8 sign, tag; int tlv_len, sample_sz; int ret; bool quit = false; spin_lock_bh(&ar->spectral.lock); if (!ar->spectral.enabled) { ret = -EINVAL; goto unlock; } sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab); fft_sample = kmalloc(sample_sz, GFP_ATOMIC); if (!fft_sample) { ret = -ENOBUFS; goto unlock; } data = param->data; data_len = param->data_sz; i = 0; while (!quit && (i < data_len)) { if ((i + sizeof(*tlv)) > data_len) { ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n", i); ret = -EINVAL; goto err; } tlv = (struct spectral_tlv *)&data[i]; sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN, __le32_to_cpu(tlv->header)); if (sign != ATH11K_SPECTRAL_SIGNATURE) { ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n", sign, i); ret = -EINVAL; goto err; } tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); /* convert Dword into bytes */ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; if ((i + sizeof(*tlv) + tlv_len) > data_len) { ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n", i, tlv_len, data_len); ret = -EINVAL; goto err; } tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG, __le32_to_cpu(tlv->header)); switch (tag) { case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY: /* HW bug in tlv length of summary report, * HW report 3 DWORD size but the data payload * is 4 DWORD size (16 bytes). 
* Need to remove this workaround once HW bug fixed */ tlv_len = sizeof(*summary) - sizeof(*tlv) + ab->hw_params.spectral.summary_pad_sz; if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) { ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n", i, tlv_len); ret = -EINVAL; goto err; } ret = ath11k_dbring_validate_buffer(ar, data, tlv_len); if (ret) { ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n"); goto err; } summary = (struct spectral_summary_fft_report *)tlv; ath11k_spectral_pull_summary(ar, &param->meta, summary, &summ_rpt); break; case ATH11K_SPECTRAL_TAG_SCAN_SEARCH: if (tlv_len < (sizeof(struct spectral_search_fft_report) - sizeof(*tlv))) { ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n", i); ret = -EINVAL; goto err; } memset(fft_sample, 0, sample_sz); ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv, fft_sample, data_len - i); if (ret) { ath11k_warn(ab, "failed to process spectral fft at bytes %d\n", i); goto err; } quit = true; break; } i += sizeof(*tlv) + tlv_len; } ret = 0; err: kfree(fft_sample); unlock: spin_unlock_bh(&ar->spectral.lock); return ret; } static int ath11k_spectral_ring_alloc(struct ath11k *ar, struct ath11k_dbring_cap *db_cap) { struct ath11k_spectral *sp = &ar->spectral; int ret; ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring, 0, db_cap->min_elem); if (ret) { ath11k_warn(ar->ab, "failed to setup db ring\n"); return ret; } ath11k_dbring_set_cfg(ar, &sp->rx_ring, ATH11K_SPECTRAL_NUM_RESP_PER_EVENT, ATH11K_SPECTRAL_EVENT_TIMEOUT_MS, ath11k_spectral_process_data); ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap); if (ret) { ath11k_warn(ar->ab, "failed to setup db ring buffer\n"); goto srng_cleanup; } ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring, WMI_DIRECT_BUF_SPECTRAL); if (ret) { ath11k_warn(ar->ab, "failed to setup db ring cfg\n"); goto buffer_cleanup; } return 0; buffer_cleanup: ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); srng_cleanup: ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); return ret; } static inline void ath11k_spectral_ring_free(struct ath11k *ar) { struct ath11k_spectral *sp = &ar->spectral; ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); } static inline void ath11k_spectral_debug_unregister(struct ath11k *ar) { debugfs_remove(ar->spectral.scan_bins); ar->spectral.scan_bins = NULL; debugfs_remove(ar->spectral.scan_count); ar->spectral.scan_count = NULL; debugfs_remove(ar->spectral.scan_ctl); ar->spectral.scan_ctl = NULL; if (ar->spectral.rfs_scan) { relay_close(ar->spectral.rfs_scan); ar->spectral.rfs_scan = NULL; } } int ath11k_spectral_vif_stop(struct ath11k_vif *arvif) { if (!arvif->spectral_enabled) return 0; return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED); } void ath11k_spectral_reset_buffer(struct ath11k *ar) { if (!ar->spectral.enabled) return; if (ar->spectral.rfs_scan) relay_reset(ar->spectral.rfs_scan); } void ath11k_spectral_deinit(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_spectral *sp; int i; for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; sp = &ar->spectral; if (!sp->enabled) continue; mutex_lock(&ar->conf_mutex); ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED); mutex_unlock(&ar->conf_mutex); spin_lock_bh(&sp->lock); sp->enabled = false; spin_unlock_bh(&sp->lock); ath11k_spectral_debug_unregister(ar); ath11k_spectral_ring_free(ar); } } static inline int ath11k_spectral_debug_register(struct ath11k *ar) { int ret; ar->spectral.rfs_scan =
relay_open("spectral_scan", ar->debug.debugfs_pdev, ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab), ATH11K_SPECTRAL_NUM_SUB_BUF, &rfs_scan_cb, NULL); if (!ar->spectral.rfs_scan) { ath11k_warn(ar->ab, "failed to open relay in pdev %d\n", ar->pdev_idx); return -EINVAL; } ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl", 0600, ar->debug.debugfs_pdev, ar, &fops_scan_ctl); if (!ar->spectral.scan_ctl) { ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", ar->pdev_idx); ret = -EINVAL; goto debug_unregister; } ar->spectral.scan_count = debugfs_create_file("spectral_count", 0600, ar->debug.debugfs_pdev, ar, &fops_scan_count); if (!ar->spectral.scan_count) { ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", ar->pdev_idx); ret = -EINVAL; goto debug_unregister; } ar->spectral.scan_bins = debugfs_create_file("spectral_bins", 0600, ar->debug.debugfs_pdev, ar, &fops_scan_bins); if (!ar->spectral.scan_bins) { ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", ar->pdev_idx); ret = -EINVAL; goto debug_unregister; } return 0; debug_unregister: ath11k_spectral_debug_unregister(ar); return ret; } int ath11k_spectral_init(struct ath11k_base *ab) { struct ath11k *ar; struct ath11k_spectral *sp; struct ath11k_dbring_cap db_cap; int ret; int i; if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA, ab->wmi_ab.svc_map)) return 0; if (!ab->hw_params.spectral.fft_sz) return 0; for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; sp = &ar->spectral; ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx, WMI_DIRECT_BUF_SPECTRAL, &db_cap); if (ret) continue; idr_init(&sp->rx_ring.bufs_idr); spin_lock_init(&sp->rx_ring.idr_lock); spin_lock_init(&sp->lock); ret = ath11k_spectral_ring_alloc(ar, &db_cap); if (ret) { ath11k_warn(ab, "failed to init spectral ring for pdev %d\n", i); goto deinit; } spin_lock_bh(&sp->lock); sp->mode = ATH11K_SPECTRAL_DISABLED; sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT; sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT; sp->enabled = true; spin_unlock_bh(&sp->lock); ret = ath11k_spectral_debug_register(ar); if (ret) { ath11k_warn(ab, "failed to register spectral for pdev %d\n", i); goto deinit; } } return 0; deinit: ath11k_spectral_deinit(ab); return ret; } enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar) { if (ar->spectral.enabled) return ar->spectral.mode; else return ATH11K_SPECTRAL_DISABLED; } struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar) { if (ar->spectral.enabled) return &ar->spectral.rx_ring; else return NULL; } diff --git a/trace.c b/trace.c index 6620650d7845..44ff8e9eff5d 100644 --- a/trace.c +++ b/trace.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */ +#include #include #define CREATE_TRACE_POINTS #include "trace.h" EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg); diff --git a/wmi.c b/wmi.c index 56af2e9634f4..0491e3fd6b5e 100644 --- a/wmi.c +++ b/wmi.c @@ -1,9900 +1,9900 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include "core.h" #include "debug.h" #include "mac.h" #include "hw.h" #include "peer.h" #include "testmode.h" #include "p2p.h" struct wmi_tlv_policy { size_t min_len; }; struct wmi_tlv_svc_ready_parse { bool wmi_svc_bitmap_done; }; struct wmi_tlv_dma_ring_caps_parse { struct wmi_dma_ring_capabilities *dma_ring_caps; u32 n_dma_ring_caps; }; struct wmi_tlv_svc_rdy_ext_parse { struct ath11k_service_ext_param param; struct wmi_soc_mac_phy_hw_mode_caps *hw_caps; struct wmi_hw_mode_capabilities *hw_mode_caps; u32 n_hw_mode_caps; u32 tot_phy_id; struct wmi_hw_mode_capabilities pref_hw_mode_caps; struct wmi_mac_phy_capabilities *mac_phy_caps; u32 n_mac_phy_caps; struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps; struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps; u32 n_ext_hal_reg_caps; struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; bool hw_mode_done; bool mac_phy_done; bool ext_hal_reg_done; bool mac_phy_chainmask_combo_done; bool mac_phy_chainmask_cap_done; bool oem_dma_ring_cap_done; bool dma_ring_cap_done; }; struct wmi_tlv_svc_rdy_ext2_parse { struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; bool dma_ring_cap_done; }; struct wmi_tlv_rdy_parse { u32 num_extra_mac_addr; }; struct wmi_tlv_dma_buf_release_parse { struct ath11k_wmi_dma_buf_release_fixed_param fixed; struct wmi_dma_buf_release_entry *buf_entry; struct wmi_dma_buf_release_meta_data *meta_data; u32 num_buf_entry; u32 num_meta; bool buf_entry_done; bool meta_data_done; }; struct wmi_tlv_fw_stats_parse { const struct wmi_stats_event *ev; const struct wmi_per_chain_rssi_stats *rssi; struct ath11k_fw_stats *stats; int rssi_num; bool chain_rssi_done; }; struct wmi_tlv_mgmt_rx_parse { const struct wmi_mgmt_rx_hdr *fixed; const u8 *frame_buf; bool frame_buf_done; }; static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 }, [WMI_TAG_SERVICE_READY_EVENT] = { .min_len = sizeof(struct wmi_service_ready_event) }, [WMI_TAG_SERVICE_READY_EXT_EVENT] = { .min_len = sizeof(struct wmi_service_ready_ext_event) }, [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) }, [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) }, [WMI_TAG_VDEV_START_RESPONSE_EVENT] = { .min_len = sizeof(struct wmi_vdev_start_resp_event) }, [WMI_TAG_PEER_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_peer_delete_resp_event) }, [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = { .min_len = sizeof(struct wmi_bcn_tx_status_event) }, [WMI_TAG_VDEV_STOPPED_EVENT] = { .min_len = sizeof(struct wmi_vdev_stopped_event) }, [WMI_TAG_REG_CHAN_LIST_CC_EVENT] = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) }, [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) }, [WMI_TAG_MGMT_RX_HDR] = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) }, [WMI_TAG_MGMT_TX_COMPL_EVENT] = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) }, [WMI_TAG_SCAN_EVENT] = { .min_len = sizeof(struct wmi_scan_event) }, [WMI_TAG_PEER_STA_KICKOUT_EVENT] = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, [WMI_TAG_ROAM_EVENT] = { .min_len = sizeof(struct wmi_roam_event) }, [WMI_TAG_CHAN_INFO_EVENT] = { .min_len = sizeof(struct wmi_chan_info_event) }, [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) }, [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = { 
.min_len = sizeof(struct wmi_vdev_install_key_compl_event) }, [WMI_TAG_READY_EVENT] = { .min_len = sizeof(struct wmi_ready_event_min) }, [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {.min_len = sizeof(struct wmi_service_available_event) }, [WMI_TAG_PEER_ASSOC_CONF_EVENT] = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, [WMI_TAG_STATS_EVENT] = { .min_len = sizeof(struct wmi_stats_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, [WMI_TAG_HOST_SWFDA_EVENT] = { .min_len = sizeof(struct wmi_fils_discovery_event) }, [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = { .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = { .min_len = sizeof(struct wmi_obss_color_collision_event) }, [WMI_TAG_11D_NEW_COUNTRY_EVENT] = { .min_len = sizeof(struct wmi_11d_new_cc_ev) }, [WMI_TAG_PER_CHAIN_RSSI_STATS] = { .min_len = sizeof(struct wmi_per_chain_rssi_stats) }, [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = { .min_len = sizeof(struct wmi_twt_add_dialog_event) }, [WMI_TAG_P2P_NOA_INFO] = { .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) }, [WMI_TAG_P2P_NOA_EVENT] = { .min_len = sizeof(struct wmi_p2p_noa_event) }, }; #define PRIMAP(_hw_mode_) \ [_hw_mode_] = _hw_mode_##_PRI static const int ath11k_hw_mode_pri_map[] = { PRIMAP(WMI_HOST_HW_MODE_SINGLE), PRIMAP(WMI_HOST_HW_MODE_DBS), PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE), PRIMAP(WMI_HOST_HW_MODE_SBS), PRIMAP(WMI_HOST_HW_MODE_DBS_SBS), PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS), /* keep last */ PRIMAP(WMI_HOST_HW_MODE_MAX), }; static int ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data), void *data) { const void *begin = ptr; const struct wmi_tlv *tlv; u16 tlv_tag, tlv_len; int ret; while (len > 0) { if (len < sizeof(*tlv)) { ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", ptr - begin, len, sizeof(*tlv)); return -EINVAL; } tlv = ptr; tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header); tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header); ptr += sizeof(*tlv); len -= sizeof(*tlv); if (tlv_len > len) { ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", tlv_tag, ptr - begin, len, tlv_len); return -EINVAL; } if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && wmi_tlv_policies[tlv_tag].min_len && wmi_tlv_policies[tlv_tag].min_len > tlv_len) { ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", tlv_tag, ptr - begin, tlv_len, wmi_tlv_policies[tlv_tag].min_len); return -EINVAL; } ret = iter(ab, tlv_tag, tlv_len, ptr, data); if (ret) return ret; ptr += tlv_len; len -= tlv_len; } return 0; } static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { const void **tb = data; if (tag < WMI_TAG_MAX) tb[tag] = ptr; return 0; } static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb, const void *ptr, size_t len) { return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse, (void *)tb); } const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, struct sk_buff *skb, gfp_t gfp) { const void **tb; int ret; tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp); if (!tb) return ERR_PTR(-ENOMEM); ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len); if (ret) { kfree(tb); return ERR_PTR(ret); } return tb; } static 
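/* A note on the send path below: every WMI command is prefixed with a
 * 4-byte wmi_cmd_hdr whose WMI_CMD_HDR_CMD_ID field carries the command
 * ID. ath11k_wmi_cmd_send_nowait() therefore does a skb_push() of the
 * header, packs the ID with FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id) and
 * hands the skb to HTC on the WMI endpoint (wmi->eid); on failure the
 * header is popped again so the caller may retry or free the skb.
 */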
int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_cmd_hdr *cmd_hdr; int ret; u32 cmd = 0; if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL) return -ENOMEM; cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id); cmd_hdr = (struct wmi_cmd_hdr *)skb->data; cmd_hdr->cmd_id = cmd; trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len); memset(skb_cb, 0, sizeof(*skb_cb)); ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); if (ret) goto err_pull; return 0; err_pull: skb_pull(skb, sizeof(struct wmi_cmd_hdr)); return ret; } int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id) { struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab; int ret = -EOPNOTSUPP; struct ath11k_base *ab = wmi_ab->ab; might_sleep(); if (ab->hw_params.credit_flow) { wait_event_timeout(wmi_ab->tx_credits_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -EAGAIN); }), WMI_SEND_TIMEOUT_HZ); } else { wait_event_timeout(wmi->tx_ce_desc_wq, ({ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) ret = -ESHUTDOWN; (ret != -ENOBUFS); }), WMI_SEND_TIMEOUT_HZ); } if (ret == -EAGAIN) ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); if (ret == -ENOBUFS) ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n", cmd_id); return ret; } static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, const void *ptr, struct ath11k_service_ext_param *param) { const struct wmi_service_ready_ext_event *ev = ptr; if (!ev) return -EINVAL; /* Move this to host based bitmap */ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits; param->default_fw_config_bits = ev->default_fw_config_bits; param->he_cap_info = ev->he_cap_info; param->mpdu_density = ev->mpdu_density; param->max_bssid_rx_filters = ev->max_bssid_rx_filters; memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet)); return 0; } static int ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, struct wmi_soc_mac_phy_hw_mode_caps *hw_caps, struct wmi_hw_mode_capabilities *wmi_hw_mode_caps, struct wmi_soc_hal_reg_capabilities *hal_reg_caps, struct wmi_mac_phy_capabilities *wmi_mac_phy_caps, u8 hw_mode_id, u8 phy_id, struct ath11k_pdev *pdev) { struct wmi_mac_phy_capabilities *mac_phy_caps; struct ath11k_base *ab = wmi_handle->wmi_ab->ab; struct ath11k_band_cap *cap_band; struct ath11k_pdev_cap *pdev_cap = &pdev->cap; u32 phy_map; u32 hw_idx, phy_idx = 0; if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps) return -EINVAL; for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) { if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id) break; phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map; while (phy_map) { phy_map >>= 1; phy_idx++; } } if (hw_idx == hw_caps->num_hw_modes) return -EINVAL; phy_idx += phy_id; if (phy_id >= hal_reg_caps->num_phy) return -EINVAL; mac_phy_caps = wmi_mac_phy_caps + phy_idx; pdev->pdev_id = mac_phy_caps->pdev_id; pdev_cap->supported_bands |= mac_phy_caps->supported_bands; pdev_cap->ampdu_density = mac_phy_caps->ampdu_density; ab->target_pdev_ids[ab->target_pdev_count].supported_bands = mac_phy_caps->supported_bands; ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id; ab->target_pdev_count++; if (!(mac_phy_caps->supported_bands &
WMI_HOST_WLAN_2G_CAP) && !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP)) return -EINVAL; /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from * band to band for a single radio, need to see how this should be * handled. */ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; } if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g; pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g; pdev_cap->nss_ratio_enabled = WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio); pdev_cap->nss_ratio_info = WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio); } /* tx/rx chainmask reported from fw depends on the actual hw chains used, * For example, for 4x4 capable macphys, first 4 chains can be used for first * mac and the remaining 4 chains can be used for the second mac or vice-versa. * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 * will be advertised for second mac or vice-versa. Compute the shift value * for tx/rx chainmask which will be used to advertise supported ht/vht rates to * mac80211. */ pdev_cap->tx_chain_mask_shift = find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); pdev_cap->rx_chain_mask_shift = find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) { cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; cap_band->phy_id = mac_phy_caps->phy_id; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g, sizeof(struct ath11k_ppe_threshold)); } if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) { cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; cap_band->phy_id = mac_phy_caps->phy_id; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); } return 0; } static int ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle, struct wmi_soc_hal_reg_capabilities *reg_caps, struct 
wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap, u8 phy_idx, struct ath11k_hal_reg_capabilities_ext *param) { struct wmi_hal_reg_capabilities_ext *ext_reg_cap; if (!reg_caps || !wmi_ext_reg_cap) return -EINVAL; if (phy_idx >= reg_caps->num_phy) return -EINVAL; ext_reg_cap = &wmi_ext_reg_cap[phy_idx]; param->phy_id = ext_reg_cap->phy_id; param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain; param->eeprom_reg_domain_ext = ext_reg_cap->eeprom_reg_domain_ext; param->regcap1 = ext_reg_cap->regcap1; param->regcap2 = ext_reg_cap->regcap2; /* check if param->wireless_mode is needed */ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan; param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan; param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan; param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan; return 0; } static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, const void *evt_buf, struct ath11k_targ_cap *cap) { const struct wmi_service_ready_event *ev = evt_buf; if (!ev) { ath11k_err(ab, "%s: failed by NULL param\n", __func__); return -EINVAL; } cap->phy_capability = ev->phy_capability; cap->max_frag_entry = ev->max_frag_entry; cap->num_rf_chains = ev->num_rf_chains; cap->ht_cap_info = ev->ht_cap_info; cap->vht_cap_info = ev->vht_cap_info; cap->vht_supp_mcs = ev->vht_supp_mcs; cap->hw_min_tx_power = ev->hw_min_tx_power; cap->hw_max_tx_power = ev->hw_max_tx_power; cap->sys_cap_info = ev->sys_cap_info; cap->min_pkt_size_enable = ev->min_pkt_size_enable; cap->max_bcn_ie_size = ev->max_bcn_ie_size; cap->max_num_scan_channels = ev->max_num_scan_channels; cap->max_supported_macs = ev->max_supported_macs; cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; cap->txrx_chainmask = ev->txrx_chainmask; cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; cap->num_msdu_desc = ev->num_msdu_desc; return 0; } /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each * 4-byte word. 
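 *
 * That is, only the low WMI_SERVICE_BITS_IN_SIZE32 (i.e. 4) bits of each
 * 32-bit word are meaningful: service j is advertised in bit (j % 4) of
 * word (j / 4), and the loop below folds those nibbles into the linear
 * svc_map bitmap via set_bit().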
*/ static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi, const u32 *wmi_svc_bm) { int i, j; for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) { do { if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32)) set_bit(j, wmi->wmi_ab->svc_map); } while (++j % WMI_SERVICE_BITS_IN_SIZE32); } } static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_ready_parse *svc_ready = data; struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0]; u16 expect_len; switch (tag) { case WMI_TAG_SERVICE_READY_EVENT: if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) return -EINVAL; break; case WMI_TAG_ARRAY_UINT32: if (!svc_ready->wmi_svc_bitmap_done) { expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32); if (len < expect_len) { ath11k_warn(ab, "invalid len %d for the tag 0x%x\n", len, tag); return -EINVAL; } ath11k_wmi_service_bitmap_copy(wmi_handle, ptr); svc_ready->wmi_svc_bitmap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_ready_parse svc_ready = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_parse, &svc_ready); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready"); return 0; } struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len) { struct sk_buff *skb; struct ath11k_base *ab = wmi_ab->ab; u32 round_len = roundup(len, 4); skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); if (!skb) return NULL; skb_reserve(skb, WMI_SKB_HEADROOM); if (!IS_ALIGNED((unsigned long)skb->data, 4)) ath11k_warn(ab, "unaligned WMI skb data\n"); skb_put(skb, round_len); memset(skb->data, 0, round_len); return skb; } static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar, struct ieee80211_tx_info *info) { struct ath11k_base *ab = ar->ab; u32 freq = 0; if (ab->hw_params.support_off_channel_tx && ar->scan.is_roc && (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; return freq; } int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); struct wmi_mgmt_send_cmd *cmd; struct wmi_tlv *frame_tlv; struct sk_buff *skb; u32 buf_len; int ret, len; buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ? 
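/* Only the first WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are copied
 * inline into the command TLV; the DMA address of the full frame goes in
 * paddr_lo/paddr_hi and frame_len carries the true length, so the target
 * can fetch the remainder of the frame directly when needed.
 */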
frame->len : WMI_MGMT_SEND_DOWNLD_LEN; len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_mgmt_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->desc_id = buf_id; cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info); cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr); cmd->frame_len = frame->len; cmd->buf_len = buf_len; cmd->tx_params_valid = 0; frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, buf_len); memcpy(frame_tlv->value, frame->data, buf_len); ath11k_ce_byte_swap(frame_tlv->value, buf_len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send"); return ret; } int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, struct vdev_create_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; struct wmi_vdev_txrx_streams *txrx_streams; struct wmi_tlv *tlv; int ret, len; void *ptr; /* It can be optimized by sending tx/rx chain configuration * only for supported bands instead of always sending it for * both the bands. */ len = sizeof(*cmd) + TLV_HDR_SIZE + (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_create_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->if_id; cmd->vdev_type = param->type; cmd->vdev_subtype = param->subtype; cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX; cmd->pdev_id = param->pdev_id; cmd->mbssid_flags = param->mbssid_flags; cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id; ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); ptr = skb->data + sizeof(*cmd); len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; txrx_streams = ptr; len = sizeof(*txrx_streams); txrx_streams->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G; txrx_streams->supported_tx_streams = param->chains[NL80211_BAND_2GHZ].tx; txrx_streams->supported_rx_streams = param->chains[NL80211_BAND_2GHZ].rx; txrx_streams++; txrx_streams->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G; txrx_streams->supported_tx_streams = param->chains[NL80211_BAND_5GHZ].tx; txrx_streams->supported_rx_streams = param->chains[NL80211_BAND_5GHZ].rx; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_CREATE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n", param->if_id, param->type, param->subtype, macaddr, param->pdev_id); return ret; } int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id) { struct
ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_delete_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_delete_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id); return ret; } int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_stop_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_stop_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id); return ret; } int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_down_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_down_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id); return ret; } static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, struct wmi_vdev_start_req_arg *arg) { u32 center_freq1 = arg->channel.band_center_freq1; memset(chan, 0, sizeof(*chan)); chan->mhz = arg->channel.freq; chan->band_center_freq1 = arg->channel.band_center_freq1; if (arg->channel.mode == MODE_11AX_HE160) { if (arg->channel.freq > arg->channel.band_center_freq1) chan->band_center_freq1 = center_freq1 + 40; else chan->band_center_freq1 = center_freq1 - 40; chan->band_center_freq2 = arg->channel.band_center_freq1; } else if ((arg->channel.mode == MODE_11AC_VHT80_80) || (arg->channel.mode == MODE_11AX_HE80_80)) { chan->band_center_freq2 = arg->channel.band_center_freq2; } else { chan->band_center_freq2 = 0; } chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode); if (arg->channel.passive) chan->info |= WMI_CHAN_INFO_PASSIVE; if (arg->channel.allow_ibss) chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED; if (arg->channel.allow_ht) chan->info |= WMI_CHAN_INFO_ALLOW_HT; if (arg->channel.allow_vht) chan->info |= WMI_CHAN_INFO_ALLOW_VHT; if (arg->channel.allow_he) chan->info |= WMI_CHAN_INFO_ALLOW_HE; if (arg->channel.ht40plus) chan->info |= WMI_CHAN_INFO_HT40_PLUS; if (arg->channel.chan_radar) chan->info |= WMI_CHAN_INFO_DFS; if (arg->channel.freq2_radar) chan->info |= WMI_CHAN_INFO_DFS_FREQ2; chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, arg->channel.max_power) | FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, arg->channel.max_reg_power); chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, arg->channel.max_antenna_gain) | 
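/* Note on the MODE_11AX_HE160 handling above: band_center_freq1 is
 * nudged by +/-40 MHz to the center of the 80 MHz segment that holds
 * the primary channel, while band_center_freq2 keeps the full 160 MHz
 * channel center, which appears to be the split the firmware expects
 * for 160 MHz operation.
 */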
FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, arg->channel.max_power); } int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, bool restart) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_start_request_cmd *cmd; struct sk_buff *skb; struct wmi_channel *chan; struct wmi_tlv *tlv; void *ptr; int ret, len; if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) return -EINVAL; len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_start_request_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_START_REQUEST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arg->vdev_id; cmd->beacon_interval = arg->bcn_intval; cmd->bcn_tx_rate = arg->bcn_tx_rate; cmd->dtim_period = arg->dtim_period; cmd->num_noa_descriptors = arg->num_noa_descriptors; cmd->preferred_rx_streams = arg->pref_rx_streams; cmd->preferred_tx_streams = arg->pref_tx_streams; cmd->cac_duration_ms = arg->cac_duration_ms; cmd->regdomain = arg->regdomain; cmd->he_ops = arg->he_ops; cmd->mbssid_flags = arg->mbssid_flags; cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id; if (!restart) { if (arg->ssid) { cmd->ssid.ssid_len = arg->ssid_len; memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); } if (arg->hidden_ssid) cmd->flags |= WMI_VDEV_START_HIDDEN_SSID; if (arg->pmf_enabled) cmd->flags |= WMI_VDEV_START_PMF_ENABLED; } cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED; if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED; ptr = skb->data + sizeof(*cmd); chan = ptr; ath11k_wmi_put_wmi_channel(chan, arg); chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | FIELD_PREP(WMI_TLV_LEN, sizeof(*chan) - TLV_HDR_SIZE); ptr += sizeof(*chan); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, 0); /* Note: This is a nested TLV containing: * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv].. */ ptr += sizeof(*tlv); if (restart) ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_RESTART_REQUEST_CMDID); else ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_START_REQUEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n", restart ? "restart" : "start"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n", restart ? 
"restart" : "start", arg->vdev_id, arg->channel.freq, arg->channel.mode); return ret; } int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid, u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_up_cmd *cmd; struct ieee80211_bss_conf *bss_conf; struct ath11k_vif *arvif; struct sk_buff *skb; int ret; arvif = ath11k_mac_get_arvif(ar, vdev_id); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_up_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->vdev_assoc_id = aid; ether_addr_copy(cmd->vdev_bssid.addr, bssid); cmd->nontx_profile_idx = nontx_profile_idx; cmd->nontx_profile_cnt = nontx_profile_cnt; if (tx_bssid) ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid); if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) { bss_conf = &arvif->vif->bss_conf; if (bss_conf->nontransmitted) { ether_addr_copy(cmd->tx_vdev_bssid.addr, bss_conf->transmitter_bssid); cmd->nontx_profile_idx = bss_conf->bssid_index; cmd->nontx_profile_cnt = bss_conf->bssid_indicator; } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); return ret; } int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, struct peer_create_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr); cmd->peer_type = param->peer_type; cmd->vdev_id = param->vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer create vdev_id %d peer_addr %pM\n", param->vdev_id, param->peer_addr); return ret; } int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, const u8 *peer_addr, u8 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_delete_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_delete_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ret; } int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, struct pdev_set_regdomain_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_regdomain_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, 
WMI_TAG_PDEV_SET_REGDOMAIN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->reg_domain = param->current_rd_in_use; cmd->reg_domain_2g = param->current_rd_2g; cmd->reg_domain_5g = param->current_rd_5g; cmd->conformance_test_limit_2g = param->ctl_2g; cmd->conformance_test_limit_5g = param->ctl_5g; cmd->dfs_domain = param->dfs_domain; cmd->pdev_id = param->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", param->current_rd_in_use, param->current_rd_2g, param->current_rd_5g, param->dfs_domain, param->pdev_id); return ret; } int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, u32 vdev_id, u32 param_id, u32 param_val) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->vdev_id = vdev_id; cmd->param_id = param_id; cmd->param_value = param_val; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_val); return ret; } int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, u8 peer_addr[ETH_ALEN], struct peer_flush_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_flush_tids_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->peer_tid_bitmap = param->peer_tid_bitmap; cmd->vdev_id = param->vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n", param->vdev_id, peer_addr, param->peer_tid_bitmap); return ret; } int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id, const u8 *addr, dma_addr_t paddr, u8 tid, u8 ba_window_size_valid, u32 ba_window_size) { struct wmi_peer_reorder_queue_setup_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REORDER_QUEUE_SETUP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, addr); cmd->vdev_id = vdev_id; cmd->tid = tid; cmd->queue_ptr_lo = lower_32_bits(paddr); cmd->queue_ptr_hi = upper_32_bits(paddr); cmd->queue_no = tid; cmd->ba_window_size_valid = ba_window_size_valid; cmd->ba_window_size = ba_window_size; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PEER_REORDER_QUEUE_SETUP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send 
WMI_PEER_REORDER_QUEUE_SETUP\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n", addr, vdev_id, tid); return ret; } int ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, struct rx_reorder_queue_remove_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_reorder_queue_remove_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REORDER_QUEUE_REMOVE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr); cmd->vdev_id = param->vdev_id; cmd->tid_mask = param->peer_tid_bitmap; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d", param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); return ret; } int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, u32 param_value, u8 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; cmd->param_id = param_id; cmd->param_value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set param %d pdev id %d value %d\n", param_id, pdev_id, param_value); return ret; } int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, enum wmi_sta_ps_mode psmode) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_ps_mode_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->sta_ps_mode = psmode; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd sta powersave mode psmode %d vdev id %d\n", psmode, vdev_id); return ret; } int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_suspend_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_suspend_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->suspend_opt = suspend_opt; cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev suspend pdev_id %d\n", pdev_id); return ret; } int 
ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_resume_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_resume_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev resume pdev id %d\n", pdev_id); return ret; } /* TODO FW Support for the cmd is not available yet. * Can be tested once the command and corresponding * event is implemented in FW */ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, enum wmi_bss_chan_info_req_type type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_bss_chan_info_req_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->req_type = type; cmd->pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev bss chan info request type %d\n", type); return ret; } int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, struct ap_ps_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_ap_ps_peer_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); cmd->param = param->param; cmd->value = param->value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n", param->vdev_id, peer_addr, param->param, param->value); return ret; } int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, u32 param, u32 param_value) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_sta_powersave_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->param = param; cmd->value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set powersave param vdev_id %d param %d value %d\n", vdev_id, param, param_value); return ret; } int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct 
wmi_force_fw_hang_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_force_fw_hang_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->type = type; cmd->delay_time_ms = delay_time_ms; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); if (ret) { ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang"); return ret; } int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, u32 param_id, u32 param_value) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_param_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_set_param_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->param_id = param_id; cmd->param_value = param_value; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set param vdev 0x%x param %d value %d\n", vdev_id, param_id, param_value); return ret; } int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, struct stats_request_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_request_stats_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->stats_id = param->stats_id; cmd->vdev_id = param->vdev_id; cmd->pdev_id = param->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd request stats 0x%x vdev id %d pdev id %d\n", param->stats_id, param->vdev_id, param->pdev_id); return ret; } int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_get_pdev_temperature_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); return ret; } int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, u32 vdev_id, u32 bcn_ctrl_op) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_bcn_offload_ctrl_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_OFFLOAD_CTRL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; 
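/* bcn_ctrl_op selects the beacon-control operation to apply to this
 * vdev's target-offloaded beacons (e.g. enabling or disabling beacon
 * transmission).
 */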
cmd->bcn_ctrl_op = bcn_ctrl_op; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn offload ctrl vdev id %d ctrl_op %d\n", vdev_id, bcn_ctrl_op); return ret; } int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id, const u8 *p2p_ie) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_p2p_go_set_beacon_ie_cmd *cmd; size_t p2p_ie_len, aligned_len; struct wmi_tlv *tlv; struct sk_buff *skb; int ret, len; p2p_ie_len = p2p_ie[1] + 2; aligned_len = roundup(p2p_ie_len, 4); len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->ie_buf_len = p2p_ie_len; tlv = (struct wmi_tlv *)cmd->tlv; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, p2p_ie, p2p_ie_len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); dev_kfree_skb(skb); } return ret; } int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, u32 ema_params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_bcn_tmpl_cmd *cmd; struct wmi_bcn_prb_info *bcn_prb_info; struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); struct ieee80211_vif *vif; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id); if (!arvif) { ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id); return -EINVAL; } vif = arvif->vif; len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->tim_ie_offset = offs->tim_offset; if (vif->bss_conf.csa_active) { cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0]; cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1]; } cmd->buf_len = bcn->len; cmd->mbssid_ie_offset = offs->mbssid_off; cmd->ema_params = ema_params; ptr = skb->data + sizeof(*cmd); bcn_prb_info = ptr; len = sizeof(*bcn_prb_info); bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_PRB_INFO) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); bcn_prb_info->caps = 0; bcn_prb_info->erp = 0; ptr += sizeof(*bcn_prb_info); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, bcn->data, bcn->len); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl"); return ret; } int ath11k_wmi_vdev_install_key(struct ath11k *ar, struct wmi_vdev_install_key_arg *arg) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_install_key_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; int ret, len; int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t)); len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; skb = 
ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_install_key_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arg->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); cmd->key_idx = arg->key_idx; cmd->key_flags = arg->key_flags; cmd->key_cipher = arg->key_cipher; cmd->key_len = arg->key_len; cmd->key_txmic_len = arg->key_txmic_len; cmd->key_rxmic_len = arg->key_rxmic_len; if (arg->key_rsc_counter) memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter, sizeof(struct wmi_key_seq_counter)); tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); if (arg->key_data) memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); return ret; } static inline void ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, struct peer_assoc_params *param, bool hw_crypto_disabled) { cmd->peer_flags = 0; if (param->is_wme_set) { if (param->qos_flag) cmd->peer_flags |= WMI_PEER_QOS; if (param->apsd_flag) cmd->peer_flags |= WMI_PEER_APSD; if (param->ht_flag) cmd->peer_flags |= WMI_PEER_HT; if (param->bw_40) cmd->peer_flags |= WMI_PEER_40MHZ; if (param->bw_80) cmd->peer_flags |= WMI_PEER_80MHZ; if (param->bw_160) cmd->peer_flags |= WMI_PEER_160MHZ; /* Typically if STBC is enabled for VHT it should be enabled * for HT as well. */ if (param->stbc_flag) cmd->peer_flags |= WMI_PEER_STBC; /* Typically if LDPC is enabled for VHT it should be enabled * for HT as well. */ if (param->ldpc_flag) cmd->peer_flags |= WMI_PEER_LDPC; if (param->static_mimops_flag) cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS; if (param->dynamic_mimops_flag) cmd->peer_flags |= WMI_PEER_DYN_MIMOPS; if (param->spatial_mux_flag) cmd->peer_flags |= WMI_PEER_SPATIAL_MUX; if (param->vht_flag) cmd->peer_flags |= WMI_PEER_VHT; if (param->he_flag) cmd->peer_flags |= WMI_PEER_HE; if (param->twt_requester) cmd->peer_flags |= WMI_PEER_TWT_REQ; if (param->twt_responder) cmd->peer_flags |= WMI_PEER_TWT_RESP; } /* Suppress authorization for all AUTH modes that need 4-way handshake * (during re-association). * Authorization will be done for these modes on key installation. */ if (param->auth_flag) cmd->peer_flags |= WMI_PEER_AUTH; if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; if (!hw_crypto_disabled && param->is_assoc) cmd->peer_flags &= ~WMI_PEER_AUTH; } if (param->need_gtk_2_way) cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; /* safe mode bypasses the 4-way handshake */ if (param->safe_mode_enabled) cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY | WMI_PEER_NEED_GTK_2_WAY); if (param->is_pmf_enabled) cmd->peer_flags |= WMI_PEER_PMF; /* Disable AMSDU for station transmit, if user configures it */ /* Disable AMSDU for AP transmit to 11n stations, if user configures * it: handle param->amsdu_disable here once FW support is available. */ /* Target asserts if node is marked HT and all MCS is set to 0.
* Mark the node as non-HT if all the MCS rates are disabled through * iwpriv. */ if (param->peer_ht_rates.num_rates == 0) cmd->peer_flags &= ~WMI_PEER_HT; } int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, struct peer_assoc_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_peer_assoc_complete_cmd *cmd; struct wmi_vht_rate_set *mcs; struct wmi_he_rate_set *he_mcs; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; u32 peer_legacy_rates_align; u32 peer_ht_rates_align; int i, ret, len; peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates, sizeof(u32)); peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates, sizeof(u32)); len = sizeof(*cmd) + TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + sizeof(*mcs) + TLV_HDR_SIZE + (sizeof(*he_mcs) * param->peer_he_mcs_count); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_ASSOC_COMPLETE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->peer_new_assoc = param->peer_new_assoc; cmd->peer_associd = param->peer_associd; ath11k_wmi_copy_peer_flags(cmd, param, test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac); cmd->peer_rate_caps = param->peer_rate_caps; cmd->peer_caps = param->peer_caps; cmd->peer_listen_intval = param->peer_listen_intval; cmd->peer_ht_caps = param->peer_ht_caps; cmd->peer_max_mpdu = param->peer_max_mpdu; cmd->peer_mpdu_density = param->peer_mpdu_density; cmd->peer_vht_caps = param->peer_vht_caps; cmd->peer_phymode = param->peer_phymode; /* Update 11ax capabilities */ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0]; cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1]; cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal; cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz; cmd->peer_he_ops = param->peer_he_ops; memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo, sizeof(param->peer_he_cap_phyinfo)); memcpy(&cmd->peer_ppet, &param->peer_ppet, sizeof(param->peer_ppet)); /* Update peer legacy rate information */ ptr += sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align); ptr += TLV_HDR_SIZE; cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; memcpy(ptr, param->peer_legacy_rates.rates, param->peer_legacy_rates.num_rates); /* Update peer HT rate information */ ptr += peer_legacy_rates_align; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align); ptr += TLV_HDR_SIZE; cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; memcpy(ptr, param->peer_ht_rates.rates, param->peer_ht_rates.num_rates); /* VHT Rates */ ptr += peer_ht_rates_align; mcs = ptr; mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) | FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE); cmd->peer_nss = param->peer_nss; /* Update bandwidth-NSS mapping */ cmd->peer_bw_rxnss_override = 0; cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; if (param->vht_capable) { mcs->rx_max_rate = param->rx_max_rate; mcs->rx_mcs_set = param->rx_mcs_set; mcs->tx_max_rate = param->tx_max_rate; mcs->tx_mcs_set = param->tx_mcs_set; } /* HE Rates */ cmd->peer_he_mcs = param->peer_he_mcs_count; cmd->min_data_rate = param->min_data_rate; ptr += sizeof(*mcs); len =
param->peer_he_mcs_count * sizeof(*he_mcs); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; /* Loop through the HE rate set */ for (i = 0; i < param->peer_he_mcs_count; i++) { he_mcs = ptr; he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HE_RATE_SET) | FIELD_PREP(WMI_TLV_LEN, sizeof(*he_mcs) - TLV_HDR_SIZE); he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i]; he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i]; ptr += sizeof(*he_mcs); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PEER_ASSOC_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n", cmd->vdev_id, cmd->peer_associd, param->peer_mac, cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, cmd->peer_listen_intval, cmd->peer_ht_caps, cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, cmd->peer_mpdu_density, cmd->peer_vht_caps, cmd->peer_he_cap_info, cmd->peer_he_ops, cmd->peer_he_cap_info_ext, cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], cmd->peer_he_cap_phy[2], cmd->peer_bw_rxnss_override); return ret; } void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg) { /* setup commonly used values */ arg->scan_req_id = 1; if (ar->state_11d == ATH11K_11D_PREPARING) arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; else arg->scan_priority = WMI_SCAN_PRIORITY_LOW; arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; arg->dwell_time_active_6g = 40; arg->dwell_time_passive_6g = 30; arg->min_rest_time = 50; arg->max_rest_time = 500; arg->repeat_probe_time = 0; arg->probe_spacing_time = 0; arg->idle_time = 0; arg->max_scan_time = 20000; arg->probe_delay = 5; arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | WMI_SCAN_EVENT_COMPLETED | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHAN | WMI_SCAN_EVENT_DEQUEUED; arg->scan_f_chan_stat_evnt = 1; if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE, ar->ab->wmi_ab.svc_map)) arg->scan_ctrl_flags_ext |= WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE; arg->num_bssid = 1; /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be * ZEROs in probe request */ eth_broadcast_addr(arg->bssid_list[0].addr); } static inline void ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, struct scan_req_params *param) { /* Scan events subscription */ if (param->scan_ev_started) cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED; if (param->scan_ev_completed) cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED; if (param->scan_ev_bss_chan) cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL; if (param->scan_ev_foreign_chan) cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN; if (param->scan_ev_dequeued) cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED; if (param->scan_ev_preempted) cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED; if (param->scan_ev_start_failed) cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED; if (param->scan_ev_restarted) cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED; if (param->scan_ev_foreign_chn_exit) cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT; if (param->scan_ev_suspended) cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED; if 
(param->scan_ev_resumed) cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED; /** Set scan control flags */ cmd->scan_ctrl_flags = 0; if (param->scan_f_passive) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; if (param->scan_f_strict_passive_pch) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN; if (param->scan_f_promisc_mode) cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS; if (param->scan_f_capture_phy_err) cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR; if (param->scan_f_half_rate) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT; if (param->scan_f_quarter_rate) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT; if (param->scan_f_cck_rates) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; if (param->scan_f_ofdm_rates) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES; if (param->scan_f_chan_stat_evnt) cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; if (param->scan_f_filter_prb_req) cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; if (param->scan_f_bcast_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ; if (param->scan_f_offchan_mgmt_tx) cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX; if (param->scan_f_offchan_data_tx) cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX; if (param->scan_f_force_active_dfs_chn) cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS; if (param->scan_f_add_tpc_ie_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ; if (param->scan_f_add_ds_ie_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ; if (param->scan_f_add_spoofed_mac_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ; if (param->scan_f_add_rand_seq_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ; if (param->scan_f_en_ie_whitelist_in_probe) cmd->scan_ctrl_flags |= WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ; /* for adaptive scan mode using 3 bits (21 - 23 bits) */ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags, param->adaptive_dwell_time_mode); cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext; } int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, struct scan_req_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_start_scan_cmd *cmd; struct wmi_ssid *ssid = NULL; struct wmi_mac_addr *bssid; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; int i, ret, len; u32 *tmp_ptr; u16 extraie_len_with_pad = 0; struct hint_short_ssid *s_ssid = NULL; struct hint_bssid *hint_bssid = NULL; len = sizeof(*cmd); len += TLV_HDR_SIZE; if (params->num_chan) len += params->num_chan * sizeof(u32); len += TLV_HDR_SIZE; if (params->num_ssids) len += params->num_ssids * sizeof(*ssid); len += TLV_HDR_SIZE; if (params->num_bssid) len += sizeof(*bssid) * params->num_bssid; len += TLV_HDR_SIZE; if (params->extraie.len && params->extraie.len <= 0xFFFF) extraie_len_with_pad = roundup(params->extraie.len, sizeof(u32)); len += extraie_len_with_pad; if (params->num_hint_bssid) len += TLV_HDR_SIZE + params->num_hint_bssid * sizeof(struct hint_bssid); if (params->num_hint_s_ssid) len += TLV_HDR_SIZE + params->num_hint_s_ssid * sizeof(struct hint_short_ssid); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->scan_id = params->scan_id; cmd->scan_req_id = params->scan_req_id; cmd->vdev_id = params->vdev_id; cmd->scan_priority = params->scan_priority; cmd->notify_scan_events = params->notify_scan_events; 
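/* The fixed command is followed by variable-length TLVs in the same order as the len computation above: channel list, SSID list, BSSID list, extra IEs, then the optional short-SSID and BSSID scan hints */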
ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params); cmd->dwell_time_active = params->dwell_time_active; cmd->dwell_time_active_2g = params->dwell_time_active_2g; cmd->dwell_time_passive = params->dwell_time_passive; cmd->dwell_time_active_6g = params->dwell_time_active_6g; cmd->dwell_time_passive_6g = params->dwell_time_passive_6g; cmd->min_rest_time = params->min_rest_time; cmd->max_rest_time = params->max_rest_time; cmd->repeat_probe_time = params->repeat_probe_time; cmd->probe_spacing_time = params->probe_spacing_time; cmd->idle_time = params->idle_time; cmd->max_scan_time = params->max_scan_time; cmd->probe_delay = params->probe_delay; cmd->burst_duration = params->burst_duration; cmd->num_chan = params->num_chan; cmd->num_bssid = params->num_bssid; cmd->num_ssids = params->num_ssids; cmd->ie_len = params->extraie.len; cmd->n_probes = params->n_probes; ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr); ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr); ptr += sizeof(*cmd); len = params->num_chan * sizeof(u32); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; tmp_ptr = ptr; for (i = 0; i < params->num_chan; ++i) tmp_ptr[i] = params->chan_list[i]; ptr += len; len = params->num_ssids * sizeof(*ssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; if (params->num_ssids) { ssid = ptr; for (i = 0; i < params->num_ssids; ++i) { ssid->ssid_len = params->ssid[i].length; memcpy(ssid->ssid, params->ssid[i].ssid, params->ssid[i].length); ssid++; } } ptr += (params->num_ssids * sizeof(*ssid)); len = params->num_bssid * sizeof(*bssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; bssid = ptr; if (params->num_bssid) { for (i = 0; i < params->num_bssid; ++i) { ether_addr_copy(bssid->addr, params->bssid_list[i].addr); bssid++; } } ptr += params->num_bssid * sizeof(*bssid); len = extraie_len_with_pad; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; if (extraie_len_with_pad) memcpy(ptr, params->extraie.ptr, params->extraie.len); ptr += extraie_len_with_pad; if (params->num_hint_s_ssid) { len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; s_ssid = ptr; for (i = 0; i < params->num_hint_s_ssid; ++i) { s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; s_ssid++; } ptr += len; } if (params->num_hint_bssid) { len = params->num_hint_bssid * sizeof(struct hint_bssid); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; hint_bssid = ptr; for (i = 0; i < params->num_hint_bssid; ++i) { hint_bssid->freq_flags = params->hint_bssid[i].freq_flags; ether_addr_copy(&hint_bssid->bssid.addr[0], &params->hint_bssid[i].bssid.addr[0]); hint_bssid++; } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan"); return ret; } int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, u32 vdev_id, struct ath11k_reg_tpc_power_info *param) { struct ath11k_pdev_wmi *wmi = ar->wmi;
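/* the command carries one wmi_vdev_ch_power_info TLV per entry in param->chan_power_info */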
struct wmi_vdev_set_tpc_power_cmd *cmd; struct wmi_vdev_ch_power_info *ch; struct sk_buff *skb; struct wmi_tlv *tlv; u8 *ptr; int i, ret, len, array_len; array_len = sizeof(*ch) * param->num_pwr_levels; len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; ptr = skb->data; cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->psd_power = param->is_psd_power; cmd->eirp_power = param->eirp_power; cmd->power_type_6ghz = param->ap_power_type; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); ptr += sizeof(*cmd); tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, array_len); ptr += TLV_HDR_SIZE; ch = (struct wmi_vdev_ch_power_info *)ptr; for (i = 0; i < param->num_pwr_levels; i++, ch++) { ch->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CH_POWER_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*ch) - TLV_HDR_SIZE); ch->chan_cfreq = param->chan_power_info[i].chan_cfreq; ch->tx_power = param->chan_power_info[i].tx_power; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n", ch->chan_cfreq, ch->tx_power); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n"); dev_kfree_skb(skb); return ret; } return 0; } int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, struct scan_cancel_param *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_stop_scan_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_stop_scan_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->requestor = param->requester; cmd->scan_id = param->scan_id; cmd->pdev_id = param->pdev_id; /* stop the scan with the corresponding scan_id */ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { /* Cancelling all scans */ cmd->req_type = WMI_SCAN_STOP_ALL; } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { /* Cancelling VAP scans */ cmd->req_type = WMI_SCN_STOP_VAP_ALL; } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) { /* Cancelling specific scan */ cmd->req_type = WMI_SCAN_STOP_ONE; } else { ath11k_warn(ar->ab, "invalid scan cancel param %d", param->req_type); dev_kfree_skb(skb); return -EINVAL; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STOP_SCAN_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan"); return ret; } int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, struct scan_chan_list_params *chan_list) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_scan_chan_list_cmd *cmd; struct sk_buff *skb; struct wmi_channel *chan_info; struct channel_param *tchan_info; struct wmi_tlv *tlv; void *ptr; int i, ret, len; u16 num_send_chans, num_sends = 0, max_chan_limit = 0; u32 *reg1, *reg2; tchan_info = chan_list->ch_param; while (chan_list->nallchans) { len = sizeof(*cmd) + TLV_HDR_SIZE; max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / sizeof(*chan_info); if (chan_list->nallchans > max_chan_limit) 
num_send_chans = max_chan_limit; else num_send_chans = chan_list->nallchans; chan_list->nallchans -= num_send_chans; len += sizeof(*chan_info) * num_send_chans; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_scan_chan_list_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = chan_list->pdev_id; cmd->num_scan_chans = num_send_chans; if (num_sends) cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", num_send_chans, len, cmd->pdev_id, num_sends); ptr = skb->data + sizeof(*cmd); len = sizeof(*chan_info) * num_send_chans; tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); ptr += TLV_HDR_SIZE; for (i = 0; i < num_send_chans; ++i) { chan_info = ptr; memset(chan_info, 0, sizeof(*chan_info)); len = sizeof(*chan_info); chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); reg1 = &chan_info->reg_info_1; reg2 = &chan_info->reg_info_2; chan_info->mhz = tchan_info->mhz; chan_info->band_center_freq1 = tchan_info->cfreq1; chan_info->band_center_freq2 = tchan_info->cfreq2; if (tchan_info->is_chan_passive) chan_info->info |= WMI_CHAN_INFO_PASSIVE; if (tchan_info->allow_he) chan_info->info |= WMI_CHAN_INFO_ALLOW_HE; else if (tchan_info->allow_vht) chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT; else if (tchan_info->allow_ht) chan_info->info |= WMI_CHAN_INFO_ALLOW_HT; if (tchan_info->half_rate) chan_info->info |= WMI_CHAN_INFO_HALF_RATE; if (tchan_info->quarter_rate) chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE; if (tchan_info->psc_channel) chan_info->info |= WMI_CHAN_INFO_PSC; if (tchan_info->dfs_set) chan_info->info |= WMI_CHAN_INFO_DFS; chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, tchan_info->phy_mode); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, tchan_info->minpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, tchan_info->maxpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, tchan_info->maxregpower); *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS, tchan_info->reg_class_id); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, tchan_info->antennamax); *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR, tchan_info->maxregpower); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "chan scan list chan[%d] = %u, chan_info->info %8x\n", i, chan_info->mhz, chan_info->info); ptr += sizeof(*chan_info); tchan_info++; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d", num_send_chans); num_sends++; } return 0; } int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, struct wmi_wmm_params_all_arg *param, enum wmi_wmm_params_type wmm_param_type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_wmm_params_cmd *cmd; struct wmi_wmm_params *wmm_param; struct wmi_wmm_params_arg *wmi_wmm_arg; struct sk_buff *skb; int ret, ac; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->wmm_param_type = wmm_param_type; 
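/* build one wmi_wmm_params TLV per access category: BE, BK, VI and VO */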
for (ac = 0; ac < WME_NUM_AC; ac++) { switch (ac) { case WME_AC_BE: wmi_wmm_arg = &param->ac_be; break; case WME_AC_BK: wmi_wmm_arg = &param->ac_bk; break; case WME_AC_VI: wmi_wmm_arg = &param->ac_vi; break; case WME_AC_VO: wmi_wmm_arg = &param->ac_vo; break; } wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; wmm_param->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*wmm_param) - TLV_HDR_SIZE); wmm_param->aifs = wmi_wmm_arg->aifs; wmm_param->cwmin = wmi_wmm_arg->cwmin; wmm_param->cwmax = wmi_wmm_arg->cwmax; wmm_param->txoplimit = wmi_wmm_arg->txop; wmm_param->acm = wmi_wmm_arg->acm; wmm_param->no_ack = wmi_wmm_arg->no_ack; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin, wmm_param->cwmax, wmm_param->txoplimit, wmm_param->acm, wmm_param->no_ack); } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_WMM_PARAMS_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params"); return ret; } int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_dfs_phyerr_offload_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id); return ret; } int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 initiator, u32 reason) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_delba_send_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_delba_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->initiator = initiator; cmd->reasoncode = reason; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_DELBA_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", vdev_id, mac, tid, initiator, reason); return ret; } int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 status) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_setresponse_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_setresponse_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->statuscode = status; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); if (ret) {
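/* ath11k_wmi_cmd_send() does not consume the skb on failure, so free it here */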
ath11k_warn(ar->ab, "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", vdev_id, mac, tid, status); return ret; } int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac, u32 tid, u32 buf_size) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_send_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_send_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); cmd->tid = tid; cmd->buffersize = buf_size; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_ADDBA_SEND_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", vdev_id, mac, tid, buf_size); return ret; } int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_addba_clear_resp_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, mac); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n", vdev_id, mac); return ret; } int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_pktlog_filter_cmd *cmd; struct wmi_pdev_pktlog_filter_info *info; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; int ret, len; len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); cmd->num_mac = 1; cmd->enable = enable; ptr = skb->data + sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, sizeof(*info)); ptr += TLV_HDR_SIZE; info = ptr; ether_addr_copy(info->peer_macaddr.addr, addr); info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*info) - TLV_HDR_SIZE); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_FILTER_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter"); return ret; } int ath11k_wmi_send_init_country_cmd(struct ath11k *ar, struct wmi_init_country_params init_cc_params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_init_country_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_init_country_cmd *)skb->data; cmd->tlv_header = 
FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_INIT_COUNTRY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; switch (init_cc_params.flags) { case ALPHA_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA; memcpy((u8 *)&cmd->cc_info.alpha2, init_cc_params.cc_info.alpha2, 3); break; case CC_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE; cmd->cc_info.country_code = init_cc_params.cc_info.country_code; break; case REGDMN_IS_SET: cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN; cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id; break; default: ath11k_warn(ar->ab, "unknown cc params flags: 0x%x", init_cc_params.flags); ret = -EINVAL; goto err; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_INIT_COUNTRY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_INIT_COUNTRY CMD: %d\n", ret); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country"); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar, struct wmi_set_current_country_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_set_current_country_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_set_current_country_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(&cmd->new_alpha2, &param->alpha2, 3); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set current country pdev id %d alpha2 %c%c\n", ar->pdev->pdev_id, param->alpha2[0], param->alpha2[1]); return ret; } int ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar, struct thermal_mitigation_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_therm_throt_config_request_cmd *cmd; struct wmi_therm_throt_level_config_info *lvl_conf; struct wmi_tlv *tlv; struct sk_buff *skb; int i, ret, len; len = sizeof(*cmd) + TLV_HDR_SIZE + THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; cmd->enable = param->enable; cmd->dc = param->dc; cmd->dc_per_event = param->dc_per_event; cmd->therm_throt_levels = THERMAL_LEVELS; tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, (THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info))); lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data + sizeof(*cmd) + TLV_HDR_SIZE); for (i = 0; i < THERMAL_LEVELS; i++) { lvl_conf->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) | FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE); lvl_conf->temp_lwm = param->levelconf[i].tmplwm; lvl_conf->temp_hwm = param->levelconf[i].tmphwm; lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent; lvl_conf->prio = param->levelconf[i].priority; lvl_conf++; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID); if (ret) { ath11k_warn(ar->ab,
"failed to send THERM_THROT_SET_CONF cmd\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n", ar->pdev->pdev_id, param->enable, param->dc, param->dc_per_event, THERMAL_LEVELS); return ret; } int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar, struct wmi_11d_scan_start_params *param) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_11d_scan_start_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_11d_scan_start_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = param->vdev_id; cmd->scan_period_msec = param->scan_period_msec; cmd->start_interval_msec = param->start_interval_msec; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd 11d scan start vdev id %d period %d ms internal %d ms\n", cmd->vdev_id, cmd->scan_period_msec, cmd->start_interval_msec); return ret; } int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_11d_scan_stop_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd 11d scan stop vdev id %d\n", cmd->vdev_id); return ret; } int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pktlog_enable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pktlog_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); cmd->evlist = pktlog_filter; cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable"); return ret; } int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pktlog_disable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_pktlog_disable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_PKTLOG_DISABLE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable"); return ret; } void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params 
*twt_params) { twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS; twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE; twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP; twt_params->congestion_thresh_teardown = ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN; twt_params->congestion_thresh_critical = ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL; twt_params->interference_thresh_teardown = ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN; twt_params->interference_thresh_setup = ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP; twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP; twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN; twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS; twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS; twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT; twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL; twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL; twt_params->remove_sta_slot_interval = ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL; /* TODO add MBSSID support */ twt_params->mbss_support = 0; } int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id, struct wmi_twt_enable_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_enable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_enable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; cmd->sta_cong_timer_ms = params->sta_cong_timer_ms; cmd->default_slot_size = params->default_slot_size; cmd->congestion_thresh_setup = params->congestion_thresh_setup; cmd->congestion_thresh_teardown = params->congestion_thresh_teardown; cmd->congestion_thresh_critical = params->congestion_thresh_critical; cmd->interference_thresh_teardown = params->interference_thresh_teardown; cmd->interference_thresh_setup = params->interference_thresh_setup; cmd->min_no_sta_setup = params->min_no_sta_setup; cmd->min_no_sta_teardown = params->min_no_sta_teardown; cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots; cmd->min_no_twt_slots = params->min_no_twt_slots; cmd->max_no_sta_twt = params->max_no_sta_twt; cmd->mode_check_interval = params->mode_check_interval; cmd->add_sta_slot_interval = params->add_sta_slot_interval; cmd->remove_sta_slot_interval = params->remove_sta_slot_interval; cmd->mbss_support = params->mbss_support; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); dev_kfree_skb(skb); return ret; } ar->twt_enabled = 1; ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable"); return 0; } int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_disable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_disable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); 
dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable"); ar->twt_enabled = 0; return 0; } int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar, struct wmi_twt_add_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_add_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; cmd->wake_intvl_us = params->wake_intvl_us; cmd->wake_intvl_mantis = params->wake_intvl_mantis; cmd->wake_dura_us = params->wake_dura_us; cmd->sp_offset_us = params->sp_offset_us; cmd->flags = params->twt_cmd; if (params->flag_bcast) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST; if (params->flag_trigger) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER; if (params->flag_flow_type) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE; if (params->flag_protection) cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to add twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n", cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us, cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us, cmd->flags); return 0; } int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar, struct wmi_twt_del_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_del_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to delete twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt del dialog vdev %u dialog id %u\n", cmd->vdev_id, cmd->dialog_id); return 0; } int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar, struct wmi_twt_pause_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_pause_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_PAUSE_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to pause twt dialog: %d", ret); 
dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt pause dialog vdev %u dialog id %u\n", cmd->vdev_id, cmd->dialog_id); return 0; } int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar, struct wmi_twt_resume_dialog_params *params) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_twt_resume_dialog_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_RESUME_DIALOG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = params->vdev_id; ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr); cmd->dialog_id = params->dialog_id; cmd->sp_offset_us = params->sp_offset_us; cmd->next_twt_size = params->next_twt_size; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID); if (ret) { ath11k_warn(ab, "failed to send wmi command to resume twt dialog: %d", ret); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n", cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us, cmd->next_twt_size); return 0; } int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, struct ieee80211_he_obss_pd *he_obss_pd) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_obss_spatial_reuse_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = he_obss_pd->enable; cmd->obss_min = he_obss_pd->min_offset; cmd->obss_max = he_obss_pd->max_offset; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse"); return 0; } int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if 
(!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_pdev_obss_pd_bitmap_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap)); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n", cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]); return 0; } int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id, u8 bss_color, u32 period, bool enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_obss_color_collision_cfg_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION : ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE; cmd->current_bss_color = bss_color; cmd->detection_period_ms = period; cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS; cmd->free_slot_expiry_time_ms = 0; cmd->flags = 0; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n", cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, cmd->detection_period_ms, cmd->scan_period_ms); return 0; } int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, bool enable) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct ath11k_base *ab = wmi->wmi_ab->ab; struct wmi_bss_color_change_enable_params_cmd *cmd; struct sk_buff *skb; int ret, len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = enable ? 
1 : 0; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); if (ret) { ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bss color change enable id %d enable %d\n", cmd->vdev_id, cmd->enable); return 0; } int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, struct sk_buff *tmpl) { struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len; struct wmi_fils_discovery_tmpl_cmd *cmd; aligned_len = roundup(tmpl->len, 4); len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set FILS discovery template\n", vdev_id); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FILS_DISCOVERY_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->buf_len = tmpl->len; ptr = skb->data + sizeof(*cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, tmpl->data, tmpl->len); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send FILS discovery template command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl"); return 0; } int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, struct sk_buff *tmpl) { struct wmi_probe_tmpl_cmd *cmd; struct wmi_bcn_prb_info *probe_info; struct wmi_tlv *tlv; struct sk_buff *skb; void *ptr; int ret, len; size_t aligned_len = roundup(tmpl->len, 4); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set probe response template\n", vdev_id); len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_probe_tmpl_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->buf_len = tmpl->len; ptr = skb->data + sizeof(*cmd); probe_info = ptr; len = sizeof(*probe_info); probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_PRB_INFO) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); probe_info->caps = 0; probe_info->erp = 0; ptr += sizeof(*probe_info); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, aligned_len); memcpy(tlv->value, tmpl->data, tmpl->len); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send probe response template command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd prb tmpl"); return 0; } int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, bool unsol_bcast_probe_resp_enabled) { struct sk_buff *skb; int ret, len; struct wmi_fils_discovery_cmd *cmd; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %i set %s interval to %u TU\n", vdev_id, unsol_bcast_probe_resp_enabled ?
"unsolicited broadcast probe response" : "FILS discovery", interval); len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_fils_discovery_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->interval = interval; cmd->config = unsol_bcast_probe_resp_enabled; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); if (ret) { ath11k_warn(ar->ab, "WMI vdev %i failed to send FILS discovery enable/disable command\n", vdev_id); dev_kfree_skb(skb); return ret; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils"); return 0; } static void ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_obss_color_collision_event *ev; struct ath11k_vif *arvif; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision"); rcu_read_lock(); ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; if (!ev) { ath11k_warn(ab, "failed to fetch obss color collision ev"); goto exit; } arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); if (!arvif) { ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n", ev->vdev_id); goto exit; } switch (ev->evt_type) { case WMI_BSS_COLOR_COLLISION_DETECTION: ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, 0); ath11k_dbg(ab, ATH11K_DBG_WMI, "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n", ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); break; case WMI_BSS_COLOR_COLLISION_DISABLE: case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: break; default: ath11k_warn(ab, "received unknown obss color collision detection event\n"); } exit: kfree(tb); rcu_read_unlock(); } static void ath11k_fill_band_to_mac_param(struct ath11k_base *soc, struct wmi_host_pdev_band_to_mac *band_to_mac) { u8 i; struct ath11k_hal_reg_capabilities_ext *hal_reg_cap; struct ath11k_pdev *pdev; for (i = 0; i < soc->num_radios; i++) { pdev = &soc->pdevs[i]; hal_reg_cap = &soc->hal_reg_cap[i]; band_to_mac[i].pdev_id = pdev->pdev_id; switch (pdev->cap.supported_bands) { case WMI_HOST_WLAN_2G_5G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; break; case WMI_HOST_WLAN_2G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan; break; case WMI_HOST_WLAN_5G_CAP: band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan; band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; break; default: break; } } } static void ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, struct target_resource_config *tg_cfg) { wmi_cfg->num_vdevs = tg_cfg->num_vdevs; wmi_cfg->num_peers = tg_cfg->num_peers; wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers; wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs; wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys; wmi_cfg->num_tids = tg_cfg->num_tids; wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit; wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask; wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask; wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0]; wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1]; wmi_cfg->rx_timeout_pri[2] = 
tg_cfg->rx_timeout_pri[2]; wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3]; wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode; wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req; wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev; wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev; wmi_cfg->roam_offload_max_ap_profiles = tg_cfg->roam_offload_max_ap_profiles; wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups; wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems; wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode; wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size; wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries; wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size; wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim; wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = tg_cfg->rx_skip_defrag_timeout_dup_detection_check; wmi_cfg->vow_config = tg_cfg->vow_config; wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev; wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc; wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries; wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs; wmi_cfg->num_tdls_conn_table_entries = tg_cfg->num_tdls_conn_table_entries; wmi_cfg->beacon_tx_offload_max_vdev = tg_cfg->beacon_tx_offload_max_vdev; wmi_cfg->num_multicast_filter_entries = tg_cfg->num_multicast_filter_entries; wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters; wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern; wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size; wmi_cfg->max_tdls_concurrent_sleep_sta = tg_cfg->max_tdls_concurrent_sleep_sta; wmi_cfg->max_tdls_concurrent_buffer_sta = tg_cfg->max_tdls_concurrent_buffer_sta; wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate; wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs; wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels; wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules; wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size; wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters; wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id; wmi_cfg->flag1 = tg_cfg->flag1; wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support; wmi_cfg->sched_params = tg_cfg->sched_params; wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count; wmi_cfg->host_service_flags &= ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT); wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET; wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt; wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period; } static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, struct wmi_init_cmd_param *param) { struct ath11k_base *ab = wmi->wmi_ab->ab; struct sk_buff *skb; struct wmi_init_cmd *cmd; struct wmi_resource_config *cfg; struct wmi_pdev_set_hw_mode_cmd_param *hw_mode; struct wmi_pdev_band_to_mac *band_to_mac; struct wlan_host_mem_chunk *host_mem_chunks; struct wmi_tlv *tlv; size_t ret, len; void *ptr; u32 hw_mode_len = 0; u16 idx; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE + (param->num_band_to_mac * sizeof(*band_to_mac)); len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len + (param->num_mem_chunks ? 
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_init_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ptr = skb->data + sizeof(*cmd); cfg = ptr; ath11k_wmi_copy_resource_config(cfg, param->res_cfg); cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE); ptr += sizeof(*cfg); host_mem_chunks = ptr + TLV_HDR_SIZE; len = sizeof(struct wlan_host_mem_chunk); for (idx = 0; idx < param->num_mem_chunks; ++idx) { host_mem_chunks[idx].tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WLAN_HOST_MEMORY_CHUNK) | FIELD_PREP(WMI_TLV_LEN, len); host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; host_mem_chunks[idx].size = param->mem_chunks[idx].len; host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; ath11k_dbg(ab, ATH11K_DBG_WMI, "host mem chunk req_id %d paddr 0x%llx len %d\n", param->mem_chunks[idx].req_id, (u64)param->mem_chunks[idx].paddr, param->mem_chunks[idx].len); } cmd->num_host_mem_chunks = param->num_mem_chunks; len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks; /* the array TLV header is written even when num_mem_chunks is zero */ tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE + len; if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) { hw_mode = ptr; hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*hw_mode) - TLV_HDR_SIZE); hw_mode->hw_mode_index = param->hw_mode_id; hw_mode->num_band_to_mac = param->num_band_to_mac; ptr += sizeof(*hw_mode); len = param->num_band_to_mac * sizeof(*band_to_mac); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; len = sizeof(*band_to_mac); for (idx = 0; idx < param->num_band_to_mac; idx++) { band_to_mac = ptr; band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_BAND_TO_MAC) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id; band_to_mac->start_freq = param->band_to_mac[idx].start_freq; band_to_mac->end_freq = param->band_to_mac[idx].end_freq; ptr += sizeof(*band_to_mac); } } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init"); return 0; } int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id) { struct ath11k_wmi_pdev_lro_config_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE); get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE); cmd->pdev_id = pdev_id; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send lro cfg req wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd lro config pdev_id 0x%x\n", pdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab) { unsigned long time_left; time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ); if (!time_left) return -ETIMEDOUT; return 0; } int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab) { unsigned long time_left; time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready, WMI_SERVICE_READY_TIMEOUT_HZ); if (!time_left) return -ETIMEDOUT; return 0; } int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, enum wmi_host_hw_mode_config_type mode) { struct wmi_pdev_set_hw_mode_cmd_param *cmd; struct sk_buff *skb; struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; int len; int ret; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = WMI_PDEV_ID_SOC; cmd->hw_mode_index = mode; ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID); if (ret) { ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); dev_kfree_skb(skb); return ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index); return 0; } int ath11k_wmi_cmd_init(struct ath11k_base *ab) { struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; struct wmi_init_cmd_param init_param; struct target_resource_config config; memset(&init_param, 0, sizeof(init_param)); memset(&config, 0, sizeof(config)); ab->hw_params.hw_ops->wmi_init_config(ab, &config); if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, ab->wmi_ab.svc_map)) config.is_reg_cc_ext_event_supported = 1; memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config)); init_param.res_cfg = &wmi_ab->wlan_resource_config; init_param.num_mem_chunks = wmi_ab->num_mem_chunks; init_param.hw_mode_id = wmi_ab->preferred_hw_mode; init_param.mem_chunks = wmi_ab->mem_chunks; if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; init_param.num_band_to_mac = ab->num_radios; ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param); } int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, struct ath11k_wmi_vdev_spectral_conf_param *param) { struct ath11k_wmi_vdev_spectral_conf_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); memcpy(&cmd->param, param, sizeof(*param)); ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send spectral scan config wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev spectral scan configure vdev_id 0x%x\n", param->vdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id, u32 trigger, u32 enable) { struct ath11k_wmi_vdev_spectral_enable_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->trigger_cmd = trigger; cmd->enable_cmd = enable; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); if (ret) { ath11k_warn(ar->ab, 
"failed to send spectral enable wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev spectral scan enable vdev id 0x%x\n", vdev_id); return 0; err: dev_kfree_skb(skb); return ret; } int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param) { struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd; struct sk_buff *skb; int ret; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = param->pdev_id; cmd->module_id = param->module_id; cmd->base_paddr_lo = param->base_paddr_lo; cmd->base_paddr_hi = param->base_paddr_hi; cmd->head_idx_paddr_lo = param->head_idx_paddr_lo; cmd->head_idx_paddr_hi = param->head_idx_paddr_hi; cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo; cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi; cmd->num_elems = param->num_elems; cmd->buf_size = param->buf_size; cmd->num_resp_per_event = param->num_resp_per_event; cmd->event_timeout_ms = param->event_timeout_ms; ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PDEV_DMA_RING_CFG_REQ_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send dma ring cfg req wmi cmd\n"); goto err; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev dma ring cfg req pdev_id 0x%x\n", param->pdev_id); return 0; err: dev_kfree_skb(skb); return ret; } static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY) return -EPROTO; if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry) return -ENOBUFS; parse->num_buf_entry++; return 0; } static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA) return -EPROTO; if (parse->num_meta >= parse->fixed.num_meta_data_entry) return -ENOBUFS; parse->num_meta++; return 0; } static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_buf_release_parse *parse = data; int ret; switch (tag) { case WMI_TAG_DMA_BUF_RELEASE: memcpy(&parse->fixed, ptr, sizeof(struct ath11k_wmi_dma_buf_release_fixed_param)); parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id); break; case WMI_TAG_ARRAY_STRUCT: if (!parse->buf_entry_done) { parse->num_buf_entry = 0; parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_buf_entry_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n", ret); return ret; } parse->buf_entry_done = true; } else if (!parse->meta_data_done) { parse->num_meta = 0; parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_buf_meta_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n", ret); return ret; } parse->meta_data_done = true; } break; default: break; } return 0; } static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_dma_buf_release_parse parse = { }; struct ath11k_dbring_buf_release_event param; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_dma_buf_parse, 
&parse); if (ret) { ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release"); param.fixed = parse.fixed; param.buf_entry = parse.buf_entry; param.num_buf_entry = parse.num_buf_entry; param.meta_data = parse.meta_data; param.num_meta = parse.num_meta; ret = ath11k_dbring_buffer_release_event(ab, &param); if (ret) { ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret); return; } } static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct wmi_hw_mode_capabilities *hw_mode_cap; u32 phy_map = 0; if (tag != WMI_TAG_HW_MODE_CAPABILITIES) return -EPROTO; if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes) return -ENOBUFS; hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities, hw_mode_id); svc_rdy_ext->n_hw_mode_caps++; phy_map = hw_mode_cap->phy_id_map; while (phy_map) { svc_rdy_ext->tot_phy_id++; phy_map = phy_map >> 1; } return 0; } static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct wmi_hw_mode_capabilities *hw_mode_caps; enum wmi_host_hw_mode_config_type mode, pref; u32 i; int ret; svc_rdy_ext->n_hw_mode_caps = 0; svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr; ret = ath11k_wmi_tlv_iter(soc, ptr, len, ath11k_wmi_tlv_hw_mode_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(soc, "failed to parse tlv %d\n", ret); return ret; } i = 0; while (i < svc_rdy_ext->n_hw_mode_caps) { hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i]; mode = hw_mode_caps->hw_mode_id; pref = soc->wmi_ab.preferred_hw_mode; if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) { svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps; soc->wmi_ab.preferred_hw_mode = mode; } i++; } ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n", soc->wmi_ab.preferred_hw_mode); if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX) return -EINVAL; return 0; } static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; if (tag != WMI_TAG_MAC_PHY_CAPABILITIES) return -EPROTO; if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id) return -ENOBUFS; len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities)); if (!svc_rdy_ext->n_mac_phy_caps) { svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id, len, GFP_ATOMIC); if (!svc_rdy_ext->mac_phy_caps) return -ENOMEM; } memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len); svc_rdy_ext->n_mac_phy_caps++; return 0; } static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT) return -EPROTO; if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy) return -ENOBUFS; svc_rdy_ext->n_ext_hal_reg_caps++; return 0; } static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; struct ath11k_hal_reg_capabilities_ext reg_cap; int ret; u32 i; svc_rdy_ext->n_ext_hal_reg_caps = 0; svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr; ret =
ath11k_wmi_tlv_iter(soc, ptr, len, ath11k_wmi_tlv_ext_hal_reg_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(soc, "failed to parse tlv %d\n", ret); return ret; } for (i = 0; i < svc_rdy_ext->param.num_phy; i++) { ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle, svc_rdy_ext->soc_hal_reg_caps, svc_rdy_ext->ext_hal_reg_caps, i, &reg_cap); if (ret) { ath11k_warn(soc, "failed to extract reg cap %d\n", i); return ret; } memcpy(&soc->hal_reg_cap[reg_cap.phy_id], &reg_cap, sizeof(reg_cap)); } return 0; } static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id; u32 phy_id_map; int pdev_index = 0; int ret; svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr; svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy; soc->num_radios = 0; soc->target_pdev_count = 0; phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map; while (phy_id_map && soc->num_radios < MAX_RADIOS) { ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle, svc_rdy_ext->hw_caps, svc_rdy_ext->hw_mode_caps, svc_rdy_ext->soc_hal_reg_caps, svc_rdy_ext->mac_phy_caps, hw_mode_id, soc->num_radios, &soc->pdevs[pdev_index]); if (ret) { ath11k_warn(soc, "failed to extract mac caps, idx: %d\n", soc->num_radios); return ret; } soc->num_radios++; /* For QCA6390, save mac_phy capability in the same pdev */ if (soc->hw_params.single_pdev_only) pdev_index = 0; else pdev_index = soc->num_radios; /* TODO: mac_phy_cap prints */ phy_id_map >>= 1; } /* For QCA6390, set num_radios to 1 because the host manages * both the 2 GHz and 5 GHz radios in one pdev. * Set pdev_id to 0, as pdev_id 0 means SoC level.
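 * Note: the reg_info_store array allocated just below is sized from * this final num_radios value, so this single-pdev adjustment must * happen first.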
*/ if (soc->hw_params.single_pdev_only) { soc->num_radios = 1; soc->pdevs[0].pdev_id = 0; } if (!soc->reg_info_store) { soc->reg_info_store = kcalloc(soc->num_radios, sizeof(*soc->reg_info_store), GFP_ATOMIC); if (!soc->reg_info_store) return -ENOMEM; } return 0; } static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_ring_caps_parse *parse = data; if (tag != WMI_TAG_DMA_RING_CAPABILITIES) return -EPROTO; parse->n_dma_ring_caps++; return 0; } static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab, u32 num_cap) { size_t sz; void *ptr; sz = num_cap * sizeof(struct ath11k_dbring_cap); ptr = kzalloc(sz, GFP_ATOMIC); if (!ptr) return -ENOMEM; ab->db_caps = ptr; ab->num_db_cap = num_cap; return 0; } static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab) { kfree(ab->db_caps); ab->db_caps = NULL; ab->num_db_cap = 0; } static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab, u16 len, const void *ptr, void *data) { struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data; struct wmi_dma_ring_capabilities *dma_caps; struct ath11k_dbring_cap *dir_buff_caps; int ret; u32 i; dma_caps_parse->n_dma_ring_caps = 0; dma_caps = (struct wmi_dma_ring_capabilities *)ptr; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_dma_ring_caps_parse, dma_caps_parse); if (ret) { ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret); return ret; } if (!dma_caps_parse->n_dma_ring_caps) return 0; if (ab->num_db_cap) { ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n"); return 0; } ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); if (ret) return ret; dir_buff_caps = ab->db_caps; for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) { ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id); ret = -EINVAL; goto free_dir_buff; } dir_buff_caps[i].id = dma_caps[i].module_id; dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id); dir_buff_caps[i].min_elem = dma_caps[i].min_elem; dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz; dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align; } return 0; free_dir_buff: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0]; struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; int ret; switch (tag) { case WMI_TAG_SERVICE_READY_EXT_EVENT: ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr, &svc_rdy_ext->param); if (ret) { ath11k_warn(ab, "unable to extract ext params\n"); return ret; } break; case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS: svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr; svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes; break; case WMI_TAG_SOC_HAL_REG_CAPABILITIES: ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr, svc_rdy_ext); if (ret) return ret; break; case WMI_TAG_ARRAY_STRUCT: if (!svc_rdy_ext->hw_mode_done) { ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr, svc_rdy_ext); if (ret) return ret; svc_rdy_ext->hw_mode_done = true; } else if (!svc_rdy_ext->mac_phy_done) { svc_rdy_ext->n_mac_phy_caps = 0; ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_mac_phy_caps_parse, svc_rdy_ext); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return ret; } svc_rdy_ext->mac_phy_done = true; } else if (!svc_rdy_ext->ext_hal_reg_done) { 
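/* The untagged WMI_TAG_ARRAY_STRUCT TLVs in the service ready ext event arrive in a fixed order (hw mode caps, mac phy caps, ext hal reg caps, chainmask combo, chainmask caps, OEM DMA ring caps, DMA ring caps), so the parser relies on these per-stage "done" flags rather than on the array contents to tell the stages apart. */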
ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); if (ret) return ret; svc_rdy_ext->ext_hal_reg_done = true; } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { svc_rdy_ext->mac_phy_chainmask_combo_done = true; } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { svc_rdy_ext->mac_phy_chainmask_cap_done = true; } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { svc_rdy_ext->oem_dma_ring_cap_done = true; } else if (!svc_rdy_ext->dma_ring_cap_done) { ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, &svc_rdy_ext->dma_caps_parse); if (ret) return ret; svc_rdy_ext->dma_ring_cap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_ext_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_ext_parse, &svc_rdy_ext); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); goto err; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext"); if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) complete(&ab->wmi_ab.service_ready); kfree(svc_rdy_ext.mac_phy_caps); return 0; err: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_svc_rdy_ext2_parse *parse = data; int ret; switch (tag) { case WMI_TAG_ARRAY_STRUCT: if (!parse->dma_ring_cap_done) { ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, &parse->dma_caps_parse); if (ret) return ret; parse->dma_ring_cap_done = true; } break; default: break; } return 0; } static int ath11k_service_ready_ext2_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_svc_rdy_ext2_parse, &svc_rdy_ext2); if (ret) { ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); goto err; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2"); complete(&ab->wmi_ab.service_ready); return 0; err: ath11k_wmi_free_dbring_caps(ab); return ret; } static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_vdev_start_resp_event *vdev_rsp) { const void **tb; const struct wmi_vdev_start_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev start resp ev"); kfree(tb); return -EPROTO; } memset(vdev_rsp, 0, sizeof(*vdev_rsp)); vdev_rsp->vdev_id = ev->vdev_id; vdev_rsp->requestor_id = ev->requestor_id; vdev_rsp->resp_type = ev->resp_type; vdev_rsp->status = ev->status; vdev_rsp->chain_mask = ev->chain_mask; vdev_rsp->smps_mode = ev->smps_mode; vdev_rsp->mac_id = ev->mac_id; vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power; kfree(tb); return 0; } static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band, u32 num_reg_rules, struct cur_reg_rule *reg_rule_ptr) { struct cur_reg_rule *reg_rule = reg_rule_ptr; u32 count; ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n", band, num_reg_rules); for (count = 0; count < num_reg_rules; count++) { ath11k_dbg(ab, ATH11K_DBG_WMI, "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n", count + 1, 
reg_rule->start_freq, reg_rule->end_freq, reg_rule->max_bw, reg_rule->ant_gain, reg_rule->reg_power, reg_rule->flags); reg_rule++; } } static struct cur_reg_rule *create_reg_rules_from_wmi(u32 num_reg_rules, struct wmi_regulatory_rule_struct *wmi_reg_rule) { struct cur_reg_rule *reg_rule_ptr; u32 count; reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC); if (!reg_rule_ptr) return NULL; for (count = 0; count < num_reg_rules; count++) { reg_rule_ptr[count].start_freq = FIELD_GET(REG_RULE_START_FREQ, wmi_reg_rule[count].freq_info); reg_rule_ptr[count].end_freq = FIELD_GET(REG_RULE_END_FREQ, wmi_reg_rule[count].freq_info); reg_rule_ptr[count].max_bw = FIELD_GET(REG_RULE_MAX_BW, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].reg_power = FIELD_GET(REG_RULE_REG_PWR, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].ant_gain = FIELD_GET(REG_RULE_ANT_GAIN, wmi_reg_rule[count].bw_pwr_info); reg_rule_ptr[count].flags = FIELD_GET(REG_RULE_FLAGS, wmi_reg_rule[count].flag_info); } return reg_rule_ptr; } static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, struct sk_buff *skb, struct cur_regulatory_info *reg_info) { const void **tb; const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr; struct wmi_regulatory_rule_struct *wmi_reg_rule; u32 num_2ghz_reg_rules, num_5ghz_reg_rules; int ret; ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n"); tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT]; if (!chan_list_event_hdr) { ath11k_warn(ab, "failed to fetch reg chan list update ev\n"); kfree(tb); return -EPROTO; } reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules; reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules; if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) { ath11k_warn(ab, "No regulatory rules available in the event info\n"); kfree(tb); return -EINVAL; } memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2, REG_ALPHA2_LEN); reg_info->dfs_region = chan_list_event_hdr->dfs_region; reg_info->phybitmap = chan_list_event_hdr->phybitmap; reg_info->num_phy = chan_list_event_hdr->num_phy; reg_info->phy_id = chan_list_event_hdr->phy_id; reg_info->ctry_code = chan_list_event_hdr->country_id; reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code; reg_info->status_code = ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code); ath11k_dbg(ab, ATH11K_DBG_WMI, "status_code %s", ath11k_cc_status_to_str(reg_info->status_code)); reg_info->is_ext_reg_event = false; reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz; reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz; reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz; reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz; num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; ath11k_dbg(ab, ATH11K_DBG_WMI, "cc %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, reg_info->min_bw_5ghz, reg_info->max_bw_5ghz); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", num_2ghz_reg_rules, num_5ghz_reg_rules); wmi_reg_rule = (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr + sizeof(*chan_list_event_hdr) + sizeof(struct wmi_tlv)); if (num_2ghz_reg_rules) {
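/* wmi_reg_rule points at one flat rule array: all 2 GHz entries followed immediately by all 5 GHz entries, so the 5 GHz branch below only has to advance the pointer by num_2ghz_reg_rules. */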
reg_info->reg_rules_2ghz_ptr = create_reg_rules_from_wmi(num_2ghz_reg_rules, wmi_reg_rule); if (!reg_info->reg_rules_2ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "2 GHz", num_2ghz_reg_rules, reg_info->reg_rules_2ghz_ptr); } if (num_5ghz_reg_rules) { wmi_reg_rule += num_2ghz_reg_rules; reg_info->reg_rules_5ghz_ptr = create_reg_rules_from_wmi(num_5ghz_reg_rules, wmi_reg_rule); if (!reg_info->reg_rules_5ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "5 GHz", num_5ghz_reg_rules, reg_info->reg_rules_5ghz_ptr); } ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n"); kfree(tb); return 0; } static struct cur_reg_rule *create_ext_reg_rules_from_wmi(u32 num_reg_rules, struct wmi_regulatory_ext_rule *wmi_reg_rule) { struct cur_reg_rule *reg_rule_ptr; u32 count; reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC); if (!reg_rule_ptr) return NULL; for (count = 0; count < num_reg_rules; count++) { reg_rule_ptr[count].start_freq = u32_get_bits(wmi_reg_rule[count].freq_info, REG_RULE_START_FREQ); reg_rule_ptr[count].end_freq = u32_get_bits(wmi_reg_rule[count].freq_info, REG_RULE_END_FREQ); reg_rule_ptr[count].max_bw = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_MAX_BW); reg_rule_ptr[count].reg_power = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_REG_PWR); reg_rule_ptr[count].ant_gain = u32_get_bits(wmi_reg_rule[count].bw_pwr_info, REG_RULE_ANT_GAIN); reg_rule_ptr[count].flags = u32_get_bits(wmi_reg_rule[count].flag_info, REG_RULE_FLAGS); reg_rule_ptr[count].psd_flag = u32_get_bits(wmi_reg_rule[count].psd_power_info, REG_RULE_PSD_INFO); reg_rule_ptr[count].psd_eirp = u32_get_bits(wmi_reg_rule[count].psd_power_info, REG_RULE_PSD_EIRP); } return reg_rule_ptr; } static u8 ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules, const struct wmi_regulatory_ext_rule *rule) { u8 num_invalid_5ghz_rules = 0; u32 count, start_freq; for (count = 0; count < num_reg_rules; count++) { start_freq = u32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); if (start_freq >= ATH11K_MIN_6G_FREQ) num_invalid_5ghz_rules++; } return num_invalid_5ghz_rules; } static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab, struct sk_buff *skb, struct cur_regulatory_info *reg_info) { const void **tb; const struct wmi_reg_chan_list_cc_ext_event *ev; struct wmi_regulatory_ext_rule *ext_wmi_reg_rule; u32 num_2ghz_reg_rules, num_5ghz_reg_rules; u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; u32 total_reg_rules = 0; int ret, i, j, num_invalid_5ghz_ext_rules = 0; ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n"); tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n"); kfree(tb); return -EPROTO; } reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules; reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules; reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] = ev->num_6ghz_reg_rules_ap_lpi; reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] = ev->num_6ghz_reg_rules_ap_sp; reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->num_6ghz_reg_rules_ap_vlp; for (i = 0; i < 
WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] = ev->num_6ghz_reg_rules_client_lpi[i]; reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] = ev->num_6ghz_reg_rules_client_sp[i]; reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->num_6ghz_reg_rules_client_vlp[i]; } num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules; num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules; total_reg_rules += num_2ghz_reg_rules; total_reg_rules += num_5ghz_reg_rules; if ((num_2ghz_reg_rules > MAX_REG_RULES) || (num_5ghz_reg_rules > MAX_REG_RULES)) { ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n", num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES); kfree(tb); return -EINVAL; } for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i]; if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n", i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES); kfree(tb); return -EINVAL; } total_reg_rules += num_6ghz_reg_rules_ap[i]; } for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { num_6ghz_client[WMI_REG_INDOOR_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i]; num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i]; num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i]; total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]; if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) || (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] > MAX_6GHZ_REG_RULES) || (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] > MAX_6GHZ_REG_RULES)) { ath11k_warn(ab, "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n", i); kfree(tb); return -EINVAL; } } if (!total_reg_rules) { ath11k_warn(ab, "No reg rules available\n"); kfree(tb); return -EINVAL; } memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); reg_info->dfs_region = ev->dfs_region; reg_info->phybitmap = ev->phybitmap; reg_info->num_phy = ev->num_phy; reg_info->phy_id = ev->phy_id; reg_info->ctry_code = ev->country_id; reg_info->reg_dmn_pair = ev->domain_code; reg_info->status_code = ath11k_wmi_cc_setting_code_to_reg(ev->status_code); ath11k_dbg(ab, ATH11K_DBG_WMI, "status_code %s", ath11k_cc_status_to_str(reg_info->status_code)); reg_info->is_ext_reg_event = true; reg_info->min_bw_2ghz = ev->min_bw_2ghz; reg_info->max_bw_2ghz = ev->max_bw_2ghz; reg_info->min_bw_5ghz = ev->min_bw_5ghz; reg_info->max_bw_5ghz = ev->max_bw_5ghz; reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] = ev->min_bw_6ghz_ap_lpi; reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] = ev->max_bw_6ghz_ap_lpi; reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->min_bw_6ghz_ap_sp; reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->max_bw_6ghz_ap_sp; reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->min_bw_6ghz_ap_vlp; reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->max_bw_6ghz_ap_vlp; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP], reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP], reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP], reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP], reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->min_bw_6ghz_client_lpi[i]; reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->max_bw_6ghz_client_lpi[i]; reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->min_bw_6ghz_client_sp[i]; reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->max_bw_6ghz_client_sp[i]; reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->min_bw_6ghz_client_vlp[i]; reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->max_bw_6ghz_client_vlp[i]; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n", ath11k_6ghz_client_type_to_str(i), reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i], reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i], reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i], reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]); } ath11k_dbg(ab, ATH11K_DBG_WMI, "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x", reg_info->alpha2, reg_info->dfs_region, reg_info->min_bw_2ghz, reg_info->max_bw_2ghz, reg_info->min_bw_5ghz, reg_info->max_bw_5ghz, reg_info->phybitmap); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_2ghz_reg_rules %d num_5ghz_reg_rules %d", num_2ghz_reg_rules, num_5ghz_reg_rules); ath11k_dbg(ab, ATH11K_DBG_WMI, "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d", num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP], num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP], num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]); j = WMI_REG_DEFAULT_CLIENT; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", num_6ghz_client[WMI_REG_INDOOR_AP][j], num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); j = WMI_REG_SUBORDINATE_CLIENT; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d", num_6ghz_client[WMI_REG_INDOOR_AP][j], num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j], num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]); ext_wmi_reg_rule = (struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) + sizeof(struct wmi_tlv)); if (num_2ghz_reg_rules) { reg_info->reg_rules_2ghz_ptr = create_ext_reg_rules_from_wmi(num_2ghz_reg_rules, ext_wmi_reg_rule); if (!reg_info->reg_rules_2ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "2 GHz", num_2ghz_reg_rules, reg_info->reg_rules_2ghz_ptr); } ext_wmi_reg_rule += num_2ghz_reg_rules; /* Firmware might include 6 GHz reg rule in 5 GHz rule list * for few countries along with separate 6 GHz rule. * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list * causes intersect check to be true, and same rules will be * shown multiple times in iw cmd. 
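 * A 5 GHz entry whose start frequency is at or above * ATH11K_MIN_6G_FREQ is treated as such a stray 6 GHz rule (see * ath11k_invalid_5ghz_reg_ext_rules_from_wmi() above) and is dropped * from the 5 GHz count.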
* Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list */ num_invalid_5ghz_ext_rules = ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules, ext_wmi_reg_rule); if (num_invalid_5ghz_ext_rules) { ath11k_dbg(ab, ATH11K_DBG_WMI, "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules", reg_info->alpha2, reg_info->num_5ghz_reg_rules, num_invalid_5ghz_ext_rules); num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules; reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules; } if (num_5ghz_reg_rules) { reg_info->reg_rules_5ghz_ptr = create_ext_reg_rules_from_wmi(num_5ghz_reg_rules, ext_wmi_reg_rule); if (!reg_info->reg_rules_5ghz_ptr) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, "5 GHz", num_5ghz_reg_rules, reg_info->reg_rules_5ghz_ptr); } /* We have adjusted the number of 5 GHz reg rules above. But still those * many rules needs to be adjusted in ext_wmi_reg_rule. * * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases. */ ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules); for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { reg_info->reg_rules_6ghz_ap_ptr[i] = create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i], ext_wmi_reg_rule); if (!reg_info->reg_rules_6ghz_ap_ptr[i]) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i), num_6ghz_reg_rules_ap[i], reg_info->reg_rules_6ghz_ap_ptr[i]); ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i]; } for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j)); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->reg_rules_6ghz_client_ptr[j][i] = create_ext_reg_rules_from_wmi(num_6ghz_client[j][i], ext_wmi_reg_rule); if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) { kfree(tb); ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n"); return -ENOMEM; } ath11k_print_reg_rule(ab, ath11k_6ghz_client_type_to_str(i), num_6ghz_client[j][i], reg_info->reg_rules_6ghz_client_ptr[j][i]); ext_wmi_reg_rule += num_6ghz_client[j][i]; } } reg_info->client_type = ev->client_type; reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] = ev->domain_code_6ghz_ap_lpi; reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] = ev->domain_code_6ghz_ap_sp; reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] = ev->domain_code_6ghz_ap_vlp; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n", ath11k_6ghz_client_type_to_str(reg_info->client_type), reg_info->rnr_tpe_usable, reg_info->unspecified_ap_usable, ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp)); for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] = ev->domain_code_6ghz_client_lpi[i]; reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] = ev->domain_code_6ghz_client_sp[i]; reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] = ev->domain_code_6ghz_client_vlp[i]; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n", ath11k_6ghz_client_type_to_str(i), 
ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]), ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i]) ); } reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id; ath11k_dbg(ab, ATH11K_DBG_WMI, "6 GHz client_type %s 6 GHz super domain %s", ath11k_6ghz_client_type_to_str(reg_info->client_type), ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id)); ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n"); kfree(tb); return 0; } static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_delete_resp_event *peer_del_resp) { const void **tb; const struct wmi_peer_delete_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer delete resp ev"); kfree(tb); return -EPROTO; } memset(peer_del_resp, 0, sizeof(*peer_del_resp)); peer_del_resp->vdev_id = ev->vdev_id; ether_addr_copy(peer_del_resp->peer_macaddr.addr, ev->peer_macaddr.addr); kfree(tb); return 0; } static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id) { const void **tb; const struct wmi_vdev_delete_resp_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev delete resp ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id, u32 *tx_status) { const void **tb; const struct wmi_bcn_tx_status_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch bcn tx status ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; *tx_status = ev->tx_status; kfree(tb); return 0; } static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb, u32 *vdev_id) { const void **tb; const struct wmi_vdev_stopped_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev stop ev"); kfree(tb); return -EPROTO; } *vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_mgmt_rx_parse *parse = data; switch (tag) { case WMI_TAG_MGMT_RX_HDR: parse->fixed = ptr; break; case WMI_TAG_ARRAY_BYTE: if (!parse->frame_buf_done) { parse->frame_buf = ptr; parse->frame_buf_done = true; } break; } return 0; } static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct mgmt_rx_event_params *hdr) { struct wmi_tlv_mgmt_rx_parse parse = { }; const struct wmi_mgmt_rx_hdr *ev; const u8 *frame; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_mgmt_rx_parse, &parse); if (ret) { ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n", 
ret); return ret; } ev = parse.fixed; frame = parse.frame_buf; if (!ev || !frame) { ath11k_warn(ab, "failed to fetch mgmt rx hdr"); return -EPROTO; } hdr->pdev_id = ev->pdev_id; hdr->chan_freq = ev->chan_freq; hdr->channel = ev->channel; hdr->snr = ev->snr; hdr->rate = ev->rate; hdr->phy_mode = ev->phy_mode; hdr->buf_len = ev->buf_len; hdr->status = ev->status; hdr->flags = ev->flags; hdr->rssi = ev->rssi; hdr->tsf_delta = ev->tsf_delta; memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl)); if (skb->len < (frame - skb->data) + hdr->buf_len) { ath11k_warn(ab, "invalid length in mgmt rx hdr ev"); return -EPROTO; } /* shift the sk_buff to point to `frame` */ skb_trim(skb, 0); skb_put(skb, frame - skb->data); skb_pull(skb, frame - skb->data); skb_put(skb, hdr->buf_len); ath11k_ce_byte_swap(skb->data, hdr->buf_len); return 0; } static int wmi_process_mgmt_tx_comp(struct ath11k *ar, struct wmi_mgmt_tx_compl_event *tx_compl_param) { struct sk_buff *msdu; struct ieee80211_tx_info *info; struct ath11k_skb_cb *skb_cb; int num_mgmt; spin_lock_bh(&ar->txmgmt_idr_lock); msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id); if (!msdu) { ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); return -ENOENT; } idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id); spin_unlock_bh(&ar->txmgmt_idr_lock); skb_cb = ATH11K_SKB_CB(msdu); dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !tx_compl_param->status) { info->flags |= IEEE80211_TX_STAT_ACK; if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, ar->ab->wmi_ab.svc_map)) info->status.ack_signal = tx_compl_param->ack_rssi; } ieee80211_tx_status_irqsafe(ar->hw, msdu); num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); /* WARN when we received this event without doing any mgmt tx */ if (num_mgmt < 0) WARN_ON_ONCE(1); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "mgmt tx comp pending %d desc id %d\n", num_mgmt, tx_compl_param->desc_id); if (!num_mgmt) wake_up(&ar->txmgmt_empty_waitq); return 0; } static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_mgmt_tx_compl_event *param) { const void **tb; const struct wmi_mgmt_tx_compl_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch mgmt tx compl ev"); kfree(tb); return -EPROTO; } param->pdev_id = ev->pdev_id; param->desc_id = ev->desc_id; param->status = ev->status; param->ack_rssi = ev->ack_rssi; kfree(tb); return 0; } static void ath11k_wmi_event_scan_started(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_STARTING: ar->scan.state = ATH11K_SCAN_RUNNING; if (ar->scan.is_roc) ieee80211_ready_on_channel(ar->hw); complete(&ar->scan.started); break; } } static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ath11k_warn(ar->ab, "received scan start failed 
event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_STARTING: complete(&ar->scan.started); __ath11k_mac_scan_finish(ar); break; } } static void ath11k_wmi_event_scan_completed(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: /* One suspected reason scan can be completed while starting is * if firmware fails to deliver all scan events to the host, * e.g. when transport pipe is full. This has been observed * with spectral scan phyerr events starving wmi transport * pipe. In such case the "scan completed" event should be (and * is) ignored by the host as it may be just firmware's scan * state machine recovering. */ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: __ath11k_mac_scan_finish(ar); break; } } static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = NULL; break; } } static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq) { lockdep_assert_held(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", ath11k_scan_state_str(ar->scan.state), ar->scan.state); break; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); if (ar->scan.is_roc && ar->scan.roc_freq == freq) complete(&ar->scan.on_channel); break; } } static const char * ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type, enum wmi_scan_completion_reason reason) { switch (type) { case WMI_SCAN_EVENT_STARTED: return "started"; case WMI_SCAN_EVENT_COMPLETED: switch (reason) { case WMI_SCAN_REASON_COMPLETED: return "completed"; case WMI_SCAN_REASON_CANCELLED: return "completed [cancelled]"; case WMI_SCAN_REASON_PREEMPTED: return "completed [preempted]"; case WMI_SCAN_REASON_TIMEDOUT: return "completed [timedout]"; case WMI_SCAN_REASON_INTERNAL_FAILURE: return "completed [internal err]"; case WMI_SCAN_REASON_MAX: break; } return "completed [unknown]"; case WMI_SCAN_EVENT_BSS_CHANNEL: return "bss channel"; case WMI_SCAN_EVENT_FOREIGN_CHAN: return "foreign channel"; case WMI_SCAN_EVENT_DEQUEUED: return "dequeued"; case WMI_SCAN_EVENT_PREEMPTED: return "preempted"; case WMI_SCAN_EVENT_START_FAILED: return "start failed"; case WMI_SCAN_EVENT_RESTARTED: return "restarted"; case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: return "foreign channel exit"; default: return "unknown"; } } static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_scan_event *scan_evt_param) { const void **tb; const struct wmi_scan_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_SCAN_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch scan ev"); kfree(tb); return -EPROTO; } scan_evt_param->event_type = ev->event_type; scan_evt_param->reason = 
ev->reason; scan_evt_param->channel_freq = ev->channel_freq; scan_evt_param->scan_req_id = ev->scan_req_id; scan_evt_param->scan_id = ev->scan_id; scan_evt_param->vdev_id = ev->vdev_id; scan_evt_param->tsf_timestamp = ev->tsf_timestamp; kfree(tb); return 0; } static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_sta_kickout_arg *arg) { const void **tb; const struct wmi_peer_sta_kickout_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer sta kickout ev"); kfree(tb); return -EPROTO; } arg->mac_addr = ev->peer_macaddr.addr; kfree(tb); return 0; } static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_roam_event *roam_ev) { const void **tb; const struct wmi_roam_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_ROAM_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch roam ev"); kfree(tb); return -EPROTO; } roam_ev->vdev_id = ev->vdev_id; roam_ev->reason = ev->reason; roam_ev->rssi = ev->rssi; kfree(tb); return 0; } static int freq_to_idx(struct ath11k *ar, int freq) { struct ieee80211_supported_band *sband; int band, ch, idx = 0; for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { sband = ar->hw->wiphy->bands[band]; if (!sband) continue; for (ch = 0; ch < sband->n_channels; ch++, idx++) if (sband->channels[ch].center_freq == freq) goto exit; } exit: return idx; } static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_chan_info_event *ch_info_ev) { const void **tb; const struct wmi_chan_info_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_CHAN_INFO_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch chan info ev"); kfree(tb); return -EPROTO; } ch_info_ev->err_code = ev->err_code; ch_info_ev->freq = ev->freq; ch_info_ev->cmd_flags = ev->cmd_flags; ch_info_ev->noise_floor = ev->noise_floor; ch_info_ev->rx_clear_count = ev->rx_clear_count; ch_info_ev->cycle_count = ev->cycle_count; ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; ch_info_ev->rx_frame_count = ev->rx_frame_count; ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; ch_info_ev->vdev_id = ev->vdev_id; kfree(tb); return 0; } static int ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) { const void **tb; const struct wmi_pdev_bss_chan_info_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev bss chan info ev"); kfree(tb); return -EPROTO; } bss_ch_info_ev->pdev_id = ev->pdev_id; bss_ch_info_ev->freq = ev->freq; bss_ch_info_ev->noise_floor = ev->noise_floor; bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; 
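/* Each of the remaining counters is a 64-bit value that firmware reports as separate low/high 32-bit words. */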
bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; kfree(tb); return 0; } static int ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_vdev_install_key_complete_arg *arg) { const void **tb; const struct wmi_vdev_install_key_compl_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch vdev install key compl ev"); kfree(tb); return -EPROTO; } arg->vdev_id = ev->vdev_id; arg->macaddr = ev->peer_macaddr.addr; arg->key_idx = ev->key_idx; arg->key_flags = ev->key_flags; arg->status = ev->status; kfree(tb); return 0; } static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb, struct wmi_peer_assoc_conf_arg *peer_assoc_conf) { const void **tb; const struct wmi_peer_assoc_conf_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch peer assoc conf ev"); kfree(tb); return -EPROTO; } peer_assoc_conf->vdev_id = ev->vdev_id; peer_assoc_conf->macaddr = ev->peer_macaddr.addr; kfree(tb); return 0; } static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src, struct ath11k_fw_stats_pdev *dst) { dst->ch_noise_floor = src->chan_nf; dst->tx_frame_count = src->tx_frame_count; dst->rx_frame_count = src->rx_frame_count; dst->rx_clear_count = src->rx_clear_count; dst->cycle_count = src->cycle_count; dst->phy_err_count = src->phy_err_count; dst->chan_tx_power = src->chan_tx_pwr; } static void ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src, struct ath11k_fw_stats_pdev *dst) { dst->comp_queued = src->comp_queued; dst->comp_delivered = src->comp_delivered; dst->msdu_enqued = src->msdu_enqued; dst->mpdu_enqued = src->mpdu_enqued; dst->wmm_drop = src->wmm_drop; dst->local_enqued = src->local_enqued; dst->local_freed = src->local_freed; dst->hw_queued = src->hw_queued; dst->hw_reaped = src->hw_reaped; dst->underrun = src->underrun; dst->hw_paused = src->hw_paused; dst->tx_abort = src->tx_abort; dst->mpdus_requeued = src->mpdus_requeued; dst->tx_ko = src->tx_ko; dst->tx_xretry = src->tx_xretry; dst->data_rc = src->data_rc; dst->self_triggers = src->self_triggers; dst->sw_retry_failure = src->sw_retry_failure; dst->illgl_rate_phy_err = src->illgl_rate_phy_err; dst->pdev_cont_xretry = src->pdev_cont_xretry; dst->pdev_tx_timeout = src->pdev_tx_timeout; dst->pdev_resets = src->pdev_resets; dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure; dst->phy_underrun = src->phy_underrun; dst->txop_ovf = src->txop_ovf; dst->seq_posted = src->seq_posted; dst->seq_failed_queueing = src->seq_failed_queueing; dst->seq_completed = src->seq_completed; dst->seq_restarted = src->seq_restarted; dst->mu_seq_posted = src->mu_seq_posted; dst->mpdus_sw_flush = src->mpdus_sw_flush; dst->mpdus_hw_filter = 
src->mpdus_hw_filter; dst->mpdus_truncated = src->mpdus_truncated; dst->mpdus_ack_failed = src->mpdus_ack_failed; dst->mpdus_expired = src->mpdus_expired; } static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src, struct ath11k_fw_stats_pdev *dst) { dst->mid_ppdu_route_change = src->mid_ppdu_route_change; dst->status_rcvd = src->status_rcvd; dst->r0_frags = src->r0_frags; dst->r1_frags = src->r1_frags; dst->r2_frags = src->r2_frags; dst->r3_frags = src->r3_frags; dst->htt_msdus = src->htt_msdus; dst->htt_mpdus = src->htt_mpdus; dst->loc_msdus = src->loc_msdus; dst->loc_mpdus = src->loc_mpdus; dst->oversize_amsdu = src->oversize_amsdu; dst->phy_errs = src->phy_errs; dst->phy_err_drop = src->phy_err_drop; dst->mpdu_errs = src->mpdu_errs; dst->rx_ovfl_errs = src->rx_ovfl_errs; } static void ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src, struct ath11k_fw_stats_vdev *dst) { int i; dst->vdev_id = src->vdev_id; dst->beacon_snr = src->beacon_snr; dst->data_snr = src->data_snr; dst->num_rx_frames = src->num_rx_frames; dst->num_rts_fail = src->num_rts_fail; dst->num_rts_success = src->num_rts_success; dst->num_rx_err = src->num_rx_err; dst->num_rx_discard = src->num_rx_discard; dst->num_tx_not_acked = src->num_tx_not_acked; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) dst->num_tx_frames[i] = src->num_tx_frames[i]; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i]; for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i]; for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) dst->tx_rate_history[i] = src->tx_rate_history[i]; for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) dst->beacon_rssi_history[i] = src->beacon_rssi_history[i]; } static void ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src, struct ath11k_fw_stats_bcn *dst) { dst->vdev_id = src->vdev_id; dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt; dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt; } static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_fw_stats_parse *parse = data; const struct wmi_stats_event *ev = parse->ev; struct ath11k_fw_stats *stats = parse->stats; struct ath11k *ar; struct ath11k_vif *arvif; struct ieee80211_sta *sta; struct ath11k_sta *arsta; const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr; int j, ret = 0; if (tag != WMI_TAG_RSSI_STATS) return -EPROTO; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d mac %pM\n", stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr); arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id); if (!arvif) { ath11k_warn(ab, "not found vif for vdev id %d\n", stats_rssi->vdev_id); ret = -EPROTO; goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "stats bssid %pM vif %p\n", arvif->bssid, arvif->vif); sta = ieee80211_find_sta_by_ifaddr(ar->hw, arvif->bssid, NULL); if (!sta) { ath11k_dbg(ab, ATH11K_DBG_WMI, "not found station of bssid %pM for rssi chain\n", arvif->bssid); goto exit; } arsta = ath11k_sta_to_arsta(sta); BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) { arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j]; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats beacon rssi[%d] %d data rssi[%d] 
%d\n", j, stats_rssi->rssi_avg_beacon[j], j, stats_rssi->rssi_avg_data[j]); } exit: rcu_read_unlock(); return ret; } static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab, struct wmi_tlv_fw_stats_parse *parse, const void *ptr, u16 len) { struct ath11k_fw_stats *stats = parse->stats; const struct wmi_stats_event *ev = parse->ev; struct ath11k *ar; struct ath11k_vif *arvif; struct ieee80211_sta *sta; struct ath11k_sta *arsta; int i, ret = 0; const void *data = ptr; if (!ev) { ath11k_warn(ab, "failed to fetch update stats ev"); return -EPROTO; } stats->stats_id = 0; rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); for (i = 0; i < ev->num_pdev_stats; i++) { const struct wmi_pdev_stats *src; struct ath11k_fw_stats_pdev *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_PDEV_STAT; data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_pdev_stats_base(&src->base, dst); ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst); ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst); list_add_tail(&dst->list, &stats->pdevs); } for (i = 0; i < ev->num_vdev_stats; i++) { const struct wmi_vdev_stats *src; struct ath11k_fw_stats_vdev *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_VDEV_STAT; arvif = ath11k_mac_get_arvif(ar, src->vdev_id); if (arvif) { sta = ieee80211_find_sta_by_ifaddr(ar->hw, arvif->bssid, NULL); if (sta) { arsta = ath11k_sta_to_arsta(sta); arsta->rssi_beacon = src->beacon_snr; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats vdev id %d snr %d\n", src->vdev_id, src->beacon_snr); } else { ath11k_dbg(ab, ATH11K_DBG_WMI, "not found station of bssid %pM for vdev stat\n", arvif->bssid); } } data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_vdev_stats(src, dst); list_add_tail(&dst->list, &stats->vdevs); } for (i = 0; i < ev->num_bcn_stats; i++) { const struct wmi_bcn_stats *src; struct ath11k_fw_stats_bcn *dst; src = data; if (len < sizeof(*src)) { ret = -EPROTO; goto exit; } stats->stats_id = WMI_REQUEST_BCN_STAT; data += sizeof(*src); len -= sizeof(*src); dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; ath11k_wmi_pull_bcn_stats(src, dst); list_add_tail(&dst->list, &stats->bcn); } exit: rcu_read_unlock(); return ret; } static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_fw_stats_parse *parse = data; int ret = 0; switch (tag) { case WMI_TAG_STATS_EVENT: parse->ev = (struct wmi_stats_event *)ptr; parse->stats->pdev_id = parse->ev->pdev_id; break; case WMI_TAG_ARRAY_BYTE: ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); break; case WMI_TAG_PER_CHAIN_RSSI_STATS: parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr; if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT) parse->rssi_num = parse->rssi->num_per_chain_rssi_stats; ath11k_dbg(ab, ATH11K_DBG_WMI, "stats id 0x%x num chain %d\n", parse->ev->stats_id, parse->rssi_num); break; case WMI_TAG_ARRAY_STRUCT: if (parse->rssi_num && !parse->chain_rssi_done) { ret = ath11k_wmi_tlv_iter(ab, ptr, len, ath11k_wmi_tlv_rssi_chain_parse, parse); if (ret) { ath11k_warn(ab, "failed to parse rssi chain %d\n", ret); return ret; } parse->chain_rssi_done = true; } break; default: break; } return ret; } int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, struct ath11k_fw_stats *stats) { struct 
wmi_tlv_fw_stats_parse parse = { }; stats->stats_id = 0; parse.stats = stats; return ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_fw_stats_parse, &parse); } static void ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath11k PDEV stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Channel noise floor", pdev->ch_noise_floor); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Channel TX power", pdev->chan_tx_power); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "TX frame count", pdev->tx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "RX frame count", pdev->rx_frame_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "RX clear count", pdev->rx_clear_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Cycle count", pdev->cycle_count); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PHY error count", pdev->phy_err_count); *length = len; } static void ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n%30s\n", "ath11k PDEV TX stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "===================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HTT cookies queued", pdev->comp_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HTT cookies disp.", pdev->comp_delivered); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDU queued", pdev->msdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDU queued", pdev->mpdu_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs dropped", pdev->wmm_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Local enqued", pdev->local_enqued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Local freed", pdev->local_freed); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HW queued", pdev->hw_queued); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PPDUs reaped", pdev->hw_reaped); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Num underruns", pdev->underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Num HW Paused", pdev->hw_paused); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PPDUs cleaned", pdev->tx_abort); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs requeued", pdev->mpdus_requeued); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PPDU OK", pdev->tx_ko); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Excessive retries", pdev->tx_xretry); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Sched self triggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Dropped due to SW retries", pdev->sw_retry_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Illegal rate phy errors", pdev->illgl_rate_phy_err); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PDEV continuous xretry", pdev->pdev_cont_xretry); len += scnprintf(buf + len, buf_len - 
len, "%30s %10u\n", "TX timeout", pdev->pdev_tx_timeout); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PDEV resets", pdev->pdev_resets); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Stateless TIDs alloc failures", pdev->stateless_tid_alloc_failure); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "PHY underrun", pdev->phy_underrun); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "MPDU is more than txop limit", pdev->txop_ovf); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences posted", pdev->seq_posted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num seq failed queueing ", pdev->seq_failed_queueing); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences completed ", pdev->seq_completed); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num sequences restarted ", pdev->seq_restarted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MU sequences posted ", pdev->mu_seq_posted); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS truncated ", pdev->mpdus_truncated); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed); len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", "Num of MPDUS expired ", pdev->mpdus_expired); *length = len; } static void ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; len += scnprintf(buf + len, buf_len - len, "\n%30s\n", "ath11k PDEV RX stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "===================="); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Mid PPDU route change", pdev->mid_ppdu_route_change); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Tot. 
number of statuses", pdev->status_rcvd); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 0", pdev->r0_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 1", pdev->r1_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 2", pdev->r2_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Extra frags on rings 3", pdev->r3_frags); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs delivered to HTT", pdev->htt_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs delivered to HTT", pdev->htt_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MSDUs delivered to stack", pdev->loc_msdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDUs delivered to stack", pdev->loc_mpdus); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Oversized AMSUs", pdev->oversize_amsdu); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PHY errors", pdev->phy_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "PHY errors drops", pdev->phy_err_drop); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Overflow errors", pdev->rx_ovfl_errs); *length = len; } static void ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar, const struct ath11k_fw_stats_vdev *vdev, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id); u8 *vif_macaddr; int i; /* VDEV stats has all the active VDEVs of other PDEVs as well, * ignoring those not part of requested PDEV */ if (!arvif) return; vif_macaddr = arvif->vif->addr; len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "VDEV ID", vdev->vdev_id); len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", "VDEV MAC address", vif_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "beacon snr", vdev->beacon_snr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "data snr", vdev->data_snr); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx frames", vdev->num_rx_frames); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rts fail", vdev->num_rts_fail); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rts success", vdev->num_rts_success); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx err", vdev->num_rx_err); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num rx discard", vdev->num_rx_discard); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "num tx not acked", vdev->num_tx_not_acked); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames", i, vdev->num_tx_frames[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames retries", i, vdev->num_tx_frames_retries[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] %u\n", "num tx frames failures", i, vdev->num_tx_frames_failures[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++) len += scnprintf(buf + len, buf_len - len, "%25s [%02d] 0x%08x\n", "tx rate history", i, vdev->tx_rate_history[i]); for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++) len += scnprintf(buf + len, buf_len - len, "%25s 
[%02d] %u\n", "beacon rssi history", i, vdev->beacon_rssi_history[i]); len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } static void ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar, const struct ath11k_fw_stats_bcn *bcn, char *buf, u32 *length) { u32 len = *length; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id); u8 *vdev_macaddr; if (!arvif) { ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats", bcn->vdev_id); return; } vdev_macaddr = arvif->vif->addr; len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "VDEV ID", bcn->vdev_id); len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", "VDEV MAC address", vdev_macaddr); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================"); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Num of beacon tx success", bcn->tx_bcn_succ_cnt); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } void ath11k_wmi_fw_stats_fill(struct ath11k *ar, struct ath11k_fw_stats *fw_stats, u32 stats_id, char *buf) { u32 len = 0; u32 buf_len = ATH11K_FW_STATS_BUF_SIZE; const struct ath11k_fw_stats_pdev *pdev; const struct ath11k_fw_stats_vdev *vdev; const struct ath11k_fw_stats_bcn *bcn; size_t num_bcn; spin_lock_bh(&ar->data_lock); if (stats_id == WMI_REQUEST_PDEV_STAT) { pdev = list_first_entry_or_null(&fw_stats->pdevs, struct ath11k_fw_stats_pdev, list); if (!pdev) { ath11k_warn(ar->ab, "failed to get pdev stats\n"); goto unlock; } ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len); ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len); ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len); } if (stats_id == WMI_REQUEST_VDEV_STAT) { len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s\n", "ath11k VDEV stats"); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "================="); list_for_each_entry(vdev, &fw_stats->vdevs, list) ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len); } if (stats_id == WMI_REQUEST_BCN_STAT) { num_bcn = list_count_nodes(&fw_stats->bcn); len += scnprintf(buf + len, buf_len - len, "\n"); len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", "ath11k Beacon stats", num_bcn); len += scnprintf(buf + len, buf_len - len, "%30s\n\n", "==================="); list_for_each_entry(bcn, &fw_stats->bcn, list) ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len); } unlock: spin_unlock_bh(&ar->data_lock); if (len >= buf_len) buf[len - 1] = 0; else buf[len] = 0; } static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) { /* try to send pending beacons first. 
they take priority */ wake_up(&ab->wmi_ab.tx_credits_wq); } static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) { const struct wmi_11d_new_cc_ev *ev; struct ath11k *ar; struct ath11k_pdev *pdev; const void **tb; int ret, i; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return ret; } ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT]; if (!ev) { kfree(tb); ath11k_warn(ab, "failed to fetch 11d new cc ev"); return -EPROTO; } spin_lock_bh(&ab->base_lock); memcpy(&ab->new_alpha2, &ev->new_alpha2, 2); spin_unlock_bh(&ab->base_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n", ab->new_alpha2[0], ab->new_alpha2[1]); kfree(tb); for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; ar->state_11d = ATH11K_11D_IDLE; complete(&ar->completed_11d_scan); } queue_work(ab->workqueue, &ab->update_11d_work); return 0; } static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_pdev_wmi *wmi = NULL; u32 i; u8 wmi_ep_count; u8 eid; eid = ATH11K_SKB_CB(skb)->eid; dev_kfree_skb(skb); if (eid >= ATH11K_HTC_EP_COUNT) return; wmi_ep_count = ab->htc.wmi_ep_count; if (wmi_ep_count > ab->hw_params.max_radios) return; for (i = 0; i < ab->htc.wmi_ep_count; i++) { if (ab->wmi_ab.wmi[i].eid == eid) { wmi = &ab->wmi_ab.wmi[i]; break; } } if (wmi) wake_up(&wmi->tx_ce_desc_wq); } static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb, enum wmi_reg_chan_list_cmd_type id) { struct cur_regulatory_info *reg_info; int ret; reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); if (!reg_info) return -ENOMEM; if (id == WMI_REG_CHAN_LIST_CC_ID) ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); else ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); if (ret) { ath11k_warn(ab, "failed to extract regulatory info\n"); goto mem_free; } ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP); if (ret) { ath11k_warn(ab, "failed to process regulatory info %d\n", ret); goto mem_free; } kfree(reg_info); return 0; mem_free: ath11k_reg_reset_info(reg_info); kfree(reg_info); return ret; } static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_tlv_rdy_parse *rdy_parse = data; struct wmi_ready_event fixed_param; struct wmi_mac_addr *addr_list; struct ath11k_pdev *pdev; u32 num_mac_addr; int i; switch (tag) { case WMI_TAG_READY_EVENT: memset(&fixed_param, 0, sizeof(fixed_param)); memcpy(&fixed_param, (struct wmi_ready_event *)ptr, min_t(u16, sizeof(fixed_param), len)); rdy_parse->num_extra_mac_addr = fixed_param.ready_event_min.num_extra_mac_addr; ether_addr_copy(ab->mac_addr, fixed_param.ready_event_min.mac_addr.addr); ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum; break; case WMI_TAG_ARRAY_FIXED_STRUCT: addr_list = (struct wmi_mac_addr *)ptr; num_mac_addr = rdy_parse->num_extra_mac_addr; if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) break; for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ether_addr_copy(pdev->mac_addr, addr_list[i].addr); } ab->pdevs_macaddr_valid = true; break; default: break; } return 0; } static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_tlv_rdy_parse rdy_parse = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_rdy_parse, &rdy_parse); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); return 
ret; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready"); complete(&ab->wmi_ab.unified_ready); return 0; } static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_delete_resp_event peer_del_resp; struct ath11k *ar; if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { ath11k_warn(ab, "failed to extract peer delete resp"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d", peer_del_resp.vdev_id); rcu_read_unlock(); return; } complete(&ar->peer_delete_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); } static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; u32 vdev_id = 0; if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { ath11k_warn(ab, "failed to extract vdev delete resp"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d", vdev_id); rcu_read_unlock(); return; } complete(&ar->vdev_delete_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n", vdev_id); } static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status) { switch (vdev_resp_status) { case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: return "invalid vdev id"; case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: return "not supported"; case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: return "dfs violation"; case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: return "invalid regdomain"; default: return "unknown"; } } static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_vdev_start_resp_event vdev_start_resp; struct ath11k *ar; u32 status; if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { ath11k_warn(ab, "failed to extract vdev start resp"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev start resp"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d", vdev_start_resp.vdev_id); rcu_read_unlock(); return; } ar->last_wmi_vdev_start_status = 0; ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; status = vdev_start_resp.status; if (WARN_ON_ONCE(status)) { ath11k_warn(ab, "vdev start resp error status %d (%s)\n", status, ath11k_wmi_vdev_resp_print(status)); ar->last_wmi_vdev_start_status = status; } complete(&ar->vdev_setup_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d", vdev_start_resp.vdev_id); } static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_vif *arvif; u32 vdev_id, tx_status; if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { ath11k_warn(ab, "failed to extract bcn tx status"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status"); rcu_read_lock(); arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id); if (!arvif) { ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status", vdev_id); rcu_read_unlock(); return; } queue_work(ab->workqueue, &arvif->bcn_tx_work); rcu_read_unlock(); } static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab, struct sk_buff *skb) { const struct 
wmi_peer_sta_ps_state_chg_event *ev; struct ieee80211_sta *sta; struct ath11k_peer *peer; struct ath11k *ar; struct ath11k_sta *arsta; const void **tb; enum ath11k_wmi_peer_ps_state peer_previous_ps_state; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch sta ps change ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n", ev->peer_macaddr.addr, ev->peer_ps_state, ev->ps_supported_bitmap, ev->peer_ps_valid, ev->peer_ps_timestamp); rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr); if (!peer) { spin_unlock_bh(&ab->base_lock); ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr); goto exit; } ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); if (!ar) { spin_unlock_bh(&ab->base_lock); ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d", peer->vdev_id); goto exit; } sta = peer->sta; spin_unlock_bh(&ab->base_lock); if (!sta) { ath11k_warn(ab, "failed to find station entry %pM\n", ev->peer_macaddr.addr); goto exit; } arsta = ath11k_sta_to_arsta(sta); spin_lock_bh(&ar->data_lock); peer_previous_ps_state = arsta->peer_ps_state; arsta->peer_ps_state = ev->peer_ps_state; arsta->peer_current_ps_valid = !!ev->peer_ps_valid; if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT, ar->ab->wmi_ab.svc_map)) { if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) || !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) || !ev->peer_ps_valid) goto out; if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) { arsta->ps_start_time = ev->peer_ps_timestamp; arsta->ps_start_jiffies = jiffies; } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF && peer_previous_ps_state == WMI_PEER_PS_STATE_ON) { arsta->ps_total_duration = arsta->ps_total_duration + (ev->peer_ps_timestamp - arsta->ps_start_time); } if (ar->ps_timekeeper_enable) trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr, ev->peer_ps_timestamp, arsta->peer_ps_state); } out: spin_unlock_bh(&ar->data_lock); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; u32 vdev_id = 0; if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { ath11k_warn(ab, "failed to extract vdev stopped event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d", vdev_id); rcu_read_unlock(); return; } complete(&ar->vdev_setup_done); rcu_read_unlock(); ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); } static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct mgmt_rx_event_params rx_ev = {0}; + struct mgmt_rx_event_params rx_ev = {}; struct ath11k *ar; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr; u16 fc; struct ieee80211_supported_band *sband; if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { ath11k_warn(ab, "failed to extract mgmt rx event"); dev_kfree_skb(skb); return; } memset(status, 0, sizeof(*status)); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n", rx_ev.status); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); if (!ar) 
{ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", rx_ev.pdev_id); dev_kfree_skb(skb); goto exit; } if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) || (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) { dev_kfree_skb(skb); goto exit; } if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ && rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) { status->band = NL80211_BAND_5GHZ; } else { /* Shouldn't happen unless list of advertised channels to * mac80211 has been changed. */ WARN_ON_ONCE(1); dev_kfree_skb(skb); goto exit; } if (rx_ev.phy_mode == MODE_11B && (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) ath11k_dbg(ab, ATH11K_DBG_WMI, "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); sband = &ar->mac.sbands[status->band]; if (status->band != NL80211_BAND_6GHZ) status->freq = ieee80211_channel_to_frequency(rx_ev.channel, status->band); status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); hdr = (struct ieee80211_hdr *)skb->data; fc = le16_to_cpu(hdr->frame_control); /* Firmware is guaranteed to report all essential management frames via * WMI while it can deliver some extra via HTT. Since there can be * duplicates split the reporting wrt monitor/sniffing. */ status->flag |= RX_FLAG_SKIP_MONITOR; /* In case of PMF, FW delivers decrypted frames with Protected Bit set. * Don't clear that. Also, FW delivers broadcast management frames * (ex: group privacy action frames in mesh) as encrypted payload. 
*/ if (ieee80211_has_protected(hdr->frame_control) && !is_multicast_ether_addr(ieee80211_get_DA(hdr))) { status->flag |= RX_FLAG_DECRYPTED; if (!ieee80211_is_robust_mgmt_frame(skb)) { status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; hdr->frame_control = __cpu_to_le16(fc & ~IEEE80211_FCTL_PROTECTED); } } if (ieee80211_is_beacon(hdr->frame_control)) ath11k_mac_handle_beacon(ar, skb); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", status->freq, status->band, status->signal, status->rate_idx); ieee80211_rx_ni(ar->hw, skb); exit: rcu_read_unlock(); } static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_mgmt_tx_compl_event tx_compl_param = {0}; + struct wmi_mgmt_tx_compl_event tx_compl_param = {}; struct ath11k *ar; if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { ath11k_warn(ab, "failed to extract mgmt tx compl event"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", tx_compl_param.pdev_id); goto exit; } wmi_process_mgmt_tx_comp(ar, &tx_compl_param); ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d", tx_compl_param.pdev_id, tx_compl_param.desc_id, tx_compl_param.status, tx_compl_param.ack_rssi); exit: rcu_read_unlock(); } static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab, u32 vdev_id, enum ath11k_scan_state state) { int i; struct ath11k_pdev *pdev; struct ath11k *ar; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) { ar = pdev->ar; spin_lock_bh(&ar->data_lock); if (ar->scan.state == state && ar->scan.vdev_id == vdev_id) { spin_unlock_bh(&ar->data_lock); return ar; } spin_unlock_bh(&ar->data_lock); } } return NULL; } static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k *ar; - struct wmi_scan_event scan_ev = {0}; + struct wmi_scan_event scan_ev = {}; if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) { ath11k_warn(ab, "failed to extract scan event"); return; } rcu_read_lock(); /* In case the scan was cancelled, ex. during interface teardown, * the interface will not be found in active interfaces. * Rather, in such scenarios, iterate over the active pdev's to * search 'ar' if the corresponding 'ar' scan is ABORTING and the * aborting scan's vdev id matches this event info. 
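* The RUNNING state is checked as a fallback, since the cancellation may be * reported before the host has moved the scan state to ABORTING.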
*/ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED && scan_ev.reason == WMI_SCAN_REASON_CANCELLED) { ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, ATH11K_SCAN_ABORTING); if (!ar) ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id, ATH11K_SCAN_RUNNING); } else { ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id); } if (!ar) { ath11k_warn(ab, "Received scan event for unknown vdev"); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); ath11k_dbg(ab, ATH11K_DBG_WMI, "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason), scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq, scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id, ath11k_scan_state_str(ar->scan.state), ar->scan.state); switch (scan_ev.event_type) { case WMI_SCAN_EVENT_STARTED: ath11k_wmi_event_scan_started(ar); break; case WMI_SCAN_EVENT_COMPLETED: ath11k_wmi_event_scan_completed(ar); break; case WMI_SCAN_EVENT_BSS_CHANNEL: ath11k_wmi_event_scan_bss_chan(ar); break; case WMI_SCAN_EVENT_FOREIGN_CHAN: ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq); break; case WMI_SCAN_EVENT_START_FAILED: ath11k_warn(ab, "received scan start failure event\n"); ath11k_wmi_event_scan_start_failed(ar); break; case WMI_SCAN_EVENT_DEQUEUED: __ath11k_mac_scan_finish(ar); break; case WMI_SCAN_EVENT_PREEMPTED: case WMI_SCAN_EVENT_RESTARTED: case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: default: break; } spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); } static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_sta_kickout_arg arg = {}; struct ieee80211_sta *sta; struct ath11k_peer *peer; struct ath11k *ar; u32 vdev_id; if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { ath11k_warn(ab, "failed to extract peer sta kickout event"); return; } rcu_read_lock(); spin_lock_bh(&ab->base_lock); peer = ath11k_peer_find_by_addr(ab, arg.mac_addr); if (!peer) { ath11k_warn(ab, "peer not found %pM\n", arg.mac_addr); spin_unlock_bh(&ab->base_lock); goto exit; } vdev_id = peer->vdev_id; spin_unlock_bh(&ab->base_lock); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", peer->vdev_id); goto exit; } sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL); if (!sta) { ath11k_warn(ab, "Spurious quick kickout for STA %pM\n", arg.mac_addr); goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM", arg.mac_addr); ieee80211_report_low_ack(sta, 10); exit: rcu_read_unlock(); } static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_roam_event roam_ev = {}; struct ath11k *ar; if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) { ath11k_warn(ab, "failed to extract roam event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event roam vdev %u reason 0x%08x rssi %d\n", roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in roam ev %d", roam_ev.vdev_id); rcu_read_unlock(); return; } if (roam_ev.reason >= WMI_ROAM_REASON_MAX) ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", roam_ev.reason, roam_ev.vdev_id); switch (roam_ev.reason) { case WMI_ROAM_REASON_BEACON_MISS: ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id); break; case WMI_ROAM_REASON_BETTER_AP: case WMI_ROAM_REASON_LOW_RSSI: case WMI_ROAM_REASON_SUITABLE_AP_FOUND: case 
WMI_ROAM_REASON_HO_FAILED: ath11k_warn(ab, "ignoring unimplemented roam event reason %d on vdev %i\n", roam_ev.reason, roam_ev.vdev_id); break; } rcu_read_unlock(); } static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_chan_info_event ch_info_ev = {0}; + struct wmi_chan_info_event ch_info_ev = {}; struct ath11k *ar; struct survey_info *survey; int idx; /* HW channel counters frequency value in hertz */ u32 cc_freq_hz = ab->cc_freq_hz; if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { ath11k_warn(ab, "failed to extract chan info event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, ch_info_ev.cmd_flags, ch_info_ev.noise_floor, ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, ch_info_ev.mac_clk_mhz); if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) { ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n"); return; } rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in chan info ev %d", ch_info_ev.vdev_id); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { case ATH11K_SCAN_IDLE: case ATH11K_SCAN_STARTING: ath11k_warn(ab, "received chan info event without a scan request, ignoring\n"); goto exit; case ATH11K_SCAN_RUNNING: case ATH11K_SCAN_ABORTING: break; } idx = freq_to_idx(ar, ch_info_ev.freq); if (idx >= ARRAY_SIZE(ar->survey)) { ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", ch_info_ev.freq, idx); goto exit; } /* If FW provides the MAC clock frequency in MHz, override the initialized * HW channel counters frequency value */ if (ch_info_ev.mac_clk_mhz) cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000); if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { survey = &ar->survey[idx]; memset(survey, 0, sizeof(*survey)); survey->noise = ch_info_ev.noise_floor; survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz); survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz); } exit: spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); } static void ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; struct survey_info *survey; struct ath11k *ar; u32 cc_freq_hz = ab->cc_freq_hz; u64 busy, total, tx, rx, rx_bss; int idx; if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { ath11k_warn(ab, "failed to extract pdev bss chan info event"); return; } /* each counter is reported as two u32 halves; rebuild the u64 values */ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 | bss_ch_info_ev.rx_clear_count_low; total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 | bss_ch_info_ev.cycle_count_low; tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 | bss_ch_info_ev.tx_cycle_count_low; rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 | bss_ch_info_ev.rx_cycle_count_low; rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 | bss_ch_info_ev.rx_bss_cycle_count_low; ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, bss_ch_info_ev.noise_floor, busy, total, tx, rx, rx_bss); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id); if (!ar) { 
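/* the event carried a pdev id that no active radio is using */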
ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", bss_ch_info_ev.pdev_id); rcu_read_unlock(); return; } spin_lock_bh(&ar->data_lock); idx = freq_to_idx(ar, bss_ch_info_ev.freq); if (idx >= ARRAY_SIZE(ar->survey)) { ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", bss_ch_info_ev.freq, idx); goto exit; } survey = &ar->survey[idx]; survey->noise = bss_ch_info_ev.noise_floor; survey->time = div_u64(total, cc_freq_hz); survey->time_busy = div_u64(busy, cc_freq_hz); survey->time_rx = div_u64(rx_bss, cc_freq_hz); survey->time_tx = div_u64(tx, cc_freq_hz); survey->filled |= (SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_TX); exit: spin_unlock_bh(&ar->data_lock); complete(&ar->bss_survey_done); rcu_read_unlock(); } static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_vdev_install_key_complete_arg install_key_compl = {0}; + struct wmi_vdev_install_key_complete_arg install_key_compl = {}; struct ath11k *ar; if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { ath11k_warn(ab, "failed to extract install key compl event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n", install_key_compl.key_idx, install_key_compl.key_flags, install_key_compl.macaddr, install_key_compl.status); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in install key compl ev %d", install_key_compl.vdev_id); rcu_read_unlock(); return; } ar->install_key_status = 0; if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { ath11k_warn(ab, "install key failed for %pM status %d\n", install_key_compl.macaddr, install_key_compl.status); ar->install_key_status = install_key_compl.status; } complete(&ar->install_key_done); rcu_read_unlock(); } static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { const struct wmi_service_available_event *ev; u32 *wmi_ext2_service_bitmap; int i, j; switch (tag) { case WMI_TAG_SERVICE_AVAILABLE_EVENT: ev = (struct wmi_service_available_event *)ptr; for (i = 0, j = WMI_MAX_SERVICE; i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; i++) { do { if (ev->wmi_service_segment_bitmap[i] & BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) set_bit(j, ab->wmi_ab.svc_map); } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); break; case WMI_TAG_ARRAY_UINT32: wmi_ext2_service_bitmap = (u32 *)ptr; for (i = 0, j = WMI_MAX_EXT_SERVICE; i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE; i++) { do { if (wmi_ext2_service_bitmap[i] & BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) set_bit(j, ab->wmi_ab.svc_map); } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x", wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1], wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]); break; } return 0; } static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) { int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_services_parser, NULL); if 
(ret) ath11k_warn(ab, "failed to parse services available tlv %d\n", ret); ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available"); } static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) { - struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0}; + struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; struct ath11k *ar; if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { ath11k_warn(ab, "failed to extract peer assoc conf event"); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer assoc conf ev vdev id %d macaddr %pM\n", peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d", peer_assoc_conf.vdev_id); rcu_read_unlock(); return; } complete(&ar->peer_assoc_done); rcu_read_unlock(); } static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) { struct ath11k_fw_stats stats = {}; size_t total_vdevs_started = 0; struct ath11k_pdev *pdev; bool is_end = true; int i; struct ath11k *ar; int ret; INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.bcn); ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); if (ret) { ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); goto free; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats"); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); if (!ar) { rcu_read_unlock(); ath11k_warn(ab, "failed to get ar for pdev_id %d\n", stats.pdev_id); goto free; } spin_lock_bh(&ar->data_lock); /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via * debugfs fw stats. Therefore, process them separately. */ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); complete(&ar->fw_stats_done); goto complete; } if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { complete(&ar->fw_stats_done); goto complete; } if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { if (list_empty(&stats.vdevs)) { ath11k_warn(ab, "empty vdev stats"); goto complete; } /* FW sends all the active VDEV stats irrespective of PDEV, * hence wait until stats for all started VDEVs are received */ for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->ar) total_vdevs_started += pdev->ar->num_started_vdevs; } if (total_vdevs_started) is_end = ((++ar->fw_stats.num_vdev_recvd) == total_vdevs_started); list_splice_tail_init(&stats.vdevs, &ar->fw_stats.vdevs); if (is_end) complete(&ar->fw_stats_done); goto complete; } /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats. * Hence, process it in the debugfs context. */ ath11k_debugfs_fw_stats_process(ar, &stats); complete: complete(&ar->fw_stats_complete); spin_unlock_bh(&ar->data_lock); rcu_read_unlock(); /* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised * at this point, there is no need to free the individual lists. */ return; free: ath11k_fw_stats_free(&stats); } /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned * is not part of the BDF CTL (Conformance Test Limits) table entries. 
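* When that happens the firmware limits the transmit power on its own; the * handler below only logs the status carried in the event.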
*/ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_ctl_failsafe_chk_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev ctl failsafe check status %d\n", ev->ctl_failsafe_status); /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power * to 10 dBm else the CTL power entry in the BDF would be picked up. */ if (ev->ctl_failsafe_status != 0) ath11k_warn(ab, "pdev ctl failsafe failure status %d", ev->ctl_failsafe_status); kfree(tb); } static void ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab, const struct wmi_pdev_csa_switch_ev *ev, const u32 *vdev_ids) { int i; struct ath11k_vif *arvif; /* Finish CSA once the switch count reaches zero */ if (ev->current_switch_count) return; rcu_read_lock(); for (i = 0; i < ev->num_vdevs; i++) { arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); if (!arvif) { ath11k_warn(ab, "Received CSA status for unknown vdev %d", vdev_ids[i]); continue; } if (arvif->is_up && arvif->vif->bss_conf.csa_active) ieee80211_csa_finish(arvif->vif, 0); } rcu_read_unlock(); } static void ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_csa_switch_ev *ev; const u32 *vdev_ids; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; if (!ev || !vdev_ids) { ath11k_warn(ab, "failed to fetch pdev csa switch count ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev csa switch count %d for pdev %d, num_vdevs %d", ev->current_switch_count, ev->pdev_id, ev->num_vdevs); ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); kfree(tb); } static void ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_pdev_radar_ev *ev; struct ath11k *ar; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, ev->freq_offset, ev->sidx); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "radar detected in invalid pdev %d\n", ev->pdev_id); goto exit; } ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n", ev->pdev_id); if (ar->dfs_block_radar_events) ath11k_info(ab, "DFS Radar detected, but ignored as requested\n"); else ieee80211_radar_detected(ar->hw, NULL); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, struct sk_buff *skb) { struct 
ath11k *ar; const void **tb; const struct wmi_pdev_temperature_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch pdev temp ev"); kfree(tb); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id); rcu_read_lock(); ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); if (!ar) { ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id); goto exit; } ath11k_thermal_event_temperature(ar, ev->temp); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_fils_discovery_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_fils_discovery_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse FILS discovery event tlv %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery"); ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch FILS discovery event\n"); kfree(tb); return; } ath11k_warn(ab, "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", ev->vdev_id, ev->fils_tt, ev->tbtt); kfree(tb); } static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_probe_resp_tx_status_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse probe response transmission status event tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status"); ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch probe response transmission status event"); kfree(tb); return; } if (ev->tx_status) ath11k_warn(ab, "Probe response transmission failed for vdev_id %u, status %u\n", ev->vdev_id, ev->tx_status); kfree(tb); } static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) { struct wmi_wow_ev_arg *ev = data; const char *wow_pg_fault; int wow_pg_len; switch (tag) { case WMI_TAG_WOW_EVENT_INFO: memcpy(ev, ptr, sizeof(*ev)); ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n", ev->wake_reason, wow_reason(ev->wake_reason)); break; case WMI_TAG_ARRAY_BYTE: if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) { wow_pg_fault = ptr; /* the first 4 bytes are length */ wow_pg_len = *(int *)wow_pg_fault; wow_pg_fault += sizeof(int); ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n", wow_pg_len); ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "wow_event_info_type packet present", "wow_pg_fault ", wow_pg_fault, wow_pg_len); } break; default: break; } return 0; } static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_wow_ev_arg ev = { }; int ret; ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, ath11k_wmi_tlv_wow_wakeup_host_parse, &ev); if (ret) { ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host"); complete(&ab->wow.wakeup_completed); } static void ath11k_wmi_diag_event(struct ath11k_base *ab, struct sk_buff *skb) { ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag"); trace_ath11k_wmi_diag(ab, skb->data, skb->len); } static const char 
*ath11k_wmi_twt_add_dialog_event_status(u32 status) { switch (status) { case WMI_ADD_TWT_STATUS_OK: return "ok"; case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED: return "twt disabled"; case WMI_ADD_TWT_STATUS_USED_DIALOG_ID: return "dialog id in use"; case WMI_ADD_TWT_STATUS_INVALID_PARAM: return "invalid parameters"; case WMI_ADD_TWT_STATUS_NOT_READY: return "not ready"; case WMI_ADD_TWT_STATUS_NO_RESOURCE: return "resource unavailable"; case WMI_ADD_TWT_STATUS_NO_ACK: return "no ack"; case WMI_ADD_TWT_STATUS_NO_RESPONSE: return "no response"; case WMI_ADD_TWT_STATUS_DENIED: return "denied"; case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR: fallthrough; default: return "unknown error"; } } static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_twt_add_dialog_event *ev; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse wmi twt add dialog status event tlv: %d\n", ret); return; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog"); ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n"); goto exit; } if (ev->status) ath11k_warn(ab, "wmi add twt dialog event vdev %d dialog id %d status %s\n", ev->vdev_id, ev->dialog_id, ath11k_wmi_twt_add_dialog_event_status(ev->status)); exit: kfree(tb); } static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_gtk_offload_status_event *ev; struct ath11k_vif *arvif; __be64 replay_ctr_be; u64 replay_ctr; int ret; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ret = PTR_ERR(tb); ath11k_warn(ab, "failed to parse tlv: %d\n", ret); return; } ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; if (!ev) { ath11k_warn(ab, "failed to fetch gtk offload status ev"); kfree(tb); return; } rcu_read_lock(); arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id); if (!arvif) { ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n", ev->vdev_id); goto exit; } ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n", ev->refresh_cnt); ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt", NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES); replay_ctr = ev->replay_ctr.word1; replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0; arvif->rekey_data.replay_ctr = replay_ctr; /* supplicant expects big-endian replay counter */ replay_ctr_be = cpu_to_be64(replay_ctr); ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, (void *)&replay_ctr_be, GFP_ATOMIC); exit: rcu_read_unlock(); kfree(tb); } static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab, struct sk_buff *skb) { const void **tb; const struct wmi_p2p_noa_event *ev; const struct ath11k_wmi_p2p_noa_info *noa; struct ath11k *ar; int vdev_id; u8 noa_descriptors; tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); if (IS_ERR(tb)) { ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb)); return; } ev = tb[WMI_TAG_P2P_NOA_EVENT]; noa = tb[WMI_TAG_P2P_NOA_INFO]; if (!ev || !noa) goto out; vdev_id = ev->vdev_id; noa_descriptors = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM); if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) { ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n", noa_descriptors); goto out; } ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi tlv p2p noa vdev_id %i descriptors %u\n", vdev_id, noa_descriptors); rcu_read_lock(); ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, 
"invalid vdev id %d in P2P NoA event\n", vdev_id); goto unlock; } ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); unlock: rcu_read_unlock(); out: kfree(tb); } static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; enum wmi_tlv_event_id id; cmd_hdr = (struct wmi_cmd_hdr *)skb->data; id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id)); trace_ath11k_wmi_event(ab, id, skb->data, skb->len); if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) goto out; switch (id) { /* Process all the WMI events here */ case WMI_SERVICE_READY_EVENTID: ath11k_service_ready_event(ab, skb); break; case WMI_SERVICE_READY_EXT_EVENTID: ath11k_service_ready_ext_event(ab, skb); break; case WMI_SERVICE_READY_EXT2_EVENTID: ath11k_service_ready_ext2_event(ab, skb); break; case WMI_REG_CHAN_LIST_CC_EVENTID: ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID); break; case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID); break; case WMI_READY_EVENTID: ath11k_ready_event(ab, skb); break; case WMI_PEER_DELETE_RESP_EVENTID: ath11k_peer_delete_resp_event(ab, skb); break; case WMI_VDEV_START_RESP_EVENTID: ath11k_vdev_start_resp_event(ab, skb); break; case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: ath11k_bcn_tx_status_event(ab, skb); break; case WMI_VDEV_STOPPED_EVENTID: ath11k_vdev_stopped_event(ab, skb); break; case WMI_MGMT_RX_EVENTID: ath11k_mgmt_rx_event(ab, skb); /* mgmt_rx_event() owns the skb now! */ return; case WMI_MGMT_TX_COMPLETION_EVENTID: ath11k_mgmt_tx_compl_event(ab, skb); break; case WMI_SCAN_EVENTID: ath11k_scan_event(ab, skb); break; case WMI_PEER_STA_KICKOUT_EVENTID: ath11k_peer_sta_kickout_event(ab, skb); break; case WMI_ROAM_EVENTID: ath11k_roam_event(ab, skb); break; case WMI_CHAN_INFO_EVENTID: ath11k_chan_info_event(ab, skb); break; case WMI_PDEV_BSS_CHAN_INFO_EVENTID: ath11k_pdev_bss_chan_info_event(ab, skb); break; case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: ath11k_vdev_install_key_compl_event(ab, skb); break; case WMI_SERVICE_AVAILABLE_EVENTID: ath11k_service_available_event(ab, skb); break; case WMI_PEER_ASSOC_CONF_EVENTID: ath11k_peer_assoc_conf_event(ab, skb); break; case WMI_UPDATE_STATS_EVENTID: ath11k_update_stats_event(ab, skb); break; case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: ath11k_pdev_ctl_failsafe_check_event(ab, skb); break; case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb); break; case WMI_PDEV_UTF_EVENTID: ath11k_tm_wmi_event(ab, id, skb); break; case WMI_PDEV_TEMPERATURE_EVENTID: ath11k_wmi_pdev_temperature_event(ab, skb); break; case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb); break; case WMI_HOST_FILS_DISCOVERY_EVENTID: ath11k_fils_discovery_event(ab, skb); break; case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: ath11k_probe_resp_tx_status_event(ab, skb); break; case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: ath11k_wmi_obss_color_collision_event(ab, skb); break; case WMI_TWT_ADD_DIALOG_EVENTID: ath11k_wmi_twt_add_dialog_event(ab, skb); break; case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb); break; case WMI_VDEV_DELETE_RESP_EVENTID: ath11k_vdev_delete_resp_event(ab, skb); break; case WMI_WOW_WAKEUP_HOST_EVENTID: ath11k_wmi_event_wow_wakeup_host(ab, skb); break; case WMI_11D_NEW_COUNTRY_EVENTID: ath11k_reg_11d_new_cc_event(ab, skb); break; case WMI_DIAG_EVENTID: ath11k_wmi_diag_event(ab, skb); break; case 
WMI_PEER_STA_PS_STATECHG_EVENTID: ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb); break; case WMI_GTK_OFFLOAD_STATUS_EVENTID: ath11k_wmi_gtk_offload_status_event(ab, skb); break; case WMI_P2P_NOA_EVENTID: ath11k_wmi_p2p_noa_event(ab, skb); break; default: ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id); break; } out: dev_kfree_skb(skb); }
static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab, u32 pdev_idx) { int status; u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1, ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; struct ath11k_htc_svc_conn_req conn_req; struct ath11k_htc_svc_conn_resp conn_resp; memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); /* these fields are the same for all service endpoints */ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete; conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx; conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits; /* connect to control service */ conn_req.service_id = svc_id[pdev_idx]; status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); if (status) { ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", status); return status; } ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq); return 0; }
static int ath11k_wmi_send_unit_test_cmd(struct ath11k *ar, struct wmi_unit_test_cmd ut_cmd, u32 *test_args) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_unit_test_cmd *cmd; struct sk_buff *skb; struct wmi_tlv *tlv; void *ptr; u32 *ut_cmd_args; int buf_len, arg_len; int ret; int i; arg_len = sizeof(u32) * ut_cmd.num_args; buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len); if (!skb) return -ENOMEM; cmd = (struct wmi_unit_test_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE); cmd->vdev_id = ut_cmd.vdev_id; cmd->module_id = ut_cmd.module_id; cmd->num_args = ut_cmd.num_args; cmd->diag_token = ut_cmd.diag_token; ptr = skb->data + sizeof(ut_cmd); tlv = ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, arg_len); ptr += TLV_HDR_SIZE; ut_cmd_args = ptr; for (i = 0; i < ut_cmd.num_args; i++) ut_cmd_args[i] = test_args[i]; ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", ret); dev_kfree_skb(skb); } /* log from ut_cmd, the caller's copy: on the error path the skb, and with it cmd, has already been freed */ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd unit test module %d vdev %d n_args %d token %d\n", ut_cmd.module_id, ut_cmd.vdev_id, ut_cmd.num_args, ut_cmd.diag_token); return ret; }
int ath11k_wmi_simulate_radar(struct ath11k *ar) { struct ath11k_vif *arvif; u32 dfs_args[DFS_MAX_TEST_ARGS]; struct wmi_unit_test_cmd wmi_ut; bool arvif_found = false; list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { arvif_found = true; break; } } if (!arvif_found) return -EINVAL; dfs_args[DFS_TEST_CMDID] = 0; dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; /* Currently we could pass segment_id(b0 - b1), chirp(b2) and freq offset (b3 - b10) to the unit test. For simulation purposes this can be set to 0, which is valid. */
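/* Purely illustrative sketch of how a non-zero radar parameter could
 * be packed. The DFS_TEST_* masks are hypothetical and only the bit
 * positions (segment id b0-b1, chirp b2, freq offset b3-b10) come
 * from the comment above:
 *
 *   #define DFS_TEST_SEG_ID   GENMASK(1, 0)
 *   #define DFS_TEST_CHIRP    BIT(2)
 *   #define DFS_TEST_FREQ_OFF GENMASK(10, 3)
 *
 *   dfs_args[DFS_TEST_RADAR_PARAM] =
 *           FIELD_PREP(DFS_TEST_SEG_ID, seg_id) |
 *           FIELD_PREP(DFS_TEST_CHIRP, chirp) |
 *           FIELD_PREP(DFS_TEST_FREQ_OFF, freq_offset);
 */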
dfs_args[DFS_TEST_RADAR_PARAM] = 0; wmi_ut.vdev_id = arvif->vdev_id; wmi_ut.module_id = DFS_UNIT_TEST_MODULE; wmi_ut.num_args = DFS_MAX_TEST_ARGS; wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN; ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n"); return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); }
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, struct ath11k_fw_dbglog *dbglog) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_debug_log_config_cmd_fixed_param *cmd; struct sk_buff *skb; struct wmi_tlv *tlv; int ret, len; len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->dbg_log_param = dbglog->param; tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); switch (dbglog->param) { case WMI_DEBUG_LOG_PARAM_LOG_LEVEL: case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE: case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE: case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP: cmd->value = dbglog->value; break; case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP: case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP: cmd->value = dbglog->value; memcpy(tlv->value, module_id_bitmap, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); /* clear current config to be used for next user config */ memset(module_id_bitmap, 0, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32)); break; default: dev_kfree_skb(skb); return -EINVAL; } ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID); if (ret) { ath11k_warn(ar->ab, "failed to send WMI_DBGLOG_CFG_CMDID\n"); dev_kfree_skb(skb); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg"); return ret; }
int ath11k_wmi_connect(struct ath11k_base *ab) { u32 i; u8 wmi_ep_count; wmi_ep_count = ab->htc.wmi_ep_count; if (wmi_ep_count > ab->hw_params.max_radios) return -EINVAL; for (i = 0; i < wmi_ep_count; i++) ath11k_connect_pdev_htc_service(ab, i); return 0; }
static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id) { if (WARN_ON(pdev_id >= MAX_RADIOS)) return; /* TODO: Deinit any pdev specific wmi resource */ }
int ath11k_wmi_pdev_attach(struct ath11k_base *ab, u8 pdev_id) { struct ath11k_pdev_wmi *wmi_handle; if (pdev_id >= ab->hw_params.max_radios) return -EINVAL; wmi_handle = &ab->wmi_ab.wmi[pdev_id]; wmi_handle->wmi_ab = &ab->wmi_ab; ab->wmi_ab.ab = ab; /* TODO: Init remaining resource specific to pdev */ return 0; }
int ath11k_wmi_attach(struct ath11k_base *ab) { int ret; ret = ath11k_wmi_pdev_attach(ab, 0); if (ret) return ret; ab->wmi_ab.ab = ab; ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; /* It's overwritten when service_ext_ready is handled */ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; /* TODO: Init remaining wmi soc resources required */ init_completion(&ab->wmi_ab.service_ready); init_completion(&ab->wmi_ab.unified_ready); return 0; }
void ath11k_wmi_detach(struct ath11k_base *ab) { int i; /* TODO: Deinit wmi resource specific to SOC as required */ for (i = 0; i < ab->htc.wmi_ep_count; i++) ath11k_wmi_pdev_detach(ab, i); ath11k_wmi_free_dbring_caps(ab); }
int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id, u32 filter_bitmap, bool enable) {
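/* This follows the fixed-TLV pattern used by most commands in this
 * file: allocate an skb sized for the command struct, stamp
 * tlv_header with FIELD_PREP(WMI_TLV_TAG, ...) |
 * FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE), fill the
 * payload and queue the skb with ath11k_wmi_cmd_send().
 */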
struct wmi_hw_data_filter_cmd *cmd; struct sk_buff *skb; int len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_hw_data_filter_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->enable = enable; /* Set all modes in case of disable */ if (cmd->enable) cmd->hw_filter_bitmap = filter_bitmap; else cmd->hw_filter_bitmap = ((u32)~0U); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "hw data filter enable %d filter_bitmap 0x%x\n", enable, filter_bitmap); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); } int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) { struct wmi_wow_host_wakeup_ind *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_host_wakeup_ind *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); } int ath11k_wmi_wow_enable(struct ath11k *ar) { struct wmi_wow_enable_cmd *cmd; struct sk_buff *skb; int len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_enable_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->enable = 1; cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n"); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); } int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, const u8 mac_addr[ETH_ALEN]) { struct sk_buff *skb; struct wmi_scan_prob_req_oui_cmd *cmd; u32 prob_req_oui; int len; prob_req_oui = (((u32)mac_addr[0]) << 16) | (((u32)mac_addr[1]) << 8) | mac_addr[2]; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_PROB_REQ_OUI_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->prob_req_oui = prob_req_oui; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n", prob_req_oui); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); } int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id, enum wmi_wow_wakeup_event event, u32 enable) { struct wmi_wow_add_del_event_cmd *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->is_add = enable; cmd->event_bitmap = (1 << event); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n", wow_wakeup_event(event), enable, vdev_id); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); } int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id, const u8 *pattern, const u8 *mask, int pattern_len, int pattern_offset) { struct wmi_wow_add_pattern_cmd *cmd; struct wmi_wow_bitmap_pattern *bitmap; struct wmi_tlv *tlv; struct 
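/* The command built below emits four zero-length placeholder arrays
 * (ipv4 sync, ipv6 sync, magic, pattern info timeout) and a one-word
 * ratelimit array after the bitmap pattern; the layout suggests the
 * firmware TLV parser expects every array to be present even when
 * unused.
 */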
sk_buff *skb; u8 *ptr; size_t len; len = sizeof(*cmd) + sizeof(*tlv) + /* array struct */ sizeof(*bitmap) + /* bitmap */ sizeof(*tlv) + /* empty ipv4 sync */ sizeof(*tlv) + /* empty ipv6 sync */ sizeof(*tlv) + /* empty magic */ sizeof(*tlv) + /* empty info timeout */ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; /* cmd */ ptr = (u8 *)skb->data; cmd = (struct wmi_wow_add_pattern_cmd *)ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_PATTERN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->pattern_id = pattern_id; cmd->pattern_type = WOW_BITMAP_PATTERN; ptr += sizeof(*cmd); /* bitmap */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap)); ptr += sizeof(*tlv); bitmap = (struct wmi_wow_bitmap_pattern *)ptr; bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_BITMAP_PATTERN_T) | FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE); memcpy(bitmap->patternbuf, pattern, pattern_len); ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4)); memcpy(bitmap->bitmaskbuf, mask, pattern_len); ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4)); bitmap->pattern_offset = pattern_offset; bitmap->pattern_len = pattern_len; bitmap->bitmask_len = pattern_len; bitmap->pattern_id = pattern_id; ptr += sizeof(*bitmap); /* ipv4 sync */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, 0); ptr += sizeof(*tlv); /* ipv6 sync */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, 0); ptr += sizeof(*tlv); /* magic */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, 0); ptr += sizeof(*tlv); /* pattern info timeout */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, 0); ptr += sizeof(*tlv); /* ratelimit interval */ tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, sizeof(u32)); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n", vdev_id, pattern_id, pattern_offset); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); } int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id) { struct wmi_wow_del_pattern_cmd *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_DEL_PATTERN_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->pattern_id = pattern_id; cmd->pattern_type = WOW_BITMAP_PATTERN; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n", vdev_id, pattern_id); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); } static struct sk_buff * ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar, u32 vdev_id, struct wmi_pno_scan_req *pno) { struct nlo_configured_parameters *nlo_list; struct wmi_wow_nlo_config_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; u32 *channel_list; size_t len, nlo_list_len, channel_list_len; u8 *ptr; u32 i; len = sizeof(*cmd) + sizeof(*tlv) + /* TLV place holder for 
array of structures * nlo_configured_parameters(nlo_list) */ sizeof(*tlv); /* TLV place holder for array of uint32 channel_list */ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; len += channel_list_len; nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; len += nlo_list_len; skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return ERR_PTR(-ENOMEM); ptr = (u8 *)skb->data; cmd = (struct wmi_wow_nlo_config_cmd *)ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = pno->vdev_id; cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN; /* current FW does not support min-max range for dwell time */ cmd->active_dwell_time = pno->active_max_time; cmd->passive_dwell_time = pno->passive_max_time; if (pno->do_passive_scan) cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE; cmd->fast_scan_period = pno->fast_scan_period; cmd->slow_scan_period = pno->slow_scan_period; cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles; cmd->delay_start_time = pno->delay_start_time; if (pno->enable_pno_scan_randomization) { cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ; ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); ath11k_ce_byte_swap(cmd->mac_addr.addr, 8); ath11k_ce_byte_swap(cmd->mac_mask.addr, 8); } ptr += sizeof(*cmd); /* nlo_configured_parameters(nlo_list) */ cmd->no_of_ssids = pno->uc_networks_count; tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, nlo_list_len); ptr += sizeof(*tlv); nlo_list = (struct nlo_configured_parameters *)ptr; for (i = 0; i < cmd->no_of_ssids; i++) { tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv)); nlo_list[i].ssid.valid = true; nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len; memcpy(nlo_list[i].ssid.ssid.ssid, pno->a_networks[i].ssid.ssid, nlo_list[i].ssid.ssid.ssid_len); ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid, roundup(nlo_list[i].ssid.ssid.ssid_len, 4)); if (pno->a_networks[i].rssi_threshold && pno->a_networks[i].rssi_threshold > -300) { nlo_list[i].rssi_cond.valid = true; nlo_list[i].rssi_cond.rssi = pno->a_networks[i].rssi_threshold; } nlo_list[i].bcast_nw_type.valid = true; nlo_list[i].bcast_nw_type.bcast_nw_type = pno->a_networks[i].bcast_nw_type; } ptr += nlo_list_len; cmd->num_of_channels = pno->a_networks[0].channel_count; tlv = (struct wmi_tlv *)ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) | FIELD_PREP(WMI_TLV_LEN, channel_list_len); ptr += sizeof(*tlv); channel_list = (u32 *)ptr; for (i = 0; i < cmd->num_of_channels; i++) channel_list[i] = pno->a_networks[0].channels[i]; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n", vdev_id); return skb; } static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar, u32 vdev_id) { struct wmi_wow_nlo_config_cmd *cmd; struct sk_buff *skb; size_t len; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return ERR_PTR(-ENOMEM); cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) | FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; cmd->flags = WMI_NLO_CONFIG_STOP; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv stop pno config vdev_id %d\n", 
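/* The stop request reuses wmi_wow_nlo_config_cmd with only
 * WMI_NLO_CONFIG_STOP set and no SSID or channel TLVs appended, so
 * the TLV length (len - TLV_HDR_SIZE) covers just the fixed command
 * struct.
 */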
vdev_id); return skb; } int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id, struct wmi_pno_scan_req *pno_scan) { struct sk_buff *skb; if (pno_scan->enable) skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); else skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id); if (IS_ERR_OR_NULL(skb)) return -ENOMEM; return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); } static void ath11k_wmi_fill_ns_offload(struct ath11k *ar, struct ath11k_arp_ns_offload *offload, u8 **ptr, bool enable, bool ext) { struct wmi_ns_offload_tuple *ns; struct wmi_tlv *tlv; u8 *buf_ptr = *ptr; u32 ns_cnt, ns_ext_tuples; int i, max_offloads; ns_cnt = offload->ipv6_count; tlv = (struct wmi_tlv *)buf_ptr; if (ext) { ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns)); i = WMI_MAX_NS_OFFLOADS; max_offloads = offload->ipv6_count; } else { tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns)); i = 0; max_offloads = WMI_MAX_NS_OFFLOADS; } buf_ptr += sizeof(*tlv); for (; i < max_offloads; i++) { ns = (struct wmi_ns_offload_tuple *)buf_ptr; ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE); if (enable) { if (i < ns_cnt) ns->flags |= WMI_NSOL_FLAGS_VALID; memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); ath11k_ce_byte_swap(ns->target_ipaddr[0], 16); ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16); if (offload->ipv6_type[i]) ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST; memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); ath11k_ce_byte_swap(ns->target_mac.addr, 8); if (ns->target_mac.word0 != 0 || ns->target_mac.word1 != 0) { ns->flags |= WMI_NSOL_FLAGS_MAC_VALID; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "index %d ns_solicited %pI6 target %pI6", i, ns->solicitation_ipaddr, ns->target_ipaddr[0]); } buf_ptr += sizeof(*ns); } *ptr = buf_ptr; } static void ath11k_wmi_fill_arp_offload(struct ath11k *ar, struct ath11k_arp_ns_offload *offload, u8 **ptr, bool enable) { struct wmi_arp_offload_tuple *arp; struct wmi_tlv *tlv; u8 *buf_ptr = *ptr; int i; /* fill arp tuple */ tlv = (struct wmi_tlv *)buf_ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); buf_ptr += sizeof(*tlv); for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { arp = (struct wmi_arp_offload_tuple *)buf_ptr; arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); if (enable && i < offload->ipv4_count) { /* Copy the target ip addr and flags */ arp->flags = WMI_ARPOL_FLAGS_VALID; memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); ath11k_ce_byte_swap(arp->target_ipaddr, 4); ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4", arp->target_ipaddr); } buf_ptr += sizeof(*arp); } *ptr = buf_ptr; } int ath11k_wmi_arp_ns_offload(struct ath11k *ar, struct ath11k_vif *arvif, bool enable) { struct ath11k_arp_ns_offload *offload; struct wmi_set_arp_ns_offload_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; u8 *buf_ptr; size_t len; u8 ns_cnt, ns_ext_tuples = 0; offload = &arvif->arp_ns_offload; ns_cnt = offload->ipv6_count; len = sizeof(*cmd) + sizeof(*tlv) + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) + sizeof(*tlv) + WMI_MAX_ARP_OFFLOADS * 
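/* Worked example, assuming WMI_MAX_NS_OFFLOADS is 2: with five
 * configured IPv6 addresses, two tuples fill the mandatory NS array
 * sized above and ns_ext_tuples = 3 extra tuples are appended in an
 * extension array after the ARP tuples.
 */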
sizeof(struct wmi_arp_offload_tuple); if (ns_cnt > WMI_MAX_NS_OFFLOADS) { ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; len += sizeof(*tlv) + ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple); } skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; buf_ptr = skb->data; cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->flags = 0; cmd->vdev_id = arvif->vdev_id; cmd->num_ns_ext_tuples = ns_ext_tuples; buf_ptr += sizeof(*cmd); ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); if (ns_ext_tuples) ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); } int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar, struct ath11k_vif *arvif, bool enable) { struct wmi_gtk_rekey_offload_cmd *cmd; struct ath11k_rekey_data *rekey_data = &arvif->rekey_data; int len; struct sk_buff *skb; __le64 replay_ctr; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arvif->vdev_id; if (enable) { cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE; /* the length in rekey_data and cmd is equal */ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES); memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES); replay_ctr = cpu_to_le64(rekey_data->replay_ctr); memcpy(cmd->replay_ctr, &replay_ctr, sizeof(replay_ctr)); ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES); } else { cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE; } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", arvif->vdev_id, enable); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); } int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar, struct ath11k_vif *arvif) { struct wmi_gtk_rekey_offload_cmd *cmd; int len; struct sk_buff *skb; len = sizeof(*cmd); skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arvif->vdev_id; cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n", arvif->vdev_id); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); } int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_sar_table_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; u8 *buf_ptr; u32 len, sar_len_aligned, rsvd_len_aligned; sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32)); rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32)); len = sizeof(*cmd) + TLV_HDR_SIZE + sar_len_aligned + TLV_HDR_SIZE + rsvd_len_aligned; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; cmd->sar_len = 
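/* Both byte arrays in this command are rounded up to a u32 multiple
 * so the TLV stream stays 4-byte aligned; only BIOS_SAR_TABLE_LEN
 * bytes of the first array carry data and the reserved array is left
 * zeroed.
 */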
BIOS_SAR_TABLE_LEN; cmd->rsvd_len = BIOS_SAR_RSVD1_LEN; buf_ptr = skb->data + sizeof(*cmd); tlv = (struct wmi_tlv *)buf_ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, sar_len_aligned); buf_ptr += TLV_HDR_SIZE; memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN); buf_ptr += sar_len_aligned; tlv = (struct wmi_tlv *)buf_ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); } int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_pdev_set_geo_table_cmd *cmd; struct wmi_tlv *tlv; struct sk_buff *skb; u8 *buf_ptr; u32 len, rsvd_len_aligned; rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32)); len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned; skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->pdev_id = ar->pdev->pdev_id; cmd->rsvd_len = BIOS_SAR_RSVD2_LEN; buf_ptr = skb->data + sizeof(*cmd); tlv = (struct wmi_tlv *)buf_ptr; tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned); return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); } int ath11k_wmi_sta_keepalive(struct ath11k *ar, const struct wmi_sta_keepalive_arg *arg) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_sta_keepalive_cmd *cmd; struct wmi_sta_keepalive_arp_resp *arp; struct sk_buff *skb; size_t len; len = sizeof(*cmd) + sizeof(*arp); skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; cmd = (struct wmi_sta_keepalive_cmd *)skb->data; cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_KEEPALIVE_CMD) | FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = arg->vdev_id; cmd->enabled = arg->enabled; cmd->interval = arg->interval; cmd->method = arg->method; arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1); arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) | FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE); if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { arp->src_ip4_addr = arg->src_ip4_addr; arp->dest_ip4_addr = arg->dest_ip4_addr; ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); } ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "sta keepalive vdev %d enabled %d method %d interval %d\n", arg->vdev_id, arg->enabled, arg->method, arg->interval); return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); } bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar) { return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, ar->ab->wmi_ab.svc_map) && ar->supports_6ghz; }
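/* Illustrative caller sketch for ath11k_wmi_sta_keepalive(); the
 * method and interval values below are assumptions, not taken from
 * this file:
 *
 *   struct wmi_sta_keepalive_arg arg = {
 *           .vdev_id = arvif->vdev_id,
 *           .enabled = 1,
 *           .method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
 *           .interval = 30,
 *   };
 *   int ret = ath11k_wmi_sta_keepalive(ar, &arg);
 *
 * Only the two ARP-based methods consume the src/dest IPv4 and MAC
 * fields; other methods leave the ARP response TLV zeroed.
 */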