Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ath-next-20250305' of git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath

Jeff Johnson says:
====================
ath.git patches for v6.15

This development cycle again featured multiple patchsets to ath12k to
support the new 802.11be MLO feature. In addition, there was the usual
set of bug fixes and cleanups.
====================

Link: https://lore.kernel.org/linux-wireless/d01b1976-ebe8-48cd-8f49-32bfa00bed7e@oss.qualcomm.com/
Signed-off-by: Johannes Berg <johannes.berg@intel.com>

+9051 -1125
+23 -2
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
··· 92 92 93 93 ieee80211-freq-limit: true 94 94 95 - qcom,ath10k-calibration-data: 95 + qcom,calibration-data: 96 96 $ref: /schemas/types.yaml#/definitions/uint8-array 97 97 description: 98 98 Calibration data + board-specific data as a byte array. The length 99 99 can vary between hardware versions. 100 100 101 - qcom,ath10k-calibration-variant: 101 + qcom,ath10k-calibration-data: 102 + $ref: /schemas/types.yaml#/definitions/uint8-array 103 + deprecated: true 104 + description: 105 + Calibration data + board-specific data as a byte array. The length 106 + can vary between hardware versions. 107 + 108 + qcom,calibration-variant: 102 109 $ref: /schemas/types.yaml#/definitions/string 103 110 description: 104 111 Unique variant identifier of the calibration data in board-2.bin 105 112 for designs with colliding bus and device specific ids 106 113 114 + qcom,ath10k-calibration-variant: 115 + $ref: /schemas/types.yaml#/definitions/string 116 + deprecated: true 117 + description: 118 + Unique variant identifier of the calibration data in board-2.bin 119 + for designs with colliding bus and device specific ids 120 + 121 + qcom,pre-calibration-data: 122 + $ref: /schemas/types.yaml#/definitions/uint8-array 123 + description: 124 + Pre-calibration data as a byte array. The length can vary between 125 + hardware versions. 126 + 107 127 qcom,ath10k-pre-calibration-data: 108 128 $ref: /schemas/types.yaml#/definitions/uint8-array 129 + deprecated: true 109 130 description: 110 131 Pre-calibration data as a byte array. The length can vary between 111 132 hardware versions.
+8 -1
Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
··· 22 22 reg: 23 23 maxItems: 1 24 24 25 + qcom,calibration-variant: 26 + $ref: /schemas/types.yaml#/definitions/string 27 + description: | 28 + string to uniquely identify variant of the calibration data for designs 29 + with colliding bus and device ids 30 + 25 31 qcom,ath11k-calibration-variant: 26 32 $ref: /schemas/types.yaml#/definitions/string 33 + deprecated: true 27 34 description: | 28 35 string to uniquely identify variant of the calibration data for designs 29 36 with colliding bus and device ids ··· 134 127 vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; 135 128 vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>; 136 129 137 - qcom,ath11k-calibration-variant = "LE_X13S"; 130 + qcom,calibration-variant = "LE_X13S"; 138 131 }; 139 132 }; 140 133 };
+7
Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
··· 41 41 * reg 42 42 * reg-names 43 43 44 + qcom,calibration-variant: 45 + $ref: /schemas/types.yaml#/definitions/string 46 + description: 47 + string to uniquely identify variant of the calibration data in the 48 + board-2.bin for designs with colliding bus and device specific ids 49 + 44 50 qcom,ath11k-calibration-variant: 45 51 $ref: /schemas/types.yaml#/definitions/string 52 + deprecated: true 46 53 description: 47 54 string to uniquely identify variant of the calibration data in the 48 55 board-2.bin for designs with colliding bus and device specific ids
+10 -3
Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml
··· 52 52 reg: 53 53 maxItems: 1 54 54 55 + qcom,calibration-variant: 56 + $ref: /schemas/types.yaml#/definitions/string 57 + description: 58 + String to uniquely identify variant of the calibration data for designs 59 + with colliding bus and device ids 60 + 55 61 qcom,ath12k-calibration-variant: 56 62 $ref: /schemas/types.yaml#/definitions/string 63 + deprecated: true 57 64 description: 58 65 String to uniquely identify variant of the calibration data for designs 59 66 with colliding bus and device ids ··· 110 103 compatible = "pci17cb,1109"; 111 104 reg = <0x0 0x0 0x0 0x0 0x0>; 112 105 113 - qcom,ath12k-calibration-variant = "RDP433_1"; 106 + qcom,calibration-variant = "RDP433_1"; 114 107 115 108 ports { 116 109 #address-cells = <1>; ··· 146 139 compatible = "pci17cb,1109"; 147 140 reg = <0x0 0x0 0x0 0x0 0x0>; 148 141 149 - qcom,ath12k-calibration-variant = "RDP433_2"; 142 + qcom,calibration-variant = "RDP433_2"; 150 143 qcom,wsi-controller; 151 144 152 145 ports { ··· 183 176 compatible = "pci17cb,1109"; 184 177 reg = <0x0 0x0 0x0 0x0 0x0>; 185 178 186 - qcom,ath12k-calibration-variant = "RDP433_3"; 179 + qcom,calibration-variant = "RDP433_3"; 187 180 188 181 ports { 189 182 #address-cells = <1>;
+10 -3
drivers/net/wireless/ath/ath10k/core.c
··· 1163 1163 if (!node) 1164 1164 return -ENOENT; 1165 1165 1166 - of_property_read_string(node, "qcom,ath10k-calibration-variant", 1166 + of_property_read_string(node, "qcom,calibration-variant", 1167 1167 &variant); 1168 + if (!variant) 1169 + of_property_read_string(node, "qcom,ath10k-calibration-variant", 1170 + &variant); 1168 1171 if (!variant) 1169 1172 return -ENODATA; 1170 1173 ··· 2262 2259 "boot did not find a pre calibration file, try DT next: %d\n", 2263 2260 ret); 2264 2261 2265 - ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); 2262 + ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data"); 2263 + if (ret == -ENOENT) 2264 + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); 2266 2265 if (ret) { 2267 2266 ath10k_dbg(ar, ATH10K_DBG_BOOT, 2268 2267 "unable to load pre cal data from DT: %d\n", ret); ··· 2342 2337 "boot did not find a calibration file, try DT next: %d\n", 2343 2338 ret); 2344 2339 2345 - ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); 2340 + ret = ath10k_download_cal_dt(ar, "qcom,calibration-data"); 2341 + if (ret == -ENOENT) 2342 + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); 2346 2343 if (ret == 0) { 2347 2344 ar->cal_mode = ATH10K_CAL_MODE_DT; 2348 2345 goto done;
+1
drivers/net/wireless/ath/ath11k/Makefile
··· 27 27 ath11k-$(CONFIG_THERMAL) += thermal.o 28 28 ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o 29 29 ath11k-$(CONFIG_PM) += wow.o 30 + ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o 30 31 31 32 obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o 32 33 ath11k_ahb-y += ahb.o
+3 -1
drivers/net/wireless/ath/ath11k/ahb.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 1290 1290 ath11k_core_deinit(ab); 1291 1291 1292 1292 qmi_fail: 1293 + ath11k_fw_destroy(ab); 1293 1294 ath11k_ahb_free_resources(ab); 1294 1295 } 1295 1296 ··· 1310 1309 ath11k_core_deinit(ab); 1311 1310 1312 1311 free_resources: 1312 + ath11k_fw_destroy(ab); 1313 1313 ath11k_ahb_free_resources(ab); 1314 1314 } 1315 1315
+8 -3
drivers/net/wireless/ath/ath11k/core.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 1175 1175 if (!node) 1176 1176 return -ENOENT; 1177 1177 1178 - of_property_read_string(node, "qcom,ath11k-calibration-variant", 1178 + of_property_read_string(node, "qcom,calibration-variant", 1179 1179 &variant); 1180 + if (!variant) 1181 + of_property_read_string(node, "qcom,ath11k-calibration-variant", 1182 + &variant); 1180 1183 if (!variant) 1181 1184 return -ENODATA; 1182 1185 ··· 2059 2056 ath11k_mac_scan_finish(ar); 2060 2057 ath11k_mac_peer_cleanup_all(ar); 2061 2058 cancel_delayed_work_sync(&ar->scan.timeout); 2059 + cancel_work_sync(&ar->channel_update_work); 2062 2060 cancel_work_sync(&ar->regd_update_work); 2063 2061 cancel_work_sync(&ab->update_11d_work); 2064 2062 ··· 2261 2257 reinit_completion(&ab->recovery_start); 2262 2258 atomic_set(&ab->recovery_start_count, 0); 2263 2259 2260 + ath11k_coredump_collect(ab); 2264 2261 ath11k_core_pre_reconfigure_recovery(ab); 2265 2262 2266 2263 reinit_completion(&ab->reconfigure_complete); ··· 2351 2346 ath11k_hif_power_down(ab); 2352 2347 ath11k_mac_destroy(ab); 2353 2348 ath11k_core_soc_destroy(ab); 2354 - ath11k_fw_destroy(ab); 2355 2349 } 2356 2350 EXPORT_SYMBOL(ath11k_core_deinit); 2357 2351 ··· 2397 2393 INIT_WORK(&ab->restart_work, ath11k_core_restart); 2398 2394 INIT_WORK(&ab->update_11d_work, ath11k_update_11d); 2399 2395 INIT_WORK(&ab->reset_work, ath11k_core_reset); 2396 + INIT_WORK(&ab->dump_work, ath11k_coredump_upload); 2400 2397 timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); 2401 2398 init_completion(&ab->htc_suspend); 2402 2399 init_completion(&ab->wow.wakeup_completed);
+11 -2
drivers/net/wireless/ath/ath11k/core.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_CORE_H ··· 32 32 #include "spectral.h" 33 33 #include "wow.h" 34 34 #include "fw.h" 35 + #include "coredump.h" 35 36 36 37 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) 37 38 ··· 371 370 struct ieee80211_vif *vif; 372 371 373 372 struct wmi_wmm_params_all_arg wmm_params; 373 + struct wmi_wmm_params_all_arg muedca_params; 374 374 struct list_head list; 375 375 union { 376 376 struct { ··· 687 685 struct mutex conf_mutex; 688 686 /* protects the radio specific data like debug stats, ppdu_stats_info stats, 689 687 * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info, 690 - * channel context data, survey info, test mode data. 688 + * channel context data, survey info, test mode data, channel_update_queue. 691 689 */ 692 690 spinlock_t data_lock; 693 691 ··· 745 743 struct completion bss_survey_done; 746 744 747 745 struct work_struct regd_update_work; 746 + struct work_struct channel_update_work; 747 + /* protected with data_lock */ 748 + struct list_head channel_update_queue; 748 749 749 750 struct work_struct wmi_mgmt_tx_work; 750 751 struct sk_buff_head wmi_mgmt_tx_queue; ··· 904 899 int num_radios; 905 900 /* HW channel counters frequency value in hertz common to all MACs */ 906 901 u32 cc_freq_hz; 902 + 903 + struct ath11k_dump_file_data *dump_data; 904 + size_t ath11k_coredump_len; 905 + struct work_struct dump_work; 907 906 908 907 struct ath11k_htc htc; 909 908
+52
drivers/net/wireless/ath/ath11k/coredump.c
··· 1 + // SPDX-License-Identifier: BSD-3-Clause-Clear 2 + /* 3 + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 + */ 6 + #include <linux/devcoredump.h> 7 + #include "hif.h" 8 + #include "coredump.h" 9 + #include "debug.h" 10 + 11 + enum 12 + ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type) 13 + { 14 + enum ath11k_fw_crash_dump_type dump_type; 15 + 16 + switch (type) { 17 + case HOST_DDR_REGION_TYPE: 18 + dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA; 19 + break; 20 + case M3_DUMP_REGION_TYPE: 21 + dump_type = FW_CRASH_DUMP_M3_DUMP; 22 + break; 23 + case PAGEABLE_MEM_REGION_TYPE: 24 + dump_type = FW_CRASH_DUMP_PAGEABLE_DATA; 25 + break; 26 + case BDF_MEM_REGION_TYPE: 27 + case CALDB_MEM_REGION_TYPE: 28 + dump_type = FW_CRASH_DUMP_NONE; 29 + break; 30 + default: 31 + dump_type = FW_CRASH_DUMP_TYPE_MAX; 32 + break; 33 + } 34 + 35 + return dump_type; 36 + } 37 + EXPORT_SYMBOL(ath11k_coredump_get_dump_type); 38 + 39 + void ath11k_coredump_upload(struct work_struct *work) 40 + { 41 + struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work); 42 + 43 + ath11k_info(ab, "Uploading coredump\n"); 44 + /* dev_coredumpv() takes ownership of the buffer */ 45 + dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL); 46 + ab->dump_data = NULL; 47 + } 48 + 49 + void ath11k_coredump_collect(struct ath11k_base *ab) 50 + { 51 + ath11k_hif_coredump_download(ab); 52 + }
+79
drivers/net/wireless/ath/ath11k/coredump.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 + /* 3 + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 + */ 6 + #ifndef _ATH11K_COREDUMP_H_ 7 + #define _ATH11K_COREDUMP_H_ 8 + 9 + #define ATH11K_FW_CRASH_DUMP_V2 2 10 + 11 + enum ath11k_fw_crash_dump_type { 12 + FW_CRASH_DUMP_PAGING_DATA, 13 + FW_CRASH_DUMP_RDDM_DATA, 14 + FW_CRASH_DUMP_REMOTE_MEM_DATA, 15 + FW_CRASH_DUMP_PAGEABLE_DATA, 16 + FW_CRASH_DUMP_M3_DUMP, 17 + FW_CRASH_DUMP_NONE, 18 + 19 + /* keep last */ 20 + FW_CRASH_DUMP_TYPE_MAX, 21 + }; 22 + 23 + #define COREDUMP_TLV_HDR_SIZE 8 24 + 25 + struct ath11k_tlv_dump_data { 26 + /* see ath11k_fw_crash_dump_type above */ 27 + __le32 type; 28 + 29 + /* in bytes */ 30 + __le32 tlv_len; 31 + 32 + /* pad to 32-bit boundaries as needed */ 33 + u8 tlv_data[]; 34 + } __packed; 35 + 36 + struct ath11k_dump_file_data { 37 + /* "ATH11K-FW-DUMP" */ 38 + char df_magic[16]; 39 + /* total dump len in bytes */ 40 + __le32 len; 41 + /* file dump version */ 42 + __le32 version; 43 + /* pci device id */ 44 + __le32 chip_id; 45 + /* qrtr instance id */ 46 + __le32 qrtr_id; 47 + /* pci domain id */ 48 + __le32 bus_id; 49 + guid_t guid; 50 + /* time-of-day stamp */ 51 + __le64 tv_sec; 52 + /* time-of-day stamp, nano-seconds */ 53 + __le64 tv_nsec; 54 + /* room for growth w/out changing binary format */ 55 + u8 unused[128]; 56 + u8 data[]; 57 + } __packed; 58 + 59 + #ifdef CONFIG_DEV_COREDUMP 60 + enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type); 61 + void ath11k_coredump_upload(struct work_struct *work); 62 + void ath11k_coredump_collect(struct ath11k_base *ab); 63 + #else 64 + static inline enum 65 + ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type) 66 + { 67 + return FW_CRASH_DUMP_TYPE_MAX; 68 + } 69 + 70 + static inline void ath11k_coredump_upload(struct work_struct *work) 71 + { 72 + } 73 + 74 + static inline void 
ath11k_coredump_collect(struct ath11k_base *ab) 75 + { 76 + } 77 + #endif 78 + 79 + #endif
+11 -24
drivers/net/wireless/ath/ath11k/dp.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <crypto/hash.h> ··· 104 104 if (!ring->vaddr_unaligned) 105 105 return; 106 106 107 - if (ring->cached) { 108 - dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size, 109 - DMA_FROM_DEVICE); 110 - kfree(ring->vaddr_unaligned); 111 - } else { 107 + if (ring->cached) 108 + dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned, 109 + ring->paddr_unaligned, DMA_FROM_DEVICE); 110 + else 112 111 dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, 113 112 ring->paddr_unaligned); 114 - } 115 113 116 114 ring->vaddr_unaligned = NULL; 117 115 } ··· 247 249 default: 248 250 cached = false; 249 251 } 250 - 251 - if (cached) { 252 - ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); 253 - if (!ring->vaddr_unaligned) 254 - return -ENOMEM; 255 - 256 - ring->paddr_unaligned = dma_map_single(ab->dev, 257 - ring->vaddr_unaligned, 258 - ring->size, 259 - DMA_FROM_DEVICE); 260 - if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) { 261 - kfree(ring->vaddr_unaligned); 262 - ring->vaddr_unaligned = NULL; 263 - return -ENOMEM; 264 - } 265 - } 266 252 } 267 253 268 - if (!cached) 254 + if (cached) 255 + ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size, 256 + &ring->paddr_unaligned, 257 + DMA_FROM_DEVICE, 258 + GFP_KERNEL); 259 + else 269 260 ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, 270 261 &ring->paddr_unaligned, 271 262 GFP_KERNEL);
+4 -2
drivers/net/wireless/ath/ath11k/dp.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_DP_H ··· 20 20 21 21 struct dp_rx_tid { 22 22 u8 tid; 23 - u32 *vaddr; 24 23 dma_addr_t paddr; 25 24 u32 size; 26 25 u32 ba_win_sz; ··· 36 37 /* Timer info related to fragments */ 37 38 struct timer_list frag_timer; 38 39 struct ath11k_base *ab; 40 + u32 *vaddr_unaligned; 41 + dma_addr_t paddr_unaligned; 42 + u32 unaligned_size; 39 43 }; 40 44 41 45 #define DP_REO_DESC_FREE_THRESHOLD 64
+64 -69
drivers/net/wireless/ath/ath11k/dp_rx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/ieee80211.h> ··· 675 675 list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { 676 676 list_del(&cmd->list); 677 677 rx_tid = &cmd->data; 678 - if (rx_tid->vaddr) { 679 - dma_unmap_single(ab->dev, rx_tid->paddr, 680 - rx_tid->size, DMA_BIDIRECTIONAL); 681 - kfree(rx_tid->vaddr); 682 - rx_tid->vaddr = NULL; 678 + if (rx_tid->vaddr_unaligned) { 679 + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, 680 + rx_tid->vaddr_unaligned, 681 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 682 + rx_tid->vaddr_unaligned = NULL; 683 683 } 684 684 kfree(cmd); 685 685 } ··· 689 689 list_del(&cmd_cache->list); 690 690 dp->reo_cmd_cache_flush_count--; 691 691 rx_tid = &cmd_cache->data; 692 - if (rx_tid->vaddr) { 693 - dma_unmap_single(ab->dev, rx_tid->paddr, 694 - rx_tid->size, DMA_BIDIRECTIONAL); 695 - kfree(rx_tid->vaddr); 696 - rx_tid->vaddr = NULL; 692 + if (rx_tid->vaddr_unaligned) { 693 + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, 694 + rx_tid->vaddr_unaligned, 695 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 696 + rx_tid->vaddr_unaligned = NULL; 697 697 } 698 698 kfree(cmd_cache); 699 699 } ··· 708 708 if (status != HAL_REO_CMD_SUCCESS) 709 709 ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", 710 710 rx_tid->tid, status); 711 - if (rx_tid->vaddr) { 712 - dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, 713 - DMA_BIDIRECTIONAL); 714 - kfree(rx_tid->vaddr); 715 - rx_tid->vaddr = NULL; 711 + if (rx_tid->vaddr_unaligned) { 712 + dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size, 713 + rx_tid->vaddr_unaligned, 714 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 715 + 
rx_tid->vaddr_unaligned = NULL; 716 716 } 717 717 } 718 718 ··· 749 749 if (ret) { 750 750 ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", 751 751 rx_tid->tid, ret); 752 - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 753 - DMA_BIDIRECTIONAL); 754 - kfree(rx_tid->vaddr); 755 - rx_tid->vaddr = NULL; 752 + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, 753 + rx_tid->vaddr_unaligned, 754 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 755 + rx_tid->vaddr_unaligned = NULL; 756 756 } 757 757 } 758 758 ··· 802 802 803 803 return; 804 804 free_desc: 805 - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 806 - DMA_BIDIRECTIONAL); 807 - kfree(rx_tid->vaddr); 808 - rx_tid->vaddr = NULL; 805 + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, 806 + rx_tid->vaddr_unaligned, 807 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 808 + rx_tid->vaddr_unaligned = NULL; 809 809 } 810 810 811 811 void ath11k_peer_rx_tid_delete(struct ath11k *ar, ··· 831 831 if (ret != -ESHUTDOWN) 832 832 ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", 833 833 tid, ret); 834 - dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, 835 - DMA_BIDIRECTIONAL); 836 - kfree(rx_tid->vaddr); 837 - rx_tid->vaddr = NULL; 834 + dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size, 835 + rx_tid->vaddr_unaligned, 836 + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 837 + rx_tid->vaddr_unaligned = NULL; 838 838 } 839 839 840 840 rx_tid->paddr = 0; 841 + rx_tid->paddr_unaligned = 0; 841 842 rx_tid->size = 0; 843 + rx_tid->unaligned_size = 0; 842 844 } 843 845 844 846 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, ··· 984 982 if (!rx_tid->active) 985 983 goto unlock_exit; 986 984 987 - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, 988 - DMA_BIDIRECTIONAL); 989 - kfree(rx_tid->vaddr); 990 - rx_tid->vaddr = NULL; 985 + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, 986 + 
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); 987 + rx_tid->vaddr_unaligned = NULL; 991 988 992 989 rx_tid->active = false; 993 990 ··· 1001 1000 struct ath11k_base *ab = ar->ab; 1002 1001 struct ath11k_peer *peer; 1003 1002 struct dp_rx_tid *rx_tid; 1004 - u32 hw_desc_sz; 1005 - u32 *addr_aligned; 1006 - void *vaddr; 1003 + u32 hw_desc_sz, *vaddr; 1004 + void *vaddr_unaligned; 1007 1005 dma_addr_t paddr; 1008 1006 int ret; 1009 1007 ··· 1050 1050 else 1051 1051 hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); 1052 1052 1053 - vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); 1054 - if (!vaddr) { 1053 + rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1; 1054 + vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr, 1055 + DMA_BIDIRECTIONAL, GFP_ATOMIC); 1056 + if (!vaddr_unaligned) { 1055 1057 spin_unlock_bh(&ab->base_lock); 1056 1058 return -ENOMEM; 1057 1059 } 1058 1060 1059 - addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); 1060 - 1061 - ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, 1062 - ssn, pn_type); 1063 - 1064 - paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, 1065 - DMA_BIDIRECTIONAL); 1066 - 1067 - ret = dma_mapping_error(ab->dev, paddr); 1068 - if (ret) { 1069 - spin_unlock_bh(&ab->base_lock); 1070 - ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", 1071 - peer_mac, tid, ret); 1072 - goto err_mem_free; 1073 - } 1074 - 1075 - rx_tid->vaddr = vaddr; 1076 - rx_tid->paddr = paddr; 1061 + rx_tid->vaddr_unaligned = vaddr_unaligned; 1062 + vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN); 1063 + rx_tid->paddr_unaligned = paddr; 1064 + rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr - 1065 + (unsigned long)rx_tid->vaddr_unaligned); 1066 + ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type); 1077 1067 rx_tid->size = hw_desc_sz; 1078 1068 rx_tid->active = true; 1079 1069 1070 + /* After dma_alloc_noncoherent, vaddr 
is being modified for reo qdesc setup. 1071 + * Since these changes are not reflected in the device, driver now needs to 1072 + * explicitly call dma_sync_single_for_device. 1073 + */ 1074 + dma_sync_single_for_device(ab->dev, rx_tid->paddr, 1075 + rx_tid->size, 1076 + DMA_TO_DEVICE); 1080 1077 spin_unlock_bh(&ab->base_lock); 1081 1078 1082 - ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, 1083 - paddr, tid, 1, ba_win_sz); 1079 + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr, 1080 + tid, 1, ba_win_sz); 1084 1081 if (ret) { 1085 1082 ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", 1086 1083 peer_mac, tid, ret); 1087 1084 ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); 1088 1085 } 1089 - 1090 - return ret; 1091 - 1092 - err_mem_free: 1093 - kfree(rx_tid->vaddr); 1094 - rx_tid->vaddr = NULL; 1095 1086 1096 1087 return ret; 1097 1088 } ··· 2821 2830 rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; 2822 2831 rx_stats->dcm_count += ppdu_info->dcm; 2823 2832 rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; 2824 - 2825 - arsta->rssi_comb = ppdu_info->rssi_comb; 2826 2833 2827 2834 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 2828 2835 ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); ··· 4772 4783 if (!msdu) { 4773 4784 ath11k_dbg(ar->ab, ATH11K_DBG_DATA, 4774 4785 "msdu_pop: invalid buf_id %d\n", buf_id); 4775 - break; 4786 + goto next_msdu; 4776 4787 } 4777 4788 rxcb = ATH11K_SKB_RXCB(msdu); 4778 4789 if (!rxcb->unmapped) { ··· 5137 5148 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5138 5149 const struct ath11k_hw_hal_params *hal_params; 5139 5150 void *ring_entry; 5140 - void *mon_dst_srng; 5151 + struct hal_srng *mon_dst_srng; 5141 5152 u32 ppdu_id; 5142 5153 u32 rx_bufs_used; 5143 5154 u32 ring_id; ··· 5154 5165 5155 5166 spin_lock_bh(&pmon->mon_lock); 5156 5167 5168 + spin_lock_bh(&mon_dst_srng->lock); 5157 5169 
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5158 5170 5159 5171 ppdu_id = pmon->mon_ppdu_info.ppdu_id; ··· 5213 5223 mon_dst_srng); 5214 5224 } 5215 5225 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5226 + spin_unlock_bh(&mon_dst_srng->lock); 5216 5227 5217 5228 spin_unlock_bh(&pmon->mon_lock); 5218 5229 ··· 5401 5410 "full mon msdu_pop: invalid buf_id %d\n", 5402 5411 buf_id); 5403 5412 spin_unlock_bh(&rx_ring->idr_lock); 5404 - break; 5413 + goto next_msdu; 5405 5414 } 5406 5415 idr_remove(&rx_ring->bufs_idr, buf_id); 5407 5416 spin_unlock_bh(&rx_ring->idr_lock); ··· 5603 5612 struct hal_sw_mon_ring_entries *sw_mon_entries; 5604 5613 struct ath11k_pdev_mon_stats *rx_mon_stats; 5605 5614 struct sk_buff *head_msdu, *tail_msdu; 5606 - void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; 5615 + struct hal_srng *mon_dst_srng; 5607 5616 void *ring_entry; 5608 5617 u32 rx_bufs_used = 0, mpdu_rx_bufs_used; 5609 5618 int quota = 0, ret; ··· 5618 5627 spin_unlock_bh(&pmon->mon_lock); 5619 5628 goto reap_status_ring; 5620 5629 } 5630 + 5631 + mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; 5632 + spin_lock_bh(&mon_dst_srng->lock); 5621 5633 5622 5634 ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); 5623 5635 while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { ··· 5665 5671 } 5666 5672 5667 5673 ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); 5674 + spin_unlock_bh(&mon_dst_srng->lock); 5668 5675 spin_unlock_bh(&pmon->mon_lock); 5669 5676 5670 5677 if (rx_bufs_used) {
+2 -1
drivers/net/wireless/ath/ath11k/fw.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 3 + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 4 4 */ 5 5 6 6 #include "core.h" ··· 166 166 { 167 167 release_firmware(ab->fw.fw); 168 168 } 169 + EXPORT_SYMBOL(ath11k_fw_destroy);
+7
drivers/net/wireless/ath/ath11k/hif.h
··· 31 31 void (*ce_irq_enable)(struct ath11k_base *ab); 32 32 void (*ce_irq_disable)(struct ath11k_base *ab); 33 33 void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); 34 + void (*coredump_download)(struct ath11k_base *ab); 34 35 }; 35 36 36 37 static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab) ··· 145 144 ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx); 146 145 else 147 146 *msi_data_idx = ce_id; 147 + } 148 + 149 + static inline void ath11k_hif_coredump_download(struct ath11k_base *ab) 150 + { 151 + if (ab->hif.ops->coredump_download) 152 + ab->hif.ops->coredump_download(ab); 148 153 } 149 154 150 155 #endif /* _HIF_H_ */
+101 -44
drivers/net/wireless/ath/ath11k/mac.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <net/mac80211.h> ··· 1529 1529 return ret; 1530 1530 } 1531 1531 1532 - static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) 1532 + static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif) 1533 1533 { 1534 - struct ath11k_vif *tx_arvif; 1534 + if (arvif->vif->mbssid_tx_vif) 1535 + return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); 1536 + 1537 + return NULL; 1538 + } 1539 + 1540 + static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif, 1541 + struct ath11k_vif *tx_arvif) 1542 + { 1535 1543 struct ieee80211_ema_beacons *beacons; 1536 1544 int ret = 0; 1537 1545 bool nontx_vif_params_set = false; 1538 1546 u32 params = 0; 1539 1547 u8 i = 0; 1540 - 1541 - tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); 1542 1548 1543 1549 beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, 1544 1550 tx_arvif->vif, 0); ··· 1591 1585 return ret; 1592 1586 } 1593 1587 1594 - static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) 1588 + static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif, 1589 + struct ath11k_vif *tx_arvif) 1595 1590 { 1596 1591 struct ath11k *ar = arvif->ar; 1597 1592 struct ath11k_base *ab = ar->ab; 1598 - struct ath11k_vif *tx_arvif = arvif; 1599 1593 struct ieee80211_hw *hw = ar->hw; 1600 1594 struct ieee80211_vif *vif = arvif->vif; 1601 1595 struct ieee80211_mutable_offsets offs = {}; 1602 1596 struct sk_buff *bcn; 1603 1597 int ret; 1604 1598 1605 - if (vif->mbssid_tx_vif) { 1606 - tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif); 1607 - if (tx_arvif != arvif) { 1608 - ar = tx_arvif->ar; 1609 - ab = ar->ab; 1610 - hw = ar->hw; 1611 - vif = 
tx_arvif->vif; 1612 - } 1599 + if (tx_arvif != arvif) { 1600 + ar = tx_arvif->ar; 1601 + ab = ar->ab; 1602 + hw = ar->hw; 1603 + vif = tx_arvif->vif; 1613 1604 } 1614 1605 1615 1606 bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); ··· 1635 1632 static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) 1636 1633 { 1637 1634 struct ieee80211_vif *vif = arvif->vif; 1635 + struct ath11k_vif *tx_arvif; 1638 1636 1639 1637 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1640 1638 return 0; ··· 1643 1639 /* Target does not expect beacon templates for the already up 1644 1640 * non-transmitting interfaces, and results in a crash if sent. 1645 1641 */ 1646 - if (vif->mbssid_tx_vif && 1647 - arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up) 1648 - return 0; 1642 + tx_arvif = ath11k_mac_get_tx_arvif(arvif); 1643 + if (tx_arvif) { 1644 + if (arvif != tx_arvif && arvif->is_up) 1645 + return 0; 1649 1646 1650 - if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif) 1651 - return ath11k_mac_setup_bcn_tmpl_ema(arvif); 1647 + if (vif->bss_conf.ema_ap) 1648 + return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif); 1649 + } else { 1650 + tx_arvif = arvif; 1651 + } 1652 1652 1653 - return ath11k_mac_setup_bcn_tmpl_mbssid(arvif); 1653 + return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif); 1654 1654 } 1655 1655 1656 1656 void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) ··· 1682 1674 struct ieee80211_bss_conf *info) 1683 1675 { 1684 1676 struct ath11k *ar = arvif->ar; 1685 - struct ath11k_vif *tx_arvif = NULL; 1677 + struct ath11k_vif *tx_arvif; 1686 1678 int ret = 0; 1687 1679 1688 1680 lockdep_assert_held(&arvif->ar->conf_mutex); ··· 1709 1701 1710 1702 ether_addr_copy(arvif->bssid, info->bssid); 1711 1703 1712 - if (arvif->vif->mbssid_tx_vif) 1713 - tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); 1714 - 1704 + tx_arvif = ath11k_mac_get_tx_arvif(arvif); 1715 1705 ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1716 1706 arvif->bssid, 
1717 1707 tx_arvif ? tx_arvif->bssid : NULL, ··· 5210 5204 return ret; 5211 5205 } 5212 5206 5207 + static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw, 5208 + struct ieee80211_vif *vif, 5209 + unsigned int link_id, u16 ac, 5210 + const struct ieee80211_tx_queue_params *params) 5211 + { 5212 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 5213 + struct ath11k *ar = hw->priv; 5214 + struct wmi_wmm_params_arg *p; 5215 + int ret; 5216 + 5217 + switch (ac) { 5218 + case IEEE80211_AC_VO: 5219 + p = &arvif->muedca_params.ac_vo; 5220 + break; 5221 + case IEEE80211_AC_VI: 5222 + p = &arvif->muedca_params.ac_vi; 5223 + break; 5224 + case IEEE80211_AC_BE: 5225 + p = &arvif->muedca_params.ac_be; 5226 + break; 5227 + case IEEE80211_AC_BK: 5228 + p = &arvif->muedca_params.ac_bk; 5229 + break; 5230 + default: 5231 + ath11k_warn(ar->ab, "error ac: %d", ac); 5232 + return -EINVAL; 5233 + } 5234 + 5235 + p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0)); 5236 + p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4)); 5237 + p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0)); 5238 + p->txop = params->mu_edca_param_rec.mu_edca_timer; 5239 + 5240 + ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, 5241 + &arvif->muedca_params, 5242 + WMI_WMM_PARAM_TYPE_11AX_MU_EDCA); 5243 + return ret; 5244 + } 5245 + 5213 5246 static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, 5214 5247 struct ieee80211_vif *vif, 5215 5248 unsigned int link_id, u16 ac, ··· 5287 5242 p->txop = params->txop; 5288 5243 5289 5244 ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, 5290 - &arvif->wmm_params); 5245 + &arvif->wmm_params, 5246 + WMI_WMM_PARAM_TYPE_LEGACY); 5291 5247 if (ret) { 5292 5248 ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret); 5293 5249 goto exit; 5250 + } 5251 + 5252 + if (params->mu_edca) { 5253 + ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac, 5254 + params); 5255 
+ if (ret) { 5256 + ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret); 5257 + goto exit; 5258 + } 5294 5259 } 5295 5260 5296 5261 ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd); ··· 5391 5336 if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { 5392 5337 nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 5393 5338 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 5394 - if (nsts > (ar->num_rx_chains - 1)) 5395 - nsts = ar->num_rx_chains - 1; 5396 5339 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 5397 5340 } 5398 5341 ··· 5474 5421 5475 5422 /* Enable Beamformee STS Field only if SU BF is enabled */ 5476 5423 if (subfee) { 5477 - if (nsts > (ar->num_rx_chains - 1)) 5478 - nsts = ar->num_rx_chains - 1; 5479 - 5480 5424 nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 5481 5425 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 5482 5426 *vht_cap |= nsts; ··· 6338 6288 { 6339 6289 struct ath11k *ar = hw->priv; 6340 6290 struct htt_ppdu_stats_info *ppdu_stats, *tmp; 6291 + struct scan_chan_list_params *params; 6341 6292 int ret; 6342 6293 6343 6294 ath11k_mac_drain_tx(ar); ··· 6354 6303 mutex_unlock(&ar->conf_mutex); 6355 6304 6356 6305 cancel_delayed_work_sync(&ar->scan.timeout); 6306 + cancel_work_sync(&ar->channel_update_work); 6357 6307 cancel_work_sync(&ar->regd_update_work); 6358 6308 cancel_work_sync(&ar->ab->update_11d_work); 6359 6309 ··· 6364 6312 } 6365 6313 6366 6314 spin_lock_bh(&ar->data_lock); 6315 + 6367 6316 list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { 6368 6317 list_del(&ppdu_stats->list); 6369 6318 kfree(ppdu_stats); 6370 6319 } 6320 + 6321 + while ((params = list_first_entry_or_null(&ar->channel_update_queue, 6322 + struct scan_chan_list_params, 6323 + list))) { 6324 + list_del(&params->list); 6325 + kfree(params); 6326 + } 6327 + 6371 6328 spin_unlock_bh(&ar->data_lock); 6372 6329 6373 6330 rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); ··· 6391 6330 { 6392 6331 struct ath11k *ar = 
arvif->ar; 6393 6332 struct ath11k_vif *tx_arvif; 6394 - struct ieee80211_vif *tx_vif; 6395 6333 6396 6334 *tx_vdev_id = 0; 6397 - tx_vif = arvif->vif->mbssid_tx_vif; 6398 - if (!tx_vif) { 6335 + tx_arvif = ath11k_mac_get_tx_arvif(arvif); 6336 + if (!tx_arvif) { 6399 6337 *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP; 6400 6338 return 0; 6401 6339 } 6402 6340 6403 - tx_arvif = ath11k_vif_to_arvif(tx_vif); 6404 - 6405 6341 if (arvif->vif->bss_conf.nontransmitted) { 6406 - if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) 6342 + if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy) 6407 6343 return -EINVAL; 6408 6344 6409 6345 *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP; 6410 - *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id; 6346 + *tx_vdev_id = tx_arvif->vdev_id; 6411 6347 } else if (tx_arvif == arvif) { 6412 6348 *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP; 6413 6349 } else { ··· 7364 7306 int n_vifs) 7365 7307 { 7366 7308 struct ath11k_base *ab = ar->ab; 7367 - struct ath11k_vif *arvif, *tx_arvif = NULL; 7368 - struct ieee80211_vif *mbssid_tx_vif; 7309 + struct ath11k_vif *arvif, *tx_arvif; 7369 7310 int ret; 7370 7311 int i; 7371 7312 bool monitor_vif = false; ··· 7418 7361 ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n", 7419 7362 ret); 7420 7363 7421 - mbssid_tx_vif = arvif->vif->mbssid_tx_vif; 7422 - if (mbssid_tx_vif) 7423 - tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif); 7424 - 7364 + tx_arvif = ath11k_mac_get_tx_arvif(arvif); 7425 7365 ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7426 7366 arvif->bssid, 7427 7367 tx_arvif ? 
tx_arvif->bssid : NULL, ··· 10073 10019 10074 10020 static void __ath11k_mac_unregister(struct ath11k *ar) 10075 10021 { 10022 + cancel_work_sync(&ar->channel_update_work); 10076 10023 cancel_work_sync(&ar->regd_update_work); 10077 10024 10078 10025 ieee80211_unregister_hw(ar->hw); ··· 10473 10418 init_completion(&ar->thermal.wmi_sync); 10474 10419 10475 10420 INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work); 10421 + INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work); 10422 + INIT_LIST_HEAD(&ar->channel_update_queue); 10476 10423 INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work); 10477 10424 10478 10425 INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+5
drivers/net/wireless/ath/ath11k/mhi.c
··· 491 491 492 492 return 0; 493 493 } 494 + 495 + void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic) 496 + { 497 + mhi_download_rddm_image(mhi_ctrl, in_panic); 498 + }
+1
drivers/net/wireless/ath/ath11k/mhi.h
··· 26 26 27 27 int ath11k_mhi_suspend(struct ath11k_pci *ar_pci); 28 28 int ath11k_mhi_resume(struct ath11k_pci *ar_pci); 29 + void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic); 29 30 30 31 #endif
+193 -2
drivers/net/wireless/ath/ath11k/pci.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> 8 8 #include <linux/msi.h> 9 9 #include <linux/pci.h> 10 10 #include <linux/of.h> 11 + #include <linux/time.h> 12 + #include <linux/vmalloc.h> 11 13 12 14 #include "pci.h" 13 15 #include "core.h" ··· 612 610 PCI_EXP_LNKCTL_ASPMC); 613 611 } 614 612 613 + #ifdef CONFIG_DEV_COREDUMP 614 + static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz) 615 + { 616 + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 617 + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; 618 + struct image_info *rddm_img, *fw_img; 619 + struct ath11k_tlv_dump_data *dump_tlv; 620 + enum ath11k_fw_crash_dump_type mem_type; 621 + u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0; 622 + struct ath11k_dump_file_data *file_data; 623 + int i; 624 + 625 + rddm_img = mhi_ctrl->rddm_image; 626 + if (!rddm_img) { 627 + ath11k_err(ab, "No RDDM dump found\n"); 628 + return 0; 629 + } 630 + 631 + fw_img = mhi_ctrl->fbc_image; 632 + 633 + for (i = 0; i < fw_img->entries ; i++) { 634 + if (!fw_img->mhi_buf[i].buf) 635 + continue; 636 + 637 + paging_tlv_sz += fw_img->mhi_buf[i].len; 638 + } 639 + dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz; 640 + 641 + for (i = 0; i < rddm_img->entries; i++) { 642 + if (!rddm_img->mhi_buf[i].buf) 643 + continue; 644 + 645 + rddm_tlv_sz += rddm_img->mhi_buf[i].len; 646 + } 647 + dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz; 648 + 649 + for (i = 0; i < ab->qmi.mem_seg_count; i++) { 650 + mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type); 651 + 652 + if (mem_type == FW_CRASH_DUMP_NONE) 653 + continue; 654 + 655 + if (mem_type == FW_CRASH_DUMP_TYPE_MAX) { 656 + ath11k_dbg(ab, 
ATH11K_DBG_PCI, 657 + "target mem region type %d not supported", 658 + ab->qmi.target_mem[i].type); 659 + continue; 660 + } 661 + 662 + if (!ab->qmi.target_mem[i].anyaddr) 663 + continue; 664 + 665 + dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size; 666 + } 667 + 668 + for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) { 669 + if (!dump_seg_sz[i]) 670 + continue; 671 + 672 + len += sizeof(*dump_tlv) + dump_seg_sz[i]; 673 + } 674 + 675 + if (len) 676 + len += sizeof(*file_data); 677 + 678 + return len; 679 + } 680 + 681 + static void ath11k_pci_coredump_download(struct ath11k_base *ab) 682 + { 683 + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 684 + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; 685 + struct image_info *rddm_img, *fw_img; 686 + struct timespec64 timestamp; 687 + int i, len, mem_idx; 688 + enum ath11k_fw_crash_dump_type mem_type; 689 + struct ath11k_dump_file_data *file_data; 690 + struct ath11k_tlv_dump_data *dump_tlv; 691 + size_t hdr_len = sizeof(*file_data); 692 + void *buf; 693 + u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 }; 694 + 695 + ath11k_mhi_coredump(mhi_ctrl, false); 696 + 697 + len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz); 698 + if (!len) { 699 + ath11k_warn(ab, "No crash dump data found for devcoredump"); 700 + return; 701 + } 702 + 703 + rddm_img = mhi_ctrl->rddm_image; 704 + fw_img = mhi_ctrl->fbc_image; 705 + 706 + /* dev_coredumpv() requires vmalloc data */ 707 + buf = vzalloc(len); 708 + if (!buf) 709 + return; 710 + 711 + ab->dump_data = buf; 712 + ab->ath11k_coredump_len = len; 713 + file_data = ab->dump_data; 714 + strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic)); 715 + file_data->len = cpu_to_le32(len); 716 + file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2); 717 + file_data->chip_id = cpu_to_le32(ab_pci->dev_id); 718 + file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id); 719 + file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus)); 720 + 
guid_gen(&file_data->guid); 721 + ktime_get_real_ts64(&timestamp); 722 + file_data->tv_sec = cpu_to_le64(timestamp.tv_sec); 723 + file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec); 724 + buf += hdr_len; 725 + dump_tlv = buf; 726 + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA); 727 + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]); 728 + buf += COREDUMP_TLV_HDR_SIZE; 729 + 730 + /* append all segments together as they are all part of a single contiguous 731 + * block of memory 732 + */ 733 + for (i = 0; i < fw_img->entries ; i++) { 734 + if (!fw_img->mhi_buf[i].buf) 735 + continue; 736 + 737 + memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf, 738 + fw_img->mhi_buf[i].len); 739 + buf += fw_img->mhi_buf[i].len; 740 + } 741 + 742 + dump_tlv = buf; 743 + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA); 744 + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]); 745 + buf += COREDUMP_TLV_HDR_SIZE; 746 + 747 + /* append all segments together as they are all part of a single contiguous 748 + * block of memory 749 + */ 750 + for (i = 0; i < rddm_img->entries; i++) { 751 + if (!rddm_img->mhi_buf[i].buf) 752 + continue; 753 + 754 + memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf, 755 + rddm_img->mhi_buf[i].len); 756 + buf += rddm_img->mhi_buf[i].len; 757 + } 758 + 759 + mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA; 760 + for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) { 761 + if (mem_idx == FW_CRASH_DUMP_NONE) 762 + continue; 763 + 764 + for (i = 0; i < ab->qmi.mem_seg_count; i++) { 765 + mem_type = ath11k_coredump_get_dump_type 766 + (ab->qmi.target_mem[i].type); 767 + 768 + if (mem_type != mem_idx) 769 + continue; 770 + 771 + if (!ab->qmi.target_mem[i].anyaddr) { 772 + ath11k_dbg(ab, ATH11K_DBG_PCI, 773 + "Skipping mem region type %d", 774 + ab->qmi.target_mem[i].type); 775 + continue; 776 + } 777 + 778 + dump_tlv = buf; 779 + dump_tlv->type = cpu_to_le32(mem_idx); 780 + 
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]); 781 + buf += COREDUMP_TLV_HDR_SIZE; 782 + 783 + memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr, 784 + ab->qmi.target_mem[i].size); 785 + 786 + buf += ab->qmi.target_mem[i].size; 787 + } 788 + } 789 + 790 + queue_work(ab->workqueue, &ab->dump_work); 791 + } 792 + #endif 793 + 615 794 static int ath11k_pci_power_up(struct ath11k_base *ab) 616 795 { 617 796 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); ··· 896 713 .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, 897 714 .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, 898 715 .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx, 716 + #ifdef CONFIG_DEV_COREDUMP 717 + .coredump_download = ath11k_pci_coredump_download, 718 + #endif 899 719 }; 900 720 901 721 static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) ··· 921 735 if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags)) 922 736 return 0; 923 737 924 - return irq_set_affinity_hint(ab_pci->pdev->irq, m); 738 + return irq_set_affinity_and_hint(ab_pci->pdev->irq, m); 925 739 } 926 740 927 741 static int ath11k_pci_probe(struct pci_dev *pdev, ··· 1125 939 return 0; 1126 940 1127 941 err_free_irq: 942 + /* __free_irq() expects the caller to have cleared the affinity hint */ 943 + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); 1128 944 ath11k_pcic_free_irq(ab); 1129 945 1130 946 err_ce_free: ··· 1169 981 1170 982 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); 1171 983 984 + cancel_work_sync(&ab->reset_work); 985 + cancel_work_sync(&ab->dump_work); 1172 986 ath11k_core_deinit(ab); 1173 987 1174 988 qmi_fail: 989 + ath11k_fw_destroy(ab); 1175 990 ath11k_mhi_unregister(ab_pci); 1176 991 1177 992 ath11k_pcic_free_irq(ab);
+11 -8
drivers/net/wireless/ath/ath11k/qmi.c
··· 1957 1957 int i; 1958 1958 1959 1959 for (i = 0; i < ab->qmi.mem_seg_count; i++) { 1960 - if ((ab->hw_params.fixed_mem_region || 1961 - test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && 1962 - ab->qmi.target_mem[i].iaddr) 1963 - iounmap(ab->qmi.target_mem[i].iaddr); 1964 - 1965 - if (!ab->qmi.target_mem[i].vaddr) 1960 + if (!ab->qmi.target_mem[i].anyaddr) 1966 1961 continue; 1962 + 1963 + if (ab->hw_params.fixed_mem_region || 1964 + test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { 1965 + iounmap(ab->qmi.target_mem[i].iaddr); 1966 + ab->qmi.target_mem[i].iaddr = NULL; 1967 + continue; 1968 + } 1967 1969 1968 1970 dma_free_coherent(ab->dev, 1969 1971 ab->qmi.target_mem[i].prev_size, ··· 2072 2070 break; 2073 2071 case BDF_MEM_REGION_TYPE: 2074 2072 ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr; 2075 - ab->qmi.target_mem[idx].vaddr = NULL; 2073 + ab->qmi.target_mem[idx].iaddr = NULL; 2076 2074 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; 2077 2075 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; 2078 2076 idx++; ··· 2095 2093 } else { 2096 2094 ab->qmi.target_mem[idx].paddr = 2097 2095 ATH11K_QMI_CALDB_ADDRESS; 2096 + ab->qmi.target_mem[idx].iaddr = NULL; 2098 2097 } 2099 2098 } else { 2100 2099 ab->qmi.target_mem[idx].paddr = 0; 2101 - ab->qmi.target_mem[idx].vaddr = NULL; 2100 + ab->qmi.target_mem[idx].iaddr = NULL; 2102 2101 } 2103 2102 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; 2104 2103 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+7 -3
drivers/net/wireless/ath/ath11k/qmi.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_QMI_H ··· 102 102 u32 prev_size; 103 103 u32 prev_type; 104 104 dma_addr_t paddr; 105 - u32 *vaddr; 106 - void __iomem *iaddr; 105 + union { 106 + u32 *vaddr; 107 + void __iomem *iaddr; 108 + void *anyaddr; 109 + }; 107 110 }; 108 111 109 112 struct target_info { ··· 157 154 #define BDF_MEM_REGION_TYPE 0x2 158 155 #define M3_DUMP_REGION_TYPE 0x3 159 156 #define CALDB_MEM_REGION_TYPE 0x4 157 + #define PAGEABLE_MEM_REGION_TYPE 0x9 160 158 161 159 struct qmi_wlanfw_host_cap_req_msg_v01 { 162 160 u8 num_clients_valid;
+74 -33
drivers/net/wireless/ath/ath11k/reg.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #include <linux/rtnetlink.h> 7 7 ··· 54 54 55 55 ath11k_dbg(ar->ab, ATH11K_DBG_REG, 56 56 "Regulatory Notification received for %s\n", wiphy_name(wiphy)); 57 + 58 + if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) { 59 + ath11k_dbg(ar->ab, ATH11K_DBG_REG, 60 + "driver initiated regd update\n"); 61 + if (ar->state != ATH11K_STATE_ON) 62 + return; 63 + 64 + ret = ath11k_reg_update_chan_list(ar, true); 65 + if (ret) 66 + ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret); 67 + 68 + return; 69 + } 57 70 58 71 /* Currently supporting only General User Hints. Cell base user 59 72 * hints to be handled later. ··· 124 111 struct channel_param *ch; 125 112 enum nl80211_band band; 126 113 int num_channels = 0; 127 - int i, ret, left; 128 - 129 - if (wait && ar->state_11d != ATH11K_11D_IDLE) { 130 - left = wait_for_completion_timeout(&ar->completed_11d_scan, 131 - ATH11K_SCAN_TIMEOUT_HZ); 132 - if (!left) { 133 - ath11k_dbg(ar->ab, ATH11K_DBG_REG, 134 - "failed to receive 11d scan complete: timed out\n"); 135 - ar->state_11d = ATH11K_11D_IDLE; 136 - } 137 - ath11k_dbg(ar->ab, ATH11K_DBG_REG, 138 - "11d scan wait left time %d\n", left); 139 - } 140 - 141 - if (wait && 142 - (ar->scan.state == ATH11K_SCAN_STARTING || 143 - ar->scan.state == ATH11K_SCAN_RUNNING)) { 144 - left = wait_for_completion_timeout(&ar->scan.completed, 145 - ATH11K_SCAN_TIMEOUT_HZ); 146 - if (!left) 147 - ath11k_dbg(ar->ab, ATH11K_DBG_REG, 148 - "failed to receive hw scan complete: timed out\n"); 149 - 150 - ath11k_dbg(ar->ab, ATH11K_DBG_REG, 151 - "hw scan wait left time %d\n", left); 152 - } 114 + int i, ret = 0; 153 115 154 116 if (ar->state == ATH11K_STATE_RESTARTING) 155 
117 return 0; ··· 206 218 } 207 219 } 208 220 221 + if (wait) { 222 + spin_lock_bh(&ar->data_lock); 223 + list_add_tail(&params->list, &ar->channel_update_queue); 224 + spin_unlock_bh(&ar->data_lock); 225 + 226 + queue_work(ar->ab->workqueue, &ar->channel_update_work); 227 + 228 + return 0; 229 + } 230 + 209 231 ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); 210 232 kfree(params); 211 233 ··· 290 292 291 293 if (ret) 292 294 goto err; 293 - 294 - if (ar->state == ATH11K_STATE_ON) { 295 - ret = ath11k_reg_update_chan_list(ar, true); 296 - if (ret) 297 - goto err; 298 - } 299 295 300 296 return 0; 301 297 err: ··· 796 804 return new_regd; 797 805 } 798 806 807 + void ath11k_regd_update_chan_list_work(struct work_struct *work) 808 + { 809 + struct ath11k *ar = container_of(work, struct ath11k, 810 + channel_update_work); 811 + struct scan_chan_list_params *params; 812 + struct list_head local_update_list; 813 + int left; 814 + 815 + INIT_LIST_HEAD(&local_update_list); 816 + 817 + spin_lock_bh(&ar->data_lock); 818 + list_splice_tail_init(&ar->channel_update_queue, &local_update_list); 819 + spin_unlock_bh(&ar->data_lock); 820 + 821 + while ((params = list_first_entry_or_null(&local_update_list, 822 + struct scan_chan_list_params, 823 + list))) { 824 + if (ar->state_11d != ATH11K_11D_IDLE) { 825 + left = wait_for_completion_timeout(&ar->completed_11d_scan, 826 + ATH11K_SCAN_TIMEOUT_HZ); 827 + if (!left) { 828 + ath11k_dbg(ar->ab, ATH11K_DBG_REG, 829 + "failed to receive 11d scan complete: timed out\n"); 830 + ar->state_11d = ATH11K_11D_IDLE; 831 + } 832 + 833 + ath11k_dbg(ar->ab, ATH11K_DBG_REG, 834 + "reg 11d scan wait left time %d\n", left); 835 + } 836 + 837 + if ((ar->scan.state == ATH11K_SCAN_STARTING || 838 + ar->scan.state == ATH11K_SCAN_RUNNING)) { 839 + left = wait_for_completion_timeout(&ar->scan.completed, 840 + ATH11K_SCAN_TIMEOUT_HZ); 841 + if (!left) 842 + ath11k_dbg(ar->ab, ATH11K_DBG_REG, 843 + "failed to receive hw scan complete: timed out\n"); 
844 + 845 + ath11k_dbg(ar->ab, ATH11K_DBG_REG, 846 + "reg hw scan wait left time %d\n", left); 847 + } 848 + 849 + ath11k_wmi_send_scan_chan_list_cmd(ar, params); 850 + list_del(&params->list); 851 + kfree(params); 852 + } 853 + } 854 + 799 855 static bool ath11k_reg_is_world_alpha(char *alpha) 800 856 { 801 857 if (alpha[0] == '0' && alpha[1] == '0') ··· 1017 977 void ath11k_reg_init(struct ath11k *ar) 1018 978 { 1019 979 ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; 980 + ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER; 1020 981 ar->hw->wiphy->reg_notifier = ath11k_reg_notifier; 1021 982 } 1022 983
+2 -1
drivers/net/wireless/ath/ath11k/reg.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_REG_H ··· 33 33 void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info); 34 34 void ath11k_reg_free(struct ath11k_base *ab); 35 35 void ath11k_regd_update_work(struct work_struct *work); 36 + void ath11k_regd_update_chan_list_work(struct work_struct *work); 36 37 struct ieee80211_regdomain * 37 38 ath11k_reg_build_regd(struct ath11k_base *ab, 38 39 struct cur_regulatory_info *reg_info, bool intersect,
+40 -40
drivers/net/wireless/ath/ath11k/testmode.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "testmode.h" ··· 10 10 #include "wmi.h" 11 11 #include "hw.h" 12 12 #include "core.h" 13 - #include "testmode_i.h" 13 + #include "../testmode_i.h" 14 14 15 15 #define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0) 16 16 #define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4) 17 17 18 - static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = { 19 - [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 }, 20 - [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY, 21 - .len = ATH11K_TM_DATA_MAX_LEN }, 22 - [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, 23 - [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, 24 - [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, 18 + static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = { 19 + [ATH_TM_ATTR_CMD] = { .type = NLA_U32 }, 20 + [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY, 21 + .len = ATH_TM_DATA_MAX_LEN }, 22 + [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, 23 + [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, 24 + [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, 25 25 }; 26 26 27 27 static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab) ··· 73 73 goto out; 74 74 } 75 75 76 - if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) || 77 - nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) || 78 - nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) { 76 + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) || 77 + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || 78 + nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) { 79 79 ath11k_warn(ab, "failed to populate testmode unsegmented event\n"); 80 80 kfree_skb(nl_skb); 81 81 goto out; ··· 140 140 141 
141 data_pos = ab->testmode.data_pos; 142 142 143 - if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) { 143 + if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) { 144 144 ath11k_warn(ab, "Invalid ftm event length at %d: %d\n", 145 145 data_pos, datalen); 146 146 ret = -EINVAL; ··· 172 172 goto out; 173 173 } 174 174 175 - if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, 176 - ATH11K_TM_CMD_WMI_FTM) || 177 - nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) || 178 - nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos, 175 + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, 176 + ATH_TM_CMD_WMI_FTM) || 177 + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || 178 + nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos, 179 179 &ab->testmode.eventdata[0])) { 180 180 ath11k_warn(ab, "failed to populate segmented testmode event"); 181 181 kfree_skb(nl_skb); ··· 235 235 236 236 ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, 237 237 "cmd get version_major %d version_minor %d\n", 238 - ATH11K_TESTMODE_VERSION_MAJOR, 239 - ATH11K_TESTMODE_VERSION_MINOR); 238 + ATH_TESTMODE_VERSION_MAJOR, 239 + ATH_TESTMODE_VERSION_MINOR); 240 240 241 241 skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy, 242 242 nla_total_size(sizeof(u32))); 243 243 if (!skb) 244 244 return -ENOMEM; 245 245 246 - ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR, 247 - ATH11K_TESTMODE_VERSION_MAJOR); 246 + ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR, 247 + ATH_TESTMODE_VERSION_MAJOR); 248 248 if (ret) { 249 249 kfree_skb(skb); 250 250 return ret; 251 251 } 252 252 253 - ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR, 254 - ATH11K_TESTMODE_VERSION_MINOR); 253 + ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR, 254 + ATH_TESTMODE_VERSION_MINOR); 255 255 if (ret) { 256 256 kfree_skb(skb); 257 257 return ret; ··· 277 277 goto err; 278 278 } 279 279 280 - ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH, 280 + ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, 281 281 
GFP_KERNEL); 282 282 if (!ar->ab->testmode.eventdata) { 283 283 ret = -ENOMEM; ··· 310 310 311 311 mutex_lock(&ar->conf_mutex); 312 312 313 - if (!tb[ATH11K_TM_ATTR_DATA]) { 313 + if (!tb[ATH_TM_ATTR_DATA]) { 314 314 ret = -EINVAL; 315 315 goto out; 316 316 } 317 317 318 - if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) { 318 + if (!tb[ATH_TM_ATTR_WMI_CMDID]) { 319 319 ret = -EINVAL; 320 320 goto out; 321 321 } 322 322 323 - buf = nla_data(tb[ATH11K_TM_ATTR_DATA]); 324 - buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]); 323 + buf = nla_data(tb[ATH_TM_ATTR_DATA]); 324 + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); 325 325 if (!buf_len) { 326 326 ath11k_warn(ar->ab, "No data present in testmode wmi command\n"); 327 327 ret = -EINVAL; 328 328 goto out; 329 329 } 330 330 331 - cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]); 331 + cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]); 332 332 333 333 /* Make sure that the buffer length is long enough to 334 334 * hold TLV and pdev/vdev id. ··· 409 409 goto out; 410 410 } 411 411 412 - if (!tb[ATH11K_TM_ATTR_DATA]) { 412 + if (!tb[ATH_TM_ATTR_DATA]) { 413 413 ret = -EINVAL; 414 414 goto out; 415 415 } 416 416 417 - buf = nla_data(tb[ATH11K_TM_ATTR_DATA]); 418 - buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]); 417 + buf = nla_data(tb[ATH_TM_ATTR_DATA]); 418 + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); 419 419 cmd_id = WMI_PDEV_UTF_CMDID; 420 420 421 421 ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, ··· 476 476 void *data, int len) 477 477 { 478 478 struct ath11k *ar = hw->priv; 479 - struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1]; 479 + struct nlattr *tb[ATH_TM_ATTR_MAX + 1]; 480 480 int ret; 481 481 482 - ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy, 482 + ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy, 483 483 NULL); 484 484 if (ret) 485 485 return ret; 486 486 487 - if (!tb[ATH11K_TM_ATTR_CMD]) 487 + if (!tb[ATH_TM_ATTR_CMD]) 488 488 return -EINVAL; 489 489 490 - switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) { 491 - case 
ATH11K_TM_CMD_GET_VERSION: 490 + switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) { 491 + case ATH_TM_CMD_GET_VERSION: 492 492 return ath11k_tm_cmd_get_version(ar, tb); 493 - case ATH11K_TM_CMD_WMI: 493 + case ATH_TM_CMD_WMI: 494 494 return ath11k_tm_cmd_wmi(ar, tb, vif); 495 - case ATH11K_TM_CMD_TESTMODE_START: 495 + case ATH_TM_CMD_TESTMODE_START: 496 496 return ath11k_tm_cmd_testmode_start(ar, tb); 497 - case ATH11K_TM_CMD_WMI_FTM: 497 + case ATH_TM_CMD_WMI_FTM: 498 498 return ath11k_tm_cmd_wmi_ftm(ar, tb); 499 499 default: 500 500 return -EOPNOTSUPP;
-66
drivers/net/wireless/ath/ath11k/testmode_i.h
··· 1 - /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 - /* 3 - * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 5 - */ 6 - 7 - /* "API" level of the ath11k testmode interface. Bump it after every 8 - * incompatible interface change. 9 - */ 10 - #define ATH11K_TESTMODE_VERSION_MAJOR 1 11 - 12 - /* Bump this after every _compatible_ interface change, for example 13 - * addition of a new command or an attribute. 14 - */ 15 - #define ATH11K_TESTMODE_VERSION_MINOR 1 16 - 17 - #define ATH11K_TM_DATA_MAX_LEN 5000 18 - #define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048 19 - 20 - enum ath11k_tm_attr { 21 - __ATH11K_TM_ATTR_INVALID = 0, 22 - ATH11K_TM_ATTR_CMD = 1, 23 - ATH11K_TM_ATTR_DATA = 2, 24 - ATH11K_TM_ATTR_WMI_CMDID = 3, 25 - ATH11K_TM_ATTR_VERSION_MAJOR = 4, 26 - ATH11K_TM_ATTR_VERSION_MINOR = 5, 27 - ATH11K_TM_ATTR_WMI_OP_VERSION = 6, 28 - 29 - /* keep last */ 30 - __ATH11K_TM_ATTR_AFTER_LAST, 31 - ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1, 32 - }; 33 - 34 - /* All ath11k testmode interface commands specified in 35 - * ATH11K_TM_ATTR_CMD 36 - */ 37 - enum ath11k_tm_cmd { 38 - /* Returns the supported ath11k testmode interface version in 39 - * ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space 40 - * uses this to verify it's using the correct version of the 41 - * testmode interface 42 - */ 43 - ATH11K_TM_CMD_GET_VERSION = 0, 44 - 45 - /* The command used to transmit a WMI command to the firmware and 46 - * the event to receive WMI events from the firmware. Without 47 - * struct wmi_cmd_hdr header, only the WMI payload. Command id is 48 - * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in 49 - * ATH11K_TM_ATTR_DATA. 50 - */ 51 - ATH11K_TM_CMD_WMI = 1, 52 - 53 - /* Boots the UTF firmware, the netdev interface must be down at the 54 - * time. 
55 - */ 56 - ATH11K_TM_CMD_TESTMODE_START = 2, 57 - 58 - /* The command used to transmit a FTM WMI command to the firmware 59 - * and the event to receive WMI events from the firmware. The data 60 - * received only contain the payload, need to add the tlv header 61 - * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID. 62 - * The data payload size could be large and the driver needs to 63 - * send segmented data to firmware. 64 - */ 65 - ATH11K_TM_CMD_WMI_FTM = 3, 66 - };
+6 -5
drivers/net/wireless/ath/ath11k/wmi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #include <linux/skbuff.h> 7 7 #include <linux/ctype.h> ··· 2662 2662 } 2663 2663 2664 2664 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, 2665 - struct wmi_wmm_params_all_arg *param) 2665 + struct wmi_wmm_params_all_arg *param, 2666 + enum wmi_wmm_params_type wmm_param_type) 2666 2667 { 2667 2668 struct ath11k_pdev_wmi *wmi = ar->wmi; 2668 2669 struct wmi_vdev_set_wmm_params_cmd *cmd; ··· 2682 2681 FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2683 2682 2684 2683 cmd->vdev_id = vdev_id; 2685 - cmd->wmm_param_type = 0; 2684 + cmd->wmm_param_type = wmm_param_type; 2686 2685 2687 2686 for (ac = 0; ac < WME_NUM_AC; ac++) { 2688 2687 switch (ac) { ··· 2715 2714 wmm_param->no_ack = wmi_wmm_arg->no_ack; 2716 2715 2717 2716 ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 2718 - "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", 2719 - ac, wmm_param->aifs, wmm_param->cwmin, 2717 + "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", 2718 + wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin, 2720 2719 wmm_param->cwmax, wmm_param->txoplimit, 2721 2720 wmm_param->acm, wmm_param->no_ack); 2722 2721 }
+9 -2
drivers/net/wireless/ath/ath11k/wmi.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_WMI_H ··· 3817 3817 }; 3818 3818 3819 3819 struct scan_chan_list_params { 3820 + struct list_head list; 3820 3821 u32 pdev_id; 3821 3822 u16 nallchans; 3822 3823 struct channel_param ch_param[]; ··· 6347 6346 #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 6348 6347 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 6349 6348 6349 + enum wmi_wmm_params_type { 6350 + WMI_WMM_PARAM_TYPE_LEGACY = 0, 6351 + WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1, 6352 + }; 6353 + 6350 6354 const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, 6351 6355 struct sk_buff *skb, gfp_t gfp); 6352 6356 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, ··· 6408 6402 int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, 6409 6403 struct scan_cancel_param *param); 6410 6404 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, 6411 - struct wmi_wmm_params_all_arg *param); 6405 + struct wmi_wmm_params_all_arg *param, 6406 + enum wmi_wmm_params_type wmm_param_type); 6412 6407 int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, 6413 6408 u32 pdev_id); 6414 6409 int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+2 -1
drivers/net/wireless/ath/ath12k/Makefile
··· 23 23 fw.o \ 24 24 p2p.o 25 25 26 - ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o 26 + ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o 27 27 ath12k-$(CONFIG_ACPI) += acpi.o 28 28 ath12k-$(CONFIG_ATH12K_TRACING) += trace.o 29 29 ath12k-$(CONFIG_PM) += wow.o 30 30 ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o 31 + ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o 31 32 32 33 # for tracing framework to find trace.h 33 34 CFLAGS_trace.o := -I$(src)
+158 -44
drivers/net/wireless/ath/ath12k/acpi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" ··· 12 12 { 13 13 union acpi_object *obj; 14 14 acpi_handle root_handle; 15 - int ret; 15 + int ret, i; 16 16 17 17 root_handle = ACPI_HANDLE(ab->dev); 18 18 if (!root_handle) { ··· 29 29 } 30 30 31 31 if (obj->type == ACPI_TYPE_INTEGER) { 32 - ab->acpi.func_bit = obj->integer.value; 32 + switch (func) { 33 + case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS: 34 + ab->acpi.func_bit = obj->integer.value; 35 + break; 36 + case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG: 37 + ab->acpi.bit_flag = obj->integer.value; 38 + break; 39 + } 40 + } else if (obj->type == ACPI_TYPE_STRING) { 41 + switch (func) { 42 + case ATH12K_ACPI_DSM_FUNC_BDF_EXT: 43 + if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN || 44 + obj->string.length > ATH12K_ACPI_BDF_MAX_LEN || 45 + memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING, 46 + ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) { 47 + ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n", 48 + obj->string.length); 49 + ret = -EINVAL; 50 + goto out; 51 + } 52 + 53 + memcpy(ab->acpi.bdf_string, obj->string.pointer, 54 + obj->buffer.length); 55 + 56 + break; 57 + } 33 58 } else if (obj->type == ACPI_TYPE_BUFFER) { 34 59 switch (func) { 60 + case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS: 61 + if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE || 62 + obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) { 63 + ath12k_warn(ab, "invalid ACPI DSM func size: %d\n", 64 + obj->buffer.length); 65 + ret = -EINVAL; 66 + goto out; 67 + } 68 + 69 + ab->acpi.func_bit = 0; 70 + for (i = 0; i < obj->buffer.length; i++) 71 + ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8); 72 + 73 + break; 35 74 case 
ATH12K_ACPI_DSM_FUNC_TAS_CFG: 36 75 if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) { 37 76 ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n", ··· 286 247 return 0; 287 248 } 288 249 250 + bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab) 251 + { 252 + return ab->acpi.acpi_disable_rfkill; 253 + } 254 + 255 + bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab) 256 + { 257 + return ab->acpi.acpi_disable_11be; 258 + } 259 + 260 + void ath12k_acpi_set_dsm_func(struct ath12k_base *ab) 261 + { 262 + int ret; 263 + u8 *buf; 264 + 265 + if (!ab->hw_params->acpi_guid) 266 + /* not supported with this hardware */ 267 + return; 268 + 269 + if (ab->acpi.acpi_tas_enable) { 270 + ret = ath12k_acpi_set_tas_params(ab); 271 + if (ret) { 272 + ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret); 273 + return; 274 + } 275 + } 276 + 277 + if (ab->acpi.acpi_bios_sar_enable) { 278 + ret = ath12k_acpi_set_bios_sar_params(ab); 279 + if (ret) { 280 + ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret); 281 + return; 282 + } 283 + } 284 + 285 + if (ab->acpi.acpi_cca_enable) { 286 + buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET; 287 + ret = ath12k_wmi_set_bios_cmd(ab, 288 + WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE, 289 + buf, 290 + ATH12K_ACPI_CCA_THR_OFFSET_LEN); 291 + if (ret) { 292 + ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n", 293 + ret); 294 + return; 295 + } 296 + } 297 + 298 + if (ab->acpi.acpi_band_edge_enable) { 299 + ret = ath12k_wmi_set_bios_cmd(ab, 300 + WMI_BIOS_PARAM_TYPE_BANDEDGE, 301 + ab->acpi.band_edge_power, 302 + sizeof(ab->acpi.band_edge_power)); 303 + if (ret) { 304 + ath12k_warn(ab, 305 + "failed to set ACPI DSM band edge channel power: %d\n", 306 + ret); 307 + return; 308 + } 309 + } 310 + } 311 + 289 312 int ath12k_acpi_start(struct ath12k_base *ab) 290 313 { 291 314 acpi_status status; 292 - u8 *buf; 293 315 int ret; 316 + 317 + ab->acpi.acpi_tas_enable = false; 318 + 
ab->acpi.acpi_disable_11be = false; 319 + ab->acpi.acpi_disable_rfkill = false; 320 + ab->acpi.acpi_bios_sar_enable = false; 321 + ab->acpi.acpi_cca_enable = false; 322 + ab->acpi.acpi_band_edge_enable = false; 323 + ab->acpi.acpi_enable_bdf = false; 324 + ab->acpi.bdf_string[0] = '\0'; 294 325 295 326 if (!ab->hw_params->acpi_guid) 296 327 /* not supported with this hardware */ 297 328 return 0; 298 329 299 - ab->acpi.acpi_tas_enable = false; 300 - 301 330 ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS); 302 331 if (ret) { 303 332 ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret); 304 333 return ret; 334 + } 335 + 336 + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) { 337 + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG); 338 + if (ret) { 339 + ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret); 340 + return ret; 341 + } 342 + 343 + if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi, 344 + ATH12K_ACPI_DSM_DISABLE_11BE_BIT)) 345 + ab->acpi.acpi_disable_11be = true; 346 + 347 + if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi, 348 + ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT)) 349 + ab->acpi.acpi_disable_rfkill = true; 350 + } 351 + 352 + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) { 353 + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT); 354 + if (ret || ab->acpi.bdf_string[0] == '\0') { 355 + ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret); 356 + return ret; 357 + } 358 + 359 + ab->acpi.acpi_enable_bdf = true; 305 360 } 306 361 307 362 if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) { ··· 441 308 ab->acpi.acpi_bios_sar_enable = true; 442 309 } 443 310 444 - if (ab->acpi.acpi_tas_enable) { 445 - ret = ath12k_acpi_set_tas_params(ab); 446 - if (ret) { 447 - ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret); 448 - return ret; 449 - } 450 - } 451 - 452 - if (ab->acpi.acpi_bios_sar_enable) { 453 - ret 
= ath12k_acpi_set_bios_sar_params(ab); 454 - if (ret) 455 - return ret; 456 - } 457 - 458 311 if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) { 459 312 ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA); 460 313 if (ret) { ··· 451 332 452 333 if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION && 453 334 ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] == 454 - ATH12K_ACPI_CCA_THR_ENABLE_FLAG) { 455 - buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET; 456 - ret = ath12k_wmi_set_bios_cmd(ab, 457 - WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE, 458 - buf, 459 - ATH12K_ACPI_CCA_THR_OFFSET_LEN); 460 - if (ret) { 461 - ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n", 462 - ret); 463 - return ret; 464 - } 465 - } 335 + ATH12K_ACPI_CCA_THR_ENABLE_FLAG) 336 + ab->acpi.acpi_cca_enable = true; 466 337 } 467 338 468 339 if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ··· 465 356 } 466 357 467 358 if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION && 468 - ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) { 469 - ret = ath12k_wmi_set_bios_cmd(ab, 470 - WMI_BIOS_PARAM_TYPE_BANDEDGE, 471 - ab->acpi.band_edge_power, 472 - sizeof(ab->acpi.band_edge_power)); 473 - if (ret) { 474 - ath12k_warn(ab, 475 - "failed to set ACPI DSM band edge channel power: %d\n", 476 - ret); 477 - return ret; 478 - } 479 - } 359 + ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) 360 + ab->acpi.acpi_band_edge_enable = true; 480 361 } 481 362 482 363 status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev), ··· 478 379 } 479 380 480 381 ab->acpi.started = true; 382 + 383 + return 0; 384 + } 385 + 386 + int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab) 387 + { 388 + size_t max_len = sizeof(ab->qmi.target.bdf_ext); 389 + 390 + if (!ab->acpi.acpi_enable_bdf) 391 + return -ENODATA; 392 + 393 + if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0) 394 + ath12k_dbg(ab, 
ATH12K_DBG_BOOT, 395 + "acpi bdf variant longer than the buffer (variant: %s)\n", 396 + ab->acpi.bdf_string); 481 397 482 398 return 0; 483 399 }
+39 -1
drivers/net/wireless/ath/ath12k/acpi.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #ifndef ATH12K_ACPI_H 7 7 #define ATH12K_ACPI_H ··· 9 9 #include <linux/acpi.h> 10 10 11 11 #define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0 12 + #define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2 13 + #define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3 12 14 #define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4 13 15 #define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5 14 16 #define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6 ··· 18 16 #define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9 19 17 #define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10 20 18 19 + #define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1) 20 + #define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2) 21 21 #define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3) 22 22 #define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4) 23 23 #define ATH12K_ACPI_FUNC_BIT_CCA BIT(5) ··· 29 25 30 26 #define ATH12K_ACPI_NOTIFY_EVENT 0x86 31 27 #define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func)) 28 + #define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func)) 32 29 33 30 #define ATH12K_ACPI_TAS_DATA_VERSION 0x1 34 31 #define ATH12K_ACPI_TAS_DATA_ENABLE 0x1 ··· 53 48 #define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100 54 49 #define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108 55 50 51 + #define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1 52 + #define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4 53 + 54 + #define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0) 55 + #define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2) 56 + 57 + #define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3 58 + #define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF" 59 + #define ATH12K_ACPI_BDF_MAX_LEN 100 60 + 56 61 #define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \ 57 62 ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN) 58 63 
#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \ ··· 74 59 75 60 int ath12k_acpi_start(struct ath12k_base *ab); 76 61 void ath12k_acpi_stop(struct ath12k_base *ab); 62 + bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab); 63 + bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab); 64 + void ath12k_acpi_set_dsm_func(struct ath12k_base *ab); 65 + int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab); 77 66 78 67 #else 79 68 ··· 88 69 89 70 static inline void ath12k_acpi_stop(struct ath12k_base *ab) 90 71 { 72 + } 73 + 74 + static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab) 75 + { 76 + return false; 77 + } 78 + 79 + static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab) 80 + { 81 + return false; 82 + } 83 + 84 + static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab) 85 + { 86 + } 87 + 88 + static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab) 89 + { 90 + return 0; 91 91 } 92 92 93 93 #endif /* CONFIG_ACPI */
+75 -28
drivers/net/wireless/ath/ath12k/core.c
··· 23 23 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); 24 24 MODULE_PARM_DESC(debug_mask, "Debugging mask"); 25 25 26 + bool ath12k_ftm_mode; 27 + module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444); 28 + MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode"); 29 + 26 30 /* protected with ath12k_hw_group_mutex */ 27 31 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list); 28 32 ··· 38 34 int ret = 0, i; 39 35 40 36 if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL)) 37 + return 0; 38 + 39 + if (ath12k_acpi_get_disable_rfkill(ab)) 41 40 return 0; 42 41 43 42 for (i = 0; i < ab->num_radios; i++) { ··· 180 173 181 174 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, 182 175 size_t name_len, bool with_variant, 183 - bool bus_type_mode) 176 + bool bus_type_mode, bool with_default) 184 177 { 185 178 /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ 186 179 char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; ··· 211 204 "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", 212 205 ath12k_bus_str(ab->hif.bus), 213 206 ab->qmi.target.chip_id, 214 - ab->qmi.target.board_id, variant); 207 + with_default ? 
208 + ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id, 209 + variant); 215 210 break; 216 211 } 217 212 ··· 225 216 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, 226 217 size_t name_len) 227 218 { 228 - return __ath12k_core_create_board_name(ab, name, name_len, true, false); 219 + return __ath12k_core_create_board_name(ab, name, name_len, true, false, false); 229 220 } 230 221 231 222 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name, 232 223 size_t name_len) 233 224 { 234 - return __ath12k_core_create_board_name(ab, name, name_len, false, false); 225 + return __ath12k_core_create_board_name(ab, name, name_len, false, false, true); 235 226 } 236 227 237 228 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name, 238 229 size_t name_len) 239 230 { 240 - return __ath12k_core_create_board_name(ab, name, name_len, false, true); 231 + return __ath12k_core_create_board_name(ab, name, name_len, false, true, true); 241 232 } 242 233 243 234 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, ··· 702 693 { 703 694 int ret; 704 695 696 + if (ath12k_ftm_mode) { 697 + ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM; 698 + ath12k_info(ab, "Booting in ftm mode\n"); 699 + } 700 + 705 701 ret = ath12k_qmi_init_service(ab); 706 702 if (ret) { 707 703 ath12k_err(ab, "failed to initialize qmi :%d\n", ret); ··· 755 741 ath12k_dp_pdev_free(ab); 756 742 } 757 743 758 - static int ath12k_core_start(struct ath12k_base *ab, 759 - enum ath12k_firmware_mode mode) 744 + static int ath12k_core_start(struct ath12k_base *ab) 760 745 { 761 746 int ret; 762 747 ··· 849 836 goto err_reo_cleanup; 850 837 } 851 838 852 - ret = ath12k_acpi_start(ab); 853 - if (ret) 854 - /* ACPI is optional so continue in case of an error */ 855 - ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret); 839 + ath12k_acpi_set_dsm_func(ab); 856 840 857 841 if (!test_bit(ATH12K_FLAG_RECOVERY, 
&ab->dev_flags)) 858 842 /* Indicate the core start in the appropriate group */ ··· 897 887 ath12k_mac_destroy(ag); 898 888 } 899 889 890 + u8 ath12k_get_num_partner_link(struct ath12k *ar) 891 + { 892 + struct ath12k_base *partner_ab, *ab = ar->ab; 893 + struct ath12k_hw_group *ag = ab->ag; 894 + struct ath12k_pdev *pdev; 895 + u8 num_link = 0; 896 + int i, j; 897 + 898 + lockdep_assert_held(&ag->mutex); 899 + 900 + for (i = 0; i < ag->num_devices; i++) { 901 + partner_ab = ag->ab[i]; 902 + 903 + for (j = 0; j < partner_ab->num_radios; j++) { 904 + pdev = &partner_ab->pdevs[j]; 905 + 906 + /* Avoid the self link */ 907 + if (ar == pdev->ar) 908 + continue; 909 + 910 + num_link++; 911 + } 912 + } 913 + 914 + return num_link; 915 + } 916 + 900 917 static int __ath12k_mac_mlo_ready(struct ath12k *ar) 901 918 { 919 + u8 num_link = ath12k_get_num_partner_link(ar); 902 920 int ret; 921 + 922 + if (num_link == 0) 923 + return 0; 903 924 904 925 ret = ath12k_wmi_mlo_ready(ar); 905 926 if (ret) { ··· 961 920 ar = &ah->radio[j]; 962 921 ret = __ath12k_mac_mlo_ready(ar); 963 922 if (ret) 964 - goto out; 923 + return ret; 965 924 } 966 925 } 967 926 968 - out: 969 - return ret; 927 + return 0; 970 928 } 971 929 972 930 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag) 973 931 { 974 932 int ret, i; 975 933 976 - if (!ag->mlo_capable || ag->num_devices == 1) 934 + if (!ag->mlo_capable) 977 935 return 0; 978 936 979 937 ret = ath12k_mac_mlo_setup(ag); ··· 1108 1068 struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab); 1109 1069 int ret, i; 1110 1070 1111 - ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL); 1071 + ret = ath12k_core_start_firmware(ab, ab->fw_mode); 1112 1072 if (ret) { 1113 1073 ath12k_err(ab, "failed to start firmware: %d\n", ret); 1114 1074 return ret; ··· 1129 1089 mutex_lock(&ag->mutex); 1130 1090 mutex_lock(&ab->core_lock); 1131 1091 1132 - ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL); 1092 + ret = ath12k_core_start(ab); 
1133 1093 if (ret) { 1134 1094 ath12k_err(ab, "failed to start core: %d\n", ret); 1135 1095 goto err_dp_free; ··· 1162 1122 ath12k_core_stop(ab); 1163 1123 mutex_unlock(&ab->core_lock); 1164 1124 } 1125 + mutex_unlock(&ag->mutex); 1165 1126 goto exit; 1166 1127 1167 1128 err_dp_free: 1168 1129 ath12k_dp_free(ab); 1169 1130 mutex_unlock(&ab->core_lock); 1131 + mutex_unlock(&ag->mutex); 1132 + 1170 1133 err_firmware_stop: 1171 1134 ath12k_qmi_firmware_stop(ab); 1172 1135 1173 1136 exit: 1174 - mutex_unlock(&ag->mutex); 1175 1137 return ret; 1176 1138 } 1177 1139 ··· 1281 1239 1282 1240 for (i = 0; i < ag->num_hw; i++) { 1283 1241 ah = ath12k_ag_to_ah(ag, i); 1284 - if (!ah || ah->state == ATH12K_HW_STATE_OFF) 1242 + if (!ah || ah->state == ATH12K_HW_STATE_OFF || 1243 + ah->state == ATH12K_HW_STATE_TM) 1285 1244 continue; 1286 1245 1287 1246 ieee80211_stop_queues(ah->hw); ··· 1351 1308 case ATH12K_HW_STATE_WEDGED: 1352 1309 ath12k_warn(ab, 1353 1310 "device is wedged, will not restart hw %d\n", i); 1311 + break; 1312 + case ATH12K_HW_STATE_TM: 1313 + ath12k_warn(ab, "fw mode reset done radio %d\n", i); 1354 1314 break; 1355 1315 } 1356 1316 ··· 1648 1602 1649 1603 lockdep_assert_held(&ath12k_hw_group_mutex); 1650 1604 1605 + if (ath12k_ftm_mode) 1606 + goto invalid_group; 1607 + 1651 1608 /* The grouping of multiple devices will be done based on device tree file. 1652 1609 * The platforms that do not have any valid group information would have 1653 1610 * each device to be part of its own invalid group. ··· 1838 1789 struct ath12k_base *ab; 1839 1790 int i; 1840 1791 1792 + if (ath12k_ftm_mode) 1793 + return; 1794 + 1841 1795 lockdep_assert_held(&ag->mutex); 1842 1796 1843 1797 /* If more than one devices are grouped, then inter MLO 1844 1798 * functionality can work still independent of whether internally 1845 1799 * each device supports single_chip_mlo or not. 
1846 - * Only when there is one device, then it depends whether the 1847 - * device can support intra chip MLO or not 1800 + * Only when there is one device, then disable for WCN chipsets 1801 + * till the required driver implementation is in place. 1848 1802 */ 1849 - if (ag->num_devices > 1) { 1850 - ag->mlo_capable = true; 1851 - } else { 1803 + if (ag->num_devices == 1) { 1852 1804 ab = ag->ab[0]; 1853 - ag->mlo_capable = ab->single_chip_mlo_supp; 1854 1805 1855 1806 /* WCN chipsets does not advertise in firmware features 1856 1807 * hence skip checking ··· 1859 1810 return; 1860 1811 } 1861 1812 1862 - if (!ag->mlo_capable) 1863 - return; 1813 + ag->mlo_capable = true; 1864 1814 1865 1815 for (i = 0; i < ag->num_devices; i++) { 1866 1816 ab = ag->ab[i]; ··· 1975 1927 ab->dev = dev; 1976 1928 ab->hif.bus = bus; 1977 1929 ab->qmi.num_radios = U8_MAX; 1978 - ab->single_chip_mlo_supp = false; 1979 1930 1980 1931 /* Device index used to identify the devices in a group. 1981 1932 *
+131 -8
drivers/net/wireless/ath/ath12k/core.h
··· 15 15 #include <linux/ctype.h> 16 16 #include <linux/firmware.h> 17 17 #include <linux/panic_notifier.h> 18 + #include <linux/average.h> 18 19 #include "qmi.h" 19 20 #include "htc.h" 20 21 #include "wmi.h" ··· 53 52 54 53 #define ATH12K_INVALID_HW_MAC_ID 0xFF 55 54 #define ATH12K_CONNECTION_LOSS_HZ (3 * HZ) 56 - #define ATH12K_RX_RATE_TABLE_NUM 320 57 - #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 58 55 59 56 #define ATH12K_MON_TIMER_INTERVAL 10 60 57 #define ATH12K_RESET_TIMEOUT_HZ (20 * HZ) ··· 86 87 #define ATH12K_HT_MCS_MAX 7 87 88 #define ATH12K_VHT_MCS_MAX 9 88 89 #define ATH12K_HE_MCS_MAX 11 90 + #define ATH12K_EHT_MCS_MAX 15 89 91 90 92 enum ath12k_crypt_mode { 91 93 /* Only use hardware crypto engine */ ··· 141 141 u8 is_frag; 142 142 u8 tid; 143 143 u16 peer_id; 144 + bool is_end_of_ppdu; 144 145 }; 145 146 146 147 enum ath12k_hw_rev { ··· 167 166 u32 num_irq; 168 167 u32 grp_id; 169 168 u64 timestamp; 169 + bool napi_enabled; 170 170 struct napi_struct napi; 171 171 struct net_device *napi_ndev; 172 172 }; ··· 237 235 ATH12K_FLAG_CE_IRQ_ENABLED, 238 236 ATH12K_FLAG_EXT_IRQ_ENABLED, 239 237 ATH12K_FLAG_QMI_FW_READY_COMPLETE, 238 + ATH12K_FLAG_FTM_SEGMENTED, 240 239 }; 241 240 242 241 struct ath12k_tx_conf { ··· 301 298 u8 link_id; 302 299 struct ath12k_vif *ahvif; 303 300 struct ath12k_rekey_data rekey_data; 301 + 302 + u8 current_cntdown_counter; 304 303 }; 305 304 306 305 struct ath12k_vif { ··· 332 327 u32 key_cipher; 333 328 u8 tx_encap_type; 334 329 bool ps; 330 + atomic_t mcbc_gsn; 335 331 336 332 struct ath12k_link_vif deflink; 337 333 struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS]; ··· 360 354 #define HAL_RX_MAX_MCS_HT 31 361 355 #define HAL_RX_MAX_MCS_VHT 9 362 356 #define HAL_RX_MAX_MCS_HE 11 357 + #define HAL_RX_MAX_MCS_BE 15 363 358 #define HAL_RX_MAX_NSS 8 364 359 #define HAL_RX_MAX_NUM_LEGACY_RATES 12 365 - #define ATH12K_RX_RATE_TABLE_11AX_NUM 576 366 - #define ATH12K_RX_RATE_TABLE_NUM 320 367 360 368 361 struct 
ath12k_rx_peer_rate_stats { 369 362 u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1]; 370 363 u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1]; 371 364 u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1]; 365 + u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1]; 372 366 u64 nss_count[HAL_RX_MAX_NSS]; 373 367 u64 bw_count[HAL_RX_BW_MAX]; 374 368 u64 gi_count[HAL_RX_GI_MAX]; 375 369 u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES]; 376 - u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM]; 370 + u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1]; 377 371 }; 378 372 379 373 struct ath12k_rx_peer_stats { ··· 483 477 u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX]; 484 478 }; 485 479 480 + DECLARE_EWMA(avg_rssi, 10, 8) 481 + 486 482 struct ath12k_link_sta { 487 483 struct ath12k_link_vif *arvif; 488 484 struct ath12k_sta *ahsta; ··· 504 496 u64 rx_duration; 505 497 u64 tx_duration; 506 498 u8 rssi_comb; 499 + struct ewma_avg_rssi avg_rssi; 507 500 u8 link_id; 508 501 struct ath12k_rx_peer_stats *rx_stats; 509 502 struct ath12k_wbm_tx_stats *wbm_tx_stats; 510 503 u32 bw_prev; 504 + u32 peer_nss; 505 + s8 rssi_beacon; 511 506 512 507 /* For now the assoc link will be considered primary */ 513 508 bool is_assoc_link; ··· 545 534 ATH12K_HW_STATE_RESTARTING, 546 535 ATH12K_HW_STATE_RESTARTED, 547 536 ATH12K_HW_STATE_WEDGED, 537 + ATH12K_HW_STATE_TM, 548 538 /* Add other states as required */ 549 539 }; 550 540 551 541 /* Antenna noise floor */ 552 542 #define ATH12K_DEFAULT_NOISE_FLOOR -95 543 + 544 + struct ath12k_ftm_event_obj { 545 + u32 data_pos; 546 + u32 expected_seq; 547 + u8 *eventdata; 548 + }; 553 549 554 550 struct ath12k_fw_stats { 555 551 u32 pdev_id; ··· 564 546 struct list_head pdevs; 565 547 struct list_head vdevs; 566 548 struct list_head bcn; 549 + bool fw_stats_done; 567 550 }; 568 551 569 552 struct ath12k_dbg_htt_stats { ··· 578 559 struct dentry *debugfs_pdev; 579 560 struct dentry *debugfs_pdev_symlink; 580 561 struct ath12k_dbg_htt_stats htt_stats; 562 + enum 
wmi_halphy_ctrl_path_stats_id tpc_stats_type; 563 + bool tpc_request; 564 + struct completion tpc_complete; 565 + struct wmi_tpc_stats_arg *tpc_stats; 566 + u32 rx_filter; 567 + bool extd_rx_stats; 581 568 }; 582 569 583 570 struct ath12k_per_peer_tx_stats { ··· 737 712 738 713 bool nlo_enabled; 739 714 715 + struct completion fw_stats_complete; 716 + 740 717 struct completion mlo_setup_done; 741 718 u32 mlo_setup_status; 719 + u8 ftm_msgref; 720 + struct ath12k_fw_stats fw_stats; 742 721 }; 743 722 744 723 struct ath12k_hw { ··· 1055 1026 1056 1027 const struct hal_rx_ops *hal_rx_ops; 1057 1028 1058 - /* Denotes the whether MLO is possible within the chip */ 1059 - bool single_chip_mlo_supp; 1060 - 1061 1029 struct completion restart_completed; 1062 1030 1063 1031 #ifdef CONFIG_ACPI ··· 1064 1038 u32 func_bit; 1065 1039 bool acpi_tas_enable; 1066 1040 bool acpi_bios_sar_enable; 1041 + bool acpi_disable_11be; 1042 + bool acpi_disable_rfkill; 1043 + bool acpi_cca_enable; 1044 + bool acpi_band_edge_enable; 1045 + bool acpi_enable_bdf; 1046 + u32 bit_flag; 1047 + char bdf_string[ATH12K_ACPI_BDF_MAX_LEN]; 1067 1048 u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE]; 1068 1049 u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE]; 1069 1050 u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE]; ··· 1085 1052 1086 1053 struct ath12k_hw_group *ag; 1087 1054 struct ath12k_wsi_info wsi_info; 1055 + enum ath12k_firmware_mode fw_mode; 1056 + struct ath12k_ftm_event_obj ftm_event_obj; 1088 1057 1089 1058 /* must be last */ 1090 1059 u8 drv_priv[] __aligned(sizeof(void *)); ··· 1095 1060 struct ath12k_pdev_map { 1096 1061 struct ath12k_base *ab; 1097 1062 u8 pdev_idx; 1063 + }; 1064 + 1065 + struct ath12k_fw_stats_vdev { 1066 + struct list_head list; 1067 + 1068 + u32 vdev_id; 1069 + u32 beacon_snr; 1070 + u32 data_snr; 1071 + u32 num_tx_frames[WLAN_MAX_AC]; 1072 + u32 num_rx_frames; 1073 + u32 num_tx_frames_retries[WLAN_MAX_AC]; 1074 + u32 num_tx_frames_failures[WLAN_MAX_AC]; 1075 + u32 
num_rts_fail; 1076 + u32 num_rts_success; 1077 + u32 num_rx_err; 1078 + u32 num_rx_discard; 1079 + u32 num_tx_not_acked; 1080 + u32 tx_rate_history[MAX_TX_RATE_VALUES]; 1081 + u32 beacon_rssi_history[MAX_TX_RATE_VALUES]; 1082 + }; 1083 + 1084 + struct ath12k_fw_stats_bcn { 1085 + struct list_head list; 1086 + 1087 + u32 vdev_id; 1088 + u32 tx_bcn_succ_cnt; 1089 + u32 tx_bcn_outage_cnt; 1090 + }; 1091 + 1092 + struct ath12k_fw_stats_pdev { 1093 + struct list_head list; 1094 + 1095 + /* PDEV stats */ 1096 + s32 ch_noise_floor; 1097 + u32 tx_frame_count; 1098 + u32 rx_frame_count; 1099 + u32 rx_clear_count; 1100 + u32 cycle_count; 1101 + u32 phy_err_count; 1102 + u32 chan_tx_power; 1103 + u32 ack_rx_bad; 1104 + u32 rts_bad; 1105 + u32 rts_good; 1106 + u32 fcs_bad; 1107 + u32 no_beacons; 1108 + u32 mib_int_count; 1109 + 1110 + /* PDEV TX stats */ 1111 + s32 comp_queued; 1112 + s32 comp_delivered; 1113 + s32 msdu_enqued; 1114 + s32 mpdu_enqued; 1115 + s32 wmm_drop; 1116 + s32 local_enqued; 1117 + s32 local_freed; 1118 + s32 hw_queued; 1119 + s32 hw_reaped; 1120 + s32 underrun; 1121 + s32 tx_abort; 1122 + s32 mpdus_requed; 1123 + u32 tx_ko; 1124 + u32 data_rc; 1125 + u32 self_triggers; 1126 + u32 sw_retry_failure; 1127 + u32 illgl_rate_phy_err; 1128 + u32 pdev_cont_xretry; 1129 + u32 pdev_tx_timeout; 1130 + u32 pdev_resets; 1131 + u32 stateless_tid_alloc_failure; 1132 + u32 phy_underrun; 1133 + u32 txop_ovf; 1134 + 1135 + /* PDEV RX stats */ 1136 + s32 mid_ppdu_route_change; 1137 + s32 status_rcvd; 1138 + s32 r0_frags; 1139 + s32 r1_frags; 1140 + s32 r2_frags; 1141 + s32 r3_frags; 1142 + s32 htt_msdus; 1143 + s32 htt_mpdus; 1144 + s32 loc_msdus; 1145 + s32 loc_mpdus; 1146 + s32 oversize_amsdu; 1147 + s32 phy_errs; 1148 + s32 phy_err_drop; 1149 + s32 mpdu_errs; 1098 1150 }; 1099 1151 1100 1152 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); ··· 1206 1084 int ath12k_core_suspend(struct ath12k_base *ab); 1207 1085 int ath12k_core_suspend_late(struct ath12k_base 
*ab); 1208 1086 void ath12k_core_hw_group_unassign(struct ath12k_base *ab); 1087 + u8 ath12k_get_num_partner_link(struct ath12k *ar); 1209 1088 1210 1089 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, 1211 1090 const char *filename);
+4 -2
drivers/net/wireless/ath/ath12k/debug.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/vmalloc.h> ··· 63 63 vaf.fmt = fmt; 64 64 vaf.va = &args; 65 65 66 - if (ath12k_debug_mask & mask) 66 + if (likely(ab)) 67 67 dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); 68 + else 69 + printk(KERN_DEBUG "ath12k: %pV", &vaf); 68 70 69 71 /* TODO: trace log */ 70 72
+7 -3
drivers/net/wireless/ath/ath12k/debug.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef _ATH12K_DEBUG_H_ ··· 37 37 #define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__) 38 38 39 39 extern unsigned int ath12k_debug_mask; 40 + extern bool ath12k_ftm_mode; 40 41 41 42 #ifdef CONFIG_ATH12K_DEBUG 42 43 __printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab, ··· 62 61 } 63 62 #endif /* CONFIG_ATH12K_DEBUG */ 64 63 65 - #define ath12k_dbg(ar, dbg_mask, fmt, ...) \ 64 + #define ath12k_dbg(ab, dbg_mask, fmt, ...) \ 66 65 do { \ 67 66 typeof(dbg_mask) mask = (dbg_mask); \ 68 67 if (ath12k_debug_mask & mask) \ 69 - __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \ 68 + __ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \ 70 69 } while (0) 70 + 71 + #define ath12k_generic_dbg(dbg_mask, fmt, ...) \ 72 + ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__) 71 73 72 74 #endif /* _ATH12K_DEBUG_H_ */
+1190 -1
drivers/net/wireless/ath/ath12k/debugfs.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" 8 + #include "dp_tx.h" 9 + #include "debug.h" 8 10 #include "debugfs.h" 9 11 #include "debugfs_htt_stats.h" 10 12 ··· 31 29 static const struct file_operations fops_simulate_radar = { 32 30 .write = ath12k_write_simulate_radar, 33 31 .open = simple_open 32 + }; 33 + 34 + static ssize_t ath12k_write_tpc_stats_type(struct file *file, 35 + const char __user *user_buf, 36 + size_t count, loff_t *ppos) 37 + { 38 + struct ath12k *ar = file->private_data; 39 + u8 type; 40 + int ret; 41 + 42 + ret = kstrtou8_from_user(user_buf, count, 0, &type); 43 + if (ret) 44 + return ret; 45 + 46 + if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX) 47 + return -EINVAL; 48 + 49 + spin_lock_bh(&ar->data_lock); 50 + ar->debug.tpc_stats_type = type; 51 + spin_unlock_bh(&ar->data_lock); 52 + 53 + return count; 54 + } 55 + 56 + static int ath12k_debug_tpc_stats_request(struct ath12k *ar) 57 + { 58 + enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id; 59 + struct ath12k_base *ab = ar->ab; 60 + int ret; 61 + 62 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 63 + 64 + reinit_completion(&ar->debug.tpc_complete); 65 + 66 + spin_lock_bh(&ar->data_lock); 67 + ar->debug.tpc_request = true; 68 + tpc_stats_sub_id = ar->debug.tpc_stats_type; 69 + spin_unlock_bh(&ar->data_lock); 70 + 71 + ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id); 72 + if (ret) { 73 + ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret); 74 + spin_lock_bh(&ar->data_lock); 75 + ar->debug.tpc_request = false; 76 + spin_unlock_bh(&ar->data_lock); 77 + return ret; 78 + } 79 + 80 + return 0; 81 + } 82 + 83 + static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats, 84 
+ enum wmi_tpc_pream_bw pream_bw, int *mode_idx) 85 + { 86 + u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq); 87 + u8 band; 88 + 89 + band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ : 90 + ((chan_freq > ATH12K_MIN_5G_FREQ) ? NL80211_BAND_5GHZ : 91 + NL80211_BAND_2GHZ)); 92 + 93 + if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { 94 + switch (pream_bw) { 95 + case WMI_TPC_PREAM_HT20: 96 + case WMI_TPC_PREAM_VHT20: 97 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ; 98 + break; 99 + case WMI_TPC_PREAM_HE20: 100 + case WMI_TPC_PREAM_EHT20: 101 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ; 102 + break; 103 + case WMI_TPC_PREAM_HT40: 104 + case WMI_TPC_PREAM_VHT40: 105 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ; 106 + break; 107 + case WMI_TPC_PREAM_HE40: 108 + case WMI_TPC_PREAM_EHT40: 109 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ; 110 + break; 111 + case WMI_TPC_PREAM_VHT80: 112 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ; 113 + break; 114 + case WMI_TPC_PREAM_EHT60: 115 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20; 116 + break; 117 + case WMI_TPC_PREAM_HE80: 118 + case WMI_TPC_PREAM_EHT80: 119 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ; 120 + break; 121 + case WMI_TPC_PREAM_VHT160: 122 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ; 123 + break; 124 + case WMI_TPC_PREAM_EHT120: 125 + case WMI_TPC_PREAM_EHT140: 126 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20; 127 + break; 128 + case WMI_TPC_PREAM_HE160: 129 + case WMI_TPC_PREAM_EHT160: 130 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ; 131 + break; 132 + case WMI_TPC_PREAM_EHT200: 133 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120; 134 + break; 135 + case WMI_TPC_PREAM_EHT240: 136 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80; 137 + break; 138 + case WMI_TPC_PREAM_EHT280: 139 + *mode_idx = 
ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40; 140 + break; 141 + case WMI_TPC_PREAM_EHT320: 142 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ; 143 + break; 144 + default: 145 + /* for 5GHZ and 6GHZ, default case will be for OFDM */ 146 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ; 147 + break; 148 + } 149 + } else { 150 + switch (pream_bw) { 151 + case WMI_TPC_PREAM_OFDM: 152 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ; 153 + break; 154 + case WMI_TPC_PREAM_HT20: 155 + case WMI_TPC_PREAM_VHT20: 156 + case WMI_TPC_PREAM_HE20: 157 + case WMI_TPC_PREAM_EHT20: 158 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ; 159 + break; 160 + case WMI_TPC_PREAM_HT40: 161 + case WMI_TPC_PREAM_VHT40: 162 + case WMI_TPC_PREAM_HE40: 163 + case WMI_TPC_PREAM_EHT40: 164 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ; 165 + break; 166 + default: 167 + /* for 2GHZ, default case will be CCK */ 168 + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ; 169 + break; 170 + } 171 + } 172 + 173 + return 0; 174 + } 175 + 176 + static s16 ath12k_tpc_get_rate(struct ath12k *ar, 177 + struct wmi_tpc_stats_arg *tpc_stats, 178 + u32 rate_idx, u32 num_chains, u32 rate_code, 179 + enum wmi_tpc_pream_bw pream_bw, 180 + enum wmi_halphy_ctrl_path_stats_id type, 181 + u32 eht_rate_idx) 182 + { 183 + u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3; 184 + u8 chain_idx, stm_idx, num_streams; 185 + bool is_mu, txbf_enabled = 0; 186 + s8 rates_ctl_min, tpc_ctl; 187 + s16 rates, tpc, reg_pwr; 188 + u16 rate1, rate2; 189 + int mode, ret; 190 + 191 + num_streams = 1 + ATH12K_HW_NSS(rate_code); 192 + chain_idx = num_chains - 1; 193 + stm_idx = num_streams - 1; 194 + mode = -1; 195 + 196 + ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode); 197 + if (ret) { 198 + ath12k_warn(ar->ab, "Invalid mode index received\n"); 199 + tpc = TPC_INVAL; 200 + goto out; 201 + } 202 + 203 + if (num_chains < num_streams) { 204 + tpc = TPC_INVAL; 205 + goto 
out; 206 + } 207 + 208 + if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) { 209 + tpc = TPC_INVAL; 210 + goto out; 211 + } 212 + 213 + if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS || 214 + type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) 215 + txbf_enabled = 1; 216 + 217 + if (type == WMI_HALPHY_PDEV_TX_MU_STATS || 218 + type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) { 219 + is_mu = true; 220 + } else { 221 + is_mu = false; 222 + } 223 + 224 + /* Below is the min calculation of ctl array, rates array and 225 + * regulator power table. tpc is minimum of all 3 226 + */ 227 + if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) { 228 + rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx]; 229 + if (is_mu) 230 + rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU); 231 + else 232 + rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU); 233 + } else { 234 + rate1 = tpc_stats->rates_array1.rate_array[rate_idx]; 235 + if (is_mu) 236 + rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU); 237 + else 238 + rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU); 239 + } 240 + 241 + if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { 242 + tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1); 243 + tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2); 244 + txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3); 245 + index_offset1 = txbf_on_off * tot_modes * tot_nss; 246 + index_offset2 = tot_modes * tot_nss; 247 + index_offset3 = tot_nss; 248 + 249 + tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table + 250 + chain_idx * index_offset1 + txbf_enabled * index_offset2 251 + + mode * index_offset3 + stm_idx); 252 + } else { 253 + tpc_ctl = TPC_MAX; 254 + ath12k_warn(ar->ab, 255 + "ctl array for tpc stats not received from fw\n"); 256 + } 257 + 258 + rates_ctl_min = min_t(s16, rates, tpc_ctl); 259 + 260 + reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx]; 261 + 262 + if (reg_pwr < 0) 263 + reg_pwr = TPC_INVAL; 264 + 265 + tpc 
= min_t(s16, rates_ctl_min, reg_pwr); 266 + 267 + /* MODULATION_LIMIT is the maximum power limit,tpc should not exceed 268 + * modulation limit even if min tpc of all three array is greater 269 + * modulation limit 270 + */ 271 + tpc = min_t(s16, tpc, MODULATION_LIMIT); 272 + 273 + out: 274 + return tpc; 275 + } 276 + 277 + static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate) 278 + { 279 + u16 mode_type = ~0; 280 + 281 + /* Below assignments are just for printing purpose only */ 282 + switch (pream_idx) { 283 + case WMI_TPC_PREAM_CCK: 284 + mode_type = WMI_RATE_PREAMBLE_CCK; 285 + break; 286 + case WMI_TPC_PREAM_OFDM: 287 + mode_type = WMI_RATE_PREAMBLE_OFDM; 288 + break; 289 + case WMI_TPC_PREAM_HT20: 290 + case WMI_TPC_PREAM_HT40: 291 + mode_type = WMI_RATE_PREAMBLE_HT; 292 + break; 293 + case WMI_TPC_PREAM_VHT20: 294 + case WMI_TPC_PREAM_VHT40: 295 + case WMI_TPC_PREAM_VHT80: 296 + case WMI_TPC_PREAM_VHT160: 297 + mode_type = WMI_RATE_PREAMBLE_VHT; 298 + break; 299 + case WMI_TPC_PREAM_HE20: 300 + case WMI_TPC_PREAM_HE40: 301 + case WMI_TPC_PREAM_HE80: 302 + case WMI_TPC_PREAM_HE160: 303 + mode_type = WMI_RATE_PREAMBLE_HE; 304 + break; 305 + case WMI_TPC_PREAM_EHT20: 306 + case WMI_TPC_PREAM_EHT40: 307 + case WMI_TPC_PREAM_EHT60: 308 + case WMI_TPC_PREAM_EHT80: 309 + case WMI_TPC_PREAM_EHT120: 310 + case WMI_TPC_PREAM_EHT140: 311 + case WMI_TPC_PREAM_EHT160: 312 + case WMI_TPC_PREAM_EHT200: 313 + case WMI_TPC_PREAM_EHT240: 314 + case WMI_TPC_PREAM_EHT280: 315 + case WMI_TPC_PREAM_EHT320: 316 + mode_type = WMI_RATE_PREAMBLE_EHT; 317 + if (mcs_rate == 0 || mcs_rate == 1) 318 + mcs_rate += 14; 319 + else 320 + mcs_rate -= 2; 321 + break; 322 + default: 323 + return mode_type; 324 + } 325 + return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F)); 326 + } 327 + 328 + static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq) 329 + { 330 + struct ath12k_pdev_cap *cap = &ar->pdev->cap; 331 + struct ath12k_band_cap *cap_band; 332 + 
bool extra_mcs_supported; 333 + 334 + if (freq <= ATH12K_2GHZ_MAX_FREQUENCY) 335 + cap_band = &cap->band[NL80211_BAND_2GHZ]; 336 + else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY) 337 + cap_band = &cap->band[NL80211_BAND_5GHZ]; 338 + else 339 + cap_band = &cap->band[NL80211_BAND_6GHZ]; 340 + 341 + extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1], 342 + HE_EXTRA_MCS_SUPPORT); 343 + return extra_mcs_supported; 344 + } 345 + 346 + static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len, 347 + enum wmi_tpc_pream_bw pream_bw, u32 max_rix, 348 + int max_nss, int max_rates, int pream_type, 349 + enum wmi_halphy_ctrl_path_stats_id tpc_type, 350 + int rate_idx, int eht_rate_idx) 351 + { 352 + struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; 353 + int nss, rates, chains; 354 + u8 active_tx_chains; 355 + u16 rate_code; 356 + s16 tpc; 357 + 358 + static const char *const pream_str[] = { 359 + [WMI_TPC_PREAM_CCK] = "CCK", 360 + [WMI_TPC_PREAM_OFDM] = "OFDM", 361 + [WMI_TPC_PREAM_HT20] = "HT20", 362 + [WMI_TPC_PREAM_HT40] = "HT40", 363 + [WMI_TPC_PREAM_VHT20] = "VHT20", 364 + [WMI_TPC_PREAM_VHT40] = "VHT40", 365 + [WMI_TPC_PREAM_VHT80] = "VHT80", 366 + [WMI_TPC_PREAM_VHT160] = "VHT160", 367 + [WMI_TPC_PREAM_HE20] = "HE20", 368 + [WMI_TPC_PREAM_HE40] = "HE40", 369 + [WMI_TPC_PREAM_HE80] = "HE80", 370 + [WMI_TPC_PREAM_HE160] = "HE160", 371 + [WMI_TPC_PREAM_EHT20] = "EHT20", 372 + [WMI_TPC_PREAM_EHT40] = "EHT40", 373 + [WMI_TPC_PREAM_EHT60] = "EHT60", 374 + [WMI_TPC_PREAM_EHT80] = "EHT80", 375 + [WMI_TPC_PREAM_EHT120] = "EHT120", 376 + [WMI_TPC_PREAM_EHT140] = "EHT140", 377 + [WMI_TPC_PREAM_EHT160] = "EHT160", 378 + [WMI_TPC_PREAM_EHT200] = "EHT200", 379 + [WMI_TPC_PREAM_EHT240] = "EHT240", 380 + [WMI_TPC_PREAM_EHT280] = "EHT280", 381 + [WMI_TPC_PREAM_EHT320] = "EHT320"}; 382 + 383 + active_tx_chains = ar->num_tx_chains; 384 + 385 + for (nss = 0; nss < max_nss; nss++) { 386 + for (rates = 0; rates < max_rates; rates++, rate_idx++, 
max_rix++) { 387 + /* FW send extra MCS(10&11) for VHT and HE rates, 388 + * this is not used. Hence skipping it here 389 + */ 390 + if (pream_type == WMI_RATE_PREAMBLE_VHT && 391 + rates > ATH12K_VHT_MCS_MAX) 392 + continue; 393 + 394 + if (pream_type == WMI_RATE_PREAMBLE_HE && 395 + rates > ATH12K_HE_MCS_MAX) 396 + continue; 397 + 398 + if (pream_type == WMI_RATE_PREAMBLE_EHT && 399 + rates > ATH12K_EHT_MCS_MAX) 400 + continue; 401 + 402 + rate_code = ath12k_get_ratecode(pream_bw, nss, rates); 403 + len += scnprintf(buf + len, buf_len - len, 404 + "%d\t %s\t 0x%03x\t", max_rix, 405 + pream_str[pream_bw], rate_code); 406 + 407 + for (chains = 0; chains < active_tx_chains; chains++) { 408 + if (nss > chains) { 409 + len += scnprintf(buf + len, 410 + buf_len - len, 411 + "\t%s", "NA"); 412 + } else { 413 + tpc = ath12k_tpc_get_rate(ar, tpc_stats, 414 + rate_idx, chains + 1, 415 + rate_code, pream_bw, 416 + tpc_type, 417 + eht_rate_idx); 418 + 419 + if (tpc == TPC_INVAL) { 420 + len += scnprintf(buf + len, 421 + buf_len - len, "\tNA"); 422 + } else { 423 + len += scnprintf(buf + len, 424 + buf_len - len, "\t%d", 425 + tpc); 426 + } 427 + } 428 + } 429 + len += scnprintf(buf + len, buf_len - len, "\n"); 430 + 431 + if (pream_type == WMI_RATE_PREAMBLE_EHT) 432 + /*For fetching the next eht rates pwr from rates array2*/ 433 + ++eht_rate_idx; 434 + } 435 + } 436 + 437 + return len; 438 + } 439 + 440 + static int ath12k_tpc_stats_print(struct ath12k *ar, 441 + struct wmi_tpc_stats_arg *tpc_stats, 442 + char *buf, size_t len, 443 + enum wmi_halphy_ctrl_path_stats_id type) 444 + { 445 + u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0; 446 + u32 chan_freq, num_tx_chain, caps, i, j = 1; 447 + size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE; 448 + u8 nss, active_tx_chains; 449 + bool he_ext_mcs; 450 + static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = { 451 + [WMI_HALPHY_PDEV_TX_SU_STATS] = "SU", 452 + 
[WMI_HALPHY_PDEV_TX_SUTXBF_STATS] = "SU WITH TXBF", 453 + [WMI_HALPHY_PDEV_TX_MU_STATS] = "MU", 454 + [WMI_HALPHY_PDEV_TX_MUTXBF_STATS] = "MU WITH TXBF"}; 455 + 456 + u8 max_rates[WMI_TPC_PREAM_MAX] = { 457 + [WMI_TPC_PREAM_CCK] = ATH12K_CCK_RATES, 458 + [WMI_TPC_PREAM_OFDM] = ATH12K_OFDM_RATES, 459 + [WMI_TPC_PREAM_HT20] = ATH12K_HT_RATES, 460 + [WMI_TPC_PREAM_HT40] = ATH12K_HT_RATES, 461 + [WMI_TPC_PREAM_VHT20] = ATH12K_VHT_RATES, 462 + [WMI_TPC_PREAM_VHT40] = ATH12K_VHT_RATES, 463 + [WMI_TPC_PREAM_VHT80] = ATH12K_VHT_RATES, 464 + [WMI_TPC_PREAM_VHT160] = ATH12K_VHT_RATES, 465 + [WMI_TPC_PREAM_HE20] = ATH12K_HE_RATES, 466 + [WMI_TPC_PREAM_HE40] = ATH12K_HE_RATES, 467 + [WMI_TPC_PREAM_HE80] = ATH12K_HE_RATES, 468 + [WMI_TPC_PREAM_HE160] = ATH12K_HE_RATES, 469 + [WMI_TPC_PREAM_EHT20] = ATH12K_EHT_RATES, 470 + [WMI_TPC_PREAM_EHT40] = ATH12K_EHT_RATES, 471 + [WMI_TPC_PREAM_EHT60] = ATH12K_EHT_RATES, 472 + [WMI_TPC_PREAM_EHT80] = ATH12K_EHT_RATES, 473 + [WMI_TPC_PREAM_EHT120] = ATH12K_EHT_RATES, 474 + [WMI_TPC_PREAM_EHT140] = ATH12K_EHT_RATES, 475 + [WMI_TPC_PREAM_EHT160] = ATH12K_EHT_RATES, 476 + [WMI_TPC_PREAM_EHT200] = ATH12K_EHT_RATES, 477 + [WMI_TPC_PREAM_EHT240] = ATH12K_EHT_RATES, 478 + [WMI_TPC_PREAM_EHT280] = ATH12K_EHT_RATES, 479 + [WMI_TPC_PREAM_EHT320] = ATH12K_EHT_RATES}; 480 + static const u8 max_nss[WMI_TPC_PREAM_MAX] = { 481 + [WMI_TPC_PREAM_CCK] = ATH12K_NSS_1, 482 + [WMI_TPC_PREAM_OFDM] = ATH12K_NSS_1, 483 + [WMI_TPC_PREAM_HT20] = ATH12K_NSS_4, 484 + [WMI_TPC_PREAM_HT40] = ATH12K_NSS_4, 485 + [WMI_TPC_PREAM_VHT20] = ATH12K_NSS_8, 486 + [WMI_TPC_PREAM_VHT40] = ATH12K_NSS_8, 487 + [WMI_TPC_PREAM_VHT80] = ATH12K_NSS_8, 488 + [WMI_TPC_PREAM_VHT160] = ATH12K_NSS_4, 489 + [WMI_TPC_PREAM_HE20] = ATH12K_NSS_8, 490 + [WMI_TPC_PREAM_HE40] = ATH12K_NSS_8, 491 + [WMI_TPC_PREAM_HE80] = ATH12K_NSS_8, 492 + [WMI_TPC_PREAM_HE160] = ATH12K_NSS_4, 493 + [WMI_TPC_PREAM_EHT20] = ATH12K_NSS_4, 494 + [WMI_TPC_PREAM_EHT40] = ATH12K_NSS_4, 495 + [WMI_TPC_PREAM_EHT60] = 
ATH12K_NSS_4, 496 + [WMI_TPC_PREAM_EHT80] = ATH12K_NSS_4, 497 + [WMI_TPC_PREAM_EHT120] = ATH12K_NSS_4, 498 + [WMI_TPC_PREAM_EHT140] = ATH12K_NSS_4, 499 + [WMI_TPC_PREAM_EHT160] = ATH12K_NSS_4, 500 + [WMI_TPC_PREAM_EHT200] = ATH12K_NSS_4, 501 + [WMI_TPC_PREAM_EHT240] = ATH12K_NSS_4, 502 + [WMI_TPC_PREAM_EHT280] = ATH12K_NSS_4, 503 + [WMI_TPC_PREAM_EHT320] = ATH12K_NSS_4}; 504 + 505 + u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {}; 506 + static const u8 pream_type[WMI_TPC_PREAM_MAX] = { 507 + [WMI_TPC_PREAM_CCK] = WMI_RATE_PREAMBLE_CCK, 508 + [WMI_TPC_PREAM_OFDM] = WMI_RATE_PREAMBLE_OFDM, 509 + [WMI_TPC_PREAM_HT20] = WMI_RATE_PREAMBLE_HT, 510 + [WMI_TPC_PREAM_HT40] = WMI_RATE_PREAMBLE_HT, 511 + [WMI_TPC_PREAM_VHT20] = WMI_RATE_PREAMBLE_VHT, 512 + [WMI_TPC_PREAM_VHT40] = WMI_RATE_PREAMBLE_VHT, 513 + [WMI_TPC_PREAM_VHT80] = WMI_RATE_PREAMBLE_VHT, 514 + [WMI_TPC_PREAM_VHT160] = WMI_RATE_PREAMBLE_VHT, 515 + [WMI_TPC_PREAM_HE20] = WMI_RATE_PREAMBLE_HE, 516 + [WMI_TPC_PREAM_HE40] = WMI_RATE_PREAMBLE_HE, 517 + [WMI_TPC_PREAM_HE80] = WMI_RATE_PREAMBLE_HE, 518 + [WMI_TPC_PREAM_HE160] = WMI_RATE_PREAMBLE_HE, 519 + [WMI_TPC_PREAM_EHT20] = WMI_RATE_PREAMBLE_EHT, 520 + [WMI_TPC_PREAM_EHT40] = WMI_RATE_PREAMBLE_EHT, 521 + [WMI_TPC_PREAM_EHT60] = WMI_RATE_PREAMBLE_EHT, 522 + [WMI_TPC_PREAM_EHT80] = WMI_RATE_PREAMBLE_EHT, 523 + [WMI_TPC_PREAM_EHT120] = WMI_RATE_PREAMBLE_EHT, 524 + [WMI_TPC_PREAM_EHT140] = WMI_RATE_PREAMBLE_EHT, 525 + [WMI_TPC_PREAM_EHT160] = WMI_RATE_PREAMBLE_EHT, 526 + [WMI_TPC_PREAM_EHT200] = WMI_RATE_PREAMBLE_EHT, 527 + [WMI_TPC_PREAM_EHT240] = WMI_RATE_PREAMBLE_EHT, 528 + [WMI_TPC_PREAM_EHT280] = WMI_RATE_PREAMBLE_EHT, 529 + [WMI_TPC_PREAM_EHT320] = WMI_RATE_PREAMBLE_EHT}; 530 + 531 + chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq); 532 + num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain); 533 + caps = le32_to_cpu(tpc_stats->tpc_config.caps); 534 + 535 + active_tx_chains = ar->num_tx_chains; 536 + he_ext_mcs = 
ath12k_he_supports_extra_mcs(ar, chan_freq); 537 + 538 + /* mcs 12&13 is sent by FW for certain HWs in rate array, skipping it as 539 + * it is not supported 540 + */ 541 + if (he_ext_mcs) { 542 + for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i) 543 + max_rates[i] = ATH12K_HE_RATES; 544 + } 545 + 546 + if (type == WMI_HALPHY_PDEV_TX_MU_STATS || 547 + type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) { 548 + pream_idx = WMI_TPC_PREAM_VHT20; 549 + 550 + for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i) 551 + max_rix += max_nss[i] * max_rates[i]; 552 + } 553 + /* Enumerate all the rate indices */ 554 + for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) { 555 + nss = (max_nss[i - 1] < num_tx_chain ? 556 + max_nss[i - 1] : num_tx_chain); 557 + 558 + rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss; 559 + 560 + if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) { 561 + eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss; 562 + ++j; 563 + } 564 + } 565 + 566 + for (i = 0; i < WMI_TPC_PREAM_MAX; i++) { 567 + nss = (max_nss[i] < num_tx_chain ? 
568 + max_nss[i] : num_tx_chain); 569 + total_rates += max_rates[i] * nss; 570 + } 571 + 572 + len += scnprintf(buf + len, buf_len - len, 573 + "No.of rates-%d\n", total_rates); 574 + 575 + len += scnprintf(buf + len, buf_len - len, 576 + "**************** %s ****************\n", 577 + type_str[type]); 578 + len += scnprintf(buf + len, buf_len - len, 579 + "\t\t\t\tTPC values for Active chains\n"); 580 + len += scnprintf(buf + len, buf_len - len, 581 + "Rate idx Preamble Rate code"); 582 + 583 + for (i = 1; i <= active_tx_chains; ++i) { 584 + len += scnprintf(buf + len, buf_len - len, 585 + "\t%d-Chain", i); 586 + } 587 + 588 + len += scnprintf(buf + len, buf_len - len, "\n"); 589 + for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) { 590 + if (chan_freq <= 2483) { 591 + if (i == WMI_TPC_PREAM_VHT80 || 592 + i == WMI_TPC_PREAM_VHT160 || 593 + i == WMI_TPC_PREAM_HE80 || 594 + i == WMI_TPC_PREAM_HE160 || 595 + (i >= WMI_TPC_PREAM_EHT60 && 596 + i <= WMI_TPC_PREAM_EHT320)) { 597 + max_rix += max_nss[i] * max_rates[i]; 598 + continue; 599 + } 600 + } else { 601 + if (i == WMI_TPC_PREAM_CCK) { 602 + max_rix += max_rates[i]; 603 + continue; 604 + } 605 + } 606 + 607 + nss = (max_nss[i] < ar->num_tx_chains ? 
max_nss[i] : ar->num_tx_chains); 608 + 609 + if (!(caps & 610 + (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) { 611 + if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 || 612 + i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 || 613 + i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) { 614 + max_rix += max_nss[i] * max_rates[i]; 615 + continue; 616 + } 617 + } 618 + 619 + len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss, 620 + max_rates[i], pream_type[i], 621 + type, rate_idx[i], eht_rate_idx[eht_idx]); 622 + 623 + if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) 624 + /*For fetch the next index eht rates from rates array2*/ 625 + ++eht_idx; 626 + 627 + max_rix += max_nss[i] * max_rates[i]; 628 + } 629 + return len; 630 + } 631 + 632 + static void ath12k_tpc_stats_fill(struct ath12k *ar, 633 + struct wmi_tpc_stats_arg *tpc_stats, 634 + char *buf) 635 + { 636 + size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE; 637 + struct wmi_tpc_config_params *tpc; 638 + size_t len = 0; 639 + 640 + if (!tpc_stats) { 641 + ath12k_warn(ar->ab, "failed to find tpc stats\n"); 642 + return; 643 + } 644 + 645 + spin_lock_bh(&ar->data_lock); 646 + 647 + tpc = &tpc_stats->tpc_config; 648 + len += scnprintf(buf + len, buf_len - len, "\n"); 649 + len += scnprintf(buf + len, buf_len - len, 650 + "*************** TPC config **************\n"); 651 + len += scnprintf(buf + len, buf_len - len, 652 + "* powers are in 0.25 dBm steps\n"); 653 + len += scnprintf(buf + len, buf_len - len, 654 + "reg domain-%d\t\tchan freq-%d\n", 655 + tpc->reg_domain, tpc->chan_freq); 656 + len += scnprintf(buf + len, buf_len - len, 657 + "power limit-%d\t\tmax reg-domain Power-%d\n", 658 + le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit); 659 + len += scnprintf(buf + len, buf_len - len, 660 + "No.of tx chain-%d\t", 661 + ar->num_tx_chains); 662 + 663 + ath12k_tpc_stats_print(ar, tpc_stats, buf, len, 664 + ar->debug.tpc_stats_type); 665 + 666 + spin_unlock_bh(&ar->data_lock); 667 + 
} 668 + 669 + static int ath12k_open_tpc_stats(struct inode *inode, struct file *file) 670 + { 671 + struct ath12k *ar = inode->i_private; 672 + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); 673 + int ret; 674 + 675 + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); 676 + 677 + if (ah->state != ATH12K_HW_STATE_ON) { 678 + ath12k_warn(ar->ab, "Interface not up\n"); 679 + return -ENETDOWN; 680 + } 681 + 682 + void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL); 683 + if (!buf) 684 + return -ENOMEM; 685 + 686 + ret = ath12k_debug_tpc_stats_request(ar); 687 + if (ret) { 688 + ath12k_warn(ar->ab, "failed to request tpc stats: %d\n", 689 + ret); 690 + return ret; 691 + } 692 + 693 + if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) { 694 + spin_lock_bh(&ar->data_lock); 695 + ath12k_wmi_free_tpc_stats_mem(ar); 696 + ar->debug.tpc_request = false; 697 + spin_unlock_bh(&ar->data_lock); 698 + return -ETIMEDOUT; 699 + } 700 + 701 + ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); 702 + file->private_data = no_free_ptr(buf); 703 + 704 + spin_lock_bh(&ar->data_lock); 705 + ath12k_wmi_free_tpc_stats_mem(ar); 706 + spin_unlock_bh(&ar->data_lock); 707 + 708 + return 0; 709 + } 710 + 711 + static ssize_t ath12k_read_tpc_stats(struct file *file, 712 + char __user *user_buf, 713 + size_t count, loff_t *ppos) 714 + { 715 + const char *buf = file->private_data; 716 + size_t len = strlen(buf); 717 + 718 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 719 + } 720 + 721 + static int ath12k_release_tpc_stats(struct inode *inode, 722 + struct file *file) 723 + { 724 + kfree(file->private_data); 725 + return 0; 726 + } 727 + 728 + static const struct file_operations fops_tpc_stats = { 729 + .open = ath12k_open_tpc_stats, 730 + .release = ath12k_release_tpc_stats, 731 + .read = ath12k_read_tpc_stats, 732 + .owner = THIS_MODULE, 733 + .llseek = default_llseek, 734 + }; 735 + 736 + static const struct file_operations 
fops_tpc_stats_type = { 737 + .write = ath12k_write_tpc_stats_type, 738 + .open = simple_open, 739 + .llseek = default_llseek, 740 + }; 741 + 742 + static ssize_t ath12k_write_extd_rx_stats(struct file *file, 743 + const char __user *ubuf, 744 + size_t count, loff_t *ppos) 745 + { 746 + struct ath12k *ar = file->private_data; 747 + struct htt_rx_ring_tlv_filter tlv_filter = {0}; 748 + u32 ring_id, rx_filter = 0; 749 + bool enable; 750 + int ret, i; 751 + 752 + if (kstrtobool_from_user(ubuf, count, &enable)) 753 + return -EINVAL; 754 + 755 + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); 756 + 757 + if (!ar->ab->hw_params->rxdma1_enable) { 758 + ret = count; 759 + goto exit; 760 + } 761 + 762 + if (ar->ah->state != ATH12K_HW_STATE_ON) { 763 + ret = -ENETDOWN; 764 + goto exit; 765 + } 766 + 767 + if (enable == ar->debug.extd_rx_stats) { 768 + ret = count; 769 + goto exit; 770 + } 771 + 772 + if (enable) { 773 + rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START; 774 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START; 775 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END; 776 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS; 777 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT; 778 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE; 779 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO; 780 + 781 + tlv_filter.rx_filter = rx_filter; 782 + tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; 783 + tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; 784 + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; 785 + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | 786 + HTT_RX_FP_DATA_FILTER_FLASG3; 787 + } else { 788 + tlv_filter = ath12k_mac_mon_status_filter_default; 789 + } 790 + 791 + ar->debug.rx_filter = tlv_filter.rx_filter; 792 + 793 + for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) { 794 + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; 795 + ret = 
ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i, 796 + HAL_RXDMA_MONITOR_DST, 797 + DP_RXDMA_REFILL_RING_SIZE, 798 + &tlv_filter); 799 + if (ret) { 800 + ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); 801 + goto exit; 802 + } 803 + } 804 + 805 + ar->debug.extd_rx_stats = !!enable; 806 + ret = count; 807 + exit: 808 + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); 809 + return ret; 810 + } 811 + 812 + static ssize_t ath12k_read_extd_rx_stats(struct file *file, 813 + char __user *ubuf, 814 + size_t count, loff_t *ppos) 815 + { 816 + struct ath12k *ar = file->private_data; 817 + char buf[32]; 818 + int len = 0; 819 + 820 + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); 821 + len = scnprintf(buf, sizeof(buf) - len, "%d\n", 822 + ar->debug.extd_rx_stats); 823 + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); 824 + 825 + return simple_read_from_buffer(ubuf, count, ppos, buf, len); 826 + } 827 + 828 + static const struct file_operations fops_extd_rx_stats = { 829 + .read = ath12k_read_extd_rx_stats, 830 + .write = ath12k_write_extd_rx_stats, 831 + .open = simple_open, 34 832 }; 35 833 36 834 void ath12k_debugfs_soc_create(struct ath12k_base *ab) ··· 870 68 */ 871 69 } 872 70 71 + static void ath12k_fw_stats_pdevs_free(struct list_head *head) 72 + { 73 + struct ath12k_fw_stats_pdev *i, *tmp; 74 + 75 + list_for_each_entry_safe(i, tmp, head, list) { 76 + list_del(&i->list); 77 + kfree(i); 78 + } 79 + } 80 + 81 + static void ath12k_fw_stats_bcn_free(struct list_head *head) 82 + { 83 + struct ath12k_fw_stats_bcn *i, *tmp; 84 + 85 + list_for_each_entry_safe(i, tmp, head, list) { 86 + list_del(&i->list); 87 + kfree(i); 88 + } 89 + } 90 + 91 + static void ath12k_fw_stats_vdevs_free(struct list_head *head) 92 + { 93 + struct ath12k_fw_stats_vdev *i, *tmp; 94 + 95 + list_for_each_entry_safe(i, tmp, head, list) { 96 + list_del(&i->list); 97 + kfree(i); 98 + } 99 + } 100 + 101 + void ath12k_debugfs_fw_stats_reset(struct ath12k *ar) 102 + { 103 + 
spin_lock_bh(&ar->data_lock); 104 + ar->fw_stats.fw_stats_done = false; 105 + ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); 106 + ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn); 107 + ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); 108 + spin_unlock_bh(&ar->data_lock); 109 + } 110 + 111 + static int ath12k_debugfs_fw_stats_request(struct ath12k *ar, 112 + struct ath12k_fw_stats_req_params *param) 113 + { 114 + struct ath12k_base *ab = ar->ab; 115 + unsigned long timeout, time_left; 116 + int ret; 117 + 118 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 119 + 120 + /* FW stats can get split when exceeding the stats data buffer limit. 121 + * In that case, since there is no end marking for the back-to-back 122 + * received 'update stats' event, we keep a 3 seconds timeout in case, 123 + * fw_stats_done is not marked yet 124 + */ 125 + timeout = jiffies + msecs_to_jiffies(3 * 1000); 126 + 127 + ath12k_debugfs_fw_stats_reset(ar); 128 + 129 + reinit_completion(&ar->fw_stats_complete); 130 + 131 + ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id, 132 + param->vdev_id, param->pdev_id); 133 + 134 + if (ret) { 135 + ath12k_warn(ab, "could not request fw stats (%d)\n", 136 + ret); 137 + return ret; 138 + } 139 + 140 + time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 141 + 1 * HZ); 142 + /* If the wait timed out, return -ETIMEDOUT */ 143 + if (!time_left) 144 + return -ETIMEDOUT; 145 + 146 + /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back 147 + * when stats data buffer limit is reached. fw_stats_complete 148 + * is completed once host receives first event from firmware, but 149 + * still end might not be marked in the TLV. 
150 + * Below loop is to confirm that firmware completed sending all the event 151 + * and fw_stats_done is marked true when end is marked in the TLV 152 + */ 153 + for (;;) { 154 + if (time_after(jiffies, timeout)) 155 + break; 156 + 157 + spin_lock_bh(&ar->data_lock); 158 + if (ar->fw_stats.fw_stats_done) { 159 + spin_unlock_bh(&ar->data_lock); 160 + break; 161 + } 162 + spin_unlock_bh(&ar->data_lock); 163 + } 164 + return 0; 165 + } 166 + 167 + void 168 + ath12k_debugfs_fw_stats_process(struct ath12k *ar, 169 + struct ath12k_fw_stats *stats) 170 + { 171 + struct ath12k_base *ab = ar->ab; 172 + struct ath12k_pdev *pdev; 173 + bool is_end; 174 + static unsigned int num_vdev, num_bcn; 175 + size_t total_vdevs_started = 0; 176 + int i; 177 + 178 + if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { 179 + if (list_empty(&stats->vdevs)) { 180 + ath12k_warn(ab, "empty vdev stats"); 181 + return; 182 + } 183 + /* FW sends all the active VDEV stats irrespective of PDEV, 184 + * hence limit until the count of all VDEVs started 185 + */ 186 + rcu_read_lock(); 187 + for (i = 0; i < ab->num_radios; i++) { 188 + pdev = rcu_dereference(ab->pdevs_active[i]); 189 + if (pdev && pdev->ar) 190 + total_vdevs_started += pdev->ar->num_started_vdevs; 191 + } 192 + rcu_read_unlock(); 193 + 194 + is_end = ((++num_vdev) == total_vdevs_started); 195 + 196 + list_splice_tail_init(&stats->vdevs, 197 + &ar->fw_stats.vdevs); 198 + 199 + if (is_end) { 200 + ar->fw_stats.fw_stats_done = true; 201 + num_vdev = 0; 202 + } 203 + return; 204 + } 205 + if (stats->stats_id == WMI_REQUEST_BCN_STAT) { 206 + if (list_empty(&stats->bcn)) { 207 + ath12k_warn(ab, "empty beacon stats"); 208 + return; 209 + } 210 + /* Mark end until we reached the count of all started VDEVs 211 + * within the PDEV 212 + */ 213 + is_end = ((++num_bcn) == ar->num_started_vdevs); 214 + 215 + list_splice_tail_init(&stats->bcn, 216 + &ar->fw_stats.bcn); 217 + 218 + if (is_end) { 219 + ar->fw_stats.fw_stats_done = true; 220 + num_bcn = 
0; 221 + } 222 + } 223 + if (stats->stats_id == WMI_REQUEST_PDEV_STAT) { 224 + list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs); 225 + ar->fw_stats.fw_stats_done = true; 226 + } 227 + } 228 + 229 + static int ath12k_open_vdev_stats(struct inode *inode, struct file *file) 230 + { 231 + struct ath12k *ar = inode->i_private; 232 + struct ath12k_fw_stats_req_params param; 233 + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); 234 + int ret; 235 + 236 + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); 237 + 238 + if (!ah) 239 + return -ENETDOWN; 240 + 241 + if (ah->state != ATH12K_HW_STATE_ON) 242 + return -ENETDOWN; 243 + 244 + void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC); 245 + if (!buf) 246 + return -ENOMEM; 247 + 248 + param.pdev_id = ath12k_mac_get_target_pdev_id(ar); 249 + /* VDEV stats is always sent for all active VDEVs from FW */ 250 + param.vdev_id = 0; 251 + param.stats_id = WMI_REQUEST_VDEV_STAT; 252 + 253 + ret = ath12k_debugfs_fw_stats_request(ar, &param); 254 + if (ret) { 255 + ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); 256 + return ret; 257 + } 258 + 259 + ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, 260 + buf); 261 + 262 + file->private_data = no_free_ptr(buf); 263 + 264 + return 0; 265 + } 266 + 267 + static int ath12k_release_vdev_stats(struct inode *inode, struct file *file) 268 + { 269 + kfree(file->private_data); 270 + 271 + return 0; 272 + } 273 + 274 + static ssize_t ath12k_read_vdev_stats(struct file *file, 275 + char __user *user_buf, 276 + size_t count, loff_t *ppos) 277 + { 278 + const char *buf = file->private_data; 279 + size_t len = strlen(buf); 280 + 281 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 282 + } 283 + 284 + static const struct file_operations fops_vdev_stats = { 285 + .open = ath12k_open_vdev_stats, 286 + .release = ath12k_release_vdev_stats, 287 + .read = ath12k_read_vdev_stats, 288 + .owner = THIS_MODULE, 289 + .llseek = default_llseek, 290 + 
}; 291 + 292 + static int ath12k_open_bcn_stats(struct inode *inode, struct file *file) 293 + { 294 + struct ath12k *ar = inode->i_private; 295 + struct ath12k_link_vif *arvif; 296 + struct ath12k_fw_stats_req_params param; 297 + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); 298 + int ret; 299 + 300 + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); 301 + 302 + if (ah && ah->state != ATH12K_HW_STATE_ON) 303 + return -ENETDOWN; 304 + 305 + void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC); 306 + if (!buf) 307 + return -ENOMEM; 308 + 309 + param.pdev_id = ath12k_mac_get_target_pdev_id(ar); 310 + param.stats_id = WMI_REQUEST_BCN_STAT; 311 + 312 + /* loop all active VDEVs for bcn stats */ 313 + list_for_each_entry(arvif, &ar->arvifs, list) { 314 + if (!arvif->is_up) 315 + continue; 316 + 317 + param.vdev_id = arvif->vdev_id; 318 + ret = ath12k_debugfs_fw_stats_request(ar, &param); 319 + if (ret) { 320 + ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret); 321 + return ret; 322 + } 323 + } 324 + 325 + ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, 326 + buf); 327 + /* since beacon stats request is looped for all active VDEVs, saved fw 328 + * stats is not freed for each request until done for all active VDEVs 329 + */ 330 + spin_lock_bh(&ar->data_lock); 331 + ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn); 332 + spin_unlock_bh(&ar->data_lock); 333 + 334 + file->private_data = no_free_ptr(buf); 335 + 336 + return 0; 337 + } 338 + 339 + static int ath12k_release_bcn_stats(struct inode *inode, struct file *file) 340 + { 341 + kfree(file->private_data); 342 + 343 + return 0; 344 + } 345 + 346 + static ssize_t ath12k_read_bcn_stats(struct file *file, 347 + char __user *user_buf, 348 + size_t count, loff_t *ppos) 349 + { 350 + const char *buf = file->private_data; 351 + size_t len = strlen(buf); 352 + 353 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 354 + } 355 + 356 + static const struct file_operations fops_bcn_stats 
= { 357 + .open = ath12k_open_bcn_stats, 358 + .release = ath12k_release_bcn_stats, 359 + .read = ath12k_read_bcn_stats, 360 + .owner = THIS_MODULE, 361 + .llseek = default_llseek, 362 + }; 363 + 364 + static int ath12k_open_pdev_stats(struct inode *inode, struct file *file) 365 + { 366 + struct ath12k *ar = inode->i_private; 367 + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); 368 + struct ath12k_base *ab = ar->ab; 369 + struct ath12k_fw_stats_req_params param; 370 + int ret; 371 + 372 + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); 373 + 374 + if (ah && ah->state != ATH12K_HW_STATE_ON) 375 + return -ENETDOWN; 376 + 377 + void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC); 378 + if (!buf) 379 + return -ENOMEM; 380 + 381 + param.pdev_id = ath12k_mac_get_target_pdev_id(ar); 382 + param.vdev_id = 0; 383 + param.stats_id = WMI_REQUEST_PDEV_STAT; 384 + 385 + ret = ath12k_debugfs_fw_stats_request(ar, &param); 386 + if (ret) { 387 + ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret); 388 + return ret; 389 + } 390 + 391 + ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, 392 + buf); 393 + 394 + file->private_data = no_free_ptr(buf); 395 + 396 + return 0; 397 + } 398 + 399 + static int ath12k_release_pdev_stats(struct inode *inode, struct file *file) 400 + { 401 + kfree(file->private_data); 402 + 403 + return 0; 404 + } 405 + 406 + static ssize_t ath12k_read_pdev_stats(struct file *file, 407 + char __user *user_buf, 408 + size_t count, loff_t *ppos) 409 + { 410 + const char *buf = file->private_data; 411 + size_t len = strlen(buf); 412 + 413 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 414 + } 415 + 416 + static const struct file_operations fops_pdev_stats = { 417 + .open = ath12k_open_pdev_stats, 418 + .release = ath12k_release_pdev_stats, 419 + .read = ath12k_read_pdev_stats, 420 + .owner = THIS_MODULE, 421 + .llseek = default_llseek, 422 + }; 423 + 424 + static 425 + void ath12k_debugfs_fw_stats_register(struct 
ath12k *ar) 426 + { 427 + struct dentry *fwstats_dir = debugfs_create_dir("fw_stats", 428 + ar->debug.debugfs_pdev); 429 + 430 + /* all stats debugfs files created are under "fw_stats" directory 431 + * created per PDEV 432 + */ 433 + debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar, 434 + &fops_vdev_stats); 435 + debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar, 436 + &fops_bcn_stats); 437 + debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar, 438 + &fops_pdev_stats); 439 + 440 + INIT_LIST_HEAD(&ar->fw_stats.vdevs); 441 + INIT_LIST_HEAD(&ar->fw_stats.bcn); 442 + INIT_LIST_HEAD(&ar->fw_stats.pdevs); 443 + 444 + init_completion(&ar->fw_stats_complete); 445 + } 446 + 873 447 void ath12k_debugfs_register(struct ath12k *ar) 874 448 { 875 449 struct ath12k_base *ab = ar->ab; ··· 1269 91 &fops_simulate_radar); 1270 92 } 1271 93 94 + debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar, 95 + &fops_tpc_stats); 96 + debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev, 97 + ar, &fops_tpc_stats_type); 98 + init_completion(&ar->debug.tpc_complete); 99 + 1272 100 ath12k_debugfs_htt_stats_register(ar); 101 + ath12k_debugfs_fw_stats_register(ar); 102 + 103 + debugfs_create_file("ext_rx_stats", 0644, 104 + ar->debug.debugfs_pdev, ar, 105 + &fops_extd_rx_stats); 1273 106 } 1274 107 1275 108 void ath12k_debugfs_unregister(struct ath12k *ar)
+114 -1
drivers/net/wireless/ath/ath12k/debugfs.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef _ATH12K_DEBUGFS_H_ ··· 12 12 void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); 13 13 void ath12k_debugfs_register(struct ath12k *ar); 14 14 void ath12k_debugfs_unregister(struct ath12k *ar); 15 + void ath12k_debugfs_fw_stats_process(struct ath12k *ar, 16 + struct ath12k_fw_stats *stats); 17 + void ath12k_debugfs_fw_stats_reset(struct ath12k *ar); 18 + 19 + static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar) 20 + { 21 + return ar->debug.extd_rx_stats; 22 + } 23 + 24 + static inline int ath12k_debugfs_rx_filter(struct ath12k *ar) 25 + { 26 + return ar->debug.rx_filter; 27 + } 28 + 29 + #define ATH12K_CCK_RATES 4 30 + #define ATH12K_OFDM_RATES 8 31 + #define ATH12K_HT_RATES 8 32 + #define ATH12K_VHT_RATES 12 33 + #define ATH12K_HE_RATES 12 34 + #define ATH12K_HE_RATES_WITH_EXTRA_MCS 14 35 + #define ATH12K_EHT_RATES 16 36 + #define HE_EXTRA_MCS_SUPPORT GENMASK(31, 16) 37 + #define ATH12K_NSS_1 1 38 + #define ATH12K_NSS_4 4 39 + #define ATH12K_NSS_8 8 40 + #define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7) 41 + #define TPC_STATS_WAIT_TIME (1 * HZ) 42 + #define MAX_TPC_PREAM_STR_LEN 7 43 + #define TPC_INVAL -128 44 + #define TPC_MAX 127 45 + #define TPC_STATS_WAIT_TIME (1 * HZ) 46 + #define TPC_STATS_TOT_ROW 700 47 + #define TPC_STATS_TOT_COLUMN 100 48 + #define MODULATION_LIMIT 126 49 + 50 + #define ATH12K_TPC_STATS_BUF_SIZE (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN) 51 + 52 + enum wmi_tpc_pream_bw { 53 + WMI_TPC_PREAM_CCK, 54 + WMI_TPC_PREAM_OFDM, 55 + WMI_TPC_PREAM_HT20, 56 + WMI_TPC_PREAM_HT40, 57 + WMI_TPC_PREAM_VHT20, 58 + WMI_TPC_PREAM_VHT40, 59 + WMI_TPC_PREAM_VHT80, 60 + WMI_TPC_PREAM_VHT160, 61 + 
WMI_TPC_PREAM_HE20, 62 + WMI_TPC_PREAM_HE40, 63 + WMI_TPC_PREAM_HE80, 64 + WMI_TPC_PREAM_HE160, 65 + WMI_TPC_PREAM_EHT20, 66 + WMI_TPC_PREAM_EHT40, 67 + WMI_TPC_PREAM_EHT60, 68 + WMI_TPC_PREAM_EHT80, 69 + WMI_TPC_PREAM_EHT120, 70 + WMI_TPC_PREAM_EHT140, 71 + WMI_TPC_PREAM_EHT160, 72 + WMI_TPC_PREAM_EHT200, 73 + WMI_TPC_PREAM_EHT240, 74 + WMI_TPC_PREAM_EHT280, 75 + WMI_TPC_PREAM_EHT320, 76 + WMI_TPC_PREAM_MAX 77 + }; 78 + 79 + enum ath12k_debug_tpc_stats_ctl_mode { 80 + ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ, 81 + ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ, 82 + ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ, 83 + ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ, 84 + ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ, 85 + ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ, 86 + ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ, 87 + ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ, 88 + ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ, 89 + ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ, 90 + ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ, 91 + ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ, 92 + ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ, 93 + ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ, 94 + 95 + ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23, 96 + ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20, 97 + ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40, 98 + ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80, 99 + ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120 100 + }; 101 + 102 + enum ath12k_debug_tpc_stats_support_modes { 103 + ATH12K_TPC_STATS_SUPPORT_160 = 0, 104 + ATH12K_TPC_STATS_SUPPORT_320, 105 + ATH12K_TPC_STATS_SUPPORT_AX, 106 + ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS, 107 + ATH12K_TPC_STATS_SUPPORT_BE, 108 + ATH12K_TPC_STATS_SUPPORT_BE_PUNC, 109 + }; 15 110 #else 16 111 static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab) 17 112 { ··· 124 29 { 125 30 } 126 31 32 + static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar, 33 + struct ath12k_fw_stats *stats) 34 + { 35 + } 36 + 37 + static inline void 
ath12k_debugfs_fw_stats_reset(struct ath12k *ar) 38 + { 39 + } 40 + 41 + static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar) 42 + { 43 + return false; 44 + } 45 + 46 + static inline int ath12k_debugfs_rx_filter(struct ath12k *ar) 47 + { 48 + return 0; 49 + } 127 50 #endif /* CONFIG_ATH12K_DEBUGFS */ 128 51 129 52 #endif /* _ATH12K_DEBUGFS_H_ */
+1237 -1
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/vmalloc.h> ··· 46 46 { 47 47 return print_array_to_buf_index(buf, offset, header, 0, array, array_len, 48 48 footer); 49 + } 50 + 51 + static u32 52 + print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index, 53 + const s8 *array, u32 array_len, const char *footer) 54 + { 55 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 56 + int index = 0; 57 + u8 i; 58 + 59 + if (header) 60 + index += scnprintf(buf + offset, buf_len - offset, "%s = ", header); 61 + 62 + for (i = 0; i < array_len; i++) { 63 + index += scnprintf(buf + offset + index, (buf_len - offset) - index, 64 + " %u:%d,", stats_index++, array[i]); 65 + } 66 + 67 + index--; 68 + if ((offset + index) < buf_len) 69 + buf[offset + index] = '\0'; 70 + 71 + if (footer) { 72 + index += scnprintf(buf + offset + index, (buf_len - offset) - index, 73 + "%s", footer); 74 + } 75 + 76 + return index; 49 77 } 50 78 51 79 static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size) ··· 2540 2512 } 2541 2513 2542 2514 static void 2515 + ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len, 2516 + struct debug_htt_stats_req *stats_req) 2517 + { 2518 + const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf; 2519 + const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320; 2520 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2521 + u32 len = stats_req->buf_len; 2522 + u8 *buf = stats_req->buf; 2523 + u32 tx_sounding_mode; 2524 + u8 i, u; 2525 + 2526 + if (tag_len < sizeof(*htt_stats_buf)) 2527 + return; 2528 + 2529 + cbf_20 = htt_stats_buf->cbf_20; 2530 + cbf_40 = htt_stats_buf->cbf_40; 2531 + cbf_80 = htt_stats_buf->cbf_80; 2532 + cbf_160 = 
htt_stats_buf->cbf_160; 2533 + cbf_320 = htt_stats_buf->cbf_320; 2534 + tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode); 2535 + 2536 + if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) { 2537 + len += scnprintf(buf + len, buf_len - len, 2538 + "HTT_TX_AC_SOUNDING_STATS_TLV:\n"); 2539 + len += scnprintf(buf + len, buf_len - len, 2540 + "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2541 + le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]), 2542 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2543 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2544 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2545 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2546 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2547 + len += scnprintf(buf + len, buf_len - len, 2548 + "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2549 + le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]), 2550 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2551 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2552 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2553 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2554 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2555 + len += scnprintf(buf + len, buf_len - len, 2556 + "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2557 + le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]), 2558 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2559 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2560 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2561 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2562 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2563 + len += scnprintf(buf + len, buf_len - len, 2564 + "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2565 + le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]), 2566 + 
le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2567 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2568 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2569 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2570 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2571 + 2572 + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) { 2573 + len += scnprintf(buf + len, buf_len - len, 2574 + "Sounding User_%u = 20MHz: %u, ", u, 2575 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2576 + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", 2577 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2578 + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", 2579 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2580 + len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n", 2581 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2582 + } 2583 + } else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) { 2584 + len += scnprintf(buf + len, buf_len - len, 2585 + "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n"); 2586 + len += scnprintf(buf + len, buf_len - len, 2587 + "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2588 + le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]), 2589 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2590 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2591 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2592 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2593 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2594 + len += scnprintf(buf + len, buf_len - len, 2595 + "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2596 + le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]), 2597 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2598 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2599 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2600 + 
le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2601 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2602 + len += scnprintf(buf + len, buf_len - len, 2603 + "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2604 + le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]), 2605 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2606 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2607 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2608 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2609 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2610 + len += scnprintf(buf + len, buf_len - len, 2611 + "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2612 + le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]), 2613 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2614 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2615 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2616 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2617 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2618 + 2619 + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) { 2620 + len += scnprintf(buf + len, buf_len - len, 2621 + "Sounding User_%u = 20MHz: %u, ", u, 2622 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2623 + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", 2624 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2625 + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", 2626 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2627 + len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n", 2628 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2629 + } 2630 + } else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) { 2631 + len += scnprintf(buf + len, buf_len - len, 2632 + "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n"); 2633 + len += scnprintf(buf + len, buf_len - len, 2634 + "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2635 + 
le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]), 2636 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2637 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2638 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2639 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2640 + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2641 + len += scnprintf(buf + len, buf_len - len, 2642 + "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2643 + le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]), 2644 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2645 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2646 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2647 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2648 + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2649 + len += scnprintf(buf + len, buf_len - len, 2650 + "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2651 + le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]), 2652 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2653 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2654 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2655 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2656 + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2657 + len += scnprintf(buf + len, buf_len - len, 2658 + "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 2659 + le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]), 2660 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2661 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2662 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2663 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2664 + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2665 + len += scnprintf(buf + len, buf_len - len, 2666 + "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", 
2667 + le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]), 2668 + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), 2669 + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); 2670 + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", 2671 + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), 2672 + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); 2673 + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) { 2674 + len += scnprintf(buf + len, buf_len - len, 2675 + "Sounding User_%u = 20MHz: %u, ", u, 2676 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2677 + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", 2678 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2679 + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", 2680 + le32_to_cpu(htt_stats_buf->sounding[i++])); 2681 + len += scnprintf(buf + len, buf_len - len, 2682 + "160MHz: %u, 320MHz: %u\n", 2683 + le32_to_cpu(htt_stats_buf->sounding[i++]), 2684 + le32_to_cpu(htt_stats_buf->sounding_320[u])); 2685 + } 2686 + } else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) { 2687 + len += scnprintf(buf + len, buf_len - len, 2688 + "\nCV UPLOAD HANDLER STATS:\n"); 2689 + len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n", 2690 + le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err)); 2691 + len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n", 2692 + le32_to_cpu(htt_stats_buf->cv_fcs_err)); 2693 + len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n", 2694 + le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch)); 2695 + len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n", 2696 + le32_to_cpu(htt_stats_buf->cv_invalid_peer_id)); 2697 + len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n", 2698 + le32_to_cpu(htt_stats_buf->cv_no_txbf_setup)); 2699 + len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n", 2700 + 
le32_to_cpu(htt_stats_buf->cv_expiry_in_update)); 2701 + len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n", 2702 + le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed)); 2703 + len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n", 2704 + le32_to_cpu(htt_stats_buf->cv_dma_not_done_err)); 2705 + len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n", 2706 + le32_to_cpu(htt_stats_buf->cv_update_failed)); 2707 + len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n", 2708 + le32_to_cpu(htt_stats_buf->cv_dma_timeout_error)); 2709 + len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n", 2710 + le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads)); 2711 + len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n", 2712 + le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads)); 2713 + len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n", 2714 + le32_to_cpu(htt_stats_buf->cv_buf_received)); 2715 + len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n", 2716 + le32_to_cpu(htt_stats_buf->cv_buf_fed_back)); 2717 + 2718 + len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n"); 2719 + len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n", 2720 + le32_to_cpu(htt_stats_buf->cv_total_query)); 2721 + len += scnprintf(buf + len, buf_len - len, 2722 + "cv_total_pattern_query = %u\n", 2723 + le32_to_cpu(htt_stats_buf->cv_total_pattern_query)); 2724 + len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n", 2725 + le32_to_cpu(htt_stats_buf->cv_total_bw_query)); 2726 + len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n", 2727 + le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding)); 2728 + len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n", 2729 + le32_to_cpu(htt_stats_buf->cv_forced_sounding)); 2730 + len += scnprintf(buf + len, buf_len - len, 2731 + "cv_standalone_sounding = %u\n", 2732 + 
le32_to_cpu(htt_stats_buf->cv_standalone_sounding)); 2733 + len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n", 2734 + le32_to_cpu(htt_stats_buf->cv_nc_mismatch)); 2735 + len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n", 2736 + le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch)); 2737 + len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n", 2738 + le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch)); 2739 + len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n", 2740 + le32_to_cpu(htt_stats_buf->cv_bw_mismatch)); 2741 + len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n", 2742 + le32_to_cpu(htt_stats_buf->cv_pattern_mismatch)); 2743 + len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n", 2744 + le32_to_cpu(htt_stats_buf->cv_preamble_mismatch)); 2745 + len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n", 2746 + le32_to_cpu(htt_stats_buf->cv_nr_mismatch)); 2747 + len += scnprintf(buf + len, buf_len - len, 2748 + "cv_in_use_cnt_exceeded = %u\n", 2749 + le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded)); 2750 + len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n", 2751 + le32_to_cpu(htt_stats_buf->cv_ntbr_sounding)); 2752 + len += scnprintf(buf + len, buf_len - len, 2753 + "cv_found_upload_in_progress = %u\n", 2754 + le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress)); 2755 + len += scnprintf(buf + len, buf_len - len, 2756 + "cv_expired_during_query = %u\n", 2757 + le32_to_cpu(htt_stats_buf->cv_expired_during_query)); 2758 + len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n", 2759 + le32_to_cpu(htt_stats_buf->cv_found)); 2760 + len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n", 2761 + le32_to_cpu(htt_stats_buf->cv_not_found)); 2762 + len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf = %u\n", 2763 + le32_to_cpu(htt_stats_buf->cv_total_query_ibf)); 2764 + len += scnprintf(buf + len, buf_len - 
len, "cv_found_ibf = %u\n", 2765 + le32_to_cpu(htt_stats_buf->cv_found_ibf)); 2766 + len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n", 2767 + le32_to_cpu(htt_stats_buf->cv_not_found_ibf)); 2768 + len += scnprintf(buf + len, buf_len - len, 2769 + "cv_expired_during_query_ibf = %u\n\n", 2770 + le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf)); 2771 + } 2772 + 2773 + stats_req->buf_len = len; 2774 + } 2775 + 2776 + static void 2543 2777 ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len, 2544 2778 struct debug_htt_stats_req *stats_req) 2545 2779 { ··· 2862 2572 "num_srg_ppdu_success = %u\n\n", 2863 2573 le32_to_cpu(htt_stats_buf->num_srg_success_per_ac[i])); 2864 2574 } 2575 + 2576 + stats_req->buf_len = len; 2577 + } 2578 + 2579 + static void 2580 + ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len, 2581 + struct debug_htt_stats_req *stats_req) 2582 + { 2583 + const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf; 2584 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2585 + u32 len = stats_req->buf_len; 2586 + u8 *buf = stats_req->buf; 2587 + 2588 + if (tag_len < sizeof(*htt_stats_buf)) 2589 + return; 2590 + 2591 + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n"); 2592 + len += scnprintf(buf + len, buf_len - len, "duration = %u\n", 2593 + le32_to_cpu(htt_stats_buf->duration)); 2594 + len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n", 2595 + le32_to_cpu(htt_stats_buf->tx_msdu_cnt)); 2596 + len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n", 2597 + le32_to_cpu(htt_stats_buf->tx_mpdu_cnt)); 2598 + len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n", 2599 + le32_to_cpu(htt_stats_buf->rx_msdu_cnt)); 2600 + len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n", 2601 + le32_to_cpu(htt_stats_buf->rx_mpdu_cnt)); 2602 + 2603 + stats_req->buf_len = len; 2604 + } 2605 + 2606 + static void 2607 + 
ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len, 2608 + struct debug_htt_stats_req *stats_req) 2609 + { 2610 + const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf; 2611 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2612 + u32 len = stats_req->buf_len; 2613 + u8 *buf = stats_req->buf; 2614 + 2615 + if (tag_len < sizeof(*htt_stats_buf)) 2616 + return; 2617 + 2618 + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n"); 2619 + len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n", 2620 + le32_to_cpu(htt_stats_buf->prof_enable_cnt)); 2621 + 2622 + stats_req->buf_len = len; 2623 + } 2624 + 2625 + static void 2626 + ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len, 2627 + struct debug_htt_stats_req *stats_req) 2628 + { 2629 + const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf; 2630 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2631 + u32 len = stats_req->buf_len; 2632 + u8 *buf = stats_req->buf; 2633 + 2634 + if (tag_len < sizeof(*htt_stats_buf)) 2635 + return; 2636 + 2637 + if (le32_to_cpu(htt_stats_buf->print_header) == 1) { 2638 + len += scnprintf(buf + len, buf_len - len, 2639 + "HTT_STATS_LATENCY_PROF_TLV:\n"); 2640 + } 2641 + 2642 + len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n", 2643 + htt_stats_buf->latency_prof_name); 2644 + len += scnprintf(buf + len, buf_len - len, "count = %u\n", 2645 + le32_to_cpu(htt_stats_buf->cnt)); 2646 + len += scnprintf(buf + len, buf_len - len, "minimum = %u\n", 2647 + le32_to_cpu(htt_stats_buf->min)); 2648 + len += scnprintf(buf + len, buf_len - len, "maximum = %u\n", 2649 + le32_to_cpu(htt_stats_buf->max)); 2650 + len += scnprintf(buf + len, buf_len - len, "last = %u\n", 2651 + le32_to_cpu(htt_stats_buf->last)); 2652 + len += scnprintf(buf + len, buf_len - len, "total = %u\n", 2653 + le32_to_cpu(htt_stats_buf->tot)); 2654 + len += scnprintf(buf + len, buf_len - len, "average = %u\n", 2655 + 
le32_to_cpu(htt_stats_buf->avg)); 2656 + len += scnprintf(buf + len, buf_len - len, "histogram interval = %u\n", 2657 + le32_to_cpu(htt_stats_buf->hist_intvl)); 2658 + len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist, 2659 + ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n"); 2660 + 2661 + stats_req->buf_len = len; 2662 + } 2663 + 2664 + static void 2665 + ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len, 2666 + struct debug_htt_stats_req *stats_req) 2667 + { 2668 + const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf; 2669 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2670 + u32 len = stats_req->buf_len; 2671 + u8 *buf = stats_req->buf; 2672 + u32 mac_id; 2673 + u8 j; 2674 + 2675 + if (tag_len < sizeof(*htt_stats_buf)) 2676 + return; 2677 + 2678 + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); 2679 + 2680 + len += scnprintf(buf + len, buf_len - len, 2681 + "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n"); 2682 + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", 2683 + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); 2684 + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n", 2685 + le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma)); 2686 + len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs", 2687 + htt_stats_buf->ul_ofdma_rx_mcs, 2688 + ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n"); 2689 + for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) { 2690 + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j); 2691 + len += print_array_to_buf(buf, len, "", 2692 + htt_stats_buf->ul_ofdma_rx_gi[j], 2693 + ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n"); 2694 + } 2695 + 2696 + len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1, 2697 + htt_stats_buf->ul_ofdma_rx_nss, 2698 + ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n"); 2699 + len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw", 2700 + htt_stats_buf->ul_ofdma_rx_bw, 2701 + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); 2702 + 2703 + for (j = 0; j < 
ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) { 2704 + len += scnprintf(buf + len, buf_len - len, j == 0 ? 2705 + "half_ul_ofdma_rx_bw" : 2706 + "quarter_ul_ofdma_rx_bw"); 2707 + len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j], 2708 + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); 2709 + } 2710 + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n", 2711 + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc)); 2712 + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n", 2713 + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc)); 2714 + 2715 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = "); 2716 + for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++) 2717 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 2718 + ath12k_htt_ax_tx_rx_ru_size_to_str(j), 2719 + le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j])); 2720 + len += scnprintf(buf + len, buf_len - len, "\n"); 2721 + 2722 + len += scnprintf(buf + len, buf_len - len, 2723 + "rx_ulofdma_non_data_ru_size_ppdu = "); 2724 + for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++) 2725 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 2726 + ath12k_htt_ax_tx_rx_ru_size_to_str(j), 2727 + le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j])); 2728 + len += scnprintf(buf + len, buf_len - len, "\n"); 2729 + 2730 + len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid", 2731 + htt_stats_buf->uplink_sta_aid, 2732 + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); 2733 + len += print_array_to_buf(buf, len, "rx_sta_target_rssi", 2734 + htt_stats_buf->uplink_sta_target_rssi, 2735 + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); 2736 + len += print_array_to_buf(buf, len, "rx_sta_fd_rssi", 2737 + htt_stats_buf->uplink_sta_fd_rssi, 2738 + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); 2739 + len += print_array_to_buf(buf, len, "rx_sta_power_headroom", 2740 + htt_stats_buf->uplink_sta_power_headroom, 2741 + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); 2742 + len += 
scnprintf(buf + len, buf_len - len, 2743 + "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n", 2744 + le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only)); 2745 + 2746 + stats_req->buf_len = len; 2747 + } 2748 + 2749 + static void 2750 + ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len, 2751 + struct debug_htt_stats_req *stats_req) 2752 + { 2753 + const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf; 2754 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2755 + u32 len = stats_req->buf_len; 2756 + u8 *buf = stats_req->buf; 2757 + u32 user_index; 2758 + 2759 + if (tag_len < sizeof(*htt_stats_buf)) 2760 + return; 2761 + 2762 + user_index = __le32_to_cpu(htt_stats_buf->user_index); 2763 + 2764 + if (!user_index) 2765 + len += scnprintf(buf + len, buf_len - len, 2766 + "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n"); 2767 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n", 2768 + user_index, 2769 + le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu)); 2770 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n", 2771 + user_index, 2772 + le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu)); 2773 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n", 2774 + user_index, 2775 + le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok)); 2776 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n", 2777 + user_index, 2778 + le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail)); 2779 + len += scnprintf(buf + len, buf_len - len, 2780 + "rx_ulofdma_non_data_nusers_%u = %u\n", user_index, 2781 + le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers)); 2782 + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n", 2783 + user_index, 2784 + le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers)); 2785 + 2786 + stats_req->buf_len = len; 2787 + } 2788 + 2789 + static void 2790 + ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, 
/*
 * Render the HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS TLV into the debugfs stats
 * buffer. MCS/GI histograms are first assembled as " idx:val," lists in
 * str_buf, then emitted as a single line.
 */
static void
ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
				      struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
	char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 len = stats_req->buf_len;
	u8 *buf = stats_req->buf;
	u32 mac_id;
	u16 index;
	u8 i, j;

	/* Guard against a truncated TLV from firmware. */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);

	len += scnprintf(buf + len, buf_len - len,
			 "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
			 u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo));
	/* Build the MCS histogram (base counters + extra MCS 12/13-style
	 * extension counters) as one comma-separated string.
	 */
	index = 0;
	memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
	for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
		index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
				   " %u:%u,", i,
				   le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i]));

	for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
		index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
				   " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
				   le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i]));
	/* Overwrite the trailing comma with the terminator (loops above always
	 * write at least one entry, so index > 0 here).
	 */
	str_buf[--index] = '\0';
	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf);

	/* Per-guard-interval MCS histograms, one output line per GI value. */
	for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
		index = 0;
		memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN);
		for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
			index += scnprintf(&str_buf[index],
					   ATH12K_HTT_MAX_STRING_LEN - index,
					   " %u:%u,", i,
					   le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i]));

		for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
			index += scnprintf(&str_buf[index],
					   ATH12K_HTT_MAX_STRING_LEN - index,
					   " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
					   le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i]));
		str_buf[--index] = '\0';
		len += scnprintf(buf + len, buf_len - len,
				 "ul_mumimo_rx_gi_%u = %s\n", j, str_buf);
	}

	/* NOTE(review): index/str_buf are reset here but not used before the
	 * print_array_to_buf*() helpers below, which format into buf directly —
	 * these two statements look like dead stores.
	 */
	index = 0;
	memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
	/* NSS histogram is 1-based in the output, hence the index offset 1. */
	len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1,
					htt_stats_buf->ul_mumimo_rx_nss,
					ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");

	len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw",
				  htt_stats_buf->ul_mumimo_rx_bw,
				  ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
	/* Reduced-bandwidth channel histograms: i == 0 is the half-BW set,
	 * any other index the quarter-BW set (two entries expected).
	 */
	for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) {
		index = 0;
		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
		for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++)
			index += scnprintf(&str_buf[index],
					   ATH12K_HTT_MAX_STRING_LEN - index,
					   " %u:%u,", j,
					   le32_to_cpu(htt_stats_buf->red_bw[i][j]));
		str_buf[--index] = '\0';
		len += scnprintf(buf + len, buf_len - len, "%s = %s\n",
				 i == 0 ? "half_ul_mumimo_rx_bw" :
				 "quarter_ul_mumimo_rx_bw", str_buf);
	}

	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n",
			 le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc));
	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc));

	/* Signed (s8) RSSI arrays: one line per RX chain. */
	for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_ul_mumimo_rssi_in_dbm: chain%u ", j);
		len += print_array_to_buf_s8(buf, len, "", 0,
					     htt_stats_buf->ul_rssi[j],
					     ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
	}

	/* Per-user target RSSI, one line per UL MU-MIMO user. */
	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_ul_mumimo_target_rssi: user_%u ", j);
		len += print_array_to_buf_s8(buf, len, "", 0,
					     htt_stats_buf->tgt_rssi[j],
					     ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
	}

	/* Per-user frequency-domain RSSI across spatial streams. */
	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_ul_mumimo_fd_rssi: user_%u ", j);
		len += print_array_to_buf_s8(buf, len, "", 0,
					     htt_stats_buf->fd[j],
					     ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
	}

	/* Per-user mean pilot EVM (dB) across spatial streams. */
	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j);
		len += print_array_to_buf_s8(buf, len, "", 0,
					     htt_stats_buf->db[j],
					     ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
	}

	len += scnprintf(buf + len, buf_len - len,
			 "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n",
			 le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only));

	stats_req->buf_len = len;
}
/*
 * Render the HTT_STATS_RX_FSE_STATS TLV (RX flow search engine counters)
 * into the debugfs stats buffer: software enable/disable/invalidate
 * counters first, then hardware cache-occupancy and search-depth
 * histograms whose bucket boundaries are fixed by the firmware layout.
 */
static void
ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len,
				  struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 len = stats_req->buf_len;
	u8 *buf = stats_req->buf;

	/* Guard against a truncated TLV from firmware. */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
	len += scnprintf(buf + len, buf_len - len, "Enable count = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_enable_cnt));
	len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_disable_cnt));
	len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt));
	len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt));

	len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n");
	len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt));
	len += scnprintf(buf + len, buf_len - len, "Cache no. of searches = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_num_searches_cnt));
	/* Peak cache occupancy histogram: 10 buckets covering 0..128 entries. */
	len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n");
	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2]));
	len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4]));
	len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6]));
	len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8]));
	len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9]));
	/* Current cache occupancy histogram: same bucket layout as peak. */
	len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n");
	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2]));
	len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4]));
	len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6]));
	len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]),
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8]));
	len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9]));
	/* Search-count-squared histogram: 6 buckets covering 0..256. */
	len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n");
	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2]));
	len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4]));
	len += scnprintf(buf + len, buf_len - len, "[256] = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5]));
	/* Peak pending-search histogram: 4 buckets (0, 1-2, 3-4, >=5). */
	len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n");
	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2]));
	len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3]));
	/* Total pending-search histogram: same bucket layout as peak pending. */
	len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n");
	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]),
			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2]));
	len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n",
			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3]));

	stats_req->buf_len = len;
}
/*
 * Render the HTT_TX_PDEV_RATE_STATS TLV into the debugfs stats buffer:
 * scalar TX counters, then MCS/NSS/BW/GI histograms for SU, AC/AX MU-MIMO
 * and OFDMA transmissions.
 */
static inline void
ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
					struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 i, j;
	u32 mac_id_word;

	/* Guard against a truncated TLV from firmware. */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);

	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
	len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->tx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->ofdma_tx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rts_cnt));
	len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
			 le32_to_cpu(htt_stats_buf->rts_success));
	len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
			 le32_to_cpu(htt_stats_buf->ack_rssi));
	/* NOTE(review): the fourth 802.11b CCK rate is 11 Mbps, not 12 Mbps —
	 * the "12 Mbps" label below looks wrong, but it is part of the
	 * established debugfs output; confirm before changing the format.
	 */
	len += scnprintf(buf + len, buf_len - len,
			 "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 12 Mbps: %u\n",
			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[0]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[1]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[2]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[3]));
	len += scnprintf(buf + len, buf_len - len,
			 "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
			 " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
	/* Indices 1..3 map to 1x/2x/4x HE LTF; element 0 is skipped here —
	 * presumably a reserved/invalid slot in the firmware layout (TODO
	 * confirm against the HTT definitions).
	 */
	len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
			 le32_to_cpu(htt_stats_buf->tx_he_ltf[1]),
			 le32_to_cpu(htt_stats_buf->tx_he_ltf[2]),
			 le32_to_cpu(htt_stats_buf->tx_he_ltf[3]));

	/* tx_mcs base histogram followed by the two extension ranges, all on
	 * one output line.
	 */
	len += print_array_to_buf(buf, len, "tx_mcs", htt_stats_buf->tx_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->tx_mcs_ext[j]));
	/* NOTE(review): the index offset for the ext_2 range uses
	 * NUM_EXTRA2_MCS_COUNTERS; the ext_2 entries logically follow the
	 * EXTRA range, so NUM_EXTRA_MCS_COUNTERS would be the expected
	 * offset — harmless only if the two constants are equal. Verify
	 * against the HTT stats definitions.
	 */
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +
				 ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->tx_mcs_ext_2[j]));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_mcs",
				  htt_stats_buf->ax_mu_mimo_tx_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_mcs_ext[j]));
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += print_array_to_buf(buf, len, "ofdma_tx_mcs",
				  htt_stats_buf->ofdma_tx_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->ofdma_tx_mcs_ext[j]));
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* NSS histograms are printed 1-based; len-- backs up over the
	 * trailing comma before the newline is written.
	 */
	len += scnprintf(buf + len, buf_len - len, "tx_nss =");
	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
				 j, le32_to_cpu(htt_stats_buf->tx_nss[j - 1]));
	len--;
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_nss =");
	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
				 j, le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_nss[j - 1]));
	len--;
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_nss =");
	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
				 j, le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_nss[j - 1]));
	len--;
	len += scnprintf(buf + len, buf_len - len, "\n");

	len += scnprintf(buf + len, buf_len - len, "ofdma_tx_nss =");
	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
				 j, le32_to_cpu(htt_stats_buf->ofdma_tx_nss[j - 1]));
	len--;
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* 320 MHz counter lives in a separate field and is appended as the
	 * extra bucket after the regular BW histogram.
	 */
	len += print_array_to_buf(buf, len, "tx_bw", htt_stats_buf->tx_bw,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, NULL);
	len += scnprintf(buf + len, buf_len - len, ", %u:%u\n",
			 ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS,
			 le32_to_cpu(htt_stats_buf->tx_bw_320mhz));

	len += print_array_to_buf(buf, len, "tx_stbc",
				  htt_stats_buf->tx_stbc,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->tx_stbc_ext[j]));
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* Per-GI MCS histograms (base + extension) for plain TX. */
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, (buf_len - len),
				 "tx_gi[%u] =", j);
		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->tx_gi[j],
					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					  NULL);
		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					 le32_to_cpu(htt_stats_buf->tx_gi_ext[j][i]));
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	/* AC MU-MIMO GI histograms have no extension counters. */
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, (buf_len - len),
				 "ac_mu_mimo_tx_gi[%u] =", j);
		len += print_array_to_buf(buf, len, NULL,
					  htt_stats_buf->ac_mu_mimo_tx_gi[j],
					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					  "\n");
	}

	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, (buf_len - len),
				 "ax_mu_mimo_tx_gi[%u] =", j);
		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ax_mimo_tx_gi[j],
					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					  NULL);
		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					 le32_to_cpu(htt_stats_buf->ax_tx_gi_ext[j][i]));
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, (buf_len - len),
				 "ofdma_tx_gi[%u] = ", j);
		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ofdma_tx_gi[j],
					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					  NULL);
		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
					 le32_to_cpu(htt_stats_buf->ofd_tx_gi_ext[j][i]));
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	len += print_array_to_buf(buf, len, "tx_su_mcs", htt_stats_buf->tx_su_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "tx_mu_mcs", htt_stats_buf->tx_mu_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_mcs",
				  htt_stats_buf->ac_mu_mimo_tx_mcs,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_bw",
				  htt_stats_buf->ac_mu_mimo_tx_bw,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_bw",
				  htt_stats_buf->ax_mu_mimo_tx_bw,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "ofdma_tx_bw",
				  htt_stats_buf->ofdma_tx_bw,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
	len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm,
				  ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");

	stats_req->buf_len = len;
}
/*
 * Render the HTT_RX_PDEV_RATE_STATS TLV into the debugfs stats buffer:
 * scalar RX counters, then per-rate histograms (NSS/DCM/STBC/BW/MCS/GI)
 * and per-chain RSSI / EVM tables.
 */
static inline void
ath12k_htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
					struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u8 i, j;
	u32 mac_id_word;

	/* Guard against a truncated TLV from firmware. */
	if (tag_len < sizeof(*htt_stats_buf))
		return;

	mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);

	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
	len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
			 le32_to_cpu(htt_stats_buf->nsts));
	len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
			 le32_to_cpu(htt_stats_buf->rts_cnt));
	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
			 le32_to_cpu(htt_stats_buf->rssi_mgmt));
	len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
			 le32_to_cpu(htt_stats_buf->rssi_data));
	len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
			 le32_to_cpu(htt_stats_buf->rssi_comb));
	/* Printed with %d: the u32 field carries a signed dBm value. */
	len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
			 le32_to_cpu(htt_stats_buf->rssi_in_dbm));
	len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
			 le32_to_cpu(htt_stats_buf->nss_count));
	len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
			 le32_to_cpu(htt_stats_buf->pilot_count));
	len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ax_su_ext));
	len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ac_mumimo));
	len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ax_mumimo));
	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ax_ofdma));
	len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
			 le32_to_cpu(htt_stats_buf->txbf));
	len += scnprintf(buf + len, buf_len - len, "rx_su_ndpa = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_su_ndpa));
	len += scnprintf(buf + len, buf_len - len, "rx_mu_ndpa = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_mu_ndpa));
	len += scnprintf(buf + len, buf_len - len, "rx_br_poll = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_br_poll));
	/* 64-bit active duration is reported as two 32-bit halves. */
	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_active_dur_us_low));
	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_active_dur_us_high));
	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
			 le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
	len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
			 le32_to_cpu(htt_stats_buf->per_chain_rssi_pkt_type));

	len += print_array_to_buf(buf, len, "rx_nss", htt_stats_buf->rx_nss,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
	len += print_array_to_buf(buf, len, "rx_dcm", htt_stats_buf->rx_dcm,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_stbc", htt_stats_buf->rx_stbc,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_bw", htt_stats_buf->rx_bw,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_pream", htt_stats_buf->rx_pream,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
	len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs",
				  htt_stats_buf->rx_11ax_su_txbf_mcs,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs",
				  htt_stats_buf->rx_11ax_mu_txbf_mcs,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_legacy_cck_rate",
				  htt_stats_buf->rx_legacy_cck_rate,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
	len += print_array_to_buf(buf, len, "rx_legacy_ofdm_rate",
				  htt_stats_buf->rx_legacy_ofdm_rate,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
	len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
				  htt_stats_buf->ul_ofdma_rx_mcs,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "ul_ofdma_rx_nss",
				  htt_stats_buf->ul_ofdma_rx_nss,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
	len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
				  htt_stats_buf->ul_ofdma_rx_bw,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_ppdu",
				  htt_stats_buf->rx_ulofdma_non_data_ppdu,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_data_ppdu",
				  htt_stats_buf->rx_ulofdma_data_ppdu,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_ok",
				  htt_stats_buf->rx_ulofdma_mpdu_ok,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_fail",
				  htt_stats_buf->rx_ulofdma_mpdu_fail,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_nusers",
				  htt_stats_buf->rx_ulofdma_non_data_nusers,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulofdma_data_nusers",
				  htt_stats_buf->rx_ulofdma_data_nusers,
				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs",
				  htt_stats_buf->rx_11ax_dl_ofdma_mcs,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_ru",
				  htt_stats_buf->rx_11ax_dl_ofdma_ru,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
	len += print_array_to_buf(buf, len, "rx_ulmumimo_non_data_ppdu",
				  htt_stats_buf->rx_ulmumimo_non_data_ppdu,
				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulmumimo_data_ppdu",
				  htt_stats_buf->rx_ulmumimo_data_ppdu,
				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_ok",
				  htt_stats_buf->rx_ulmumimo_mpdu_ok,
				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
	len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_fail",
				  htt_stats_buf->rx_ulmumimo_mpdu_fail,
				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");

	/* rx_mcs base histogram plus extension counters on one line.
	 * NOTE(review): the extension loop bounds on a TX_PDEV macro inside
	 * this RX printer — verify it matches the RX ext array length.
	 */
	len += print_array_to_buf(buf, len, "rx_mcs",
				  htt_stats_buf->rx_mcs,
				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
				 j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
				 le32_to_cpu(htt_stats_buf->rx_mcs_ext[j]));
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* Per-NSS pilot EVM tables, one line per spatial stream. */
	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "pilot_evm_db[%u] =", j);
		len += print_array_to_buf(buf, len, NULL,
					  htt_stats_buf->rx_pil_evm_db[j],
					  ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS,
					  "\n");
	}

	/* len-- backs up over the trailing comma before the newline. */
	len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean =");
	for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
		len += scnprintf(buf + len,
				 buf_len - len,
				 " %u:%d,", i,
				 le32_to_cpu(htt_stats_buf->rx_pilot_evm_db_mean[i]));
	len--;
	len += scnprintf(buf + len, buf_len - len, "\n");

	/* Per-chain RSSI tables: values are stored as host-order s8, so no
	 * le32_to_cpu() here.
	 */
	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rssi_chain_in_db[%u] = ", j);
		for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
			len += scnprintf(buf + len,
					 buf_len - len,
					 " %u: %d,", i,
					 htt_stats_buf->rssi_chain_in_db[j][i]);
		len--;
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_gi[%u] = ", j);
		len += print_array_to_buf(buf, len, NULL,
					  htt_stats_buf->rx_gi[j],
					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
					  "\n");
	}

	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "ul_ofdma_rx_gi[%u] = ", j);
		len += print_array_to_buf(buf, len, NULL,
					  htt_stats_buf->ul_ofdma_rx_gi[j],
					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
					  "\n");
	}

	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_ul_fd_rssi: nss[%u] = ", j);
		for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
			len += scnprintf(buf + len,
					 buf_len - len,
					 " %u:%d,",
					 i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
		len--;
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
		len += scnprintf(buf + len, buf_len - len,
				 "rx_per_chain_rssi_in_dbm[%u] =", j);
		for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
			len += scnprintf(buf + len,
					 buf_len - len,
					 " %u:%d,",
					 i,
					 htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
		len--;
		len += scnprintf(buf + len, buf_len - len, "\n");
	}

	stats_req->buf_len = len;
}
"ul_ofdma_rx_gi[%u] = ", j); 4201 + len += print_array_to_buf(buf, len, NULL, 4202 + htt_stats_buf->ul_ofdma_rx_gi[j], 4203 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, 4204 + "\n"); 4205 + } 4206 + 4207 + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { 4208 + len += scnprintf(buf + len, buf_len - len, 4209 + "rx_ul_fd_rssi: nss[%u] = ", j); 4210 + for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) 4211 + len += scnprintf(buf + len, 4212 + buf_len - len, 4213 + " %u:%d,", 4214 + i, htt_stats_buf->rx_ul_fd_rssi[j][i]); 4215 + len--; 4216 + len += scnprintf(buf + len, buf_len - len, "\n"); 4217 + } 4218 + 4219 + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { 4220 + len += scnprintf(buf + len, buf_len - len, 4221 + "rx_per_chain_rssi_in_dbm[%u] =", j); 4222 + for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) 4223 + len += scnprintf(buf + len, 4224 + buf_len - len, 4225 + " %u:%d,", 4226 + i, 4227 + htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]); 4228 + len--; 4229 + len += scnprintf(buf + len, buf_len - len, "\n"); 4230 + } 4231 + 4232 + stats_req->buf_len = len; 4233 + } 4234 + 4235 + static inline void 4236 + ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len, 4237 + struct debug_htt_stats_req *stats_req) 4238 + { 4239 + const struct ath12k_htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf; 4240 + u8 *buf = stats_req->buf; 4241 + u32 len = stats_req->buf_len; 4242 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 4243 + u8 j; 4244 + 4245 + if (tag_len < sizeof(*htt_stats_buf)) 4246 + return; 4247 + 4248 + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n"); 4249 + len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n", 4250 + le32_to_cpu(htt_stats_buf->rssi_mgmt_in_dbm)); 4251 + 4252 + len += print_array_to_buf(buf, len, "rx_stbc_ext", 4253 + htt_stats_buf->rx_stbc_ext, 4254 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); 4255 + 
len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs_ext", 4256 + htt_stats_buf->ul_ofdma_rx_mcs_ext, 4257 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); 4258 + len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs_ext", 4259 + htt_stats_buf->rx_11ax_su_txbf_mcs_ext, 4260 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); 4261 + len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs_ext", 4262 + htt_stats_buf->rx_11ax_mu_txbf_mcs_ext, 4263 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); 4264 + len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs_ext", 4265 + htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext, 4266 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); 4267 + len += print_array_to_buf(buf, len, "rx_bw_ext", 4268 + htt_stats_buf->rx_bw_ext, 4269 + ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n"); 4270 + len += print_array_to_buf(buf, len, "rx_su_punctured_mode", 4271 + htt_stats_buf->rx_su_punctured_mode, 4272 + ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS, 4273 + "\n"); 4274 + 4275 + len += print_array_to_buf(buf, len, "rx_mcs_ext", 4276 + htt_stats_buf->rx_mcs_ext, 4277 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, 4278 + NULL); 4279 + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++) 4280 + len += scnprintf(buf + len, buf_len - len, ", %u:%u", 4281 + j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, 4282 + le32_to_cpu(htt_stats_buf->rx_mcs_ext_2[j])); 4283 + len += scnprintf(buf + len, buf_len - len, "\n"); 4284 + 4285 + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { 4286 + len += scnprintf(buf + len, buf_len - len, 4287 + "rx_gi_ext[%u] = ", j); 4288 + len += print_array_to_buf(buf, len, NULL, 4289 + htt_stats_buf->rx_gi_ext[j], 4290 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, 4291 + "\n"); 4292 + } 4293 + 4294 + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { 4295 + len += scnprintf(buf + len, buf_len - len, 4296 + "ul_ofdma_rx_gi_ext[%u] = ", j); 
4297 + len += print_array_to_buf(buf, len, NULL, 4298 + htt_stats_buf->ul_ofdma_rx_gi_ext[j], 4299 + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, 4300 + "\n"); 4301 + } 4302 + 4303 + stats_req->buf_len = len; 4304 + } 4305 + 4527 4306 static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, 4528 4307 u16 tag, u16 len, const void *tag_buf, 4529 4308 void *user_data) ··· 5185 3982 case HTT_STATS_PDEV_CCA_COUNTERS_TAG: 5186 3983 ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req); 5187 3984 break; 3985 + case HTT_STATS_TX_SOUNDING_STATS_TAG: 3986 + ath12k_htt_print_tx_sounding_stats_tlv(tag_buf, len, stats_req); 3987 + break; 5188 3988 case HTT_STATS_PDEV_OBSS_PD_TAG: 5189 3989 ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); 3990 + break; 3991 + case HTT_STATS_LATENCY_CTX_TAG: 3992 + ath12k_htt_print_latency_prof_ctx_tlv(tag_buf, len, stats_req); 3993 + break; 3994 + case HTT_STATS_LATENCY_CNT_TAG: 3995 + ath12k_htt_print_latency_prof_cnt(tag_buf, len, stats_req); 3996 + break; 3997 + case HTT_STATS_LATENCY_PROF_STATS_TAG: 3998 + ath12k_htt_print_latency_prof_stats_tlv(tag_buf, len, stats_req); 3999 + break; 4000 + case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG: 4001 + ath12k_htt_print_ul_ofdma_trigger_stats(tag_buf, len, stats_req); 4002 + break; 4003 + case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG: 4004 + ath12k_htt_print_ul_ofdma_user_stats(tag_buf, len, stats_req); 4005 + break; 4006 + case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG: 4007 + ath12k_htt_print_ul_mumimo_trig_stats(tag_buf, len, stats_req); 4008 + break; 4009 + case HTT_STATS_RX_FSE_STATS_TAG: 4010 + ath12k_htt_print_rx_fse_stats_tlv(tag_buf, len, stats_req); 5190 4011 break; 5191 4012 case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: 5192 4013 ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req); ··· 5273 4046 case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG: 5274 4047 ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len, 5275 4048 
stats_req); 4049 + break; 4050 + case HTT_STATS_TX_PDEV_RATE_STATS_TAG: 4051 + ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req); 4052 + break; 4053 + case HTT_STATS_RX_PDEV_RATE_STATS_TAG: 4054 + ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req); 4055 + break; 4056 + case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG: 4057 + ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req); 5276 4058 break; 5277 4059 default: 5278 4060 break;
+417 -36
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef DEBUG_HTT_STATS_H ··· 123 123 124 124 /* htt_dbg_ext_stats_type */ 125 125 enum ath12k_dbg_htt_ext_stats_type { 126 - ATH12K_DBG_HTT_EXT_STATS_RESET = 0, 127 - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, 128 - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, 129 - ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, 130 - ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, 131 - ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, 132 - ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, 133 - ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, 134 - ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, 135 - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, 136 - ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, 137 - ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, 138 - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, 139 - ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, 140 - ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, 141 - ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, 142 - ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, 143 - ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, 144 - ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, 145 - ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, 146 - ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, 147 - ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, 148 - ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, 149 - ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, 126 + ATH12K_DBG_HTT_EXT_STATS_RESET = 0, 127 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, 128 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, 129 + ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, 130 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, 131 + ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, 132 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9, 133 + 
ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10, 134 + ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, 135 + ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, 136 + ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, 137 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, 138 + ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, 139 + ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22, 140 + ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, 141 + ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS = 25, 142 + ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS = 26, 143 + ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS = 27, 144 + ATH12K_DBG_HTT_EXT_STATS_FSE_RX = 28, 145 + ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT = 30, 146 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, 147 + ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, 148 + ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, 149 + ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, 150 + ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, 151 + ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, 152 + ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, 153 + ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, 154 + ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, 155 + ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, 156 + ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, 157 + ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, 150 158 151 159 /* keep this last */ 152 160 ATH12K_DBG_HTT_NUM_EXT_STATS, ··· 181 173 HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25, 182 174 HTT_STATS_SFM_CMN_TAG = 26, 183 175 HTT_STATS_SRING_STATS_TAG = 27, 176 + HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34, 177 + HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35, 184 178 HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, 185 179 HTT_STATS_TX_SCHED_CMN_TAG = 37, 186 180 HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, ··· 205 195 HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, 206 196 HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, 207 197 HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, 198 + HTT_STATS_TX_SOUNDING_STATS_TAG = 80, 208 199 HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, 209 200 
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87, 210 201 HTT_STATS_PDEV_OBSS_PD_TAG = 88, 211 202 HTT_STATS_HW_WAR_TAG = 89, 203 + HTT_STATS_LATENCY_PROF_STATS_TAG = 91, 204 + HTT_STATS_LATENCY_CTX_TAG = 92, 205 + HTT_STATS_LATENCY_CNT_TAG = 93, 206 + HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94, 207 + HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95, 208 + HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97, 209 + HTT_STATS_RX_FSE_STATS_TAG = 98, 212 210 HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, 213 211 HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, 212 + HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103, 214 213 HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108, 215 214 HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, 216 215 HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, ··· 405 386 __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS]; 406 387 __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; 407 388 } __packed; 389 + 390 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12 391 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4 392 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5 393 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4 394 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 395 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES 7 396 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4 397 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 398 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF 4 399 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2 400 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2 401 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6 402 + 403 + struct ath12k_htt_tx_pdev_rate_stats_tlv { 404 + __le32 mac_id_word; 405 + __le32 tx_ldpc; 406 + __le32 rts_cnt; 407 + __le32 ack_rssi; 408 + __le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 409 + __le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 410 + __le32 
tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 411 + __le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 412 + __le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; 413 + __le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 414 + __le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES]; 415 + __le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 416 + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 417 + __le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS]; 418 + __le32 rts_success; 419 + __le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS]; 420 + __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; 421 + __le32 ac_mu_mimo_tx_ldpc; 422 + __le32 ax_mu_mimo_tx_ldpc; 423 + __le32 ofdma_tx_ldpc; 424 + __le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF]; 425 + __le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 426 + __le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 427 + __le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 428 + __le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 429 + __le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 430 + __le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 431 + __le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; 432 + __le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; 433 + __le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; 434 + __le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 435 + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 436 + __le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 437 + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 438 + __le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 439 + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; 440 + __le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES]; 441 + __le32 tx_11ax_su_ext; 442 + __le32 
tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 443 + __le32 tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 444 + __le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 445 + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 446 + __le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 447 + __le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 448 + __le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 449 + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 450 + __le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 451 + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 452 + __le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; 453 + __le32 tx_bw_320mhz; 454 + }; 455 + 456 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4 457 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 458 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12 459 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4 460 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5 461 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4 462 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 463 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES 7 464 + #define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8 465 + #define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS 16 466 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6 467 + #define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8 468 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2 469 + 470 + struct ath12k_htt_rx_pdev_rate_stats_tlv { 471 + __le32 mac_id_word; 472 + __le32 nsts; 473 + __le32 rx_ldpc; 474 + __le32 rts_cnt; 475 + __le32 rssi_mgmt; 476 + __le32 rssi_data; 477 + __le32 rssi_comb; 478 + __le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 479 + __le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 480 + __le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS]; 481 + __le32 
rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 482 + __le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; 483 + __le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES]; 484 + u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 485 + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; 486 + __le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] 487 + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 488 + __le32 rssi_in_dbm; 489 + __le32 rx_11ax_su_ext; 490 + __le32 rx_11ac_mumimo; 491 + __le32 rx_11ax_mumimo; 492 + __le32 rx_11ax_ofdma; 493 + __le32 txbf; 494 + __le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS]; 495 + __le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; 496 + __le32 rx_active_dur_us_low; 497 + __le32 rx_active_dur_us_high; 498 + __le32 rx_11ax_ul_ofdma; 499 + __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 500 + __le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 501 + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 502 + __le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 503 + __le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; 504 + __le32 ul_ofdma_rx_stbc; 505 + __le32 ul_ofdma_rx_ldpc; 506 + __le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 507 + __le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 508 + __le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 509 + __le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 510 + __le32 nss_count; 511 + __le32 pilot_count; 512 + __le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 513 + [ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS]; 514 + __le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 515 + s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 516 + [ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 517 + __le32 per_chain_rssi_pkt_type; 518 + s8 
rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 519 + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; 520 + __le32 rx_su_ndpa; 521 + __le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 522 + __le32 rx_mu_ndpa; 523 + __le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 524 + __le32 rx_br_poll; 525 + __le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; 526 + __le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS]; 527 + __le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; 528 + __le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; 529 + __le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; 530 + __le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; 531 + __le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 532 + __le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; 533 + __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; 534 + }; 535 + 536 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4 537 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14 538 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2 539 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5 540 + #define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS 5 541 + 542 + struct ath12k_htt_rx_pdev_rate_ext_stats_tlv { 543 + u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 544 + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS]; 545 + s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] 546 + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS]; 547 + __le32 rssi_mcast_in_dbm; 548 + __le32 rssi_mgmt_in_dbm; 549 + __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 550 + __le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 551 + __le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] 552 + 
[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 553 + __le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 554 + __le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] 555 + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 556 + __le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 557 + __le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 558 + __le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; 559 + __le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; 560 + __le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS]; 561 + __le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] 562 + [ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; 563 + __le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS]; 564 + }; 408 565 409 566 #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) 410 567 #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) ··· 1253 1058 __le32 collection_interval; 1254 1059 } __packed; 1255 1060 1061 + #define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS 8 1062 + #define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4 1063 + #define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8 1064 + #define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8 1065 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4 1066 + #define ATH12K_HTT_TX_NUM_MCS_CNTRS 12 1067 + #define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS 2 1068 + 1069 + #define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \ 1070 + (ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \ 1071 + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) 1072 + 1073 + enum ath12k_htt_txbf_sound_steer_modes { 1074 + ATH12K_HTT_IMPL_STEER_STATS = 0, 1075 + ATH12K_HTT_EXPL_SUSIFS_STEER_STATS = 1, 1076 + ATH12K_HTT_EXPL_SURBO_STEER_STATS = 2, 1077 + ATH12K_HTT_EXPL_MUSIFS_STEER_STATS = 3, 1078 + ATH12K_HTT_EXPL_MURBO_STEER_STATS = 4, 1079 + ATH12K_HTT_TXBF_MAX_NUM_OF_MODES = 5 1080 + }; 
1081 + 1082 + enum ath12k_htt_stats_sounding_tx_mode { 1083 + ATH12K_HTT_TX_AC_SOUNDING_MODE = 0, 1084 + ATH12K_HTT_TX_AX_SOUNDING_MODE = 1, 1085 + ATH12K_HTT_TX_BE_SOUNDING_MODE = 2, 1086 + ATH12K_HTT_TX_CMN_SOUNDING_MODE = 3, 1087 + }; 1088 + 1089 + struct ath12k_htt_tx_sounding_stats_tlv { 1090 + __le32 tx_sounding_mode; 1091 + __le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; 1092 + __le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; 1093 + __le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; 1094 + __le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; 1095 + __le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS]; 1096 + __le32 cv_nc_mismatch_err; 1097 + __le32 cv_fcs_err; 1098 + __le32 cv_frag_idx_mismatch; 1099 + __le32 cv_invalid_peer_id; 1100 + __le32 cv_no_txbf_setup; 1101 + __le32 cv_expiry_in_update; 1102 + __le32 cv_pkt_bw_exceed; 1103 + __le32 cv_dma_not_done_err; 1104 + __le32 cv_update_failed; 1105 + __le32 cv_total_query; 1106 + __le32 cv_total_pattern_query; 1107 + __le32 cv_total_bw_query; 1108 + __le32 cv_invalid_bw_coding; 1109 + __le32 cv_forced_sounding; 1110 + __le32 cv_standalone_sounding; 1111 + __le32 cv_nc_mismatch; 1112 + __le32 cv_fb_type_mismatch; 1113 + __le32 cv_ofdma_bw_mismatch; 1114 + __le32 cv_bw_mismatch; 1115 + __le32 cv_pattern_mismatch; 1116 + __le32 cv_preamble_mismatch; 1117 + __le32 cv_nr_mismatch; 1118 + __le32 cv_in_use_cnt_exceeded; 1119 + __le32 cv_found; 1120 + __le32 cv_not_found; 1121 + __le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; 1122 + __le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; 1123 + __le32 cv_ntbr_sounding; 1124 + __le32 cv_found_upload_in_progress; 1125 + __le32 cv_expired_during_query; 1126 + __le32 cv_dma_timeout_error; 1127 + __le32 cv_buf_ibf_uploads; 1128 + __le32 cv_buf_ebf_uploads; 1129 + __le32 cv_buf_received; 1130 + __le32 cv_buf_fed_back; 1131 + __le32 cv_total_query_ibf; 1132 + __le32 cv_found_ibf; 1133 + __le32 cv_not_found_ibf; 1134 + __le32 cv_expired_during_query_ibf; 1135 + } 
__packed; 1136 + 1256 1137 struct ath12k_htt_pdev_obss_pd_stats_tlv { 1257 1138 __le32 num_obss_tx_ppdu_success; 1258 1139 __le32 num_obss_tx_ppdu_failure; ··· 1349 1078 __le32 num_srg_success_per_ac[ATH12K_HTT_NUM_AC_WMM]; 1350 1079 __le32 num_obss_min_dur_check_flush_cnt; 1351 1080 __le32 num_sr_ppdu_abort_flush_cnt; 1081 + } __packed; 1082 + 1083 + #define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN 32 1084 + #define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST 3 1085 + #define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3 1086 + 1087 + struct ath12k_htt_latency_prof_stats_tlv { 1088 + __le32 print_header; 1089 + s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN]; 1090 + __le32 cnt; 1091 + __le32 min; 1092 + __le32 max; 1093 + __le32 last; 1094 + __le32 tot; 1095 + __le32 avg; 1096 + __le32 hist_intvl; 1097 + __le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST]; 1098 + } __packed; 1099 + 1100 + struct ath12k_htt_latency_prof_ctx_tlv { 1101 + __le32 duration; 1102 + __le32 tx_msdu_cnt; 1103 + __le32 tx_mpdu_cnt; 1104 + __le32 tx_ppdu_cnt; 1105 + __le32 rx_msdu_cnt; 1106 + __le32 rx_mpdu_cnt; 1107 + } __packed; 1108 + 1109 + struct ath12k_htt_latency_prof_cnt_tlv { 1110 + __le32 prof_enable_cnt; 1111 + } __packed; 1112 + 1113 + #define ATH12K_HTT_RX_NUM_MCS_CNTRS 12 1114 + #define ATH12K_HTT_RX_NUM_GI_CNTRS 4 1115 + #define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS 8 1116 + #define ATH12K_HTT_RX_NUM_BW_CNTRS 4 1117 + #define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS 6 1118 + #define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS 7 1119 + #define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5 1120 + #define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES 2 1121 + #define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS 2 1122 + 1123 + enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE { 1124 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26, 1125 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52, 1126 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106, 1127 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242, 1128 + 
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484, 1129 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996, 1130 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2, 1131 + ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS, 1132 + }; 1133 + 1134 + struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv { 1135 + __le32 user_index; 1136 + __le32 rx_ulofdma_non_data_ppdu; 1137 + __le32 rx_ulofdma_data_ppdu; 1138 + __le32 rx_ulofdma_mpdu_ok; 1139 + __le32 rx_ulofdma_mpdu_fail; 1140 + __le32 rx_ulofdma_non_data_nusers; 1141 + __le32 rx_ulofdma_data_nusers; 1142 + } __packed; 1143 + 1144 + struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv { 1145 + __le32 mac_id__word; 1146 + __le32 rx_11ax_ul_ofdma; 1147 + __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS]; 1148 + __le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS]; 1149 + __le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; 1150 + __le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS]; 1151 + __le32 ul_ofdma_rx_stbc; 1152 + __le32 ul_ofdma_rx_ldpc; 1153 + __le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS]; 1154 + __le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS]; 1155 + __le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; 1156 + __le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; 1157 + __le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; 1158 + __le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; 1159 + __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS]; 1160 + __le32 ul_ofdma_bsc_trig_rx_qos_null_only; 1161 + } __packed; 1162 + 1163 + #define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS 8 1164 + 1165 + struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv { 1166 + __le32 mac_id__word; 1167 + __le32 rx_11ax_ul_mumimo; 1168 + __le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS]; 1169 + __le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS]; 1170 + __le32 
ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; 1171 + __le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS]; 1172 + __le32 ul_mumimo_rx_stbc; 1173 + __le32 ul_mumimo_rx_ldpc; 1174 + __le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS]; 1175 + __le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS]; 1176 + s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS]; 1177 + s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS]; 1178 + s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; 1179 + s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; 1180 + __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS]; 1181 + __le32 mumimo_bsc_trig_rx_qos_null_only; 1182 + } __packed; 1183 + 1184 + #define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX 10 1185 + #define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX 10 1186 + #define ATH12K_HTT_RX_NUM_SQUARE_INDEX 6 1187 + #define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX 4 1188 + #define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX 4 1189 + 1190 + struct ath12k_htt_rx_fse_stats_tlv { 1191 + __le32 fse_enable_cnt; 1192 + __le32 fse_disable_cnt; 1193 + __le32 fse_cache_invalidate_entry_cnt; 1194 + __le32 fse_full_cache_invalidate_cnt; 1195 + __le32 fse_num_cache_hits_cnt; 1196 + __le32 fse_num_searches_cnt; 1197 + __le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX]; 1198 + __le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX]; 1199 + __le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX]; 1200 + __le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX]; 1201 + __le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX]; 1352 1202 } __packed; 1353 1203 1354 1204 #define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14 ··· 1807 1415 ATH12K_HTT_RC_MODE_MU6_INTF, 1808 1416 ATH12K_HTT_RC_MODE_MU7_INTF, 
1809 1417 ATH12K_HTT_RC_MODE_2D_COUNT 1810 - }; 1811 - 1812 - enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE { 1813 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26, 1814 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52, 1815 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106, 1816 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242, 1817 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484, 1818 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996, 1819 - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2, 1820 - ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS 1821 1418 }; 1822 1419 1823 1420 enum ath12k_htt_stats_rc_mode {
+337
drivers/net/wireless/ath/ath12k/debugfs_sta.c
··· 1 + // SPDX-License-Identifier: BSD-3-Clause-Clear 2 + /* 3 + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 4 + */ 5 + 6 + #include <linux/vmalloc.h> 7 + 8 + #include "debugfs_sta.h" 9 + #include "core.h" 10 + #include "peer.h" 11 + #include "debug.h" 12 + #include "debugfs_htt_stats.h" 13 + #include "debugfs.h" 14 + 15 + static 16 + u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size, 17 + bool he_rates_avail, 18 + const struct ath12k_rx_peer_rate_stats *stats) 19 + { 20 + static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = { 21 + "1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps", 22 + "9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps", 23 + "24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"}; 24 + u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS; 25 + int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0; 26 + u32 i, len = offset, max = max_bw * max_gi * max_mcs; 27 + bool found; 28 + 29 + len += scnprintf(buf + len, size - len, "\nEHT stats:\n"); 30 + for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++) 31 + len += scnprintf(buf + len, size - len, 32 + "MCS %d: %llu%s", i, stats->be_mcs_count[i], 33 + (i + 1) % 8 ? "\t" : "\n"); 34 + 35 + len += scnprintf(buf + len, size - len, "\nHE stats:\n"); 36 + for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++) 37 + len += scnprintf(buf + len, size - len, 38 + "MCS %d: %llu%s", i, stats->he_mcs_count[i], 39 + (i + 1) % 6 ? "\t" : "\n"); 40 + 41 + len += scnprintf(buf + len, size - len, "\nVHT stats:\n"); 42 + for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++) 43 + len += scnprintf(buf + len, size - len, 44 + "MCS %d: %llu%s", i, stats->vht_mcs_count[i], 45 + (i + 1) % 5 ? "\t" : "\n"); 46 + 47 + len += scnprintf(buf + len, size - len, "\nHT stats:\n"); 48 + for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++) 49 + len += scnprintf(buf + len, size - len, 50 + "MCS %d: %llu%s", i, stats->ht_mcs_count[i], 51 + (i + 1) % 8 ? 
"\t" : "\n"); 52 + 53 + len += scnprintf(buf + len, size - len, "\nLegacy stats:\n"); 54 + for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++) 55 + len += scnprintf(buf + len, size - len, 56 + "%s: %llu%s", legacy_rate_str[i], 57 + stats->legacy_count[i], 58 + (i + 1) % 4 ? "\t" : "\n"); 59 + 60 + len += scnprintf(buf + len, size - len, "\nNSS stats:\n"); 61 + for (i = 0; i < HAL_RX_MAX_NSS; i++) 62 + len += scnprintf(buf + len, size - len, 63 + "%dx%d: %llu ", i + 1, i + 1, 64 + stats->nss_count[i]); 65 + 66 + len += scnprintf(buf + len, size - len, 67 + "\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n", 68 + stats->gi_count[0], 69 + stats->gi_count[1], 70 + stats->gi_count[2], 71 + stats->gi_count[3]); 72 + 73 + len += scnprintf(buf + len, size - len, 74 + "BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n", 75 + stats->bw_count[0], 76 + stats->bw_count[1], 77 + stats->bw_count[2], 78 + stats->bw_count[3], 79 + stats->bw_count[4]); 80 + 81 + for (i = 0; i < max; i++) { 82 + found = false; 83 + 84 + for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) { 85 + if (stats->rx_rate[bw][gi][nss][mcs]) { 86 + found = true; 87 + break; 88 + } 89 + } 90 + 91 + if (!found) 92 + goto skip_report; 93 + 94 + switch (bw) { 95 + case HAL_RX_BW_20MHZ: 96 + bw_num = 20; 97 + break; 98 + case HAL_RX_BW_40MHZ: 99 + bw_num = 40; 100 + break; 101 + case HAL_RX_BW_80MHZ: 102 + bw_num = 80; 103 + break; 104 + case HAL_RX_BW_160MHZ: 105 + bw_num = 160; 106 + break; 107 + case HAL_RX_BW_320MHZ: 108 + bw_num = 320; 109 + break; 110 + } 111 + 112 + len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ", 113 + bw_num, gi, nss + 1, nss + 1); 114 + 115 + for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) { 116 + if (stats->rx_rate[bw][gi][nss][mcs]) 117 + len += scnprintf(buf + len, size - len, 118 + " %d:%llu", mcs, 119 + stats->rx_rate[bw][gi][nss][mcs]); 120 + } 121 + 122 + skip_report: 123 + if (nss++ >= max_mcs - 1) { 124 + nss = 0; 125 + if (gi++ >= max_gi - 
1) { 126 + gi = 0; 127 + if (bw < max_bw - 1) 128 + bw++; 129 + } 130 + } 131 + } 132 + 133 + len += scnprintf(buf + len, size - len, "\n"); 134 + 135 + return len - offset; 136 + } 137 + 138 + static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file, 139 + char __user *user_buf, 140 + size_t count, loff_t *ppos) 141 + { 142 + struct ieee80211_link_sta *link_sta = file->private_data; 143 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta); 144 + const int size = ATH12K_STA_RX_STATS_BUF_SIZE; 145 + struct ath12k_hw *ah = ahsta->ahvif->ah; 146 + struct ath12k_rx_peer_stats *rx_stats; 147 + struct ath12k_link_sta *arsta; 148 + u8 link_id = link_sta->link_id; 149 + int len = 0, i, ret = 0; 150 + bool he_rates_avail; 151 + struct ath12k *ar; 152 + 153 + wiphy_lock(ah->hw->wiphy); 154 + 155 + if (!(BIT(link_id) & ahsta->links_map)) { 156 + wiphy_unlock(ah->hw->wiphy); 157 + return -ENOENT; 158 + } 159 + 160 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 161 + if (!arsta || !arsta->arvif->ar) { 162 + wiphy_unlock(ah->hw->wiphy); 163 + return -ENOENT; 164 + } 165 + 166 + ar = arsta->arvif->ar; 167 + 168 + u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL); 169 + if (!buf) { 170 + ret = -ENOENT; 171 + goto out; 172 + } 173 + 174 + spin_lock_bh(&ar->ab->base_lock); 175 + 176 + rx_stats = arsta->rx_stats; 177 + if (!rx_stats) { 178 + ret = -ENOENT; 179 + goto unlock; 180 + } 181 + 182 + len += scnprintf(buf + len, size - len, "RX peer stats:\n\n"); 183 + len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n", 184 + rx_stats->num_msdu); 185 + len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n", 186 + rx_stats->tcp_msdu_count); 187 + len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n", 188 + rx_stats->udp_msdu_count); 189 + len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n", 190 + rx_stats->other_msdu_count); 191 + len += scnprintf(buf + len, size - len, "Num of MSDUs part 
of AMPDU: %llu\n", 192 + rx_stats->ampdu_msdu_count); 193 + len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n", 194 + rx_stats->non_ampdu_msdu_count); 195 + len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n", 196 + rx_stats->stbc_count); 197 + len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n", 198 + rx_stats->beamformed_count); 199 + len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n", 200 + rx_stats->num_mpdu_fcs_ok); 201 + len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n", 202 + rx_stats->num_mpdu_fcs_err); 203 + 204 + he_rates_avail = (rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1) ? true : false; 205 + 206 + len += scnprintf(buf + len, size - len, 207 + "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n", 208 + rx_stats->pream_cnt[0], rx_stats->pream_cnt[1], 209 + rx_stats->pream_cnt[2], rx_stats->pream_cnt[3], 210 + rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]); 211 + len += scnprintf(buf + len, size - len, 212 + "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n", 213 + rx_stats->reception_type[0], rx_stats->reception_type[1], 214 + rx_stats->reception_type[2], rx_stats->reception_type[3]); 215 + 216 + len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):"); 217 + for (i = 0; i <= IEEE80211_NUM_TIDS; i++) 218 + len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]); 219 + 220 + len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n", 221 + rx_stats->rx_duration); 222 + 223 + len += scnprintf(buf + len, size - len, 224 + "\nDCM: %llu\nRU26: %llu\nRU52: %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n", 225 + rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0], 226 + rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2], 227 + rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4], 228 + rx_stats->ru_alloc_cnt[5], 
rx_stats->ru_alloc_cnt[6]); 229 + 230 + len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n"); 231 + len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail, 232 + &rx_stats->pkt_stats); 233 + 234 + len += scnprintf(buf + len, size - len, "\n"); 235 + 236 + len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n"); 237 + len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail, 238 + &rx_stats->byte_stats); 239 + 240 + unlock: 241 + spin_unlock_bh(&ar->ab->base_lock); 242 + 243 + if (len) 244 + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); 245 + out: 246 + wiphy_unlock(ah->hw->wiphy); 247 + return ret; 248 + } 249 + 250 + static const struct file_operations fops_rx_stats = { 251 + .read = ath12k_dbg_sta_dump_rx_stats, 252 + .open = simple_open, 253 + .owner = THIS_MODULE, 254 + .llseek = default_llseek, 255 + }; 256 + 257 + static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file, 258 + const char __user *buf, 259 + size_t count, loff_t *ppos) 260 + { 261 + struct ieee80211_link_sta *link_sta = file->private_data; 262 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta); 263 + struct ath12k_hw *ah = ahsta->ahvif->ah; 264 + struct ath12k_rx_peer_stats *rx_stats; 265 + struct ath12k_link_sta *arsta; 266 + u8 link_id = link_sta->link_id; 267 + struct ath12k *ar; 268 + bool reset; 269 + int ret; 270 + 271 + ret = kstrtobool_from_user(buf, count, &reset); 272 + if (ret) 273 + return ret; 274 + 275 + if (!reset) 276 + return -EINVAL; 277 + 278 + wiphy_lock(ah->hw->wiphy); 279 + 280 + if (!(BIT(link_id) & ahsta->links_map)) { 281 + ret = -ENOENT; 282 + goto out; 283 + } 284 + 285 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 286 + if (!arsta || !arsta->arvif->ar) { 287 + ret = -ENOENT; 288 + goto out; 289 + } 290 + 291 + ar = arsta->arvif->ar; 292 + 293 + spin_lock_bh(&ar->ab->base_lock); 294 + 295 + rx_stats = arsta->rx_stats; 296 + if (!rx_stats) { 297 + 
spin_unlock_bh(&ar->ab->base_lock); 298 + ret = -ENOENT; 299 + goto out; 300 + } 301 + 302 + memset(rx_stats, 0, sizeof(*rx_stats)); 303 + spin_unlock_bh(&ar->ab->base_lock); 304 + 305 + ret = count; 306 + out: 307 + wiphy_unlock(ah->hw->wiphy); 308 + return ret; 309 + } 310 + 311 + static const struct file_operations fops_reset_rx_stats = { 312 + .write = ath12k_dbg_sta_reset_rx_stats, 313 + .open = simple_open, 314 + .owner = THIS_MODULE, 315 + .llseek = default_llseek, 316 + }; 317 + 318 + void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, 319 + struct ieee80211_vif *vif, 320 + struct ieee80211_link_sta *link_sta, 321 + struct dentry *dir) 322 + { 323 + struct ath12k *ar; 324 + 325 + lockdep_assert_wiphy(hw->wiphy); 326 + 327 + ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id); 328 + if (!ar) 329 + return; 330 + 331 + if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) { 332 + debugfs_create_file("rx_stats", 0400, dir, link_sta, 333 + &fops_rx_stats); 334 + debugfs_create_file("reset_rx_stats", 0200, dir, link_sta, 335 + &fops_reset_rx_stats); 336 + } 337 + }
+24
drivers/net/wireless/ath/ath12k/debugfs_sta.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 + /* 3 + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 4 + */ 5 + 6 + #ifndef _ATH12K_DEBUGFS_STA_H_ 7 + #define _ATH12K_DEBUGFS_STA_H_ 8 + 9 + #include <net/mac80211.h> 10 + 11 + #include "core.h" 12 + 13 + #define ATH12K_STA_RX_STATS_BUF_SIZE (1024 * 16) 14 + 15 + #ifdef CONFIG_ATH12K_DEBUGFS 16 + 17 + void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, 18 + struct ieee80211_vif *vif, 19 + struct ieee80211_link_sta *link_sta, 20 + struct dentry *dir); 21 + 22 + #endif /* CONFIG_ATH12K_DEBUGFS */ 23 + 24 + #endif /* _ATH12K_DEBUGFS_STA_H_ */
+4 -1
drivers/net/wireless/ath/ath12k/dp.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <crypto/hash.h> ··· 1314 1314 u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG; 1315 1315 u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG; 1316 1316 u32 val = 0; 1317 + 1318 + if (ath12k_ftm_mode) 1319 + return; 1317 1320 1318 1321 ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base); 1319 1322
+71 -11
drivers/net/wireless/ath/ath12k/dp.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_DP_H ··· 125 125 struct sk_buff_head rx_status_q; 126 126 struct dp_mon_mpdu *mon_mpdu; 127 127 struct list_head dp_rx_mon_mpdu_list; 128 - struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF]; 129 128 struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info; 130 129 struct dp_mon_tx_ppdu_info *tx_data_ppdu_info; 131 130 }; ··· 175 176 #define DP_RXDMA_ERR_DST_RING_SIZE 1024 176 177 #define DP_RXDMA_MON_STATUS_RING_SIZE 1024 177 178 #define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096 178 - #define DP_RXDMA_MONITOR_DST_RING_SIZE 2048 179 + #define DP_RXDMA_MONITOR_DST_RING_SIZE 8092 179 180 #define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096 180 181 #define DP_TX_MONITOR_BUF_RING_SIZE 4096 181 182 #define DP_TX_MONITOR_DEST_RING_SIZE 2048 ··· 372 373 }; 373 374 374 375 /* HTT definitions */ 376 + #define HTT_TAG_TCL_METADATA_VERSION 5 375 377 376 - #define HTT_TCL_META_DATA_TYPE BIT(0) 377 - #define HTT_TCL_META_DATA_VALID_HTT BIT(1) 378 + #define HTT_TCL_META_DATA_TYPE GENMASK(1, 0) 379 + #define HTT_TCL_META_DATA_VALID_HTT BIT(2) 378 380 379 381 /* vdev meta data */ 380 - #define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2) 381 - #define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10) 382 - #define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12) 382 + #define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3) 383 + #define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11) 384 + #define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13) 383 385 384 386 /* peer meta data */ 385 - #define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2) 387 + #define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3) 388 + 389 + /* Global sequence number */ 390 + #define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3 391 + #define 
HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2) 392 + #define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3) 393 + #define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128 386 394 387 395 /* HTT tx completion is overlaid in wbm_release_ring */ 388 396 #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13) ··· 420 414 }; 421 415 422 416 #define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0) 417 + #define HTT_OPTION_TCL_METADATA_VER_V2 2 418 + #define HTT_OPTION_TAG GENMASK(7, 0) 419 + #define HTT_OPTION_LEN GENMASK(15, 8) 420 + #define HTT_OPTION_VALUE GENMASK(31, 16) 421 + #define HTT_TCL_METADATA_VER_SZ 4 423 422 424 423 struct htt_ver_req_cmd { 425 424 __le32 ver_reg_info; 425 + __le32 tcl_metadata_version; 426 426 } __packed; 427 427 428 428 enum htt_srng_ring_type { ··· 446 434 HTT_HOST1_TO_FW_RXBUF_RING, 447 435 HTT_HOST2_TO_FW_RXBUF_RING, 448 436 HTT_RXDMA_NON_MONITOR_DEST_RING, 437 + HTT_RXDMA_HOST_BUF_RING2, 449 438 HTT_TX_MON_HOST2MON_BUF_RING, 450 439 HTT_TX_MON_MON2HOST_DEST_RING, 440 + HTT_RX_MON_HOST2MON_BUF_RING, 441 + HTT_RX_MON_MON2HOST_DEST_RING, 451 442 }; 452 443 453 444 /* host -> target HTT_SRING_SETUP message ··· 782 767 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16) 783 768 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24) 784 769 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25) 785 - #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0) 786 - #define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26) 770 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26) 771 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27) 772 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28) 773 + 774 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0) 775 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16) 776 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19) 777 + #define 
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22) 778 + 779 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0) 780 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17) 781 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18) 782 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19) 783 + 784 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0) 785 + #define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1) 787 786 788 787 #define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0) 789 788 #define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16) ··· 826 797 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10), 827 798 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11), 828 799 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12), 800 + HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13), 829 801 }; 830 802 831 803 enum htt_rx_mgmt_pkt_filter_tlv_flags0 { ··· 1115 1085 HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ 1116 1086 HTT_RX_FILTER_TLV_FLAGS_ATTENTION) 1117 1087 1088 + #define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \ 1089 + (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ 1090 + HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \ 1091 + HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ 1092 + HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \ 1093 + HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \ 1094 + HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \ 1095 + HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ 1096 + HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ 1097 + HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ 1098 + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ 1099 + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ 1100 + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \ 1101 + HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO) 1102 + 1118 1103 /* msdu start. 
mpdu end, attention, rx hdr tlv's are not subscribed */ 1119 1104 #define HTT_RX_TLV_FLAGS_RXDMA_RING \ 1120 1105 (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ ··· 1158 1113 __le32 info3; 1159 1114 } __packed; 1160 1115 1116 + #define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32 1117 + #define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7 1118 + #define HTT_RX_RING_PKT_TLV_OFFSET 0x1 1119 + 1161 1120 struct htt_rx_ring_tlv_filter { 1162 1121 u32 rx_filter; /* see htt_rx_filter_tlv_flags */ 1163 1122 u32 pkt_filter_flags0; /* MGMT */ ··· 1179 1130 u16 rx_mpdu_start_wmask; 1180 1131 u16 rx_mpdu_end_wmask; 1181 1132 u32 rx_msdu_end_wmask; 1133 + u32 conf_len_ctrl; 1134 + u32 conf_len_mgmt; 1135 + u32 conf_len_data; 1136 + u16 rx_drop_threshold; 1137 + bool enable_log_mgmt_type; 1138 + bool enable_log_ctrl_type; 1139 + bool enable_log_data_type; 1140 + bool enable_rx_tlv_offset; 1141 + u16 rx_tlv_offset; 1142 + bool drop_threshold_valid; 1143 + bool rxmon_disable; 1182 1144 }; 1183 1145 1184 1146 #define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
+1142 -283
drivers/net/wireless/ath/ath12k/dp_mon.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "dp_mon.h" ··· 9 9 #include "dp_rx.h" 10 10 #include "dp_tx.h" 11 11 #include "peer.h" 12 + 13 + #define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \ 14 + u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) 15 + 16 + #define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \ 17 + u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits) 12 18 13 19 static void 14 20 ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, ··· 81 75 static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, 82 76 struct hal_rx_mon_ppdu_info *ppdu_info) 83 77 { 84 - u32 nsts, group_id, info0, info1; 78 + u32 nsts, info0, info1; 85 79 u8 gi_setting; 86 80 87 81 info0 = __le32_to_cpu(vht_sig->info0); ··· 109 103 ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); 110 104 ppdu_info->beamformed = u32_get_bits(info1, 111 105 HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); 112 - group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); 113 - if (group_id == 0 || group_id == 63) 114 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 115 - else 116 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 117 - ppdu_info->vht_flag_values5 = group_id; 106 + ppdu_info->vht_flag_values5 = u32_get_bits(info0, 107 + HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); 118 108 ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) | 119 109 ppdu_info->nss); 120 110 ppdu_info->vht_flag_values2 = ppdu_info->bw; ··· 130 128 ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); 131 129 ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); 132 130 
ppdu_info->nss = (ppdu_info->mcs >> 3); 133 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 134 131 } 135 132 136 133 static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, ··· 161 160 162 161 ppdu_info->rate = rate; 163 162 ppdu_info->cck_flag = 1; 164 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 165 163 } 166 164 167 165 static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, ··· 200 200 } 201 201 202 202 ppdu_info->rate = rate; 203 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 204 203 } 205 204 206 205 static void ··· 236 237 ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); 237 238 ppdu_info->beamformed = u32_get_bits(info0, 238 239 HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); 239 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; 240 240 } 241 241 242 242 static void ··· 275 277 HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION); 276 278 ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); 277 279 ppdu_info->he_RU[0] = ru_tones; 278 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 279 280 } 280 281 281 282 static void ··· 408 411 409 412 ppdu_info->is_stbc = info1 & 410 413 HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC; 411 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 412 414 } 413 415 414 416 static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, ··· 555 559 dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); 556 560 ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); 557 561 ppdu_info->dcm = dcm; 558 - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 562 + } 563 + 564 + static void 565 + ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn, 566 + struct hal_rx_mon_ppdu_info *ppdu_info) 567 + { 568 + u32 common; 569 + 570 + ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0, 571 + HAL_RX_USIG_CMN_INFO0_BW); 572 + 
ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0, 573 + HAL_RX_USIG_CMN_INFO0_UL_DL); 574 + 575 + common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common); 576 + common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN | 577 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN | 578 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | 579 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN | 580 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN | 581 + ATH12K_LE32_DEC_ENC(cmn->info0, 582 + HAL_RX_USIG_CMN_INFO0_PHY_VERSION, 583 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) | 584 + u32_encode_bits(ppdu_info->u_sig_info.bw, 585 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) | 586 + u32_encode_bits(ppdu_info->u_sig_info.ul_dl, 587 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) | 588 + ATH12K_LE32_DEC_ENC(cmn->info0, 589 + HAL_RX_USIG_CMN_INFO0_BSS_COLOR, 590 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) | 591 + ATH12K_LE32_DEC_ENC(cmn->info0, 592 + HAL_RX_USIG_CMN_INFO0_TXOP, 593 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); 594 + ppdu_info->u_sig_info.usig.common = cpu_to_le32(common); 595 + 596 + switch (ppdu_info->u_sig_info.bw) { 597 + default: 598 + fallthrough; 599 + case HAL_EHT_BW_20: 600 + ppdu_info->bw = HAL_RX_BW_20MHZ; 601 + break; 602 + case HAL_EHT_BW_40: 603 + ppdu_info->bw = HAL_RX_BW_40MHZ; 604 + break; 605 + case HAL_EHT_BW_80: 606 + ppdu_info->bw = HAL_RX_BW_80MHZ; 607 + break; 608 + case HAL_EHT_BW_160: 609 + ppdu_info->bw = HAL_RX_BW_160MHZ; 610 + break; 611 + case HAL_EHT_BW_320_1: 612 + case HAL_EHT_BW_320_2: 613 + ppdu_info->bw = HAL_RX_BW_320MHZ; 614 + break; 615 + } 616 + } 617 + 618 + static void 619 + ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb, 620 + struct hal_rx_mon_ppdu_info *ppdu_info) 621 + { 622 + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; 623 + enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2; 624 + u32 common, value, mask; 625 + 626 + spatial_reuse1 = 
IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1; 627 + spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2; 628 + 629 + common = __le32_to_cpu(usig->common); 630 + value = __le32_to_cpu(usig->value); 631 + mask = __le32_to_cpu(usig->mask); 632 + 633 + ppdu_info->u_sig_info.ppdu_type_comp_mode = 634 + le32_get_bits(usig_tb->info0, 635 + HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE); 636 + 637 + common |= ATH12K_LE32_DEC_ENC(usig_tb->info0, 638 + HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS, 639 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); 640 + 641 + value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | 642 + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, 643 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) | 644 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | 645 + ATH12K_LE32_DEC_ENC(usig_tb->info0, 646 + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1, 647 + spatial_reuse1) | 648 + ATH12K_LE32_DEC_ENC(usig_tb->info0, 649 + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2, 650 + spatial_reuse2) | 651 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | 652 + ATH12K_LE32_DEC_ENC(usig_tb->info0, 653 + HAL_RX_USIG_TB_INFO0_CRC, 654 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) | 655 + ATH12K_LE32_DEC_ENC(usig_tb->info0, 656 + HAL_RX_USIG_TB_INFO0_TAIL, 657 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL); 658 + 659 + mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | 660 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE | 661 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | 662 + spatial_reuse1 | spatial_reuse2 | 663 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | 664 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC | 665 + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL; 666 + 667 + usig->common = cpu_to_le32(common); 668 + usig->value = cpu_to_le32(value); 669 + usig->mask = cpu_to_le32(mask); 670 + } 671 + 672 + static void 673 + ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu, 674 + struct 
hal_rx_mon_ppdu_info *ppdu_info) 675 + { 676 + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; 677 + enum ieee80211_radiotap_eht_usig_mu sig_symb, punc; 678 + u32 common, value, mask; 679 + 680 + sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS; 681 + punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO; 682 + 683 + common = __le32_to_cpu(usig->common); 684 + value = __le32_to_cpu(usig->value); 685 + mask = __le32_to_cpu(usig->mask); 686 + 687 + ppdu_info->u_sig_info.ppdu_type_comp_mode = 688 + le32_get_bits(usig_mu->info0, 689 + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); 690 + ppdu_info->u_sig_info.eht_sig_mcs = 691 + le32_get_bits(usig_mu->info0, 692 + HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS); 693 + ppdu_info->u_sig_info.num_eht_sig_sym = 694 + le32_get_bits(usig_mu->info0, 695 + HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM); 696 + 697 + common |= ATH12K_LE32_DEC_ENC(usig_mu->info0, 698 + HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS, 699 + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); 700 + 701 + value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | 702 + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | 703 + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, 704 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) | 705 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | 706 + ATH12K_LE32_DEC_ENC(usig_mu->info0, 707 + HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO, 708 + punc) | 709 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | 710 + u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs, 711 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) | 712 + u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym, 713 + sig_symb) | 714 + ATH12K_LE32_DEC_ENC(usig_mu->info0, 715 + HAL_RX_USIG_MU_INFO0_CRC, 716 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) | 717 + ATH12K_LE32_DEC_ENC(usig_mu->info0, 718 + HAL_RX_USIG_MU_INFO0_TAIL, 719 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL); 720 + 721 + mask |= 
IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | 722 + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | 723 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE | 724 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | 725 + punc | 726 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | 727 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS | 728 + sig_symb | 729 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC | 730 + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL; 731 + 732 + usig->common = cpu_to_le32(common); 733 + usig->value = cpu_to_le32(value); 734 + usig->mask = cpu_to_le32(mask); 735 + } 736 + 737 + static void 738 + ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig, 739 + struct hal_rx_mon_ppdu_info *ppdu_info) 740 + { 741 + u8 comp_mode; 742 + 743 + ppdu_info->eht_usig = true; 744 + 745 + ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info); 746 + 747 + comp_mode = le32_get_bits(usig->non_cmn.mu.info0, 748 + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); 749 + 750 + if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl) 751 + ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info); 752 + else 753 + ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info); 754 + } 755 + 756 + static void 757 + ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info, 758 + u16 tlv_len, const void *tlv_data) 759 + { 760 + if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) { 761 + memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len, 762 + tlv_data, tlv_len); 763 + ppdu_info->tlv_aggr.cur_len += tlv_len; 764 + } 765 + } 766 + 767 + static inline bool 768 + ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info) 769 + { 770 + if (usig_info->ppdu_type_comp_mode == 1 && 771 + usig_info->eht_sig_mcs == 0 && 772 + usig_info->num_eht_sig_sym == 0) 773 + return true; 774 + 775 + return false; 776 + } 777 + 778 + static inline bool 779 + ath12k_dp_mon_hal_rx_is_non_ofdma(const struct 
hal_rx_u_sig_info *usig_info) 780 + { 781 + u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode; 782 + u32 ul_dl = usig_info->ul_dl; 783 + 784 + if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) || 785 + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) || 786 + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1)) 787 + return true; 788 + 789 + return false; 790 + } 791 + 792 + static inline bool 793 + ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info) 794 + { 795 + if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0) 796 + return true; 797 + 798 + return false; 799 + } 800 + 801 + static void 802 + ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp, 803 + struct hal_rx_mon_ppdu_info *ppdu_info) 804 + { 805 + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; 806 + u32 known, data; 807 + 808 + known = __le32_to_cpu(eht->known); 809 + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | 810 + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | 811 + IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | 812 + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S | 813 + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S | 814 + IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 | 815 + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1; 816 + eht->known = cpu_to_le32(known); 817 + 818 + data = __le32_to_cpu(eht->data[0]); 819 + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 820 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE, 821 + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); 822 + /* GI and LTF size are separately indicated in radiotap header 823 + * and hence will be parsed from other TLV 824 + */ 825 + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 826 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM, 827 + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); 828 + 829 + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 830 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC, 831 + IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O); 832 + 833 + data |= 
ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 834 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD, 835 + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S); 836 + eht->data[0] = cpu_to_le32(data); 837 + 838 + data = __le32_to_cpu(eht->data[7]); 839 + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 840 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS, 841 + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); 842 + 843 + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, 844 + HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED, 845 + IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); 846 + eht->data[7] = cpu_to_le32(data); 847 + } 848 + 849 + static void 850 + ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow, 851 + struct hal_rx_mon_ppdu_info *ppdu_info) 852 + { 853 + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; 854 + u32 known, data; 855 + 856 + known = __le32_to_cpu(eht->known); 857 + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | 858 + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | 859 + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | 860 + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | 861 + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | 862 + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O; 863 + eht->known = cpu_to_le32(known); 864 + 865 + data = __le32_to_cpu(eht->data[0]); 866 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 867 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE, 868 + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); 869 + 870 + /* GI and LTF size are separately indicated in radiotap header 871 + * and hence will be parsed from other TLV 872 + */ 873 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 874 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM, 875 + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); 876 + 877 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 878 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM, 879 + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); 880 + 881 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 882 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR, 883 + 
IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); 884 + 885 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 886 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY, 887 + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); 888 + 889 + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, 890 + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD, 891 + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O); 892 + eht->data[0] = cpu_to_le32(data); 893 + } 894 + 895 + static void 896 + ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb, 897 + struct hal_rx_mon_ppdu_info *ppdu_info) 898 + { 899 + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; 900 + u32 known, data; 901 + 902 + known = __le32_to_cpu(eht->known); 903 + known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M; 904 + eht->known = cpu_to_le32(known); 905 + 906 + data = __le32_to_cpu(eht->data[7]); 907 + data |= ATH12K_LE32_DEC_ENC(eb->info0, 908 + HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS, 909 + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); 910 + eht->data[7] = cpu_to_le32(data); 911 + } 912 + 913 + static void 914 + ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user, 915 + struct hal_rx_mon_ppdu_info *ppdu_info) 916 + { 917 + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; 918 + u32 user_idx; 919 + 920 + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) 921 + return; 922 + 923 + user_idx = eht_info->num_user_info++; 924 + 925 + eht_info->user_info[user_idx] |= 926 + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | 927 + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | 928 + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | 929 + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M | 930 + ATH12K_LE32_DEC_ENC(user->info0, 931 + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID, 932 + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | 933 + ATH12K_LE32_DEC_ENC(user->info0, 934 + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING, 935 + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) 
| 936 + ATH12K_LE32_DEC_ENC(user->info0, 937 + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS, 938 + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | 939 + ATH12K_LE32_DEC_ENC(user->info0, 940 + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING, 941 + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M); 942 + 943 + ppdu_info->mcs = le32_get_bits(user->info0, 944 + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS); 945 + } 946 + 947 + static void 948 + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user, 949 + struct hal_rx_mon_ppdu_info *ppdu_info) 950 + { 951 + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; 952 + u32 user_idx; 953 + 954 + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) 955 + return; 956 + 957 + user_idx = eht_info->num_user_info++; 958 + 959 + eht_info->user_info[user_idx] |= 960 + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | 961 + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | 962 + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | 963 + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | 964 + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | 965 + ATH12K_LE32_DEC_ENC(user->info0, 966 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID, 967 + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | 968 + ATH12K_LE32_DEC_ENC(user->info0, 969 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING, 970 + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | 971 + ATH12K_LE32_DEC_ENC(user->info0, 972 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS, 973 + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | 974 + ATH12K_LE32_DEC_ENC(user->info0, 975 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS, 976 + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) | 977 + ATH12K_LE32_DEC_ENC(user->info0, 978 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED, 979 + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); 980 + 981 + ppdu_info->mcs = le32_get_bits(user->info0, 982 + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS); 983 + 984 + ppdu_info->nss = le32_get_bits(user->info0, 985 + 
HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1; 986 + } 987 + 988 + static inline bool 989 + ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info) 990 + { 991 + if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU && 992 + usig_info->ul_dl == 1) 993 + return true; 994 + 995 + return false; 996 + } 997 + 998 + static void 999 + ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv, 1000 + struct hal_rx_mon_ppdu_info *ppdu_info) 1001 + { 1002 + const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv; 1003 + 1004 + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); 1005 + ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info); 1006 + 1007 + if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info)) 1008 + ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo, 1009 + ppdu_info); 1010 + else 1011 + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo, 1012 + ppdu_info); 1013 + } 1014 + 1015 + static void 1016 + ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb, 1017 + struct hal_rx_mon_ppdu_info *ppdu_info) 1018 + { 1019 + const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1; 1020 + const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2; 1021 + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; 1022 + enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126; 1023 + enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111; 1024 + u32 data; 1025 + 1026 + ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3; 1027 + ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4; 1028 + ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5; 1029 + ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6; 1030 + ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1; 1031 + ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2; 1032 + ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2; 1033 + ru_111 = 
IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1; 1034 + 1035 + switch (ppdu_info->u_sig_info.bw) { 1036 + case HAL_EHT_BW_320_2: 1037 + case HAL_EHT_BW_320_1: 1038 + data = __le32_to_cpu(eht->data[4]); 1039 + /* CC1 2::3 */ 1040 + data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN | 1041 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1042 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3, 1043 + ru_123); 1044 + eht->data[4] = cpu_to_le32(data); 1045 + 1046 + data = __le32_to_cpu(eht->data[5]); 1047 + /* CC1 2::4 */ 1048 + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN | 1049 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1050 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4, 1051 + ru_124); 1052 + 1053 + /* CC1 2::5 */ 1054 + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN | 1055 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1056 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5, 1057 + ru_125); 1058 + eht->data[5] = cpu_to_le32(data); 1059 + 1060 + data = __le32_to_cpu(eht->data[6]); 1061 + /* CC1 2::6 */ 1062 + data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN | 1063 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1064 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6, 1065 + ru_126); 1066 + eht->data[6] = cpu_to_le32(data); 1067 + 1068 + fallthrough; 1069 + case HAL_EHT_BW_160: 1070 + data = __le32_to_cpu(eht->data[3]); 1071 + /* CC1 2::1 */ 1072 + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN | 1073 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1074 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1, 1075 + ru_121); 1076 + /* CC1 2::2 */ 1077 + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN | 1078 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, 1079 + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2, 1080 + ru_122); 1081 + eht->data[3] = cpu_to_le32(data); 1082 + 1083 + fallthrough; 1084 + case HAL_EHT_BW_80: 1085 + data = __le32_to_cpu(eht->data[2]); 1086 + /* CC1 1::2 */ 1087 + data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN | 1088 + 
ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, 1089 + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2, 1090 + ru_112); 1091 + eht->data[2] = cpu_to_le32(data); 1092 + 1093 + fallthrough; 1094 + case HAL_EHT_BW_40: 1095 + fallthrough; 1096 + case HAL_EHT_BW_20: 1097 + data = __le32_to_cpu(eht->data[1]); 1098 + /* CC1 1::1 */ 1099 + data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN | 1100 + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, 1101 + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1, 1102 + ru_111); 1103 + eht->data[1] = cpu_to_le32(data); 1104 + break; 1105 + default: 1106 + break; 1107 + } 1108 + } 1109 + 1110 + static void 1111 + ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv, 1112 + struct hal_rx_mon_ppdu_info *ppdu_info) 1113 + { 1114 + const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv; 1115 + 1116 + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); 1117 + ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info); 1118 + 1119 + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo, 1120 + ppdu_info); 1121 + } 1122 + 1123 + static void 1124 + ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info, 1125 + const void *tlv_data) 1126 + { 1127 + ppdu_info->is_eht = true; 1128 + 1129 + if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info)) 1130 + ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info); 1131 + else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info)) 1132 + ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info); 1133 + else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info)) 1134 + ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info); 1135 + } 1136 + 1137 + static inline enum ath12k_eht_ru_size 1138 + hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size) 1139 + { 1140 + switch (hal_ru_size) { 1141 + case HAL_EHT_RU_26: 1142 + return ATH12K_EHT_RU_26; 1143 + case HAL_EHT_RU_52: 1144 + return ATH12K_EHT_RU_52; 1145 + case HAL_EHT_RU_78: 1146 + return 
ATH12K_EHT_RU_52_26; 1147 + case HAL_EHT_RU_106: 1148 + return ATH12K_EHT_RU_106; 1149 + case HAL_EHT_RU_132: 1150 + return ATH12K_EHT_RU_106_26; 1151 + case HAL_EHT_RU_242: 1152 + return ATH12K_EHT_RU_242; 1153 + case HAL_EHT_RU_484: 1154 + return ATH12K_EHT_RU_484; 1155 + case HAL_EHT_RU_726: 1156 + return ATH12K_EHT_RU_484_242; 1157 + case HAL_EHT_RU_996: 1158 + return ATH12K_EHT_RU_996; 1159 + case HAL_EHT_RU_996x2: 1160 + return ATH12K_EHT_RU_996x2; 1161 + case HAL_EHT_RU_996x3: 1162 + return ATH12K_EHT_RU_996x3; 1163 + case HAL_EHT_RU_996x4: 1164 + return ATH12K_EHT_RU_996x4; 1165 + case HAL_EHT_RU_NONE: 1166 + return ATH12K_EHT_RU_INVALID; 1167 + case HAL_EHT_RU_996_484: 1168 + return ATH12K_EHT_RU_996_484; 1169 + case HAL_EHT_RU_996x2_484: 1170 + return ATH12K_EHT_RU_996x2_484; 1171 + case HAL_EHT_RU_996x3_484: 1172 + return ATH12K_EHT_RU_996x3_484; 1173 + case HAL_EHT_RU_996_484_242: 1174 + return ATH12K_EHT_RU_996_484_242; 1175 + default: 1176 + return ATH12K_EHT_RU_INVALID; 1177 + } 1178 + } 1179 + 1180 + static inline u32 1181 + hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size) 1182 + { 1183 + switch (ru_size) { 1184 + case ATH12K_EHT_RU_26: 1185 + return RU_26; 1186 + case ATH12K_EHT_RU_52: 1187 + return RU_52; 1188 + case ATH12K_EHT_RU_52_26: 1189 + return RU_52_26; 1190 + case ATH12K_EHT_RU_106: 1191 + return RU_106; 1192 + case ATH12K_EHT_RU_106_26: 1193 + return RU_106_26; 1194 + case ATH12K_EHT_RU_242: 1195 + return RU_242; 1196 + case ATH12K_EHT_RU_484: 1197 + return RU_484; 1198 + case ATH12K_EHT_RU_484_242: 1199 + return RU_484_242; 1200 + case ATH12K_EHT_RU_996: 1201 + return RU_996; 1202 + case ATH12K_EHT_RU_996_484: 1203 + return RU_996_484; 1204 + case ATH12K_EHT_RU_996_484_242: 1205 + return RU_996_484_242; 1206 + case ATH12K_EHT_RU_996x2: 1207 + return RU_2X996; 1208 + case ATH12K_EHT_RU_996x2_484: 1209 + return RU_2X996_484; 1210 + case ATH12K_EHT_RU_996x3: 1211 + return RU_3X996; 1212 + case ATH12K_EHT_RU_996x3_484: 1213 
+ return RU_3X996_484; 1214 + case ATH12K_EHT_RU_996x4: 1215 + return RU_4X996; 1216 + default: 1217 + return RU_INVALID; 1218 + } 1219 + } 1220 + 1221 + static void 1222 + ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info, 1223 + u16 user_id, 1224 + struct hal_rx_mon_ppdu_info *ppdu_info) 1225 + { 1226 + struct hal_rx_user_status *mon_rx_user_status = NULL; 1227 + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; 1228 + enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID; 1229 + u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID; 1230 + u32 ru_type_80_0, ru_start_index_80_0; 1231 + u32 ru_type_80_1, ru_start_index_80_1; 1232 + u32 ru_type_80_2, ru_start_index_80_2; 1233 + u32 ru_type_80_3, ru_start_index_80_3; 1234 + u32 ru_size = 0, num_80mhz_with_ru = 0; 1235 + u64 ru_index_320mhz = 0; 1236 + u32 ru_index_per80mhz; 1237 + 1238 + reception_type = le32_get_bits(rx_usr_info->info0, 1239 + HAL_RX_USR_INFO0_RECEPTION_TYPE); 1240 + 1241 + switch (reception_type) { 1242 + case HAL_RECEPTION_TYPE_SU: 1243 + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 1244 + break; 1245 + case HAL_RECEPTION_TYPE_DL_MU_MIMO: 1246 + case HAL_RECEPTION_TYPE_UL_MU_MIMO: 1247 + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 1248 + break; 1249 + case HAL_RECEPTION_TYPE_DL_MU_OFMA: 1250 + case HAL_RECEPTION_TYPE_UL_MU_OFDMA: 1251 + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; 1252 + break; 1253 + case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO: 1254 + case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO: 1255 + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO; 1256 + } 1257 + 1258 + ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC); 1259 + ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC); 1260 + ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM); 1261 + ppdu_info->bw = le32_get_bits(rx_usr_info->info1, 
HAL_RX_USR_INFO1_RX_BW); 1262 + ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS); 1263 + ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1; 1264 + 1265 + if (user_id < HAL_MAX_UL_MU_USERS) { 1266 + mon_rx_user_status = &ppdu_info->userstats[user_id]; 1267 + mon_rx_user_status->mcs = ppdu_info->mcs; 1268 + mon_rx_user_status->nss = ppdu_info->nss; 1269 + } 1270 + 1271 + if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO || 1272 + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || 1273 + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)) 1274 + return; 1275 + 1276 + /* RU allocation present only for OFDMA reception */ 1277 + ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0); 1278 + ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3, 1279 + HAL_RX_USR_INFO3_RU_START_IDX_80_0); 1280 + if (ru_type_80_0 != HAL_EHT_RU_NONE) { 1281 + ru_size += ru_type_80_0; 1282 + ru_index_per80mhz = ru_start_index_80_0; 1283 + ru_index = ru_index_per80mhz; 1284 + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz); 1285 + num_80mhz_with_ru++; 1286 + } 1287 + 1288 + ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1); 1289 + ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3, 1290 + HAL_RX_USR_INFO3_RU_START_IDX_80_1); 1291 + if (ru_type_80_1 != HAL_EHT_RU_NONE) { 1292 + ru_size += ru_type_80_1; 1293 + ru_index_per80mhz = ru_start_index_80_1; 1294 + ru_index = ru_index_per80mhz; 1295 + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz); 1296 + num_80mhz_with_ru++; 1297 + } 1298 + 1299 + ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2); 1300 + ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3, 1301 + HAL_RX_USR_INFO3_RU_START_IDX_80_2); 1302 + if (ru_type_80_2 != HAL_EHT_RU_NONE) { 1303 + ru_size += ru_type_80_2; 1304 + ru_index_per80mhz = ru_start_index_80_2; 1305 
+ ru_index = ru_index_per80mhz; 1306 + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz); 1307 + num_80mhz_with_ru++; 1308 + } 1309 + 1310 + ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3); 1311 + ru_start_index_80_3 = le32_get_bits(rx_usr_info->info2, 1312 + HAL_RX_USR_INFO3_RU_START_IDX_80_3); 1313 + if (ru_type_80_3 != HAL_EHT_RU_NONE) { 1314 + ru_size += ru_type_80_3; 1315 + ru_index_per80mhz = ru_start_index_80_3; 1316 + ru_index = ru_index_per80mhz; 1317 + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz); 1318 + num_80mhz_with_ru++; 1319 + } 1320 + 1321 + if (num_80mhz_with_ru > 1) { 1322 + /* Calculate the MRU index */ 1323 + switch (ru_index_320mhz) { 1324 + case HAL_EHT_RU_996_484_0: 1325 + case HAL_EHT_RU_996x2_484_0: 1326 + case HAL_EHT_RU_996x3_484_0: 1327 + ru_index = 0; 1328 + break; 1329 + case HAL_EHT_RU_996_484_1: 1330 + case HAL_EHT_RU_996x2_484_1: 1331 + case HAL_EHT_RU_996x3_484_1: 1332 + ru_index = 1; 1333 + break; 1334 + case HAL_EHT_RU_996_484_2: 1335 + case HAL_EHT_RU_996x2_484_2: 1336 + case HAL_EHT_RU_996x3_484_2: 1337 + ru_index = 2; 1338 + break; 1339 + case HAL_EHT_RU_996_484_3: 1340 + case HAL_EHT_RU_996x2_484_3: 1341 + case HAL_EHT_RU_996x3_484_3: 1342 + ru_index = 3; 1343 + break; 1344 + case HAL_EHT_RU_996_484_4: 1345 + case HAL_EHT_RU_996x2_484_4: 1346 + case HAL_EHT_RU_996x3_484_4: 1347 + ru_index = 4; 1348 + break; 1349 + case HAL_EHT_RU_996_484_5: 1350 + case HAL_EHT_RU_996x2_484_5: 1351 + case HAL_EHT_RU_996x3_484_5: 1352 + ru_index = 5; 1353 + break; 1354 + case HAL_EHT_RU_996_484_6: 1355 + case HAL_EHT_RU_996x2_484_6: 1356 + case HAL_EHT_RU_996x3_484_6: 1357 + ru_index = 6; 1358 + break; 1359 + case HAL_EHT_RU_996_484_7: 1360 + case HAL_EHT_RU_996x2_484_7: 1361 + case HAL_EHT_RU_996x3_484_7: 1362 + ru_index = 7; 1363 + break; 1364 + case HAL_EHT_RU_996x2_484_8: 1365 + ru_index = 8; 1366 + break; 1367 + case HAL_EHT_RU_996x2_484_9: 1368 + ru_index = 9; 1369 + 
break; 1370 + case HAL_EHT_RU_996x2_484_10: 1371 + ru_index = 10; 1372 + break; 1373 + case HAL_EHT_RU_996x2_484_11: 1374 + ru_index = 11; 1375 + break; 1376 + default: 1377 + ru_index = HAL_EHT_RU_INVALID; 1378 + break; 1379 + } 1380 + 1381 + ru_size += 4; 1382 + } 1383 + 1384 + rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size); 1385 + if (rtap_ru_size != ATH12K_EHT_RU_INVALID) { 1386 + u32 known, data; 1387 + 1388 + known = __le32_to_cpu(eht->known); 1389 + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM; 1390 + eht->known = cpu_to_le32(known); 1391 + 1392 + data = __le32_to_cpu(eht->data[1]); 1393 + data |= u32_encode_bits(rtap_ru_size, 1394 + IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE); 1395 + eht->data[1] = cpu_to_le32(data); 1396 + } 1397 + 1398 + if (ru_index != HAL_EHT_RU_INVALID) { 1399 + u32 known, data; 1400 + 1401 + known = __le32_to_cpu(eht->known); 1402 + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM; 1403 + eht->known = cpu_to_le32(known); 1404 + 1405 + data = __le32_to_cpu(eht->data[1]); 1406 + data |= u32_encode_bits(rtap_ru_size, 1407 + IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX); 1408 + eht->data[1] = cpu_to_le32(data); 1409 + } 1410 + 1411 + if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID && 1412 + rtap_ru_size != ATH12K_EHT_RU_INVALID) { 1413 + mon_rx_user_status->ul_ofdma_ru_start_index = ru_index; 1414 + mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size; 1415 + 1416 + ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size); 1417 + 1418 + mon_rx_user_status->ul_ofdma_ru_width = ru_width; 1419 + mon_rx_user_status->ofdma_info_valid = 1; 1420 + } 559 1421 } 560 1422 561 1423 static enum hal_rx_mon_status 562 - ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, 1424 + ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar, 563 1425 struct ath12k_mon_data *pmon, 564 - u32 tlv_tag, const void *tlv_data, 565 - u32 userid) 1426 + const struct hal_tlv_64_hdr *tlv) 566 1427 { 567 1428 struct hal_rx_mon_ppdu_info 
*ppdu_info = &pmon->mon_ppdu_info; 568 - u32 info[7]; 1429 + const void *tlv_data = tlv->value; 1430 + u32 info[7], userid; 1431 + u16 tlv_tag, tlv_len; 1432 + 1433 + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); 1434 + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); 1435 + userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); 1436 + 1437 + if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) { 1438 + ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf); 1439 + 1440 + ppdu_info->tlv_aggr.in_progress = false; 1441 + ppdu_info->tlv_aggr.cur_len = 0; 1442 + } 569 1443 570 1444 switch (tlv_tag) { 571 1445 case HAL_RX_PPDU_START: { ··· 1504 638 ppdu_info->num_mpdu_fcs_err = 1505 639 u32_get_bits(info[0], 1506 640 HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR); 641 + ppdu_info->peer_id = 642 + u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID); 643 + 1507 644 switch (ppdu_info->preamble_type) { 1508 645 case HAL_RX_PREAMBLE_11N: 1509 646 ppdu_info->ht_flags = 1; ··· 1517 648 case HAL_RX_PREAMBLE_11AX: 1518 649 ppdu_info->he_flags = 1; 1519 650 break; 651 + case HAL_RX_PREAMBLE_11BE: 652 + ppdu_info->is_eht = true; 653 + break; 1520 654 default: 1521 655 break; 1522 656 } ··· 1527 655 if (userid < HAL_MAX_UL_MU_USERS) { 1528 656 struct hal_rx_user_status *rxuser_stats = 1529 657 &ppdu_info->userstats[userid]; 658 + 659 + if (ppdu_info->num_mpdu_fcs_ok > 1 || 660 + ppdu_info->num_mpdu_fcs_err > 1) 661 + ppdu_info->userstats[userid].ampdu_present = true; 662 + 1530 663 ppdu_info->num_users += 1; 1531 664 1532 665 ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); ··· 1607 730 HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); 1608 731 break; 1609 732 } 733 + case HAL_PHYRX_OTHER_RECEIVE_INFO: { 734 + const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data; 735 + 736 + ppdu_info->gi = le32_get_bits(cmn_usr_info->info0, 737 + HAL_RX_PHY_CMN_USER_INFO0_GI); 738 + break; 739 + } 740 + case 
HAL_RX_PPDU_START_USER_INFO: 741 + ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info); 742 + break; 743 + 1610 744 case HAL_RXPCU_PPDU_END_INFO: { 1611 745 const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; 1612 746 ··· 1631 743 } 1632 744 case HAL_RX_MPDU_START: { 1633 745 const struct hal_rx_mpdu_start *mpdu_start = tlv_data; 1634 - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 1635 746 u16 peer_id; 1636 747 1637 748 info[1] = __le32_to_cpu(mpdu_start->info1); ··· 1643 756 if (userid < HAL_MAX_UL_MU_USERS) { 1644 757 info[0] = __le32_to_cpu(mpdu_start->info0); 1645 758 ppdu_info->userid = userid; 1646 - ppdu_info->ampdu_id[userid] = 1647 - u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID); 759 + ppdu_info->userstats[userid].ampdu_id = 760 + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID); 1648 761 } 1649 - 1650 - mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); 1651 - if (!mon_mpdu) 1652 - return HAL_RX_MON_STATUS_PPDU_NOT_DONE; 1653 762 1654 763 break; 1655 764 } 1656 765 case HAL_RX_MSDU_START: 1657 766 /* TODO: add msdu start parsing logic */ 1658 767 break; 1659 - case HAL_MON_BUF_ADDR: { 1660 - struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; 1661 - const struct dp_mon_packet_info *packet_info = tlv_data; 1662 - int buf_id = u32_get_bits(packet_info->cookie, 1663 - DP_RXDMA_BUF_COOKIE_BUF_ID); 1664 - struct sk_buff *msdu; 1665 - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 1666 - struct ath12k_skb_rxcb *rxcb; 1667 - 1668 - spin_lock_bh(&buf_ring->idr_lock); 1669 - msdu = idr_remove(&buf_ring->bufs_idr, buf_id); 1670 - spin_unlock_bh(&buf_ring->idr_lock); 1671 - 1672 - if (unlikely(!msdu)) { 1673 - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", 1674 - buf_id); 1675 - return HAL_RX_MON_STATUS_PPDU_NOT_DONE; 1676 - } 1677 - 1678 - rxcb = ATH12K_SKB_RXCB(msdu); 1679 - dma_unmap_single(ab->dev, rxcb->paddr, 1680 - msdu->len + skb_tailroom(msdu), 1681 - DMA_FROM_DEVICE); 1682 - 1683 - 
if (mon_mpdu->tail) 1684 - mon_mpdu->tail->next = msdu; 1685 - else 1686 - mon_mpdu->tail = msdu; 1687 - 1688 - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); 1689 - 1690 - break; 1691 - } 1692 - case HAL_RX_MSDU_END: { 1693 - const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data; 1694 - bool is_first_msdu_in_mpdu; 1695 - u16 msdu_end_info; 1696 - 1697 - msdu_end_info = __le16_to_cpu(msdu_end->info5); 1698 - is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info, 1699 - RX_MSDU_END_INFO5_FIRST_MSDU); 1700 - if (is_first_msdu_in_mpdu) { 1701 - pmon->mon_mpdu->head = pmon->mon_mpdu->tail; 1702 - pmon->mon_mpdu->tail = NULL; 1703 - } 1704 - break; 1705 - } 768 + case HAL_MON_BUF_ADDR: 769 + return HAL_RX_MON_STATUS_BUF_ADDR; 770 + case HAL_RX_MSDU_END: 771 + return HAL_RX_MON_STATUS_MSDU_END; 1706 772 case HAL_RX_MPDU_END: 1707 - list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); 773 + return HAL_RX_MON_STATUS_MPDU_END; 774 + case HAL_PHYRX_GENERIC_U_SIG: 775 + ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info); 776 + break; 777 + case HAL_PHYRX_GENERIC_EHT_SIG: 778 + /* Handle the case where aggregation is in progress 779 + * or the current TLV is one of the TLVs which should be 780 + * aggregated 781 + */ 782 + if (!ppdu_info->tlv_aggr.in_progress) { 783 + ppdu_info->tlv_aggr.in_progress = true; 784 + ppdu_info->tlv_aggr.tlv_tag = tlv_tag; 785 + ppdu_info->tlv_aggr.cur_len = 0; 786 + } 787 + 788 + ppdu_info->is_eht = true; 789 + 790 + ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data); 1708 791 break; 1709 792 case HAL_DUMMY: 1710 793 return HAL_RX_MON_STATUS_BUF_DONE; ··· 1701 844 } 1702 845 1703 846 static struct sk_buff * 1704 - ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, 847 + ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, 1705 848 struct sk_buff *head_msdu, struct sk_buff *tail_msdu, 1706 849 struct ieee80211_rx_status *rxs, bool *fcs_err) 1707 850 { ··· 1862 1005 { 1863 1006 struct ieee80211_supported_band *sband; 1864 
1007 u8 *ptr = NULL; 1865 - u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid]; 1866 1008 1867 1009 rxs->flag |= RX_FLAG_MACTIME_START; 1868 1010 rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; 1869 1011 rxs->nss = ppduinfo->nss + 1; 1870 1012 1871 - if (ampdu_id) { 1013 + if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) { 1872 1014 rxs->flag |= RX_FLAG_AMPDU_DETAILS; 1873 - rxs->ampdu_reference = ampdu_id; 1015 + rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id; 1874 1016 } 1875 1017 1876 - if (ppduinfo->he_mu_flags) { 1018 + if (ppduinfo->is_eht || ppduinfo->eht_usig) { 1019 + struct ieee80211_radiotap_tlv *tlv; 1020 + struct ieee80211_radiotap_eht *eht; 1021 + struct ieee80211_radiotap_eht_usig *usig; 1022 + u16 len = 0, i, eht_len, usig_len; 1023 + u8 user; 1024 + 1025 + if (ppduinfo->is_eht) { 1026 + eht_len = struct_size(eht, 1027 + user_info, 1028 + ppduinfo->eht_info.num_user_info); 1029 + len += sizeof(*tlv) + eht_len; 1030 + } 1031 + 1032 + if (ppduinfo->eht_usig) { 1033 + usig_len = sizeof(*usig); 1034 + len += sizeof(*tlv) + usig_len; 1035 + } 1036 + 1037 + rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; 1038 + rxs->encoding = RX_ENC_EHT; 1039 + 1040 + skb_reset_mac_header(mon_skb); 1041 + 1042 + tlv = skb_push(mon_skb, len); 1043 + 1044 + if (ppduinfo->is_eht) { 1045 + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT); 1046 + tlv->len = cpu_to_le16(eht_len); 1047 + 1048 + eht = (struct ieee80211_radiotap_eht *)tlv->data; 1049 + eht->known = ppduinfo->eht_info.eht.known; 1050 + 1051 + for (i = 0; 1052 + i < ARRAY_SIZE(eht->data) && 1053 + i < ARRAY_SIZE(ppduinfo->eht_info.eht.data); 1054 + i++) 1055 + eht->data[i] = ppduinfo->eht_info.eht.data[i]; 1056 + 1057 + for (user = 0; user < ppduinfo->eht_info.num_user_info; user++) 1058 + put_unaligned_le32(ppduinfo->eht_info.user_info[user], 1059 + &eht->user_info[user]); 1060 + 1061 + tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len]; 1062 + } 1063 + 1064 + if 
(ppduinfo->eht_usig) { 1065 + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG); 1066 + tlv->len = cpu_to_le16(usig_len); 1067 + 1068 + usig = (struct ieee80211_radiotap_eht_usig *)tlv->data; 1069 + *usig = ppduinfo->u_sig_info.usig; 1070 + } 1071 + } else if (ppduinfo->he_mu_flags) { 1877 1072 rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; 1878 1073 rxs->encoding = RX_ENC_HE; 1879 1074 ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); ··· 2034 1125 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2035 1126 } 2036 1127 2037 - static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, 1128 + static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, 2038 1129 struct sk_buff *head_msdu, struct sk_buff *tail_msdu, 2039 1130 struct hal_rx_mon_ppdu_info *ppduinfo, 2040 1131 struct napi_struct *napi) ··· 2044 1135 struct ieee80211_rx_status *rxs = &dp->rx_status; 2045 1136 bool fcs_err = false; 2046 1137 2047 - mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, 1138 + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, 2048 1139 head_msdu, tail_msdu, 2049 1140 rxs, &fcs_err); 2050 1141 if (!mon_skb) ··· 2089 1180 } 2090 1181 2091 1182 static enum hal_rx_mon_status 2092 - ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon, 1183 + ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon, 2093 1184 struct sk_buff *skb) 2094 1185 { 2095 - struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; 2096 1186 struct hal_tlv_64_hdr *tlv; 1187 + struct ath12k_skb_rxcb *rxcb; 2097 1188 enum hal_rx_mon_status hal_status; 2098 - u32 tlv_userid; 2099 1189 u16 tlv_tag, tlv_len; 2100 1190 u8 *ptr = skb->data; 2101 - 2102 - memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); 2103 1191 2104 1192 do { 2105 1193 tlv = (struct hal_tlv_64_hdr *)ptr; 2106 1194 tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); 2107 - tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); 2108 - tlv_userid = le64_get_bits(tlv->tl, 
HAL_TLV_64_USR_ID); 2109 - ptr += sizeof(*tlv); 2110 1195 2111 1196 /* The actual length of PPDU_END is the combined length of many PHY 2112 1197 * TLVs that follow. Skip the TLV header and ··· 2110 1207 2111 1208 if (tlv_tag == HAL_RX_PPDU_END) 2112 1209 tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); 1210 + else 1211 + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); 2113 1212 2114 - hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, 2115 - tlv_tag, ptr, tlv_userid); 2116 - ptr += tlv_len; 1213 + hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv); 1214 + ptr += sizeof(*tlv) + tlv_len; 2117 1215 ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); 2118 1216 2119 1217 if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) 2120 1218 break; 2121 1219 2122 - } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE); 1220 + } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) || 1221 + (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) || 1222 + (hal_status == HAL_RX_MON_STATUS_MPDU_END) || 1223 + (hal_status == HAL_RX_MON_STATUS_MSDU_END)); 1224 + 1225 + rxcb = ATH12K_SKB_RXCB(skb); 1226 + if (rxcb->is_end_of_ppdu) 1227 + hal_status = HAL_RX_MON_STATUS_PPDU_DONE; 2123 1228 2124 1229 return hal_status; 2125 1230 } ··· 2135 1224 enum hal_rx_mon_status 2136 1225 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, 2137 1226 struct ath12k_mon_data *pmon, 2138 - int mac_id, 2139 1227 struct sk_buff *skb, 2140 1228 struct napi_struct *napi) 2141 1229 { 2142 - struct ath12k_base *ab = ar->ab; 2143 1230 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; 2144 1231 struct dp_mon_mpdu *tmp; 2145 1232 struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 2146 1233 struct sk_buff *head_msdu, *tail_msdu; 2147 1234 enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE; 2148 1235 2149 - ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); 1236 + ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); 2150 1237 2151 1238 list_for_each_entry_safe(mon_mpdu, tmp, 
&pmon->dp_rx_mon_mpdu_list, list) { 2152 1239 list_del(&mon_mpdu->list); ··· 2152 1243 tail_msdu = mon_mpdu->tail; 2153 1244 2154 1245 if (head_msdu && tail_msdu) { 2155 - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, 1246 + ath12k_dp_mon_rx_deliver(ar, head_msdu, 2156 1247 tail_msdu, ppdu_info, napi); 2157 1248 } 2158 1249 ··· 2833 1924 } 2834 1925 2835 1926 static void 2836 - ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, 1927 + ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, 2837 1928 struct napi_struct *napi, 2838 1929 struct dp_mon_tx_ppdu_info *tx_ppdu_info) 2839 1930 { ··· 2847 1938 tail_msdu = mon_mpdu->tail; 2848 1939 2849 1940 if (head_msdu) 2850 - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu, 1941 + ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu, 2851 1942 &tx_ppdu_info->rx_status, napi); 2852 1943 2853 1944 kfree(mon_mpdu); ··· 2857 1948 enum hal_rx_mon_status 2858 1949 ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, 2859 1950 struct ath12k_mon_data *pmon, 2860 - int mac_id, 2861 1951 struct sk_buff *skb, 2862 1952 struct napi_struct *napi, 2863 1953 u32 ppdu_id) ··· 2903 1995 break; 2904 1996 } while (tlv_status != DP_MON_TX_FES_STATUS_END); 2905 1997 2906 - ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info); 2907 - ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info); 1998 + ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info); 1999 + ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info); 2908 2000 2909 2001 return tlv_status; 2910 - } 2911 - 2912 - int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, 2913 - enum dp_monitor_mode monitor_mode, 2914 - struct napi_struct *napi) 2915 - { 2916 - struct hal_mon_dest_desc *mon_dst_desc; 2917 - struct ath12k_pdev_dp *pdev_dp = &ar->dp; 2918 - struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data; 2919 - struct ath12k_base *ab = ar->ab; 2920 - struct ath12k_dp 
*dp = &ab->dp; 2921 - struct sk_buff *skb; 2922 - struct ath12k_skb_rxcb *rxcb; 2923 - struct dp_srng *mon_dst_ring; 2924 - struct hal_srng *srng; 2925 - struct dp_rxdma_mon_ring *buf_ring; 2926 - u64 cookie; 2927 - u32 ppdu_id; 2928 - int num_buffs_reaped = 0, srng_id, buf_id; 2929 - u8 dest_idx = 0, i; 2930 - bool end_of_ppdu; 2931 - struct hal_rx_mon_ppdu_info *ppdu_info; 2932 - struct ath12k_peer *peer = NULL; 2933 - 2934 - ppdu_info = &pmon->mon_ppdu_info; 2935 - memset(ppdu_info, 0, sizeof(*ppdu_info)); 2936 - ppdu_info->peer_id = HAL_INVALID_PEERID; 2937 - 2938 - srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); 2939 - 2940 - if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) { 2941 - mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; 2942 - buf_ring = &dp->rxdma_mon_buf_ring; 2943 - } else { 2944 - return 0; 2945 - } 2946 - 2947 - srng = &ab->hal.srng_list[mon_dst_ring->ring_id]; 2948 - 2949 - spin_lock_bh(&srng->lock); 2950 - ath12k_hal_srng_access_begin(ab, srng); 2951 - 2952 - while (likely(*budget)) { 2953 - *budget -= 1; 2954 - mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng); 2955 - if (unlikely(!mon_dst_desc)) 2956 - break; 2957 - 2958 - cookie = le32_to_cpu(mon_dst_desc->cookie); 2959 - buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); 2960 - 2961 - spin_lock_bh(&buf_ring->idr_lock); 2962 - skb = idr_remove(&buf_ring->bufs_idr, buf_id); 2963 - spin_unlock_bh(&buf_ring->idr_lock); 2964 - 2965 - if (unlikely(!skb)) { 2966 - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", 2967 - buf_id); 2968 - goto move_next; 2969 - } 2970 - 2971 - rxcb = ATH12K_SKB_RXCB(skb); 2972 - dma_unmap_single(ab->dev, rxcb->paddr, 2973 - skb->len + skb_tailroom(skb), 2974 - DMA_FROM_DEVICE); 2975 - 2976 - pmon->dest_skb_q[dest_idx] = skb; 2977 - dest_idx++; 2978 - ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id); 2979 - end_of_ppdu = le32_get_bits(mon_dst_desc->info0, 2980 - HAL_MON_DEST_INFO0_END_OF_PPDU); 2981 - if (!end_of_ppdu) 2982 - 
continue; 2983 - 2984 - for (i = 0; i < dest_idx; i++) { 2985 - skb = pmon->dest_skb_q[i]; 2986 - 2987 - if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) 2988 - ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id, 2989 - skb, napi); 2990 - else 2991 - ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id, 2992 - skb, napi, ppdu_id); 2993 - 2994 - peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); 2995 - 2996 - if (!peer || !peer->sta) { 2997 - ath12k_dbg(ab, ATH12K_DBG_DATA, 2998 - "failed to find the peer with peer_id %d\n", 2999 - ppdu_info->peer_id); 3000 - dev_kfree_skb_any(skb); 3001 - continue; 3002 - } 3003 - 3004 - dev_kfree_skb_any(skb); 3005 - pmon->dest_skb_q[i] = NULL; 3006 - } 3007 - 3008 - dest_idx = 0; 3009 - move_next: 3010 - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); 3011 - ath12k_hal_srng_src_get_next_entry(ab, srng); 3012 - num_buffs_reaped++; 3013 - } 3014 - 3015 - ath12k_hal_srng_access_end(ab, srng); 3016 - spin_unlock_bh(&srng->lock); 3017 - 3018 - return num_buffs_reaped; 3019 2002 } 3020 2003 3021 2004 static void ··· 2915 2116 struct hal_rx_user_status *user_stats, 2916 2117 u32 num_msdu) 2917 2118 { 2918 - u32 rate_idx = 0; 2119 + struct ath12k_rx_peer_rate_stats *stats; 2919 2120 u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs; 2920 2121 u32 nss_idx = (user_stats) ? 
user_stats->nss - 1 : ppdu_info->nss - 1; 2921 2122 u32 bw_idx = ppdu_info->bw; 2922 2123 u32 gi_idx = ppdu_info->gi; 2124 + u32 len; 2923 2125 2924 - if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) || 2925 - (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) { 2126 + if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS || 2127 + bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) { 2926 2128 return; 2927 2129 } 2928 2130 2929 - if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N || 2930 - ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) { 2931 - rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx; 2932 - rate_idx += bw_idx * 2 + gi_idx; 2933 - } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) { 2131 + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX || 2132 + ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE) 2934 2133 gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi); 2935 - rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx; 2936 - rate_idx += bw_idx * 3 + gi_idx; 2937 - } else { 2938 - return; 2939 - } 2940 2134 2941 - rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu; 2135 + rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu; 2136 + stats = &rx_stats->byte_stats; 2137 + 2942 2138 if (user_stats) 2943 - rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count; 2139 + len = user_stats->mpdu_ok_byte_count; 2944 2140 else 2945 - rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len; 2141 + len = ppdu_info->mpdu_len; 2142 + 2143 + stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len; 2946 2144 } 2947 2145 2948 2146 static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, ··· 2953 2157 return; 2954 2158 2955 2159 arsta->rssi_comb = ppdu_info->rssi_comb; 2160 + ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); 2956 2161 2957 2162 num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + 2958 2163 ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; 
··· 3024 2227 ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) { 3025 2228 rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu; 3026 2229 rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; 2230 + } 2231 + 2232 + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE && 2233 + ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) { 2234 + rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu; 2235 + rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; 3027 2236 } 3028 2237 3029 2238 if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || ··· 3132 2329 return; 3133 2330 3134 2331 arsta->rssi_comb = ppdu_info->rssi_comb; 2332 + ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); 3135 2333 3136 2334 num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count + 3137 2335 user_stats->udp_msdu_count + user_stats->other_msdu_count; ··· 3219 2415 ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i); 3220 2416 } 3221 2417 3222 - int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, 3223 - struct napi_struct *napi, int *budget) 2418 + static void 2419 + ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info) 2420 + { 2421 + memset(ppdu_info, 0, sizeof(*ppdu_info)); 2422 + ppdu_info->peer_id = HAL_INVALID_PEERID; 2423 + } 2424 + 2425 + int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, 2426 + struct napi_struct *napi) 3224 2427 { 3225 2428 struct ath12k_base *ab = ar->ab; 3226 2429 struct ath12k_pdev_dp *pdev_dp = &ar->dp; ··· 3243 2432 struct ath12k_sta *ahsta = NULL; 3244 2433 struct ath12k_link_sta *arsta; 3245 2434 struct ath12k_peer *peer; 2435 + struct sk_buff_head skb_list; 3246 2436 u64 cookie; 3247 2437 int num_buffs_reaped = 0, srng_id, buf_id; 3248 - u8 dest_idx = 0, i; 3249 - bool end_of_ppdu; 3250 - u32 hal_status; 2438 + u32 hal_status, end_offset, info0, end_reason; 2439 + u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx); 3251 2440 3252 - srng_id = 
ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); 2441 + __skb_queue_head_init(&skb_list); 2442 + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx); 3253 2443 mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; 3254 2444 buf_ring = &dp->rxdma_mon_buf_ring; 3255 2445 ··· 3263 2451 mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng); 3264 2452 if (unlikely(!mon_dst_desc)) 3265 2453 break; 2454 + 2455 + /* In case of empty descriptor, the cookie in the ring descriptor 2456 + * is invalid. Therefore, this entry is skipped, and ring processing 2457 + * continues. 2458 + */ 2459 + info0 = le32_to_cpu(mon_dst_desc->info0); 2460 + if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC)) 2461 + goto move_next; 2462 + 3266 2463 cookie = le32_to_cpu(mon_dst_desc->cookie); 3267 2464 buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); 3268 2465 ··· 3289 2468 dma_unmap_single(ab->dev, rxcb->paddr, 3290 2469 skb->len + skb_tailroom(skb), 3291 2470 DMA_FROM_DEVICE); 3292 - pmon->dest_skb_q[dest_idx] = skb; 3293 - dest_idx++; 3294 - end_of_ppdu = le32_get_bits(mon_dst_desc->info0, 3295 - HAL_MON_DEST_INFO0_END_OF_PPDU); 3296 - if (!end_of_ppdu) 3297 - continue; 3298 2471 3299 - for (i = 0; i < dest_idx; i++) { 3300 - skb = pmon->dest_skb_q[i]; 3301 - hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); 2472 + end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON); 3302 2473 3303 - if (ppdu_info->peer_id == HAL_INVALID_PEERID || 3304 - hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 3305 - dev_kfree_skb_any(skb); 3306 - continue; 3307 - } 3308 - 3309 - rcu_read_lock(); 3310 - spin_lock_bh(&ab->base_lock); 3311 - peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); 3312 - if (!peer || !peer->sta) { 3313 - ath12k_dbg(ab, ATH12K_DBG_DATA, 3314 - "failed to find the peer with peer_id %d\n", 3315 - ppdu_info->peer_id); 3316 - spin_unlock_bh(&ab->base_lock); 3317 - rcu_read_unlock(); 3318 - dev_kfree_skb_any(skb); 3319 - continue; 3320 - } 3321 - 3322 - if 
(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { 3323 - ahsta = ath12k_sta_to_ahsta(peer->sta); 3324 - arsta = &ahsta->deflink; 3325 - ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, 3326 - ppdu_info); 3327 - } else if ((ppdu_info->fc_valid) && 3328 - (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) { 3329 - ath12k_dp_mon_rx_process_ulofdma(ppdu_info); 3330 - ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info); 3331 - } 3332 - 3333 - spin_unlock_bh(&ab->base_lock); 3334 - rcu_read_unlock(); 2474 + /* HAL_MON_FLUSH_DETECTED implies that an rx flush received at the end of 2475 + * rx PPDU and HAL_MON_PPDU_TRUNCATED implies that the PPDU got 2476 + * truncated due to a system level error. In both the cases, buffer data 2477 + * can be discarded 2478 + */ 2479 + if ((end_reason == HAL_MON_FLUSH_DETECTED) || 2480 + (end_reason == HAL_MON_PPDU_TRUNCATED)) { 2481 + ath12k_dbg(ab, ATH12K_DBG_DATA, 2482 + "Monitor dest descriptor end reason %d", end_reason); 3335 2483 dev_kfree_skb_any(skb); 3336 - memset(ppdu_info, 0, sizeof(*ppdu_info)); 3337 - ppdu_info->peer_id = HAL_INVALID_PEERID; 2484 + goto move_next; 3338 2485 } 3339 2486 3340 - dest_idx = 0; 2487 + /* Calculate the budget when the ring descriptor with the 2488 + * HAL_MON_END_OF_PPDU to ensure that one PPDU worth of data is always 2489 + * reaped. This helps to efficiently utilize the NAPI budget. 
2490 + */ 2491 + if (end_reason == HAL_MON_END_OF_PPDU) { 2492 + *budget -= 1; 2493 + rxcb->is_end_of_ppdu = true; 2494 + } 2495 + 2496 + end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET); 2497 + if (likely(end_offset <= DP_RX_BUFFER_SIZE)) { 2498 + skb_put(skb, end_offset); 2499 + } else { 2500 + ath12k_warn(ab, 2501 + "invalid offset on mon stats destination %u\n", 2502 + end_offset); 2503 + skb_put(skb, DP_RX_BUFFER_SIZE); 2504 + } 2505 + 2506 + __skb_queue_tail(&skb_list, skb); 2507 + 3341 2508 move_next: 3342 2509 ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); 3343 - ath12k_hal_srng_src_get_next_entry(ab, srng); 2510 + ath12k_hal_srng_dst_get_next_entry(ab, srng); 3344 2511 num_buffs_reaped++; 3345 2512 } 3346 2513 3347 2514 ath12k_hal_srng_access_end(ab, srng); 3348 2515 spin_unlock_bh(&srng->lock); 2516 + 2517 + if (!num_buffs_reaped) 2518 + return 0; 2519 + 2520 + /* In some cases, one PPDU worth of data can be spread across multiple NAPI 2521 + * schedules, To avoid losing existing parsed ppdu_info information, skip 2522 + * the memset of the ppdu_info structure and continue processing it. 
2523 + */ 2524 + if (!ppdu_info->ppdu_continuation) 2525 + ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info); 2526 + 2527 + while ((skb = __skb_dequeue(&skb_list))) { 2528 + hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); 2529 + if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { 2530 + ppdu_info->ppdu_continuation = true; 2531 + dev_kfree_skb_any(skb); 2532 + continue; 2533 + } 2534 + 2535 + if (ppdu_info->peer_id == HAL_INVALID_PEERID) 2536 + goto free_skb; 2537 + 2538 + rcu_read_lock(); 2539 + spin_lock_bh(&ab->base_lock); 2540 + peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); 2541 + if (!peer || !peer->sta) { 2542 + ath12k_dbg(ab, ATH12K_DBG_DATA, 2543 + "failed to find the peer with monitor peer_id %d\n", 2544 + ppdu_info->peer_id); 2545 + goto next_skb; 2546 + } 2547 + 2548 + if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { 2549 + ahsta = ath12k_sta_to_ahsta(peer->sta); 2550 + arsta = &ahsta->deflink; 2551 + ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, 2552 + ppdu_info); 2553 + } else if ((ppdu_info->fc_valid) && 2554 + (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) { 2555 + ath12k_dp_mon_rx_process_ulofdma(ppdu_info); 2556 + ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info); 2557 + } 2558 + 2559 + next_skb: 2560 + spin_unlock_bh(&ab->base_lock); 2561 + rcu_read_unlock(); 2562 + free_skb: 2563 + dev_kfree_skb_any(skb); 2564 + ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info); 2565 + } 2566 + 3349 2567 return num_buffs_reaped; 3350 2568 } 3351 2569 ··· 3395 2535 struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id); 3396 2536 int num_buffs_reaped = 0; 3397 2537 3398 - if (!ar->monitor_started) 3399 - ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget); 3400 - else 3401 - num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget, 3402 - monitor_mode, napi); 2538 + if (ab->hw_params->rxdma1_enable) { 2539 + if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) 2540 + num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi); 2541 + 
} 3403 2542 3404 2543 return num_buffs_reaped; 3405 2544 }
+3 -8
drivers/net/wireless/ath/ath12k/dp_mon.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_DP_MON_H ··· 77 77 enum hal_rx_mon_status 78 78 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, 79 79 struct ath12k_mon_data *pmon, 80 - int mac_id, struct sk_buff *skb, 80 + struct sk_buff *skb, 81 81 struct napi_struct *napi); 82 82 int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, 83 83 struct dp_rxdma_mon_ring *buf_ring, 84 84 int req_entries); 85 - int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, 86 - int *budget, enum dp_monitor_mode monitor_mode, 87 - struct napi_struct *napi); 88 85 int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, 89 86 struct napi_struct *napi, int budget, 90 87 enum dp_monitor_mode monitor_mode); ··· 93 96 enum hal_rx_mon_status 94 97 ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, 95 98 struct ath12k_mon_data *pmon, 96 - int mac_id, 97 99 struct sk_buff *skb, 98 100 struct napi_struct *napi, 99 101 u32 ppdu_id); 100 102 void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info); 101 - int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, 102 - struct napi_struct *napi, int *budget); 103 + int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi); 103 104 #endif
+61 -5
drivers/net/wireless/ath/ath12k/dp_rx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/ieee80211.h> ··· 2392 2392 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); 2393 2393 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2394 2394 break; 2395 + case RX_MSDU_START_PKT_TYPE_11BE: 2396 + rx_status->rate_idx = rate_mcs; 2397 + 2398 + if (rate_mcs > ATH12K_EHT_MCS_MAX) { 2399 + ath12k_warn(ar->ab, 2400 + "Received with invalid mcs in EHT mode %d\n", 2401 + rate_mcs); 2402 + break; 2403 + } 2404 + 2405 + rx_status->encoding = RX_ENC_EHT; 2406 + rx_status->nss = nss; 2407 + rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); 2408 + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); 2409 + break; 2410 + default: 2411 + break; 2395 2412 } 2396 2413 } 2397 2414 ··· 2503 2486 spin_unlock_bh(&ab->base_lock); 2504 2487 2505 2488 ath12k_dbg(ab, ATH12K_DBG_DATA, 2506 - "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2489 + "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 2507 2490 msdu, 2508 2491 msdu->len, 2509 2492 peer ? peer->addr : NULL, ··· 2514 2497 (status->encoding == RX_ENC_HT) ? "ht" : "", 2515 2498 (status->encoding == RX_ENC_VHT) ? "vht" : "", 2516 2499 (status->encoding == RX_ENC_HE) ? "he" : "", 2500 + (status->encoding == RX_ENC_EHT) ? "eht" : "", 2517 2501 (status->bw == RATE_INFO_BW_40) ? "40" : "", 2518 2502 (status->bw == RATE_INFO_BW_80) ? "80" : "", 2519 2503 (status->bw == RATE_INFO_BW_160) ? 
"160" : "", ··· 2546 2528 rx_status->flag |= RX_FLAG_8023; 2547 2529 2548 2530 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2531 + } 2532 + 2533 + static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, 2534 + struct hal_rx_desc *rx_desc, 2535 + struct sk_buff *msdu) 2536 + { 2537 + struct ieee80211_hdr *hdr; 2538 + u8 decap_type; 2539 + u32 hdr_len; 2540 + 2541 + decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); 2542 + if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) 2543 + return true; 2544 + 2545 + hdr = (struct ieee80211_hdr *)msdu->data; 2546 + hdr_len = ieee80211_hdrlen(hdr->frame_control); 2547 + 2548 + if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) 2549 + return true; 2550 + 2551 + ab->soc_stats.invalid_rbm++; 2552 + WARN_ON_ONCE(1); 2553 + return false; 2549 2554 } 2550 2555 2551 2556 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, ··· 2627 2586 "failed to coalesce msdu rx buffer%d\n", ret); 2628 2587 goto free_out; 2629 2588 } 2589 + } 2590 + 2591 + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { 2592 + ret = -EINVAL; 2593 + goto free_out; 2630 2594 } 2631 2595 2632 2596 ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status); ··· 3023 2977 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED | 3024 2978 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; 3025 2979 skb_pull(msdu, hal_rx_desc_sz); 2980 + 2981 + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) 2982 + return -EINVAL; 3026 2983 3027 2984 ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); 3028 2985 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, ··· 3769 3720 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3770 3721 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3771 3722 } 3723 + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3724 + return -EINVAL; 3725 + 3772 3726 ath12k_dp_rx_h_ppdu(ar, desc, status); 3773 3727 3774 3728 ath12k_dp_rx_h_mpdu(ar, msdu, desc, status); ··· 3816 3764 return drop; 3817 3765 
} 3818 3766 3819 - static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, 3767 + static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, 3820 3768 struct ieee80211_rx_status *status) 3821 3769 { 3822 3770 struct ath12k_base *ab = ar->ab; ··· 3834 3782 skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); 3835 3783 skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); 3836 3784 3785 + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) 3786 + return true; 3787 + 3837 3788 ath12k_dp_rx_h_ppdu(ar, desc, status); 3838 3789 3839 3790 status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | ··· 3844 3789 3845 3790 ath12k_dp_rx_h_undecap(ar, msdu, desc, 3846 3791 HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); 3792 + return false; 3847 3793 } 3848 3794 3849 3795 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, ··· 3863 3807 case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: 3864 3808 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); 3865 3809 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { 3866 - ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3810 + drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); 3867 3811 break; 3868 3812 } 3869 3813 fallthrough; ··· 4088 4032 hw_links[hw_link_id].pdev_idx); 4089 4033 ar = partner_ab->pdevs[pdev_id].ar; 4090 4034 4091 - if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) { 4035 + if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) { 4092 4036 dev_kfree_skb_any(msdu); 4093 4037 continue; 4094 4038 }
+4 -4
drivers/net/wireless/ath/ath12k/dp_rx.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #ifndef ATH12K_DP_RX_H 7 7 #define ATH12K_DP_RX_H ··· 79 79 case RX_MSDU_START_SGI_3_2_US: 80 80 ret = NL80211_RATE_INFO_HE_GI_3_2; 81 81 break; 82 + default: 83 + ret = NL80211_RATE_INFO_HE_GI_0_8; 84 + break; 82 85 } 83 86 84 87 return ret; ··· 138 135 struct hal_rx_desc *desc); 139 136 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, 140 137 struct ieee80211_rx_status *rx_status); 141 - struct ath12k_peer * 142 - ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); 143 - 144 138 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); 145 139 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); 146 140
+228 -25
drivers/net/wireless/ath/ath12k/dp_tx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" 8 8 #include "dp_tx.h" 9 9 #include "debug.h" 10 10 #include "hw.h" 11 + #include "peer.h" 12 + #include "mac.h" 11 13 12 14 static enum hal_tcl_encap_type 13 15 ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb) ··· 119 117 le32_encode_bits(ti->data_len, 120 118 HAL_TX_MSDU_EXT_INFO1_BUF_LEN); 121 119 122 - tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | 120 + tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | 123 121 le32_encode_bits(ti->encap_type, 124 122 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) | 125 123 le32_encode_bits(ti->encrypt_type, ··· 219 217 } 220 218 221 219 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, 222 - struct sk_buff *skb) 220 + struct sk_buff *skb, bool gsn_valid, int mcbc_gsn) 223 221 { 224 222 struct ath12k_base *ab = ar->ab; 225 223 struct ath12k_dp *dp = &ab->dp; ··· 292 290 msdu_ext_desc = true; 293 291 } 294 292 293 + if (gsn_valid) { 294 + /* Reset and Initialize meta_data_flags with Global Sequence 295 + * Number (GSN) info. 
296 + */ 297 + ti.meta_data_flags = 298 + u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM, 299 + HTT_TCL_META_DATA_TYPE) | 300 + u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM); 301 + } 302 + 295 303 ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb); 296 304 ti.addr_search_flags = arvif->hal_addr_search_flags; 297 305 ti.search_type = arvif->search_type; 298 306 ti.type = HAL_TCL_DESC_TYPE_BUFFER; 299 307 ti.pkt_offset = 0; 300 308 ti.lmac_id = ar->lmac_id; 309 + 301 310 ti.vdev_id = arvif->vdev_id; 311 + if (gsn_valid) 312 + ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID; 313 + 302 314 ti.bss_ast_hash = arvif->ast_hash; 303 315 ti.bss_ast_idx = arvif->ast_idx; 304 316 ti.dscp_tid_tbl_idx = 0; ··· 384 368 add_htt_metadata = true; 385 369 msdu_ext_desc = true; 386 370 ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW); 371 + ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT; 387 372 ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW; 388 373 ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; 389 374 } ··· 415 398 if (ret < 0) { 416 399 ath12k_dbg(ab, ATH12K_DBG_DP_TX, 417 400 "Failed to add HTT meta data, dropping packet\n"); 401 + kfree_skb(skb_ext_desc); 418 402 goto fail_unmap_dma; 419 403 } 420 404 } ··· 576 558 577 559 switch (wbm_status) { 578 560 case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: 579 - case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: 580 - case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: 581 561 ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); 582 562 ts.ack_rssi = le32_get_bits(status_desc->info2, 583 563 HTT_TX_WBM_COMP_INFO2_ACK_RSSI); 584 564 ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); 585 565 break; 566 + case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: 567 + case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: 586 568 case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: 587 569 case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: 588 570 ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); ··· 596 578 ath12k_warn(ab, "Unknown htt tx status %d\n", 
wbm_status); 597 579 break; 598 580 } 581 + } 582 + 583 + static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts) 584 + { 585 + struct ath12k_base *ab = ar->ab; 586 + struct ath12k_peer *peer; 587 + struct ieee80211_sta *sta; 588 + struct ath12k_sta *ahsta; 589 + struct ath12k_link_sta *arsta; 590 + struct rate_info txrate = {0}; 591 + u16 rate, ru_tones; 592 + u8 rate_idx = 0; 593 + int ret; 594 + 595 + spin_lock_bh(&ab->base_lock); 596 + peer = ath12k_peer_find_by_id(ab, ts->peer_id); 597 + if (!peer || !peer->sta) { 598 + ath12k_dbg(ab, ATH12K_DBG_DP_TX, 599 + "failed to find the peer by id %u\n", ts->peer_id); 600 + spin_unlock_bh(&ab->base_lock); 601 + return; 602 + } 603 + sta = peer->sta; 604 + ahsta = ath12k_sta_to_ahsta(sta); 605 + arsta = &ahsta->deflink; 606 + 607 + /* This is to prefer choose the real NSS value arsta->last_txrate.nss, 608 + * if it is invalid, then choose the NSS value while assoc. 609 + */ 610 + if (arsta->last_txrate.nss) 611 + txrate.nss = arsta->last_txrate.nss; 612 + else 613 + txrate.nss = arsta->peer_nss; 614 + spin_unlock_bh(&ab->base_lock); 615 + 616 + switch (ts->pkt_type) { 617 + case HAL_TX_RATE_STATS_PKT_TYPE_11A: 618 + case HAL_TX_RATE_STATS_PKT_TYPE_11B: 619 + ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs, 620 + ts->pkt_type, 621 + &rate_idx, 622 + &rate); 623 + if (ret < 0) { 624 + ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret); 625 + return; 626 + } 627 + 628 + txrate.legacy = rate; 629 + break; 630 + case HAL_TX_RATE_STATS_PKT_TYPE_11N: 631 + if (ts->mcs > ATH12K_HT_MCS_MAX) { 632 + ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs); 633 + return; 634 + } 635 + 636 + if (txrate.nss != 0) 637 + txrate.mcs = ts->mcs + 8 * (txrate.nss - 1); 638 + 639 + txrate.flags = RATE_INFO_FLAGS_MCS; 640 + 641 + if (ts->sgi) 642 + txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 643 + break; 644 + case HAL_TX_RATE_STATS_PKT_TYPE_11AC: 645 + if (ts->mcs > ATH12K_VHT_MCS_MAX) { 646 + ath12k_warn(ab, 
"Invalid VHT mcs index %d\n", ts->mcs); 647 + return; 648 + } 649 + 650 + txrate.mcs = ts->mcs; 651 + txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 652 + 653 + if (ts->sgi) 654 + txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 655 + break; 656 + case HAL_TX_RATE_STATS_PKT_TYPE_11AX: 657 + if (ts->mcs > ATH12K_HE_MCS_MAX) { 658 + ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs); 659 + return; 660 + } 661 + 662 + txrate.mcs = ts->mcs; 663 + txrate.flags = RATE_INFO_FLAGS_HE_MCS; 664 + txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi); 665 + break; 666 + case HAL_TX_RATE_STATS_PKT_TYPE_11BE: 667 + if (ts->mcs > ATH12K_EHT_MCS_MAX) { 668 + ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs); 669 + return; 670 + } 671 + 672 + txrate.mcs = ts->mcs; 673 + txrate.flags = RATE_INFO_FLAGS_EHT_MCS; 674 + txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi); 675 + break; 676 + default: 677 + ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type); 678 + return; 679 + } 680 + 681 + txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw); 682 + 683 + if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { 684 + txrate.bw = RATE_INFO_BW_HE_RU; 685 + ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones); 686 + txrate.he_ru_alloc = 687 + ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); 688 + } 689 + 690 + if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) { 691 + txrate.bw = RATE_INFO_BW_EHT_RU; 692 + txrate.eht_ru_alloc = 693 + ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones); 694 + } 695 + 696 + spin_lock_bh(&ab->base_lock); 697 + arsta->txrate = txrate; 698 + spin_unlock_bh(&ab->base_lock); 599 699 } 600 700 601 701 static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, ··· 794 658 * Might end up reporting it out-of-band from HTT stats. 
795 659 */ 796 660 661 + ath12k_dp_tx_update_txcompl(ar, ts); 662 + 797 663 ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); 798 664 799 665 exit: ··· 806 668 struct hal_wbm_completion_ring_tx *desc, 807 669 struct hal_tx_status *ts) 808 670 { 671 + u32 info0 = le32_to_cpu(desc->rate_stats.info0); 672 + 809 673 ts->buf_rel_source = 810 674 le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE); 811 675 if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && ··· 822 682 823 683 ts->ppdu_id = le32_get_bits(desc->info1, 824 684 HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER); 825 - if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID) 826 - ts->rate_stats = le32_to_cpu(desc->rate_stats.info0); 827 - else 828 - ts->rate_stats = 0; 685 + 686 + ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID); 687 + 688 + if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) { 689 + ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE); 690 + ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS); 691 + ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI); 692 + ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW); 693 + ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU); 694 + ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX); 695 + } 829 696 } 830 697 831 698 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) ··· 961 814 *htt_ring_type = HTT_HW_TO_SW_RING; 962 815 break; 963 816 case HAL_RXDMA_MONITOR_BUF: 964 - *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; 817 + *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING; 965 818 *htt_ring_type = HTT_SW_TO_HW_RING; 966 819 break; 967 820 case HAL_RXDMA_MONITOR_STATUS: ··· 969 822 *htt_ring_type = HTT_SW_TO_HW_RING; 970 823 break; 971 824 case HAL_RXDMA_MONITOR_DST: 972 - *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; 825 + *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING; 973 826 *htt_ring_type = HTT_HW_TO_SW_RING; 974 827 break; 975 828 
case HAL_RXDMA_MONITOR_DESC: ··· 1118 971 skb_put(skb, len); 1119 972 cmd = (struct htt_ver_req_cmd *)skb->data; 1120 973 cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ, 1121 - HTT_VER_REQ_INFO_MSG_ID); 974 + HTT_OPTION_TAG); 975 + 976 + cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION, 977 + HTT_OPTION_TAG) | 978 + le32_encode_bits(HTT_TCL_METADATA_VER_SZ, 979 + HTT_OPTION_LEN) | 980 + le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2, 981 + HTT_OPTION_VALUE); 1122 982 1123 983 ret = ath12k_htc_send(&ab->htc, dp->eid, skb); 1124 984 if (ret) { ··· 1231 1077 cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP), 1232 1078 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS); 1233 1079 cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid, 1234 - HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID); 1080 + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID); 1081 + cmd->info0 |= 1082 + le32_encode_bits(tlv_filter->drop_threshold_valid, 1083 + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL); 1084 + cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable, 1085 + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON); 1086 + 1235 1087 cmd->info1 = le32_encode_bits(rx_buf_size, 1236 1088 HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE); 1089 + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt, 1090 + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT); 1091 + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl, 1092 + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL); 1093 + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data, 1094 + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA); 1237 1095 cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0); 1238 1096 cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1); 1239 1097 cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2); 1240 1098 cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3); 1241 1099 
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter); 1100 + 1101 + cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold, 1102 + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD); 1103 + cmd->info2 |= 1104 + le32_encode_bits(tlv_filter->enable_log_mgmt_type, 1105 + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE); 1106 + cmd->info2 |= 1107 + le32_encode_bits(tlv_filter->enable_log_ctrl_type, 1108 + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE); 1109 + cmd->info2 |= 1110 + le32_encode_bits(tlv_filter->enable_log_data_type, 1111 + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE); 1112 + 1113 + cmd->info3 = 1114 + le32_encode_bits(tlv_filter->enable_rx_tlv_offset, 1115 + HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET); 1116 + cmd->info3 |= 1117 + le32_encode_bits(tlv_filter->rx_tlv_offset, 1118 + HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET); 1242 1119 1243 1120 if (tlv_filter->offset_valid) { 1244 1121 cmd->rx_packet_offset = ··· 1395 1210 int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset) 1396 1211 { 1397 1212 struct ath12k_base *ab = ar->ab; 1398 - struct ath12k_dp *dp = &ab->dp; 1399 1213 struct htt_rx_ring_tlv_filter tlv_filter = {0}; 1400 - int ret, ring_id; 1214 + int ret, ring_id, i; 1401 1215 1402 - ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; 1403 1216 tlv_filter.offset_valid = false; 1404 1217 1405 1218 if (!reset) { 1406 - tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING; 1219 + tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING; 1220 + 1221 + tlv_filter.drop_threshold_valid = true; 1222 + tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE; 1223 + 1224 + tlv_filter.enable_log_mgmt_type = true; 1225 + tlv_filter.enable_log_ctrl_type = true; 1226 + tlv_filter.enable_log_data_type = true; 1227 + 1228 + tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH; 1229 + tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH; 1230 + 
tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH; 1231 + 1232 + tlv_filter.enable_rx_tlv_offset = true; 1233 + tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET; 1234 + 1407 1235 tlv_filter.pkt_filter_flags0 = 1408 1236 HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 | 1409 1237 HTT_RX_MON_MO_MGMT_FILTER_FLAGS0; ··· 1434 1236 } 1435 1237 1436 1238 if (ab->hw_params->rxdma1_enable) { 1437 - ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0, 1438 - HAL_RXDMA_MONITOR_BUF, 1439 - DP_RXDMA_REFILL_RING_SIZE, 1440 - &tlv_filter); 1441 - if (ret) { 1442 - ath12k_err(ab, 1443 - "failed to setup filter for monitor buf %d\n", ret); 1444 - return ret; 1239 + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 1240 + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; 1241 + ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 1242 + ar->dp.mac_id + i, 1243 + HAL_RXDMA_MONITOR_DST, 1244 + DP_RXDMA_REFILL_RING_SIZE, 1245 + &tlv_filter); 1246 + if (ret) { 1247 + ath12k_err(ab, 1248 + "failed to setup filter for monitor buf %d\n", 1249 + ret); 1250 + return ret; 1251 + } 1445 1252 } 1446 1253 } 1447 1254
+2 -2
drivers/net/wireless/ath/ath12k/dp_tx.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_DP_TX_H ··· 17 17 18 18 int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); 19 19 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, 20 - struct sk_buff *skb); 20 + struct sk_buff *skb, bool gsn_valid, int mcbc_gsn); 21 21 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id); 22 22 23 23 int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
+2 -3
drivers/net/wireless/ath/ath12k/hal_desc.h
··· 2968 2968 2969 2969 #define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0) 2970 2970 2971 - #define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0) 2972 - #define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16) 2973 - #define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17) 2971 + #define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0) 2972 + #define HAL_MON_DEST_INFO0_END_REASON GENMASK(17, 16) 2974 2973 #define HAL_MON_DEST_INFO0_INITIATOR BIT(18) 2975 2974 #define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19) 2976 2975 #define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)
+437 -5
drivers/net/wireless/ath/ath12k/hal_rx.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_HAL_RX_H ··· 21 21 22 22 #define HAL_INVALID_PEERID 0x3fff 23 23 #define VHT_SIG_SU_NSS_MASK 0x7 24 - 25 - #define HAL_RX_MAX_MCS 12 26 - #define HAL_RX_MAX_NSS 8 27 24 28 25 #define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \ 29 26 le32_get_bits((__val), GENMASK(7, 0)) ··· 68 71 HAL_RX_PREAMBLE_11N, 69 72 HAL_RX_PREAMBLE_11AC, 70 73 HAL_RX_PREAMBLE_11AX, 74 + HAL_RX_PREAMBLE_11BA, 75 + HAL_RX_PREAMBLE_11BE, 71 76 HAL_RX_PREAMBLE_MAX, 72 77 }; 73 78 ··· 107 108 HAL_RX_MON_STATUS_PPDU_NOT_DONE, 108 109 HAL_RX_MON_STATUS_PPDU_DONE, 109 110 HAL_RX_MON_STATUS_BUF_DONE, 111 + HAL_RX_MON_STATUS_BUF_ADDR, 112 + HAL_RX_MON_STATUS_MPDU_END, 113 + HAL_RX_MON_STATUS_MSDU_END, 110 114 }; 111 115 112 116 #define HAL_RX_MAX_MPDU 256 ··· 145 143 u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; 146 144 u32 mpdu_ok_byte_count; 147 145 u32 mpdu_err_byte_count; 146 + bool ampdu_present; 147 + u16 ampdu_id; 148 148 }; 149 149 150 150 #define HAL_MAX_UL_MU_USERS 37 151 + 152 + struct hal_rx_u_sig_info { 153 + bool ul_dl; 154 + u8 bw; 155 + u8 ppdu_type_comp_mode; 156 + u8 eht_sig_mcs; 157 + u8 num_eht_sig_sym; 158 + struct ieee80211_radiotap_eht_usig usig; 159 + }; 160 + 161 + #define HAL_RX_MON_MAX_AGGR_SIZE 128 162 + 163 + struct hal_rx_tlv_aggr_info { 164 + bool in_progress; 165 + u16 cur_len; 166 + u16 tlv_tag; 167 + u8 buf[HAL_RX_MON_MAX_AGGR_SIZE]; 168 + }; 169 + 170 + struct hal_rx_radiotap_eht { 171 + __le32 known; 172 + __le32 data[9]; 173 + }; 174 + 175 + #define EHT_MAX_USER_INFO 4 176 + 177 + struct hal_rx_eht_info { 178 + u8 num_user_info; 179 + struct hal_rx_radiotap_eht eht; 180 + u32 user_info[EHT_MAX_USER_INFO]; 181 + }; 151 182 152 
183 struct hal_rx_mon_ppdu_info { 153 184 u32 ppdu_id; ··· 262 227 u8 addr4[ETH_ALEN]; 263 228 struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS]; 264 229 u8 userid; 265 - u16 ampdu_id[HAL_MAX_UL_MU_USERS]; 266 230 bool first_msdu_in_mpdu; 267 231 bool is_ampdu; 268 232 u8 medium_prot_type; 233 + bool ppdu_continuation; 234 + bool eht_usig; 235 + struct hal_rx_u_sig_info u_sig_info; 236 + bool is_eht; 237 + struct hal_rx_eht_info eht_info; 238 + struct hal_rx_tlv_aggr_info tlv_aggr; 269 239 }; 270 240 271 241 #define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) ··· 681 641 #define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6) 682 642 #define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7) 683 643 644 + #define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16) 645 + 646 + struct hal_phyrx_common_user_info { 647 + __le32 rsvd[2]; 648 + __le32 info0; 649 + __le32 rsvd1; 650 + } __packed; 651 + 652 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE GENMASK(3, 0) 653 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF GENMASK(5, 4) 654 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM GENMASK(8, 6) 655 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS GENMASK(10, 7) 656 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED BIT(11) 657 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD GENMASK(13, 12) 658 + #define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC GENMASK(17, 14) 659 + 660 + struct hal_eht_sig_ndp_cmn_eb { 661 + __le32 info0; 662 + } __packed; 663 + 664 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE GENMASK(3, 0) 665 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF GENMASK(5, 4) 666 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM GENMASK(8, 6) 667 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM BIT(9) 668 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10) 669 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY BIT(12) 670 + #define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD GENMASK(16, 13) 671 + 672 + struct hal_eht_sig_usig_overflow { 673 + __le32 info0; 674 + } 
__packed; 675 + 676 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0) 677 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS GENMASK(14, 11) 678 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE BIT(15) 679 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS GENMASK(19, 16) 680 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED BIT(20) 681 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING BIT(21) 682 + #define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC GENMASK(25, 22) 683 + 684 + struct hal_eht_sig_non_mu_mimo { 685 + __le32 info0; 686 + } __packed; 687 + 688 + #define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0) 689 + #define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS GENMASK(14, 11) 690 + #define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING BIT(15) 691 + #define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING GENMASK(22, 16) 692 + #define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC GENMASK(26, 23) 693 + 694 + struct hal_eht_sig_mu_mimo { 695 + __le32 info0; 696 + } __packed; 697 + 698 + union hal_eht_sig_user_field { 699 + struct hal_eht_sig_mu_mimo mu_mimo; 700 + struct hal_eht_sig_non_mu_mimo n_mu_mimo; 701 + }; 702 + 703 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE GENMASK(3, 0) 704 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF GENMASK(5, 4) 705 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM GENMASK(8, 6) 706 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM BIT(9) 707 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10) 708 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY BIT(12) 709 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD GENMASK(16, 13) 710 + #define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS GENMASK(19, 17) 711 + 712 + struct hal_eht_sig_non_ofdma_cmn_eb { 713 + __le32 info0; 714 + union hal_eht_sig_user_field user_field; 715 + } __packed; 716 + 717 + #define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE GENMASK_ULL(3, 0) 718 + #define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF 
GENMASK_ULL(5, 4) 719 + #define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM GENMASK_ULL(8, 6) 720 + #define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM BIT(9) 721 + #define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR GENMASK_ULL(11, 10) 722 + #define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY BIT(12) 723 + #define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD GENMASK_ULL(16, 13) 724 + #define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1 GENMASK_ULL(25, 17) 725 + #define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2 GENMASK_ULL(34, 26) 726 + #define HAL_RX_EHT_SIG_OFDMA_EB1_CRC GENMASK_ULL(30, 27) 727 + 728 + struct hal_eht_sig_ofdma_cmn_eb1 { 729 + __le64 info0; 730 + } __packed; 731 + 732 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1 GENMASK_ULL(8, 0) 733 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2 GENMASK_ULL(17, 9) 734 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3 GENMASK_ULL(26, 18) 735 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4 GENMASK_ULL(35, 27) 736 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5 GENMASK_ULL(44, 36) 737 + #define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6 GENMASK_ULL(53, 45) 738 + #define HAL_RX_EHT_SIG_OFDMA_EB2_MCS GNEMASK_ULL(57, 54) 739 + 740 + struct hal_eht_sig_ofdma_cmn_eb2 { 741 + __le64 info0; 742 + } __packed; 743 + 744 + struct hal_eht_sig_ofdma_cmn_eb { 745 + struct hal_eht_sig_ofdma_cmn_eb1 eb1; 746 + struct hal_eht_sig_ofdma_cmn_eb2 eb2; 747 + union hal_eht_sig_user_field user_field; 748 + } __packed; 749 + 750 + enum hal_eht_bw { 751 + HAL_EHT_BW_20, 752 + HAL_EHT_BW_40, 753 + HAL_EHT_BW_80, 754 + HAL_EHT_BW_160, 755 + HAL_EHT_BW_320_1, 756 + HAL_EHT_BW_320_2, 757 + }; 758 + 759 + #define HAL_RX_USIG_CMN_INFO0_PHY_VERSION GENMASK(2, 0) 760 + #define HAL_RX_USIG_CMN_INFO0_BW GENMASK(5, 3) 761 + #define HAL_RX_USIG_CMN_INFO0_UL_DL BIT(6) 762 + #define HAL_RX_USIG_CMN_INFO0_BSS_COLOR GENMASK(12, 7) 763 + #define HAL_RX_USIG_CMN_INFO0_TXOP GENMASK(19, 13) 764 + #define HAL_RX_USIG_CMN_INFO0_DISREGARD GENMASK(25, 20) 765 + #define HAL_RX_USIG_CMN_INFO0_VALIDATE 
BIT(26) 766 + 767 + struct hal_mon_usig_cmn { 768 + __le32 info0; 769 + } __packed; 770 + 771 + #define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0) 772 + #define HAL_RX_USIG_TB_INFO0_VALIDATE BIT(2) 773 + #define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1 GENMASK(6, 3) 774 + #define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2 GENMASK(10, 7) 775 + #define HAL_RX_USIG_TB_INFO0_DISREGARD_1 GENMASK(15, 11) 776 + #define HAL_RX_USIG_TB_INFO0_CRC GENMASK(19, 16) 777 + #define HAL_RX_USIG_TB_INFO0_TAIL GENMASK(25, 20) 778 + #define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS BIT(31) 779 + 780 + struct hal_mon_usig_tb { 781 + __le32 info0; 782 + } __packed; 783 + 784 + #define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0) 785 + #define HAL_RX_USIG_MU_INFO0_VALIDATE_1 BIT(2) 786 + #define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO GENMASK(7, 3) 787 + #define HAL_RX_USIG_MU_INFO0_VALIDATE_2 BIT(8) 788 + #define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS GENMASK(10, 9) 789 + #define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM GENMASK(15, 11) 790 + #define HAL_RX_USIG_MU_INFO0_CRC GENMASK(20, 16) 791 + #define HAL_RX_USIG_MU_INFO0_TAIL GENMASK(26, 21) 792 + #define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS BIT(31) 793 + 794 + struct hal_mon_usig_mu { 795 + __le32 info0; 796 + } __packed; 797 + 798 + union hal_mon_usig_non_cmn { 799 + struct hal_mon_usig_tb tb; 800 + struct hal_mon_usig_mu mu; 801 + }; 802 + 803 + struct hal_mon_usig_hdr { 804 + struct hal_mon_usig_cmn cmn; 805 + union hal_mon_usig_non_cmn non_cmn; 806 + } __packed; 807 + 808 + #define HAL_RX_USR_INFO0_PHY_PPDU_ID GENMASK(15, 0) 809 + #define HAL_RX_USR_INFO0_USR_RSSI GENMASK(23, 16) 810 + #define HAL_RX_USR_INFO0_PKT_TYPE GENMASK(27, 24) 811 + #define HAL_RX_USR_INFO0_STBC BIT(28) 812 + #define HAL_RX_USR_INFO0_RECEPTION_TYPE GENMASK(31, 29) 813 + 814 + #define HAL_RX_USR_INFO1_MCS GENMASK(3, 0) 815 + #define HAL_RX_USR_INFO1_SGI GENMASK(5, 4) 816 + #define HAL_RX_USR_INFO1_HE_RANGING_NDP BIT(6) 817 + #define 
HAL_RX_USR_INFO1_MIMO_SS_BITMAP GENMASK(15, 8) 818 + #define HAL_RX_USR_INFO1_RX_BW GENMASK(18, 16) 819 + #define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX GENMASK(31, 24) 820 + 821 + #define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN BIT(0) 822 + #define HAL_RX_USR_INFO2_NSS GENMASK(10, 8) 823 + #define HAL_RX_USR_INFO2_STREAM_OFFSET GENMASK(13, 11) 824 + #define HAL_RX_USR_INFO2_STA_DCM BIT(14) 825 + #define HAL_RX_USR_INFO2_LDPC BIT(15) 826 + #define HAL_RX_USR_INFO2_RU_TYPE_80_0 GENMASK(19, 16) 827 + #define HAL_RX_USR_INFO2_RU_TYPE_80_1 GENMASK(23, 20) 828 + #define HAL_RX_USR_INFO2_RU_TYPE_80_2 GENMASK(27, 24) 829 + #define HAL_RX_USR_INFO2_RU_TYPE_80_3 GENMASK(31, 28) 830 + 831 + #define HAL_RX_USR_INFO3_RU_START_IDX_80_0 GENMASK(5, 0) 832 + #define HAL_RX_USR_INFO3_RU_START_IDX_80_1 GENMASK(13, 8) 833 + #define HAL_RX_USR_INFO3_RU_START_IDX_80_2 GENMASK(21, 16) 834 + #define HAL_RX_USR_INFO3_RU_START_IDX_80_3 GENMASK(29, 24) 835 + 836 + struct hal_receive_user_info { 837 + __le32 info0; 838 + __le32 info1; 839 + __le32 info2; 840 + __le32 info3; 841 + __le32 user_fd_rssi_seg0; 842 + __le32 user_fd_rssi_seg1; 843 + __le32 user_fd_rssi_seg2; 844 + __le32 user_fd_rssi_seg3; 845 + } __packed; 846 + 847 + enum hal_mon_reception_type { 848 + HAL_RECEPTION_TYPE_SU, 849 + HAL_RECEPTION_TYPE_DL_MU_MIMO, 850 + HAL_RECEPTION_TYPE_DL_MU_OFMA, 851 + HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO, 852 + HAL_RECEPTION_TYPE_UL_MU_MIMO, 853 + HAL_RECEPTION_TYPE_UL_MU_OFDMA, 854 + HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO, 855 + }; 856 + 857 + /* Different allowed RU in 11BE */ 858 + #define HAL_EHT_RU_26 0ULL 859 + #define HAL_EHT_RU_52 1ULL 860 + #define HAL_EHT_RU_78 2ULL 861 + #define HAL_EHT_RU_106 3ULL 862 + #define HAL_EHT_RU_132 4ULL 863 + #define HAL_EHT_RU_242 5ULL 864 + #define HAL_EHT_RU_484 6ULL 865 + #define HAL_EHT_RU_726 7ULL 866 + #define HAL_EHT_RU_996 8ULL 867 + #define HAL_EHT_RU_996x2 9ULL 868 + #define HAL_EHT_RU_996x3 10ULL 869 + #define HAL_EHT_RU_996x4 11ULL 870 + #define 
HAL_EHT_RU_NONE 15ULL 871 + #define HAL_EHT_RU_INVALID 31ULL 872 + /* MRUs spanning above 80Mhz 873 + * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved) 874 + */ 875 + #define HAL_EHT_RU_996_484 18ULL 876 + #define HAL_EHT_RU_996x2_484 28ULL 877 + #define HAL_EHT_RU_996x3_484 40ULL 878 + #define HAL_EHT_RU_996_484_242 23ULL 879 + 880 + #define NUM_RU_BITS_PER80 16 881 + #define NUM_RU_BITS_PER20 4 882 + 883 + /* Different per_80Mhz band in 320Mhz bandwidth */ 884 + #define HAL_80_0 0 885 + #define HAL_80_1 1 886 + #define HAL_80_2 2 887 + #define HAL_80_3 3 888 + 889 + #define HAL_RU_80MHZ(num_band) ((num_band) * NUM_RU_BITS_PER80) 890 + #define HAL_RU_20MHZ(idx_per_80) ((idx_per_80) * NUM_RU_BITS_PER20) 891 + 892 + #define HAL_RU_SHIFT(num_band, idx_per_80) \ 893 + (HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80)) 894 + 895 + #define HAL_RU(ru, num_band, idx_per_80) \ 896 + ((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80)) 897 + 898 + /* MRU-996+484 */ 899 + #define HAL_EHT_RU_996_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ 900 + HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0)) 901 + #define HAL_EHT_RU_996_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ 902 + HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0)) 903 + #define HAL_EHT_RU_996_484_2 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \ 904 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1)) 905 + #define HAL_EHT_RU_996_484_3 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \ 906 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0)) 907 + #define HAL_EHT_RU_996_484_4 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ 908 + HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0)) 909 + #define HAL_EHT_RU_996_484_5 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ 910 + HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0)) 911 + #define HAL_EHT_RU_996_484_6 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \ 912 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) 913 + #define HAL_EHT_RU_996_484_7 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \ 914 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) 915 + 916 + /* MRU-996x2+484 */ 917 + #define 
HAL_EHT_RU_996x2_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ 918 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 919 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) 920 + #define HAL_EHT_RU_996x2_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ 921 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 922 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) 923 + #define HAL_EHT_RU_996x2_484_2 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ 924 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ 925 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) 926 + #define HAL_EHT_RU_996x2_484_3 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ 927 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ 928 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) 929 + #define HAL_EHT_RU_996x2_484_4 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ 930 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 931 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1)) 932 + #define HAL_EHT_RU_996x2_484_5 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ 933 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 934 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0)) 935 + #define HAL_EHT_RU_996x2_484_6 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ 936 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ 937 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) 938 + #define HAL_EHT_RU_996x2_484_7 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ 939 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ 940 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) 941 + #define HAL_EHT_RU_996x2_484_8 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 942 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ 943 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) 944 + #define HAL_EHT_RU_996x2_484_9 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 945 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ 946 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) 947 + #define HAL_EHT_RU_996x2_484_10 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 948 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ 949 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) 950 + #define HAL_EHT_RU_996x2_484_11 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ 951 + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ 
952 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) 953 + 954 + /* MRU-996x3+484 */ 955 + #define HAL_EHT_RU_996x3_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ 956 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 957 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 958 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 959 + #define HAL_EHT_RU_996x3_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ 960 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 961 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 962 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 963 + #define HAL_EHT_RU_996x3_484_2 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 964 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ 965 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 966 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 967 + #define HAL_EHT_RU_996x3_484_3 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 968 + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ 969 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 970 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 971 + #define HAL_EHT_RU_996x3_484_4 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 972 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 973 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ 974 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 975 + #define HAL_EHT_RU_996x3_484_5 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 976 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 977 + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ 978 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) 979 + #define HAL_EHT_RU_996x3_484_6 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 980 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 981 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 982 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) 983 + #define HAL_EHT_RU_996x3_484_7 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ 984 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ 985 + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ 986 + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) 987 + 988 + #define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \ 989 + (HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz)) 990 + 991 + #define RU_INVALID 0 992 + #define 
RU_26 1 993 + #define RU_52 2 994 + #define RU_106 4 995 + #define RU_242 9 996 + #define RU_484 18 997 + #define RU_996 37 998 + #define RU_2X996 74 999 + #define RU_3X996 111 1000 + #define RU_4X996 148 1001 + #define RU_52_26 (RU_52 + RU_26) 1002 + #define RU_106_26 (RU_106 + RU_26) 1003 + #define RU_484_242 (RU_484 + RU_242) 1004 + #define RU_996_484 (RU_996 + RU_484) 1005 + #define RU_996_484_242 (RU_996 + RU_484_242) 1006 + #define RU_2X996_484 (RU_2X996 + RU_484) 1007 + #define RU_3X996_484 (RU_3X996 + RU_484) 1008 + 1009 + enum ath12k_eht_ru_size { 1010 + ATH12K_EHT_RU_26, 1011 + ATH12K_EHT_RU_52, 1012 + ATH12K_EHT_RU_106, 1013 + ATH12K_EHT_RU_242, 1014 + ATH12K_EHT_RU_484, 1015 + ATH12K_EHT_RU_996, 1016 + ATH12K_EHT_RU_996x2, 1017 + ATH12K_EHT_RU_996x4, 1018 + ATH12K_EHT_RU_52_26, 1019 + ATH12K_EHT_RU_106_26, 1020 + ATH12K_EHT_RU_484_242, 1021 + ATH12K_EHT_RU_996_484, 1022 + ATH12K_EHT_RU_996_484_242, 1023 + ATH12K_EHT_RU_996x2_484, 1024 + ATH12K_EHT_RU_996x3, 1025 + ATH12K_EHT_RU_996x3_484, 1026 + 1027 + /* Keep last */ 1028 + ATH12K_EHT_RU_INVALID, 1029 + }; 1030 + 1031 + #define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID 1032 + 684 1033 static inline 685 1034 enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) 686 1035 { ··· 1090 661 break; 1091 662 case RU_996: 1092 663 ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; 664 + break; 665 + case RU_2X996: 666 + ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; 1093 667 break; 1094 668 case RU_26: 1095 669 fallthrough;
+8 -2
drivers/net/wireless/ath/ath12k/hal_tx.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. 5 + * All rights reserved. 5 6 */ 6 7 7 8 #ifndef ATH12K_HAL_TX_H ··· 64 63 u8 try_cnt; 65 64 u8 tid; 66 65 u16 peer_id; 67 - u32 rate_stats; 66 + enum hal_tx_rate_stats_pkt_type pkt_type; 67 + enum hal_tx_rate_stats_sgi sgi; 68 + enum ath12k_supported_bw bw; 69 + u8 mcs; 70 + u16 tones; 71 + u8 ofdma; 68 72 }; 69 73 70 74 #define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
+6 -2
drivers/net/wireless/ath/ath12k/hw.c
··· 543 543 ATH12K_TX_RING_MASK_3, 544 544 }, 545 545 .rx_mon_dest = { 546 - 0, 0, 0, 546 + 0, 0, 0, 0, 547 + 0, 0, 0, 0, 548 + ATH12K_RX_MON_RING_MASK_0, 549 + ATH12K_RX_MON_RING_MASK_1, 550 + ATH12K_RX_MON_RING_MASK_2, 547 551 }, 548 552 .rx = { 549 553 0, 0, 0, 0, ··· 1039 1035 1040 1036 .hal_params = &ath12k_hw_hal_params_qcn9274, 1041 1037 1042 - .rxdma1_enable = false, 1038 + .rxdma1_enable = true, 1043 1039 .num_rxdma_per_pdev = 1, 1044 1040 .num_rxdma_dst_ring = 0, 1045 1041 .rx_mac_buf_ring = false,
+599 -271
drivers/net/wireless/ath/ath12k/mac.c
··· 15 15 #include "hw.h" 16 16 #include "dp_tx.h" 17 17 #include "dp_rx.h" 18 + #include "testmode.h" 18 19 #include "peer.h" 19 20 #include "debugfs.h" 20 21 #include "hif.h" 21 22 #include "wow.h" 23 + #include "debugfs_sta.h" 22 24 23 25 #define CHAN2G(_channel, _freq, _flags) { \ 24 26 .band = NL80211_BAND_2GHZ, \ ··· 339 337 return "<unknown>"; 340 338 } 341 339 340 + u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones) 341 + { 342 + switch (tones) { 343 + case 26: 344 + return RU_26; 345 + case 52: 346 + return RU_52; 347 + case 106: 348 + return RU_106; 349 + case 242: 350 + return RU_242; 351 + case 484: 352 + return RU_484; 353 + case 996: 354 + return RU_996; 355 + case (996 * 2): 356 + return RU_2X996; 357 + default: 358 + return RU_26; 359 + } 360 + } 361 + 362 + enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi) 363 + { 364 + switch (sgi) { 365 + case RX_MSDU_START_SGI_0_8_US: 366 + return NL80211_RATE_INFO_EHT_GI_0_8; 367 + case RX_MSDU_START_SGI_1_6_US: 368 + return NL80211_RATE_INFO_EHT_GI_1_6; 369 + case RX_MSDU_START_SGI_3_2_US: 370 + return NL80211_RATE_INFO_EHT_GI_3_2; 371 + default: 372 + return NL80211_RATE_INFO_EHT_GI_0_8; 373 + } 374 + } 375 + 376 + enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones) 377 + { 378 + switch (ru_tones) { 379 + case 26: 380 + return NL80211_RATE_INFO_EHT_RU_ALLOC_26; 381 + case 52: 382 + return NL80211_RATE_INFO_EHT_RU_ALLOC_52; 383 + case (52 + 26): 384 + return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26; 385 + case 106: 386 + return NL80211_RATE_INFO_EHT_RU_ALLOC_106; 387 + case (106 + 26): 388 + return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26; 389 + case 242: 390 + return NL80211_RATE_INFO_EHT_RU_ALLOC_242; 391 + case 484: 392 + return NL80211_RATE_INFO_EHT_RU_ALLOC_484; 393 + case (484 + 242): 394 + return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242; 395 + case 996: 396 + return NL80211_RATE_INFO_EHT_RU_ALLOC_996; 397 + case (996 + 484): 398 + return 
NL80211_RATE_INFO_EHT_RU_ALLOC_996P484; 399 + case (996 + 484 + 242): 400 + return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242; 401 + case (2 * 996): 402 + return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996; 403 + case (2 * 996 + 484): 404 + return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484; 405 + case (3 * 996): 406 + return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996; 407 + case (3 * 996 + 484): 408 + return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484; 409 + case (4 * 996): 410 + return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996; 411 + default: 412 + return NL80211_RATE_INFO_EHT_RU_ALLOC_26; 413 + } 414 + } 415 + 342 416 enum rate_info_bw 343 417 ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw) 344 418 { ··· 580 502 return 0; 581 503 } 582 504 583 - static struct ieee80211_bss_conf * 505 + static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif) 506 + { 507 + struct ath12k_vif *tx_ahvif; 508 + 509 + if (arvif->ahvif->vif->mbssid_tx_vif) { 510 + tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif); 511 + if (tx_ahvif) 512 + return &tx_ahvif->deflink; 513 + } 514 + 515 + return NULL; 516 + } 517 + 518 + struct ieee80211_bss_conf * 584 519 ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif) 585 520 { 586 521 struct ieee80211_vif *vif = arvif->ahvif->vif; ··· 766 675 return NULL; 767 676 768 677 for (i = 0; i < ab->num_radios; i++) { 769 - pdev = rcu_dereference(ab->pdevs_active[i]); 678 + if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM) 679 + pdev = &ab->pdevs[i]; 680 + else 681 + pdev = rcu_dereference(ab->pdevs_active[i]); 770 682 771 683 if (pdev && pdev->pdev_id == pdev_id) 772 684 return (pdev->ar ? 
pdev->ar : NULL); ··· 819 725 return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan); 820 726 } 821 727 822 - static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, 823 - struct ieee80211_vif *vif, 824 - u8 link_id) 728 + struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, 729 + struct ieee80211_vif *vif, 730 + u8 link_id) 825 731 { 826 732 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 827 733 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); ··· 1643 1549 } 1644 1550 } 1645 1551 1646 - static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) 1552 + static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif, 1553 + struct ath12k_link_vif *tx_arvif, 1554 + u8 bssid_index) 1647 1555 { 1648 - struct ath12k_vif *ahvif = arvif->ahvif; 1649 - struct ieee80211_bss_conf *bss_conf; 1650 1556 struct ath12k_wmi_bcn_tmpl_ema_arg ema_args; 1651 1557 struct ieee80211_ema_beacons *beacons; 1652 - struct ath12k_link_vif *tx_arvif; 1653 1558 bool nontx_profile_found = false; 1654 - struct ath12k_vif *tx_ahvif; 1655 1559 int ret = 0; 1656 1560 u8 i; 1657 1561 1658 - bss_conf = ath12k_mac_get_link_bss_conf(arvif); 1659 - if (!bss_conf) { 1660 - ath12k_warn(arvif->ar->ab, 1661 - "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n", 1662 - ahvif->vif->addr, arvif->link_id); 1663 - return -ENOLINK; 1664 - } 1665 - 1666 - tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); 1667 - tx_arvif = &tx_ahvif->deflink; 1668 1562 beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar), 1669 - tx_ahvif->vif, 1563 + tx_arvif->ahvif->vif, 1670 1564 tx_arvif->link_id); 1671 1565 if (!beacons || !beacons->cnt) { 1672 1566 ath12k_warn(arvif->ar->ab, ··· 1668 1586 for (i = 0; i < beacons->cnt; i++) { 1669 1587 if (tx_arvif != arvif && !nontx_profile_found) 1670 1588 ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb, 1671 - bss_conf->bssid_index, 1589 + bssid_index, 1672 1590 &nontx_profile_found); 1673 1591 
1674 1592 ema_args.bcn_cnt = beacons->cnt; 1675 1593 ema_args.bcn_index = i; 1676 - ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id, 1677 - &beacons->bcn[i].offs, 1594 + ret = ath12k_wmi_bcn_tmpl(tx_arvif, &beacons->bcn[i].offs, 1678 1595 beacons->bcn[i].skb, &ema_args); 1679 1596 if (ret) { 1680 1597 ath12k_warn(tx_arvif->ar->ab, ··· 1686 1605 if (tx_arvif != arvif && !nontx_profile_found) 1687 1606 ath12k_warn(arvif->ar->ab, 1688 1607 "nontransmitted bssid index %u not found in beacon template\n", 1689 - bss_conf->bssid_index); 1608 + bssid_index); 1690 1609 1691 1610 ieee80211_beacon_free_ema_list(beacons); 1692 1611 return ret; ··· 1697 1616 struct ath12k_vif *ahvif = arvif->ahvif; 1698 1617 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 1699 1618 struct ieee80211_bss_conf *link_conf; 1700 - struct ath12k_link_vif *tx_arvif = arvif; 1619 + struct ath12k_link_vif *tx_arvif; 1701 1620 struct ath12k *ar = arvif->ar; 1702 1621 struct ath12k_base *ab = ar->ab; 1703 1622 struct ieee80211_mutable_offsets offs = {}; 1704 - struct ath12k_vif *tx_ahvif = ahvif; 1705 1623 bool nontx_profile_found = false; 1706 1624 struct sk_buff *bcn; 1707 1625 int ret; ··· 1715 1635 return -ENOLINK; 1716 1636 } 1717 1637 1718 - if (vif->mbssid_tx_vif) { 1719 - tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif); 1720 - tx_arvif = &tx_ahvif->deflink; 1638 + tx_arvif = ath12k_mac_get_tx_arvif(arvif); 1639 + if (tx_arvif) { 1721 1640 if (tx_arvif != arvif && arvif->is_up) 1722 1641 return 0; 1723 1642 1724 1643 if (link_conf->ema_ap) 1725 - return ath12k_mac_setup_bcn_tmpl_ema(arvif); 1644 + return ath12k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif, 1645 + link_conf->bssid_index); 1646 + } else { 1647 + tx_arvif = arvif; 1726 1648 } 1727 1649 1728 - bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif, 1650 + bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), 1651 + tx_arvif->ahvif->vif, 1729 1652 &offs, tx_arvif->link_id); 1730 1653 
if (!bcn) { 1731 1654 ath12k_warn(ab, "failed to get beacon template from mac80211\n"); ··· 1769 1686 } 1770 1687 } 1771 1688 1772 - ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL); 1689 + ret = ath12k_wmi_bcn_tmpl(arvif, &offs, bcn, NULL); 1773 1690 1774 1691 if (ret) 1775 1692 ath12k_warn(ab, "failed to submit beacon template command: %d\n", ··· 1785 1702 { 1786 1703 struct ath12k_wmi_vdev_up_params params = {}; 1787 1704 struct ath12k_vif *ahvif = arvif->ahvif; 1705 + struct ath12k_link_vif *tx_arvif; 1788 1706 struct ath12k *ar = arvif->ar; 1789 1707 int ret; 1790 1708 ··· 1816 1732 params.vdev_id = arvif->vdev_id; 1817 1733 params.aid = ahvif->aid; 1818 1734 params.bssid = arvif->bssid; 1819 - if (ahvif->vif->mbssid_tx_vif) { 1820 - struct ath12k_vif *tx_ahvif = 1821 - ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); 1822 - struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; 1823 1735 1736 + tx_arvif = ath12k_mac_get_tx_arvif(arvif); 1737 + if (tx_arvif) { 1824 1738 params.tx_bssid = tx_arvif->bssid; 1825 1739 params.nontx_profile_idx = info->bssid_index; 1826 1740 params.nontx_profile_cnt = 1 << info->bssid_indicator; ··· 3198 3116 ath12k_peer_assoc_h_smps(arsta, arg); 3199 3117 ath12k_peer_assoc_h_mlo(arsta, arg); 3200 3118 3119 + arsta->peer_nss = arg->peer_nss; 3201 3120 /* TODO: amsdu_disable req? 
*/ 3202 3121 } 3203 3122 ··· 3219 3136 return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, 3220 3137 WMI_PEER_MIMO_PS_STATE, 3221 3138 ath12k_smps_map[smps]); 3139 + } 3140 + 3141 + static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, 3142 + struct ieee80211_link_sta *link_sta) 3143 + { 3144 + u32 bw; 3145 + 3146 + switch (link_sta->bandwidth) { 3147 + case IEEE80211_STA_RX_BW_20: 3148 + bw = WMI_PEER_CHWIDTH_20MHZ; 3149 + break; 3150 + case IEEE80211_STA_RX_BW_40: 3151 + bw = WMI_PEER_CHWIDTH_40MHZ; 3152 + break; 3153 + case IEEE80211_STA_RX_BW_80: 3154 + bw = WMI_PEER_CHWIDTH_80MHZ; 3155 + break; 3156 + case IEEE80211_STA_RX_BW_160: 3157 + bw = WMI_PEER_CHWIDTH_160MHZ; 3158 + break; 3159 + case IEEE80211_STA_RX_BW_320: 3160 + bw = WMI_PEER_CHWIDTH_320MHZ; 3161 + break; 3162 + default: 3163 + ath12k_warn(ar->ab, "Invalid bandwidth %d for link station %pM\n", 3164 + link_sta->bandwidth, link_sta->addr); 3165 + bw = WMI_PEER_CHWIDTH_20MHZ; 3166 + break; 3167 + } 3168 + 3169 + return bw; 3222 3170 } 3223 3171 3224 3172 static void ath12k_bss_assoc(struct ath12k *ar, ··· 3472 3358 ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); 3473 3359 } 3474 3360 3361 + static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif, 3362 + struct ath12k_link_vif *arvif, int link_id) 3363 + { 3364 + struct ath12k_hw *ah = ahvif->ah; 3365 + u8 _link_id; 3366 + int i; 3367 + 3368 + lockdep_assert_wiphy(ah->hw->wiphy); 3369 + 3370 + if (WARN_ON(!arvif)) 3371 + return; 3372 + 3373 + if (WARN_ON(link_id >= ATH12K_NUM_MAX_LINKS)) 3374 + return; 3375 + 3376 + if (link_id < 0) 3377 + _link_id = 0; 3378 + else 3379 + _link_id = link_id; 3380 + 3381 + arvif->ahvif = ahvif; 3382 + arvif->link_id = _link_id; 3383 + 3384 + INIT_LIST_HEAD(&arvif->list); 3385 + INIT_DELAYED_WORK(&arvif->connection_loss_work, 3386 + ath12k_mac_vif_sta_connection_loss_work); 3387 + 3388 + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 3389 + 
arvif->bitrate_mask.control[i].legacy = 0xffffffff; 3390 + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 3391 + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 3392 + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 3393 + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 3394 + } 3395 + 3396 + /* Handle MLO related assignments */ 3397 + if (link_id >= 0) { 3398 + rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); 3399 + ahvif->links_map |= BIT(_link_id); 3400 + } 3401 + 3402 + ath12k_generic_dbg(ATH12K_DBG_MAC, 3403 + "mac init link arvif (link_id %d%s) for vif %pM. links_map 0x%x", 3404 + _link_id, (link_id < 0) ? " deflink" : "", ahvif->vif->addr, 3405 + ahvif->links_map); 3406 + } 3407 + 3408 + static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, 3409 + struct ath12k_link_vif *arvif) 3410 + { 3411 + struct ath12k_vif *ahvif = arvif->ahvif; 3412 + struct ath12k_hw *ah = hw->priv; 3413 + struct ath12k *ar = arvif->ar; 3414 + int ret; 3415 + 3416 + lockdep_assert_wiphy(ah->hw->wiphy); 3417 + 3418 + cancel_delayed_work_sync(&arvif->connection_loss_work); 3419 + 3420 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", 3421 + arvif->vdev_id, arvif->link_id); 3422 + 3423 + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { 3424 + ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); 3425 + if (ret) 3426 + ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d", 3427 + arvif->vdev_id, arvif->link_id, ret); 3428 + } 3429 + ath12k_mac_vdev_delete(ar, arvif); 3430 + } 3431 + 3432 + static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, 3433 + struct ieee80211_vif *vif, 3434 + u8 link_id) 3435 + { 3436 + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 3437 + struct ath12k_link_vif *arvif; 3438 + 3439 + lockdep_assert_wiphy(ah->hw->wiphy); 3440 + 3441 + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 3442 + if (arvif) 3443 + return 
arvif; 3444 + 3445 + if (!vif->valid_links) { 3446 + /* Use deflink for Non-ML VIFs and mark the link id as 0 3447 + */ 3448 + link_id = 0; 3449 + arvif = &ahvif->deflink; 3450 + } else { 3451 + /* If this is the first link arvif being created for an ML VIF 3452 + * use the preallocated deflink memory except for scan arvifs 3453 + */ 3454 + if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { 3455 + arvif = &ahvif->deflink; 3456 + } else { 3457 + arvif = (struct ath12k_link_vif *) 3458 + kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); 3459 + if (!arvif) 3460 + return NULL; 3461 + } 3462 + } 3463 + 3464 + ath12k_mac_init_arvif(ahvif, arvif, link_id); 3465 + 3466 + return arvif; 3467 + } 3468 + 3469 + static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) 3470 + { 3471 + struct ath12k_vif *ahvif = arvif->ahvif; 3472 + struct ath12k_hw *ah = ahvif->ah; 3473 + 3474 + lockdep_assert_wiphy(ah->hw->wiphy); 3475 + 3476 + rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); 3477 + synchronize_rcu(); 3478 + ahvif->links_map &= ~BIT(arvif->link_id); 3479 + 3480 + if (arvif != &ahvif->deflink) 3481 + kfree(arvif); 3482 + else 3483 + memset(arvif, 0, sizeof(*arvif)); 3484 + } 3485 + 3475 3486 static int 3476 3487 ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw, 3477 3488 struct ieee80211_vif *vif, 3478 3489 u16 old_links, u16 new_links, 3479 3490 struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS]) 3480 3491 { 3492 + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 3493 + unsigned long to_remove = old_links & ~new_links; 3494 + unsigned long to_add = ~old_links & new_links; 3495 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3496 + struct ath12k_link_vif *arvif; 3497 + u8 link_id; 3498 + 3499 + lockdep_assert_wiphy(hw->wiphy); 3500 + 3501 + ath12k_generic_dbg(ATH12K_DBG_MAC, 3502 + "mac vif link changed for MLD %pM old_links 0x%x new_links 0x%x\n", 3503 + vif->addr, old_links, new_links); 3504 + 3505 + 
for_each_set_bit(link_id, &to_add, IEEE80211_MLD_MAX_NUM_LINKS) { 3506 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 3507 + /* mac80211 wants to add link but driver already has the 3508 + * link. This should not happen ideally. 3509 + */ 3510 + if (WARN_ON(arvif)) 3511 + return -EINVAL; 3512 + 3513 + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); 3514 + if (WARN_ON(!arvif)) 3515 + return -EINVAL; 3516 + } 3517 + 3518 + for_each_set_bit(link_id, &to_remove, IEEE80211_MLD_MAX_NUM_LINKS) { 3519 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 3520 + if (WARN_ON(!arvif)) 3521 + return -EINVAL; 3522 + 3523 + if (!arvif->is_created) 3524 + continue; 3525 + 3526 + if (WARN_ON(!arvif->ar)) 3527 + return -EINVAL; 3528 + 3529 + ath12k_mac_remove_link_interface(hw, arvif); 3530 + ath12k_mac_unassign_link_vif(arvif); 3531 + } 3532 + 3481 3533 return 0; 3482 3534 } 3483 3535 ··· 4140 3860 ar = arvif->ar; 4141 3861 4142 3862 ath12k_mac_bss_info_changed(ar, arvif, info, changed); 4143 - } 4144 - 4145 - static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, 4146 - struct ieee80211_vif *vif, 4147 - u8 link_id) 4148 - { 4149 - struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 4150 - struct ath12k_link_vif *arvif; 4151 - int i; 4152 - 4153 - lockdep_assert_wiphy(ah->hw->wiphy); 4154 - 4155 - arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 4156 - if (arvif) 4157 - return arvif; 4158 - 4159 - if (!vif->valid_links) { 4160 - /* Use deflink for Non-ML VIFs and mark the link id as 0 4161 - */ 4162 - link_id = 0; 4163 - arvif = &ahvif->deflink; 4164 - } else { 4165 - /* If this is the first link arvif being created for an ML VIF 4166 - * use the preallocated deflink memory except for scan arvifs 4167 - */ 4168 - if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { 4169 - arvif = &ahvif->deflink; 4170 - } else { 4171 - arvif = (struct ath12k_link_vif *) 4172 - kzalloc(sizeof(struct ath12k_link_vif), 
GFP_KERNEL); 4173 - if (!arvif) 4174 - return NULL; 4175 - } 4176 - } 4177 - 4178 - arvif->ahvif = ahvif; 4179 - arvif->link_id = link_id; 4180 - ahvif->links_map |= BIT(link_id); 4181 - 4182 - INIT_LIST_HEAD(&arvif->list); 4183 - INIT_DELAYED_WORK(&arvif->connection_loss_work, 4184 - ath12k_mac_vif_sta_connection_loss_work); 4185 - 4186 - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4187 - arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4188 - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4189 - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4190 - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4191 - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4192 - } 4193 - 4194 - /* Allocate Default Queue now and reassign during actual vdev create */ 4195 - vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; 4196 - for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 4197 - vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; 4198 - 4199 - vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4200 - 4201 - rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); 4202 - ahvif->links_map |= BIT(link_id); 4203 - synchronize_rcu(); 4204 - return arvif; 4205 - } 4206 - 4207 - static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) 4208 - { 4209 - struct ath12k_vif *ahvif = arvif->ahvif; 4210 - struct ath12k_hw *ah = ahvif->ah; 4211 - 4212 - lockdep_assert_wiphy(ah->hw->wiphy); 4213 - 4214 - rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); 4215 - synchronize_rcu(); 4216 - ahvif->links_map &= ~BIT(arvif->link_id); 4217 - 4218 - if (arvif != &ahvif->deflink) 4219 - kfree(arvif); 4220 - else 4221 - memset(arvif, 0, sizeof(*arvif)); 4222 - } 4223 - 4224 - static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, 4225 - struct ath12k_link_vif *arvif) 4226 - { 4227 - struct ath12k_vif *ahvif = arvif->ahvif; 4228 - struct ath12k_hw *ah = hw->priv; 4229 - struct ath12k *ar = arvif->ar; 4230 - int ret; 4231 - 4232 - 
lockdep_assert_wiphy(ah->hw->wiphy); 4233 - 4234 - cancel_delayed_work_sync(&arvif->connection_loss_work); 4235 - 4236 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", 4237 - arvif->vdev_id, arvif->link_id); 4238 - 4239 - if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { 4240 - ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); 4241 - if (ret) 4242 - ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d", 4243 - arvif->vdev_id, arvif->link_id, ret); 4244 - } 4245 - ath12k_mac_vdev_delete(ar, arvif); 4246 3863 } 4247 3864 4248 3865 static struct ath12k* ··· 4711 4534 struct ath12k_link_sta *arsta, 4712 4535 struct ieee80211_key_conf *key) 4713 4536 { 4714 - struct ath12k_vif *ahvif = arvif->ahvif; 4715 - struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 4716 - struct ieee80211_bss_conf *link_conf; 4717 4537 struct ieee80211_sta *sta = NULL; 4718 4538 struct ath12k_base *ab = ar->ab; 4719 4539 struct ath12k_peer *peer; ··· 4727 4553 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) 4728 4554 return 1; 4729 4555 4730 - link_conf = ath12k_mac_get_link_bss_conf(arvif); 4731 - if (!link_conf) { 4732 - ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n", 4733 - vif->addr, arvif->link_id); 4734 - return -ENOLINK; 4735 - } 4736 - 4737 4556 if (sta) 4738 4557 peer_addr = arsta->addr; 4739 - else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) 4740 - peer_addr = link_conf->bssid; 4741 4558 else 4742 - peer_addr = link_conf->addr; 4559 + peer_addr = arvif->bssid; 4743 4560 4744 4561 key->hw_key_idx = key->keyidx; 4745 4562 ··· 5073 4908 ath12k_warn(ar->ab, "unable to access link sta in station assoc\n"); 5074 4909 return -EINVAL; 5075 4910 } 4911 + 4912 + spin_lock_bh(&ar->data_lock); 4913 + arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta); 4914 + arsta->bw_prev = link_sta->bandwidth; 4915 + spin_unlock_bh(&ar->data_lock); 5076 4916 5077 4917 if 
(link_sta->vht_cap.vht_supported && num_vht_rates == 1) { 5078 4918 ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, ··· 5573 5403 } 5574 5404 } 5575 5405 5406 + ewma_avg_rssi_init(&arsta->avg_rssi); 5576 5407 return 0; 5577 5408 5578 5409 free_peer: ··· 5584 5413 ath12k_mac_dec_num_stations(arvif, arsta); 5585 5414 exit: 5586 5415 return ret; 5587 - } 5588 - 5589 - static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, 5590 - struct ieee80211_sta *sta) 5591 - { 5592 - u32 bw = WMI_PEER_CHWIDTH_20MHZ; 5593 - 5594 - switch (sta->deflink.bandwidth) { 5595 - case IEEE80211_STA_RX_BW_20: 5596 - bw = WMI_PEER_CHWIDTH_20MHZ; 5597 - break; 5598 - case IEEE80211_STA_RX_BW_40: 5599 - bw = WMI_PEER_CHWIDTH_40MHZ; 5600 - break; 5601 - case IEEE80211_STA_RX_BW_80: 5602 - bw = WMI_PEER_CHWIDTH_80MHZ; 5603 - break; 5604 - case IEEE80211_STA_RX_BW_160: 5605 - bw = WMI_PEER_CHWIDTH_160MHZ; 5606 - break; 5607 - case IEEE80211_STA_RX_BW_320: 5608 - bw = WMI_PEER_CHWIDTH_320MHZ; 5609 - break; 5610 - default: 5611 - ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", 5612 - sta->deflink.bandwidth, sta->addr); 5613 - bw = WMI_PEER_CHWIDTH_20MHZ; 5614 - break; 5615 - } 5616 - 5617 - return bw; 5618 5416 } 5619 5417 5620 5418 static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah, ··· 5669 5529 enum ieee80211_sta_state new_state) 5670 5530 { 5671 5531 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 5672 - struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 5673 5532 struct ath12k *ar = arvif->ar; 5674 5533 int ret = 0; 5675 5534 ··· 5710 5571 if (ret) 5711 5572 ath12k_warn(ar->ab, "Failed to associate station: %pM\n", 5712 5573 arsta->addr); 5713 - 5714 - spin_lock_bh(&ar->data_lock); 5715 - 5716 - arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 5717 - arsta->bw_prev = sta->deflink.bandwidth; 5718 - 5719 - spin_unlock_bh(&ar->data_lock); 5720 5574 5721 5575 /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set 
peer status as 5722 5576 * authorized ··· 5978 5846 spin_lock_bh(&ar->data_lock); 5979 5847 5980 5848 if (changed & IEEE80211_RC_BW_CHANGED) { 5981 - bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 5849 + bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta); 5982 5850 arsta->bw_prev = arsta->bw; 5983 5851 arsta->bw = bw; 5984 5852 } ··· 6806 6674 6807 6675 memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); 6808 6676 6809 - if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) 6677 + if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)) || 6678 + ath12k_acpi_get_disable_11be(ar->ab)) 6810 6679 return; 6811 6680 6812 6681 eht_cap->has_eht = true; ··· 7204 7071 } 7205 7072 7206 7073 /* Note: called under rcu_read_lock() */ 7074 + static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif, 7075 + u8 link_id, struct sk_buff *skb, 7076 + u32 info_flags) 7077 + { 7078 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 7079 + struct ieee80211_bss_conf *bss_conf; 7080 + 7081 + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) 7082 + return; 7083 + 7084 + bss_conf = rcu_dereference(vif->link_conf[link_id]); 7085 + if (bss_conf) 7086 + ether_addr_copy(hdr->addr2, bss_conf->addr); 7087 + } 7088 + 7089 + /* Note: called under rcu_read_lock() */ 7207 7090 static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif, 7208 7091 u8 link, struct sk_buff *skb, u32 info_flags) 7209 7092 { ··· 7330 7181 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 7331 7182 struct ieee80211_key_conf *key = info->control.hw_key; 7332 7183 struct ieee80211_sta *sta = control->sta; 7184 + struct ath12k_link_vif *tmp_arvif; 7333 7185 u32 info_flags = info->flags; 7334 - struct ath12k *ar; 7186 + struct sk_buff *msdu_copied; 7187 + struct ath12k *ar, *tmp_ar; 7188 + struct ath12k_peer *peer; 7189 + unsigned long links_map; 7190 + bool is_mcast = false; 7191 + struct ethhdr *eth; 7335 7192 bool is_prb_rsp; 
7193 + u16 mcbc_gsn; 7336 7194 u8 link_id; 7337 7195 int ret; 7338 7196 ··· 7376 7220 is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); 7377 7221 7378 7222 if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { 7223 + eth = (struct ethhdr *)skb->data; 7224 + is_mcast = is_multicast_ether_addr(eth->h_dest); 7225 + 7379 7226 skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP; 7380 7227 } else if (ieee80211_is_mgmt(hdr->frame_control)) { 7381 7228 ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp); ··· 7390 7231 return; 7391 7232 } 7392 7233 7234 + if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) 7235 + is_mcast = is_multicast_ether_addr(hdr->addr1); 7236 + 7393 7237 /* This is case only for P2P_GO */ 7394 7238 if (vif->type == NL80211_IFTYPE_AP && vif->p2p) 7395 7239 ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp); 7396 7240 7397 - ret = ath12k_dp_tx(ar, arvif, skb); 7398 - if (ret) { 7399 - ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); 7400 - ieee80211_free_txskb(hw, skb); 7241 + if (!vif->valid_links || !is_mcast || 7242 + test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) { 7243 + ret = ath12k_dp_tx(ar, arvif, skb, false, 0); 7244 + if (unlikely(ret)) { 7245 + ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); 7246 + ieee80211_free_txskb(ar->ah->hw, skb); 7247 + return; 7248 + } 7249 + } else { 7250 + mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff; 7251 + 7252 + links_map = ahvif->links_map; 7253 + for_each_set_bit(link_id, &links_map, 7254 + IEEE80211_MLD_MAX_NUM_LINKS) { 7255 + tmp_arvif = rcu_dereference(ahvif->link[link_id]); 7256 + if (!tmp_arvif || !tmp_arvif->is_up) 7257 + continue; 7258 + 7259 + tmp_ar = tmp_arvif->ar; 7260 + msdu_copied = skb_copy(skb, GFP_ATOMIC); 7261 + if (!msdu_copied) { 7262 + ath12k_err(ar->ab, 7263 + "skb copy failure link_id 0x%X vdevid 0x%X\n", 7264 + link_id, tmp_arvif->vdev_id); 7265 + continue; 7266 + } 7267 + 7268 + ath12k_mlo_mcast_update_tx_link_address(vif, link_id, 7269 + msdu_copied, 7270 + 
info_flags); 7271 + 7272 + skb_cb = ATH12K_SKB_CB(msdu_copied); 7273 + info = IEEE80211_SKB_CB(msdu_copied); 7274 + skb_cb->link_id = link_id; 7275 + 7276 + /* For open mode, skip peer find logic */ 7277 + if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE)) 7278 + goto skip_peer_find; 7279 + 7280 + spin_lock_bh(&tmp_ar->ab->base_lock); 7281 + peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid); 7282 + if (!peer) { 7283 + spin_unlock_bh(&tmp_ar->ab->base_lock); 7284 + ath12k_warn(tmp_ar->ab, 7285 + "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n", 7286 + tmp_arvif->vdev_id, tmp_arvif->bssid, 7287 + ahvif->links_map); 7288 + dev_kfree_skb_any(msdu_copied); 7289 + continue; 7290 + } 7291 + 7292 + key = peer->keys[peer->mcast_keyidx]; 7293 + if (key) { 7294 + skb_cb->cipher = key->cipher; 7295 + skb_cb->flags |= ATH12K_SKB_CIPHER_SET; 7296 + info->control.hw_key = key; 7297 + 7298 + hdr = (struct ieee80211_hdr *)msdu_copied->data; 7299 + if (!ieee80211_has_protected(hdr->frame_control)) 7300 + hdr->frame_control |= 7301 + cpu_to_le16(IEEE80211_FCTL_PROTECTED); 7302 + } 7303 + spin_unlock_bh(&tmp_ar->ab->base_lock); 7304 + 7305 + skip_peer_find: 7306 + ret = ath12k_dp_tx(tmp_ar, tmp_arvif, 7307 + msdu_copied, true, mcbc_gsn); 7308 + if (unlikely(ret)) { 7309 + if (ret == -ENOMEM) { 7310 + /* Drops are expected during heavy multicast 7311 + * frame flood. 
Print with debug log 7312 + * level to avoid lot of console prints 7313 + */ 7314 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 7315 + "failed to transmit frame %d\n", 7316 + ret); 7317 + } else { 7318 + ath12k_warn(ar->ab, 7319 + "failed to transmit frame %d\n", 7320 + ret); 7321 + } 7322 + 7323 + dev_kfree_skb_any(msdu_copied); 7324 + } 7325 + } 7326 + ieee80211_free_txskb(ar->ah->hw, skb); 7401 7327 } 7402 7328 } 7403 7329 ··· 7499 7255 7500 7256 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) 7501 7257 { 7502 - return -EOPNOTSUPP; 7503 - /* TODO: Need to support new monitor mode */ 7258 + struct htt_rx_ring_tlv_filter tlv_filter = {}; 7259 + struct ath12k_base *ab = ar->ab; 7260 + u32 ring_id, i; 7261 + int ret = 0; 7262 + 7263 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 7264 + 7265 + if (!ab->hw_params->rxdma1_enable) 7266 + return ret; 7267 + 7268 + if (enable) { 7269 + tlv_filter = ath12k_mac_mon_status_filter_default; 7270 + 7271 + if (ath12k_debugfs_rx_filter(ar)) 7272 + tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar); 7273 + } else { 7274 + tlv_filter.rxmon_disable = true; 7275 + } 7276 + 7277 + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { 7278 + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; 7279 + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 7280 + ar->dp.mac_id + i, 7281 + HAL_RXDMA_MONITOR_DST, 7282 + DP_RXDMA_REFILL_RING_SIZE, 7283 + &tlv_filter); 7284 + if (ret) { 7285 + ath12k_err(ab, 7286 + "failed to setup filter for monitor buf %d\n", 7287 + ret); 7288 + } 7289 + } 7290 + 7291 + return ret; 7504 7292 } 7505 7293 7506 7294 static int ath12k_mac_start(struct ath12k *ar) ··· 7639 7363 7640 7364 static void ath12k_drain_tx(struct ath12k_hw *ah) 7641 7365 { 7642 - struct ath12k *ar; 7366 + struct ath12k *ar = ah->radio; 7643 7367 int i; 7368 + 7369 + if (ath12k_ftm_mode) { 7370 + ath12k_err(ar->ab, "fail to start mac operations in ftm mode\n"); 7371 + return; 7372 + } 7644 7373 7645 7374 
lockdep_assert_wiphy(ah->hw->wiphy); 7646 7375 ··· 7675 7394 case ATH12K_HW_STATE_RESTARTED: 7676 7395 case ATH12K_HW_STATE_WEDGED: 7677 7396 case ATH12K_HW_STATE_ON: 7397 + case ATH12K_HW_STATE_TM: 7678 7398 ah->state = ATH12K_HW_STATE_OFF; 7679 7399 7680 7400 WARN_ON(1); ··· 7843 7561 u32 *flags, u32 *tx_vdev_id) 7844 7562 { 7845 7563 struct ath12k_vif *ahvif = arvif->ahvif; 7846 - struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif; 7847 7564 struct ieee80211_bss_conf *link_conf; 7848 7565 struct ath12k *ar = arvif->ar; 7849 7566 struct ath12k_link_vif *tx_arvif; 7850 - struct ath12k_vif *tx_ahvif; 7851 - 7852 - if (!tx_vif) 7853 - return 0; 7854 7567 7855 7568 link_conf = ath12k_mac_get_link_bss_conf(arvif); 7856 7569 if (!link_conf) { ··· 7854 7577 return -ENOLINK; 7855 7578 } 7856 7579 7857 - tx_ahvif = ath12k_vif_to_ahvif(tx_vif); 7858 - tx_arvif = &tx_ahvif->deflink; 7580 + tx_arvif = ath12k_mac_get_tx_arvif(arvif); 7581 + if (!tx_arvif) 7582 + return 0; 7859 7583 7860 7584 if (link_conf->nontransmitted) { 7861 - if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) 7585 + if (ath12k_ar_to_hw(ar)->wiphy != 7586 + ath12k_ar_to_hw(tx_arvif->ar)->wiphy) 7862 7587 return -EINVAL; 7863 7588 7864 7589 *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP; ··· 8345 8066 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 8346 8067 struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; 8347 8068 struct ath12k_base *ab = ar->ab; 8069 + struct ieee80211_bss_conf *link_conf; 8348 8070 8349 8071 int ret; 8350 8072 ··· 8364 8084 } 8365 8085 8366 8086 if (cache->bss_conf_changed) { 8367 - ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf, 8087 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 8088 + if (!link_conf) { 8089 + ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n", 8090 + vif->addr, arvif->link_id); 8091 + return; 8092 + } 8093 + ath12k_mac_bss_info_changed(ar, arvif, link_conf, 8368 8094 
cache->bss_conf_changed); 8369 8095 } 8370 8096 ··· 8493 8207 ahvif->ah = ah; 8494 8208 ahvif->vif = vif; 8495 8209 arvif = &ahvif->deflink; 8496 - arvif->ahvif = ahvif; 8497 8210 8498 - INIT_LIST_HEAD(&arvif->list); 8499 - INIT_DELAYED_WORK(&arvif->connection_loss_work, 8500 - ath12k_mac_vif_sta_connection_loss_work); 8501 - 8502 - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 8503 - arvif->bitrate_mask.control[i].legacy = 0xffffffff; 8504 - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 8505 - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 8506 - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 8507 - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 8508 - } 8211 + ath12k_mac_init_arvif(ahvif, arvif, -1); 8509 8212 8510 8213 /* Allocate Default Queue now and reassign during actual vdev create */ 8511 8214 vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; ··· 8656 8381 FIF_PROBE_REQ | \ 8657 8382 FIF_FCSFAIL) 8658 8383 8659 - static void ath12k_mac_configure_filter(struct ath12k *ar, 8660 - unsigned int total_flags) 8661 - { 8662 - bool reset_flag; 8663 - int ret; 8664 - 8665 - lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 8666 - 8667 - ar->filter_flags = total_flags; 8668 - 8669 - /* For monitor mode */ 8670 - reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); 8671 - 8672 - ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); 8673 - if (ret) 8674 - ath12k_warn(ar->ab, 8675 - "fail to set monitor filter: %d\n", ret); 8676 - 8677 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 8678 - "total_flags:0x%x, reset_flag:%d\n", 8679 - total_flags, reset_flag); 8680 - } 8681 - 8682 8384 static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, 8683 8385 unsigned int changed_flags, 8684 8386 unsigned int *total_flags, ··· 8669 8417 ar = ath12k_ah_to_ar(ah, 0); 8670 8418 8671 8419 *total_flags &= SUPPORTED_FILTERS; 8672 - ath12k_mac_configure_filter(ar, *total_flags); 8420 + ar->filter_flags = *total_flags; 8673 8421 } 8674 8422 
8675 8423 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) ··· 8927 8675 continue; 8928 8676 8929 8677 if (arvif == arvif_p) 8678 + continue; 8679 + 8680 + if (!arvif_p->is_created) 8930 8681 continue; 8931 8682 8932 8683 link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, ··· 9237 8982 int n_vifs) 9238 8983 { 9239 8984 struct ath12k_wmi_vdev_up_params params = {}; 8985 + struct ath12k_link_vif *arvif, *tx_arvif; 9240 8986 struct ieee80211_bss_conf *link_conf; 9241 8987 struct ath12k_base *ab = ar->ab; 9242 - struct ath12k_link_vif *arvif; 9243 8988 struct ieee80211_vif *vif; 9244 8989 struct ath12k_vif *ahvif; 9245 8990 u8 link_id; ··· 9307 9052 params.vdev_id = arvif->vdev_id; 9308 9053 params.aid = ahvif->aid; 9309 9054 params.bssid = arvif->bssid; 9310 - if (vif->mbssid_tx_vif) { 9311 - struct ath12k_vif *tx_ahvif = 9312 - ath12k_vif_to_ahvif(vif->mbssid_tx_vif); 9313 - struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; 9314 9055 9056 + tx_arvif = ath12k_mac_get_tx_arvif(arvif); 9057 + if (tx_arvif) { 9315 9058 params.tx_bssid = tx_arvif->bssid; 9316 9059 params.nontx_profile_idx = link_conf->bssid_index; 9317 9060 params.nontx_profile_cnt = 1 << link_conf->bssid_indicator; ··· 9575 9322 if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 9576 9323 ar->num_started_vdevs == 1 && ar->monitor_vdev_created) 9577 9324 ath12k_mac_monitor_stop(ar); 9578 - 9579 - ath12k_mac_remove_link_interface(hw, arvif); 9580 - ath12k_mac_unassign_link_vif(arvif); 9581 9325 } 9582 9326 9583 9327 static int ··· 10267 10017 return 0; 10268 10018 } 10269 10019 10020 + static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id, 10021 + u32 vdev_id, u32 stats_id) 10022 + { 10023 + struct ath12k_base *ab = ar->ab; 10024 + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); 10025 + unsigned long time_left; 10026 + int ret; 10027 + 10028 + guard(mutex)(&ah->hw_mutex); 10029 + 10030 + if (ah->state != ATH12K_HW_STATE_ON) 10031 + return -ENETDOWN; 10032 
+ 10033 + reinit_completion(&ar->fw_stats_complete); 10034 + 10035 + ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id); 10036 + 10037 + if (ret) { 10038 + ath12k_warn(ab, "failed to request fw stats: %d\n", ret); 10039 + return ret; 10040 + } 10041 + 10042 + ath12k_dbg(ab, ATH12K_DBG_WMI, 10043 + "get fw stat pdev id %d vdev id %d stats id 0x%x\n", 10044 + pdev_id, vdev_id, stats_id); 10045 + 10046 + time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ); 10047 + 10048 + if (!time_left) 10049 + ath12k_warn(ab, "time out while waiting for get fw stats\n"); 10050 + 10051 + return ret; 10052 + } 10053 + 10270 10054 static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, 10271 10055 struct ieee80211_vif *vif, 10272 10056 struct ieee80211_sta *sta, ··· 10308 10024 { 10309 10025 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 10310 10026 struct ath12k_link_sta *arsta; 10027 + struct ath12k *ar; 10028 + s8 signal; 10029 + bool db2dbm; 10311 10030 10312 10031 lockdep_assert_wiphy(hw->wiphy); 10313 10032 10314 10033 arsta = &ahsta->deflink; 10034 + ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id); 10035 + if (!ar) 10036 + return; 10037 + 10038 + db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, 10039 + ar->ab->wmi_ab.svc_map); 10315 10040 10316 10041 sinfo->rx_duration = arsta->rx_duration; 10317 10042 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); ··· 10328 10035 sinfo->tx_duration = arsta->tx_duration; 10329 10036 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 10330 10037 10331 - if (!arsta->txrate.legacy && !arsta->txrate.nss) 10332 - return; 10333 - 10334 - if (arsta->txrate.legacy) { 10335 - sinfo->txrate.legacy = arsta->txrate.legacy; 10336 - } else { 10337 - sinfo->txrate.mcs = arsta->txrate.mcs; 10338 - sinfo->txrate.nss = arsta->txrate.nss; 10339 - sinfo->txrate.bw = arsta->txrate.bw; 10340 - sinfo->txrate.he_gi = arsta->txrate.he_gi; 10341 - sinfo->txrate.he_dcm = 
arsta->txrate.he_dcm; 10342 - sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; 10038 + if (arsta->txrate.legacy || arsta->txrate.nss) { 10039 + if (arsta->txrate.legacy) { 10040 + sinfo->txrate.legacy = arsta->txrate.legacy; 10041 + } else { 10042 + sinfo->txrate.mcs = arsta->txrate.mcs; 10043 + sinfo->txrate.nss = arsta->txrate.nss; 10044 + sinfo->txrate.bw = arsta->txrate.bw; 10045 + sinfo->txrate.he_gi = arsta->txrate.he_gi; 10046 + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; 10047 + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; 10048 + sinfo->txrate.eht_gi = arsta->txrate.eht_gi; 10049 + sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc; 10050 + } 10051 + sinfo->txrate.flags = arsta->txrate.flags; 10052 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 10343 10053 } 10344 - sinfo->txrate.flags = arsta->txrate.flags; 10345 - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 10346 10054 10347 10055 /* TODO: Use real NF instead of default one. */ 10348 - sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; 10349 - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 10056 + signal = arsta->rssi_comb; 10057 + 10058 + if (!signal && 10059 + ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && 10060 + !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, 10061 + WMI_REQUEST_VDEV_STAT))) 10062 + signal = arsta->rssi_beacon; 10063 + 10064 + if (signal) { 10065 + sinfo->signal = db2dbm ? 
signal : signal + ATH12K_DEFAULT_NOISE_FLOOR; 10066 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 10067 + } 10068 + 10069 + sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi); 10070 + 10071 + if (!db2dbm) 10072 + sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR; 10073 + 10074 + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 10350 10075 } 10351 10076 10352 10077 static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, ··· 10609 10298 .suspend = ath12k_wow_op_suspend, 10610 10299 .resume = ath12k_wow_op_resume, 10611 10300 .set_wakeup = ath12k_wow_op_set_wakeup, 10301 + #endif 10302 + CFG80211_TESTMODE_CMD(ath12k_tm_cmd) 10303 + #ifdef CONFIG_ATH12K_DEBUGFS 10304 + .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add, 10612 10305 #endif 10613 10306 }; 10614 10307 ··· 11265 10950 ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap; 11266 10951 ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap; 11267 10952 wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; 10953 + 10954 + ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX); 11268 10955 } 11269 10956 11270 10957 hw->queues = ATH12K_HW_MAX_QUEUES; ··· 11349 11032 11350 11033 ath12k_debugfs_register(ar); 11351 11034 } 11035 + 11036 + init_completion(&ar->fw_stats_complete); 11352 11037 11353 11038 return 0; 11354 11039 ··· 11452 11133 } 11453 11134 } 11454 11135 11136 + if (num_link == 0) 11137 + return 0; 11138 + 11455 11139 mlo.group_id = cpu_to_le32(ag->id); 11456 11140 mlo.partner_link_id = partner_link_id; 11457 11141 mlo.num_partner_links = num_link; ··· 11484 11162 { 11485 11163 struct ath12k_base *ab = ar->ab; 11486 11164 int ret; 11165 + u8 num_link; 11487 11166 11488 11167 if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) 11168 + return 0; 11169 + 11170 + num_link = ath12k_get_num_partner_link(ar); 11171 + 11172 + if (num_link == 0) 11489 11173 return 0; 11490 11174 11491 11175 ret = ath12k_wmi_mlo_teardown(ar);
+8 -2
drivers/net/wireless/ath/ath12k/mac.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_MAC_H ··· 108 108 void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, 109 109 struct ieee80211_chanctx_conf *conf, 110 110 void *data); 111 - 111 + u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones); 112 + enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones); 113 + enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi); 114 + struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif); 115 + struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, 116 + struct ieee80211_vif *vif, 117 + u8 link_id); 112 118 #endif
+16 -6
drivers/net/wireless/ath/ath12k/pci.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 483 483 484 484 ath12k_pci_ext_grp_disable(irq_grp); 485 485 486 - napi_synchronize(&irq_grp->napi); 487 - napi_disable(&irq_grp->napi); 486 + if (irq_grp->napi_enabled) { 487 + napi_synchronize(&irq_grp->napi); 488 + napi_disable(&irq_grp->napi); 489 + irq_grp->napi_enabled = false; 490 + } 488 491 } 489 492 } 490 493 ··· 649 646 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 650 647 return 0; 651 648 652 - return irq_set_affinity_hint(ab_pci->pdev->irq, m); 649 + return irq_set_affinity_and_hint(ab_pci->pdev->irq, m); 653 650 } 654 651 655 652 static int ath12k_pci_config_irq(struct ath12k_base *ab) ··· 1117 1114 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 1118 1115 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 1119 1116 1120 - napi_enable(&irq_grp->napi); 1117 + if (!irq_grp->napi_enabled) { 1118 + napi_enable(&irq_grp->napi); 1119 + irq_grp->napi_enabled = true; 1120 + } 1121 + 1121 1122 ath12k_pci_ext_grp_enable(irq_grp); 1122 1123 } 1123 1124 ··· 1568 1561 ab_pci->ab = ab; 1569 1562 ab_pci->pdev = pdev; 1570 1563 ab->hif.ops = &ath12k_pci_hif_ops; 1564 + ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL; 1571 1565 pci_set_drvdata(pdev, ab); 1572 1566 spin_lock_init(&ab_pci->window_lock); 1573 1567 ··· 1697 1689 return 0; 1698 1690 1699 1691 err_free_irq: 1692 + /* __free_irq() expects the caller to have cleared the affinity hint */ 1693 + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1700 1694 ath12k_pci_free_irq(ab); 1701 1695 1702 1696 err_ce_free: ··· 1744 1734 cancel_work_sync(&ab->reset_work); 1745 1735 cancel_work_sync(&ab->dump_work); 1746 1736 ath12k_core_deinit(ab); 1747 - 
ath12k_fw_unmap(ab); 1748 1737 1749 1738 qmi_fail: 1739 + ath12k_fw_unmap(ab); 1750 1740 ath12k_mhi_unregister(ab_pci); 1751 1741 1752 1742 ath12k_pci_free_irq(ab);
+13 -10
drivers/net/wireless/ath/ath12k/qmi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/elf.h> ··· 2056 2056 } 2057 2057 2058 2058 if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { 2059 - ab->single_chip_mlo_supp = false; 2060 - 2059 + ag->mlo_capable = false; 2061 2060 ath12k_dbg(ab, ATH12K_DBG_QMI, 2062 2061 "skip QMI MLO cap due to invalid num_radio %d\n", 2063 2062 ab->qmi.num_radios); ··· 2264 2265 goto out; 2265 2266 } 2266 2267 2267 - if (resp.single_chip_mlo_support_valid && 2268 - resp.single_chip_mlo_support) 2269 - ab->single_chip_mlo_supp = true; 2270 - 2271 2268 if (!resp.num_phy_valid) { 2272 2269 ret = -ENODATA; 2273 2270 goto out; ··· 2272 2277 ab->qmi.num_radios = resp.num_phy; 2273 2278 2274 2279 ath12k_dbg(ab, ATH12K_DBG_QMI, 2275 - "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n", 2280 + "phy capability resp valid %d num_phy %d valid %d board_id %d\n", 2276 2281 resp.num_phy_valid, resp.num_phy, 2277 - resp.board_id_valid, resp.board_id, 2278 - resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support); 2282 + resp.board_id_valid, resp.board_id); 2279 2283 2280 2284 return; 2281 2285 ··· 2733 2739 r = ath12k_core_check_smbios(ab); 2734 2740 if (r) 2735 2741 ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); 2742 + 2743 + r = ath12k_acpi_start(ab); 2744 + if (r) 2745 + /* ACPI is optional so continue in case of an error */ 2746 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r); 2747 + 2748 + r = ath12k_acpi_check_bdf_variant_name(ab); 2749 + if (r) 2750 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n"); 2736 2751 2737 2752 out: 2738 2753 return ret;
+4 -1
drivers/net/wireless/ath/ath12k/reg.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_REG_H ··· 12 12 13 13 struct ath12k_base; 14 14 struct ath12k; 15 + 16 + #define ATH12K_2GHZ_MAX_FREQUENCY 2495 17 + #define ATH12K_5GHZ_MAX_FREQUENCY 5920 15 18 16 19 /* DFS regdomains supported by Firmware */ 17 20 enum ath12k_dfs_region {
+3 -9
drivers/net/wireless/ath/ath12k/rx_desc.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #ifndef ATH12K_RX_DESC_H 7 7 #define ATH12K_RX_DESC_H ··· 637 637 RX_MSDU_START_PKT_TYPE_11N, 638 638 RX_MSDU_START_PKT_TYPE_11AC, 639 639 RX_MSDU_START_PKT_TYPE_11AX, 640 + RX_MSDU_START_PKT_TYPE_11BA, 641 + RX_MSDU_START_PKT_TYPE_11BE, 640 642 }; 641 643 642 644 enum rx_msdu_start_sgi { ··· 1540 1538 #define MAX_MU_GROUP_ID 64 1541 1539 #define MAX_MU_GROUP_SHOW 16 1542 1540 #define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW) 1543 - 1544 - #define HAL_RX_RU_ALLOC_TYPE_MAX 6 1545 - #define RU_26 1 1546 - #define RU_52 2 1547 - #define RU_106 4 1548 - #define RU_242 9 1549 - #define RU_484 18 1550 - #define RU_996 37 1551 1541 1552 1542 #endif /* ATH12K_RX_DESC_H */
+395
drivers/net/wireless/ath/ath12k/testmode.c
··· 1 + // SPDX-License-Identifier: BSD-3-Clause-Clear 2 + /* 3 + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 + */ 6 + 7 + #include "testmode.h" 8 + #include <net/netlink.h> 9 + #include "debug.h" 10 + #include "wmi.h" 11 + #include "hw.h" 12 + #include "core.h" 13 + #include "hif.h" 14 + #include "../testmode_i.h" 15 + 16 + #define ATH12K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0) 17 + #define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4) 18 + 19 + static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = { 20 + [ATH_TM_ATTR_CMD] = { .type = NLA_U32 }, 21 + [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY, 22 + .len = ATH_TM_DATA_MAX_LEN }, 23 + [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, 24 + [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, 25 + [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, 26 + }; 27 + 28 + static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab) 29 + { 30 + struct ath12k_pdev *pdev; 31 + struct ath12k *ar; 32 + int i; 33 + 34 + for (i = 0; i < ab->num_radios; i++) { 35 + pdev = &ab->pdevs[i]; 36 + ar = pdev->ar; 37 + 38 + if (ar && ar->ah->state == ATH12K_HW_STATE_TM) 39 + return ar; 40 + } 41 + 42 + return NULL; 43 + } 44 + 45 + void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, 46 + struct sk_buff *skb) 47 + { 48 + struct sk_buff *nl_skb; 49 + struct ath12k *ar; 50 + 51 + ath12k_dbg(ab, ATH12K_DBG_TESTMODE, 52 + "testmode event wmi cmd_id %d skb length %d\n", 53 + cmd_id, skb->len); 54 + 55 + ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len); 56 + 57 + ar = ath12k_tm_get_ar(ab); 58 + if (!ar) { 59 + ath12k_warn(ab, "testmode event not handled due to invalid pdev\n"); 60 + return; 61 + } 62 + 63 + spin_lock_bh(&ar->data_lock); 64 + 65 + nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy, 66 + 2 * nla_total_size(sizeof(u32)) + 67 + nla_total_size(skb->len), 68 + 
GFP_ATOMIC); 69 + spin_unlock_bh(&ar->data_lock); 70 + 71 + if (!nl_skb) { 72 + ath12k_warn(ab, 73 + "failed to allocate skb for unsegmented testmode wmi event\n"); 74 + return; 75 + } 76 + 77 + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) || 78 + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || 79 + nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) { 80 + ath12k_warn(ab, "failed to populate testmode unsegmented event\n"); 81 + kfree_skb(nl_skb); 82 + return; 83 + } 84 + 85 + cfg80211_testmode_event(nl_skb, GFP_ATOMIC); 86 + } 87 + 88 + void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id, 89 + const struct ath12k_wmi_ftm_event *ftm_msg, 90 + u16 length) 91 + { 92 + struct sk_buff *nl_skb; 93 + struct ath12k *ar; 94 + u32 data_pos, pdev_id; 95 + u16 datalen; 96 + u8 total_segments, current_seq; 97 + u8 const *buf_pos; 98 + 99 + ath12k_dbg(ab, ATH12K_DBG_TESTMODE, 100 + "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n", 101 + cmd_id, ftm_msg, length); 102 + ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length); 103 + pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id)); 104 + 105 + if (pdev_id >= ab->num_radios) { 106 + ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n"); 107 + return; 108 + } 109 + 110 + ar = ab->pdevs[pdev_id].ar; 111 + 112 + if (!ar) { 113 + ath12k_warn(ab, "testmode event not handled due to absence of pdev\n"); 114 + return; 115 + } 116 + 117 + current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo, 118 + ATH12K_FTM_SEGHDR_CURRENT_SEQ); 119 + total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo, 120 + ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS); 121 + datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params)); 122 + buf_pos = ftm_msg->data; 123 + 124 + if (current_seq == 0) { 125 + ab->ftm_event_obj.expected_seq = 0; 126 + ab->ftm_event_obj.data_pos = 0; 127 + } 128 + 129 + data_pos = ab->ftm_event_obj.data_pos; 130 + 131 + if ((data_pos + datalen) > 
ATH_FTM_EVENT_MAX_BUF_LENGTH) { 132 + ath12k_warn(ab, 133 + "Invalid event length date_pos[%d] datalen[%d]\n", 134 + data_pos, datalen); 135 + return; 136 + } 137 + 138 + memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen); 139 + data_pos += datalen; 140 + 141 + if (++ab->ftm_event_obj.expected_seq != total_segments) { 142 + ab->ftm_event_obj.data_pos = data_pos; 143 + ath12k_dbg(ab, ATH12K_DBG_TESTMODE, 144 + "partial data received current_seq[%d], total_seg[%d]\n", 145 + current_seq, total_segments); 146 + return; 147 + } 148 + 149 + ath12k_dbg(ab, ATH12K_DBG_TESTMODE, 150 + "total data length[%d] = [%d]\n", 151 + data_pos, ftm_msg->seg_hdr.len); 152 + 153 + spin_lock_bh(&ar->data_lock); 154 + nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy, 155 + 2 * nla_total_size(sizeof(u32)) + 156 + nla_total_size(data_pos), 157 + GFP_ATOMIC); 158 + spin_unlock_bh(&ar->data_lock); 159 + 160 + if (!nl_skb) { 161 + ath12k_warn(ab, 162 + "failed to allocate skb for testmode wmi event\n"); 163 + return; 164 + } 165 + 166 + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, 167 + ATH_TM_CMD_WMI_FTM) || 168 + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || 169 + nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos, 170 + &ab->ftm_event_obj.eventdata[0])) { 171 + ath12k_warn(ab, "failed to populate testmode event"); 172 + kfree_skb(nl_skb); 173 + return; 174 + } 175 + 176 + cfg80211_testmode_event(nl_skb, GFP_ATOMIC); 177 + } 178 + 179 + static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[]) 180 + { 181 + struct sk_buff *skb; 182 + 183 + ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, 184 + "testmode cmd get version_major %d version_minor %d\n", 185 + ATH_TESTMODE_VERSION_MAJOR, 186 + ATH_TESTMODE_VERSION_MINOR); 187 + 188 + spin_lock_bh(&ar->data_lock); 189 + skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy, 190 + 2 * nla_total_size(sizeof(u32))); 191 + spin_unlock_bh(&ar->data_lock); 192 + 193 + if (!skb) 194 + return -ENOMEM; 195 + 196 + if 
(nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR, 197 + ATH_TESTMODE_VERSION_MAJOR) || 198 + nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR, 199 + ATH_TESTMODE_VERSION_MINOR)) { 200 + kfree_skb(skb); 201 + return -ENOBUFS; 202 + } 203 + 204 + return cfg80211_testmode_reply(skb); 205 + } 206 + 207 + static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[]) 208 + { 209 + struct ath12k_wmi_pdev *wmi = ar->wmi; 210 + struct sk_buff *skb; 211 + struct ath12k_wmi_ftm_cmd *ftm_cmd; 212 + int ret = 0; 213 + void *buf; 214 + size_t aligned_len; 215 + u32 cmd_id, buf_len; 216 + u16 chunk_len, total_bytes, num_segments; 217 + u8 segnumber = 0, *bufpos; 218 + 219 + ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state %d\n", ar->ah->state); 220 + if (ar->ah->state != ATH12K_HW_STATE_TM) 221 + return -ENETDOWN; 222 + 223 + if (!tb[ATH_TM_ATTR_DATA]) 224 + return -EINVAL; 225 + 226 + buf = nla_data(tb[ATH_TM_ATTR_DATA]); 227 + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); 228 + cmd_id = WMI_PDEV_UTF_CMDID; 229 + ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, 230 + "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n", 231 + cmd_id, buf, buf_len); 232 + ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len); 233 + bufpos = buf; 234 + total_bytes = buf_len; 235 + num_segments = total_bytes / MAX_WMI_UTF_LEN; 236 + 237 + if (buf_len - (num_segments * MAX_WMI_UTF_LEN)) 238 + num_segments++; 239 + 240 + while (buf_len) { 241 + if (buf_len > MAX_WMI_UTF_LEN) 242 + chunk_len = MAX_WMI_UTF_LEN; /* MAX message */ 243 + else 244 + chunk_len = buf_len; 245 + 246 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len + 247 + sizeof(struct ath12k_wmi_ftm_cmd))); 248 + 249 + if (!skb) 250 + return -ENOMEM; 251 + 252 + ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data; 253 + aligned_len = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params); 254 + ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); 255 + ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes); 256 + 
ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref); 257 + ftm_cmd->seg_hdr.segmentinfo = 258 + le32_encode_bits(num_segments, 259 + ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) | 260 + le32_encode_bits(segnumber, 261 + ATH12K_FTM_SEGHDR_CURRENT_SEQ); 262 + ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id); 263 + segnumber++; 264 + memcpy(&ftm_cmd->data, bufpos, chunk_len); 265 + ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id); 266 + 267 + if (ret) { 268 + ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret); 269 + kfree_skb(skb); 270 + return ret; 271 + } 272 + 273 + buf_len -= chunk_len; 274 + bufpos += chunk_len; 275 + } 276 + 277 + ++ar->ftm_msgref; 278 + return ret; 279 + } 280 + 281 + static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[]) 282 + { 283 + if (ar->ah->state == ATH12K_HW_STATE_TM) 284 + return -EALREADY; 285 + 286 + if (ar->ah->state != ATH12K_HW_STATE_OFF) 287 + return -EBUSY; 288 + 289 + ar->ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, 290 + GFP_KERNEL); 291 + 292 + if (!ar->ab->ftm_event_obj.eventdata) 293 + return -ENOMEM; 294 + 295 + ar->ah->state = ATH12K_HW_STATE_TM; 296 + ar->ftm_msgref = 0; 297 + return 0; 298 + } 299 + 300 + static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[]) 301 + { 302 + struct ath12k_wmi_pdev *wmi = ar->wmi; 303 + struct sk_buff *skb; 304 + struct wmi_pdev_set_param_cmd *cmd; 305 + int ret = 0, tag; 306 + void *buf; 307 + u32 cmd_id, buf_len; 308 + 309 + if (!tb[ATH_TM_ATTR_DATA]) 310 + return -EINVAL; 311 + 312 + if (!tb[ATH_TM_ATTR_WMI_CMDID]) 313 + return -EINVAL; 314 + 315 + buf = nla_data(tb[ATH_TM_ATTR_DATA]); 316 + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); 317 + 318 + if (!buf_len) { 319 + ath12k_warn(ar->ab, "No data present in testmode command\n"); 320 + return -EINVAL; 321 + } 322 + 323 + cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]); 324 + 325 + cmd = buf; 326 + tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG); 327 + 328 + if (tag == 
WMI_TAG_PDEV_SET_PARAM_CMD) 329 + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 330 + 331 + ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, 332 + "testmode cmd wmi cmd_id %d buf length %d\n", 333 + cmd_id, buf_len); 334 + 335 + ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len); 336 + 337 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 338 + 339 + if (!skb) 340 + return -ENOMEM; 341 + 342 + memcpy(skb->data, buf, buf_len); 343 + 344 + ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id); 345 + if (ret) { 346 + dev_kfree_skb(skb); 347 + ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n", 348 + ret); 349 + } 350 + 351 + return ret; 352 + } 353 + 354 + int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 355 + void *data, int len) 356 + { 357 + struct ath12k_hw *ah = hw->priv; 358 + struct ath12k *ar = NULL; 359 + struct nlattr *tb[ATH_TM_ATTR_MAX + 1]; 360 + struct ath12k_base *ab; 361 + struct wiphy *wiphy = hw->wiphy; 362 + int ret; 363 + 364 + lockdep_assert_held(&wiphy->mtx); 365 + 366 + ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy, 367 + NULL); 368 + if (ret) 369 + return ret; 370 + 371 + if (!tb[ATH_TM_ATTR_CMD]) 372 + return -EINVAL; 373 + 374 + /* TODO: have to handle ar for MLO case */ 375 + if (ah->num_radio) 376 + ar = ah->radio; 377 + 378 + if (!ar) 379 + return -EINVAL; 380 + 381 + ab = ar->ab; 382 + switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) { 383 + case ATH_TM_CMD_WMI: 384 + return ath12k_tm_cmd_wmi(ar, tb); 385 + case ATH_TM_CMD_TESTMODE_START: 386 + return ath12k_tm_cmd_testmode_start(ar, tb); 387 + case ATH_TM_CMD_GET_VERSION: 388 + return ath12k_tm_cmd_get_version(ar, tb); 389 + case ATH_TM_CMD_WMI_FTM: 390 + set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags); 391 + return ath12k_tm_cmd_process_ftm(ar, tb); 392 + default: 393 + return -EOPNOTSUPP; 394 + } 395 + }
+40
drivers/net/wireless/ath/ath12k/testmode.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 + /* 3 + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 + */ 6 + 7 + #include "core.h" 8 + #include "hif.h" 9 + 10 + #ifdef CONFIG_NL80211_TESTMODE 11 + 12 + void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, 13 + struct sk_buff *skb); 14 + void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id, 15 + const struct ath12k_wmi_ftm_event *ftm_msg, 16 + u16 length); 17 + int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 18 + void *data, int len); 19 + 20 + #else 21 + 22 + static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, 23 + struct sk_buff *skb) 24 + { 25 + } 26 + 27 + static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id, 28 + const struct ath12k_wmi_ftm_event *msg, 29 + u16 length) 30 + { 31 + } 32 + 33 + static inline int ath12k_tm_cmd(struct ieee80211_hw *hw, 34 + struct ieee80211_vif *vif, 35 + void *data, int len) 36 + { 37 + return 0; 38 + } 39 + 40 + #endif
+1159 -17
drivers/net/wireless/ath/ath12k/wmi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #include <linux/skbuff.h> 7 7 #include <linux/ctype.h> ··· 15 15 #include <linux/time.h> 16 16 #include <linux/of.h> 17 17 #include "core.h" 18 + #include "debugfs.h" 18 19 #include "debug.h" 19 20 #include "mac.h" 20 21 #include "hw.h" 21 22 #include "peer.h" 22 23 #include "p2p.h" 24 + #include "testmode.h" 23 25 24 26 struct ath12k_wmi_svc_ready_parse { 25 27 bool wmi_svc_bitmap_done; 28 + }; 29 + 30 + struct wmi_tlv_fw_stats_parse { 31 + const struct wmi_stats_event *ev; 26 32 }; 27 33 28 34 struct ath12k_wmi_dma_ring_caps_parse { ··· 179 173 .min_len = sizeof(struct wmi_p2p_noa_event) }, 180 174 }; 181 175 182 - static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) 176 + __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) 183 177 { 184 178 return le32_encode_bits(cmd, WMI_TLV_TAG) | 185 179 le32_encode_bits(len, WMI_TLV_LEN); ··· 816 810 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); 817 811 dev_kfree_skb(skb); 818 812 } 813 + 814 + return ret; 815 + } 816 + 817 + int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, 818 + u32 vdev_id, u32 pdev_id) 819 + { 820 + struct ath12k_wmi_pdev *wmi = ar->wmi; 821 + struct wmi_request_stats_cmd *cmd; 822 + struct sk_buff *skb; 823 + int ret; 824 + 825 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 826 + if (!skb) 827 + return -ENOMEM; 828 + 829 + cmd = (struct wmi_request_stats_cmd *)skb->data; 830 + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD, 831 + sizeof(*cmd)); 832 + 833 + cmd->stats_id = cpu_to_le32(stats_id); 834 + cmd->vdev_id = cpu_to_le32(vdev_id); 835 + cmd->pdev_id = cpu_to_le32(pdev_id); 836 + 837 + ret = ath12k_wmi_cmd_send(wmi, skb, 
WMI_REQUEST_STATS_CMDID); 838 + if (ret) { 839 + ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); 840 + dev_kfree_skb(skb); 841 + } 842 + 843 + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 844 + "WMI request stats 0x%x vdev id %d pdev id %d\n", 845 + stats_id, vdev_id, pdev_id); 819 846 820 847 return ret; 821 848 } ··· 1927 1888 return ret; 1928 1889 } 1929 1890 1930 - int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, 1891 + int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif, 1931 1892 struct ieee80211_mutable_offsets *offs, 1932 1893 struct sk_buff *bcn, 1933 1894 struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args) 1934 1895 { 1896 + struct ath12k *ar = arvif->ar; 1935 1897 struct ath12k_wmi_pdev *wmi = ar->wmi; 1898 + struct ath12k_base *ab = ar->ab; 1936 1899 struct wmi_bcn_tmpl_cmd *cmd; 1937 1900 struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; 1901 + struct ath12k_vif *ahvif = arvif->ahvif; 1902 + struct ieee80211_bss_conf *conf; 1903 + u32 vdev_id = arvif->vdev_id; 1938 1904 struct wmi_tlv *tlv; 1939 1905 struct sk_buff *skb; 1940 1906 u32 ema_params = 0; 1941 1907 void *ptr; 1942 1908 int ret, len; 1943 1909 size_t aligned_len = roundup(bcn->len, 4); 1910 + 1911 + conf = ath12k_mac_get_link_bss_conf(arvif); 1912 + if (!conf) { 1913 + ath12k_warn(ab, 1914 + "unable to access bss link conf in beacon template command for vif %pM link %u\n", 1915 + ahvif->vif->addr, arvif->link_id); 1916 + return -EINVAL; 1917 + } 1944 1918 1945 1919 len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; 1946 1920 ··· 1966 1914 sizeof(*cmd)); 1967 1915 cmd->vdev_id = cpu_to_le32(vdev_id); 1968 1916 cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset); 1969 - cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]); 1970 - cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]); 1917 + 1918 + if (conf->csa_active) { 1919 + cmd->csa_switch_count_offset = 1920 + cpu_to_le32(offs->cntdwn_counter_offs[0]); 1921 + 
cmd->ext_csa_switch_count_offset = 1922 + cpu_to_le32(offs->cntdwn_counter_offs[1]); 1923 + cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF); 1924 + arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]]; 1925 + } 1926 + 1971 1927 cmd->buf_len = cpu_to_le32(bcn->len); 1972 1928 cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off); 1973 1929 if (ema_args) { ··· 2005 1945 2006 1946 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); 2007 1947 if (ret) { 2008 - ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); 1948 + ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n"); 2009 1949 dev_kfree_skb(skb); 2010 1950 } 2011 1951 ··· 2433 2373 arg->dwell_time_active = 50; 2434 2374 arg->dwell_time_active_2g = 0; 2435 2375 arg->dwell_time_passive = 150; 2436 - arg->dwell_time_active_6g = 40; 2437 - arg->dwell_time_passive_6g = 30; 2376 + arg->dwell_time_active_6g = 70; 2377 + arg->dwell_time_passive_6g = 70; 2438 2378 arg->min_rest_time = 50; 2439 2379 arg->max_rest_time = 500; 2440 2380 arg->repeat_probe_time = 0; ··· 2854 2794 WMI_CHAN_REG_INFO1_REG_CLS); 2855 2795 *reg2 |= le32_encode_bits(channel_arg->antennamax, 2856 2796 WMI_CHAN_REG_INFO2_ANT_MAX); 2797 + *reg2 |= le32_encode_bits(channel_arg->maxregpower, 2798 + WMI_CHAN_REG_INFO2_MAX_TX_PWR); 2857 2799 2858 2800 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2859 2801 "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", ··· 6904 6842 rcu_read_unlock(); 6905 6843 } 6906 6844 6845 + static void 6846 + ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, 6847 + struct ath12k_fw_stats *fw_stats, 6848 + char *buf, u32 *length) 6849 + { 6850 + const struct ath12k_fw_stats_vdev *vdev; 6851 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 6852 + struct ath12k_link_vif *arvif; 6853 + u32 len = *length; 6854 + u8 *vif_macaddr; 6855 + int i; 6856 + 6857 + len += scnprintf(buf + len, buf_len - len, "\n"); 6858 + len += scnprintf(buf + len, buf_len - len, "%30s\n", 6859 + "ath12k VDEV stats"); 6860 + len += 
scnprintf(buf + len, buf_len - len, "%30s\n\n", 6861 + "================="); 6862 + 6863 + list_for_each_entry(vdev, &fw_stats->vdevs, list) { 6864 + arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); 6865 + if (!arvif) 6866 + continue; 6867 + vif_macaddr = arvif->ahvif->vif->addr; 6868 + 6869 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6870 + "VDEV ID", vdev->vdev_id); 6871 + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 6872 + "VDEV MAC address", vif_macaddr); 6873 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6874 + "beacon snr", vdev->beacon_snr); 6875 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6876 + "data snr", vdev->data_snr); 6877 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6878 + "num rx frames", vdev->num_rx_frames); 6879 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6880 + "num rts fail", vdev->num_rts_fail); 6881 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6882 + "num rts success", vdev->num_rts_success); 6883 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6884 + "num rx err", vdev->num_rx_err); 6885 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6886 + "num rx discard", vdev->num_rx_discard); 6887 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6888 + "num tx not acked", vdev->num_tx_not_acked); 6889 + 6890 + for (i = 0 ; i < WLAN_MAX_AC; i++) 6891 + len += scnprintf(buf + len, buf_len - len, 6892 + "%25s [%02d] %u\n", 6893 + "num tx frames", i, 6894 + vdev->num_tx_frames[i]); 6895 + 6896 + for (i = 0 ; i < WLAN_MAX_AC; i++) 6897 + len += scnprintf(buf + len, buf_len - len, 6898 + "%25s [%02d] %u\n", 6899 + "num tx frames retries", i, 6900 + vdev->num_tx_frames_retries[i]); 6901 + 6902 + for (i = 0 ; i < WLAN_MAX_AC; i++) 6903 + len += scnprintf(buf + len, buf_len - len, 6904 + "%25s [%02d] %u\n", 6905 + "num tx frames failures", i, 6906 + vdev->num_tx_frames_failures[i]); 6907 + 6908 + for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 6909 
+ len += scnprintf(buf + len, buf_len - len, 6910 + "%25s [%02d] 0x%08x\n", 6911 + "tx rate history", i, 6912 + vdev->tx_rate_history[i]); 6913 + for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 6914 + len += scnprintf(buf + len, buf_len - len, 6915 + "%25s [%02d] %u\n", 6916 + "beacon rssi history", i, 6917 + vdev->beacon_rssi_history[i]); 6918 + 6919 + len += scnprintf(buf + len, buf_len - len, "\n"); 6920 + *length = len; 6921 + } 6922 + } 6923 + 6924 + static void 6925 + ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar, 6926 + struct ath12k_fw_stats *fw_stats, 6927 + char *buf, u32 *length) 6928 + { 6929 + const struct ath12k_fw_stats_bcn *bcn; 6930 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 6931 + struct ath12k_link_vif *arvif; 6932 + u32 len = *length; 6933 + size_t num_bcn; 6934 + 6935 + num_bcn = list_count_nodes(&fw_stats->bcn); 6936 + 6937 + len += scnprintf(buf + len, buf_len - len, "\n"); 6938 + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 6939 + "ath12k Beacon stats", num_bcn); 6940 + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6941 + "==================="); 6942 + 6943 + list_for_each_entry(bcn, &fw_stats->bcn, list) { 6944 + arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id); 6945 + if (!arvif) 6946 + continue; 6947 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6948 + "VDEV ID", bcn->vdev_id); 6949 + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 6950 + "VDEV MAC address", arvif->ahvif->vif->addr); 6951 + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6952 + "================"); 6953 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6954 + "Num of beacon tx success", bcn->tx_bcn_succ_cnt); 6955 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 6956 + "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); 6957 + 6958 + len += scnprintf(buf + len, buf_len - len, "\n"); 6959 + *length = len; 6960 + } 6961 + } 6962 + 6963 + static void 6964 + ath12k_wmi_fw_pdev_base_stats_dump(const struct 
ath12k_fw_stats_pdev *pdev, 6965 + char *buf, u32 *length, u64 fw_soc_drop_cnt) 6966 + { 6967 + u32 len = *length; 6968 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 6969 + 6970 + len = scnprintf(buf + len, buf_len - len, "\n"); 6971 + len += scnprintf(buf + len, buf_len - len, "%30s\n", 6972 + "ath12k PDEV stats"); 6973 + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 6974 + "================="); 6975 + 6976 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 6977 + "Channel noise floor", pdev->ch_noise_floor); 6978 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6979 + "Channel TX power", pdev->chan_tx_power); 6980 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6981 + "TX frame count", pdev->tx_frame_count); 6982 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6983 + "RX frame count", pdev->rx_frame_count); 6984 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6985 + "RX clear count", pdev->rx_clear_count); 6986 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6987 + "Cycle count", pdev->cycle_count); 6988 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 6989 + "PHY error count", pdev->phy_err_count); 6990 + len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n", 6991 + "soc drop count", fw_soc_drop_cnt); 6992 + 6993 + *length = len; 6994 + } 6995 + 6996 + static void 6997 + ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 6998 + char *buf, u32 *length) 6999 + { 7000 + u32 len = *length; 7001 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7002 + 7003 + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 7004 + "ath12k PDEV TX stats"); 7005 + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7006 + "===================="); 7007 + 7008 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7009 + "HTT cookies queued", pdev->comp_queued); 7010 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7011 + "HTT cookies disp.", 
pdev->comp_delivered); 7012 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7013 + "MSDU queued", pdev->msdu_enqued); 7014 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7015 + "MPDU queued", pdev->mpdu_enqued); 7016 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7017 + "MSDUs dropped", pdev->wmm_drop); 7018 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7019 + "Local enqued", pdev->local_enqued); 7020 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7021 + "Local freed", pdev->local_freed); 7022 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7023 + "HW queued", pdev->hw_queued); 7024 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7025 + "PPDUs reaped", pdev->hw_reaped); 7026 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7027 + "Num underruns", pdev->underrun); 7028 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7029 + "PPDUs cleaned", pdev->tx_abort); 7030 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7031 + "MPDUs requeued", pdev->mpdus_requed); 7032 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7033 + "Excessive retries", pdev->tx_ko); 7034 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7035 + "HW rate", pdev->data_rc); 7036 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7037 + "Sched self triggers", pdev->self_triggers); 7038 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7039 + "Dropped due to SW retries", 7040 + pdev->sw_retry_failure); 7041 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7042 + "Illegal rate phy errors", 7043 + pdev->illgl_rate_phy_err); 7044 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7045 + "PDEV continuous xretry", pdev->pdev_cont_xretry); 7046 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7047 + "TX timeout", pdev->pdev_tx_timeout); 7048 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7049 + "PDEV resets", 
pdev->pdev_resets); 7050 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7051 + "Stateless TIDs alloc failures", 7052 + pdev->stateless_tid_alloc_failure); 7053 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7054 + "PHY underrun", pdev->phy_underrun); 7055 + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7056 + "MPDU is more than txop limit", pdev->txop_ovf); 7057 + *length = len; 7058 + } 7059 + 7060 + static void 7061 + ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7062 + char *buf, u32 *length) 7063 + { 7064 + u32 len = *length; 7065 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7066 + 7067 + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 7068 + "ath12k PDEV RX stats"); 7069 + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7070 + "===================="); 7071 + 7072 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7073 + "Mid PPDU route change", 7074 + pdev->mid_ppdu_route_change); 7075 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7076 + "Tot. 
number of statuses", pdev->status_rcvd); 7077 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7078 + "Extra frags on rings 0", pdev->r0_frags); 7079 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7080 + "Extra frags on rings 1", pdev->r1_frags); 7081 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7082 + "Extra frags on rings 2", pdev->r2_frags); 7083 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7084 + "Extra frags on rings 3", pdev->r3_frags); 7085 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7086 + "MSDUs delivered to HTT", pdev->htt_msdus); 7087 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7088 + "MPDUs delivered to HTT", pdev->htt_mpdus); 7089 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7090 + "MSDUs delivered to stack", pdev->loc_msdus); 7091 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7092 + "MPDUs delivered to stack", pdev->loc_mpdus); 7093 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7094 + "Oversized AMSUs", pdev->oversize_amsdu); 7095 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7096 + "PHY errors", pdev->phy_errs); 7097 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7098 + "PHY errors drops", pdev->phy_err_drop); 7099 + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7100 + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); 7101 + *length = len; 7102 + } 7103 + 7104 + static void 7105 + ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar, 7106 + struct ath12k_fw_stats *fw_stats, 7107 + char *buf, u32 *length) 7108 + { 7109 + const struct ath12k_fw_stats_pdev *pdev; 7110 + u32 len = *length; 7111 + 7112 + pdev = list_first_entry_or_null(&fw_stats->pdevs, 7113 + struct ath12k_fw_stats_pdev, list); 7114 + if (!pdev) { 7115 + ath12k_warn(ar->ab, "failed to get pdev stats\n"); 7116 + return; 7117 + } 7118 + 7119 + ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len, 7120 + ar->ab->fw_soc_drop_count); 
7121 + ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len); 7122 + ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len); 7123 + 7124 + *length = len; 7125 + } 7126 + 7127 + void ath12k_wmi_fw_stats_dump(struct ath12k *ar, 7128 + struct ath12k_fw_stats *fw_stats, 7129 + u32 stats_id, char *buf) 7130 + { 7131 + u32 len = 0; 7132 + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7133 + 7134 + spin_lock_bh(&ar->data_lock); 7135 + 7136 + switch (stats_id) { 7137 + case WMI_REQUEST_VDEV_STAT: 7138 + ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len); 7139 + break; 7140 + case WMI_REQUEST_BCN_STAT: 7141 + ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len); 7142 + break; 7143 + case WMI_REQUEST_PDEV_STAT: 7144 + ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len); 7145 + break; 7146 + default: 7147 + break; 7148 + } 7149 + 7150 + spin_unlock_bh(&ar->data_lock); 7151 + 7152 + if (len >= buf_len) 7153 + buf[len - 1] = 0; 7154 + else 7155 + buf[len] = 0; 7156 + 7157 + ath12k_debugfs_fw_stats_reset(ar); 7158 + } 7159 + 7160 + static void 7161 + ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src, 7162 + struct ath12k_fw_stats_vdev *dst) 7163 + { 7164 + int i; 7165 + 7166 + dst->vdev_id = le32_to_cpu(src->vdev_id); 7167 + dst->beacon_snr = le32_to_cpu(src->beacon_snr); 7168 + dst->data_snr = le32_to_cpu(src->data_snr); 7169 + dst->num_rx_frames = le32_to_cpu(src->num_rx_frames); 7170 + dst->num_rts_fail = le32_to_cpu(src->num_rts_fail); 7171 + dst->num_rts_success = le32_to_cpu(src->num_rts_success); 7172 + dst->num_rx_err = le32_to_cpu(src->num_rx_err); 7173 + dst->num_rx_discard = le32_to_cpu(src->num_rx_discard); 7174 + dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked); 7175 + 7176 + for (i = 0; i < WLAN_MAX_AC; i++) 7177 + dst->num_tx_frames[i] = 7178 + le32_to_cpu(src->num_tx_frames[i]); 7179 + 7180 + for (i = 0; i < WLAN_MAX_AC; i++) 7181 + dst->num_tx_frames_retries[i] = 7182 + le32_to_cpu(src->num_tx_frames_retries[i]); 7183 + 7184 + for (i = 0; i < 
WLAN_MAX_AC; i++) 7185 + dst->num_tx_frames_failures[i] = 7186 + le32_to_cpu(src->num_tx_frames_failures[i]); 7187 + 7188 + for (i = 0; i < MAX_TX_RATE_VALUES; i++) 7189 + dst->tx_rate_history[i] = 7190 + le32_to_cpu(src->tx_rate_history[i]); 7191 + 7192 + for (i = 0; i < MAX_TX_RATE_VALUES; i++) 7193 + dst->beacon_rssi_history[i] = 7194 + le32_to_cpu(src->beacon_rssi_history[i]); 7195 + } 7196 + 7197 + static void 7198 + ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src, 7199 + struct ath12k_fw_stats_bcn *dst) 7200 + { 7201 + dst->vdev_id = le32_to_cpu(src->vdev_id); 7202 + dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt); 7203 + dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt); 7204 + } 7205 + 7206 + static void 7207 + ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src, 7208 + struct ath12k_fw_stats_pdev *dst) 7209 + { 7210 + dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf); 7211 + dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 7212 + dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 7213 + dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); 7214 + dst->cycle_count = __le32_to_cpu(src->cycle_count); 7215 + dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 7216 + dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 7217 + } 7218 + 7219 + static void 7220 + ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src, 7221 + struct ath12k_fw_stats_pdev *dst) 7222 + { 7223 + dst->comp_queued = a_sle32_to_cpu(src->comp_queued); 7224 + dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered); 7225 + dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued); 7226 + dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued); 7227 + dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop); 7228 + dst->local_enqued = a_sle32_to_cpu(src->local_enqued); 7229 + dst->local_freed = a_sle32_to_cpu(src->local_freed); 7230 + dst->hw_queued = a_sle32_to_cpu(src->hw_queued); 
7231 + dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped); 7232 + dst->underrun = a_sle32_to_cpu(src->underrun); 7233 + dst->tx_abort = a_sle32_to_cpu(src->tx_abort); 7234 + dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed); 7235 + dst->tx_ko = __le32_to_cpu(src->tx_ko); 7236 + dst->data_rc = __le32_to_cpu(src->data_rc); 7237 + dst->self_triggers = __le32_to_cpu(src->self_triggers); 7238 + dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 7239 + dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 7240 + dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 7241 + dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 7242 + dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 7243 + dst->stateless_tid_alloc_failure = 7244 + __le32_to_cpu(src->stateless_tid_alloc_failure); 7245 + dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 7246 + dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 7247 + } 7248 + 7249 + static void 7250 + ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src, 7251 + struct ath12k_fw_stats_pdev *dst) 7252 + { 7253 + dst->mid_ppdu_route_change = 7254 + a_sle32_to_cpu(src->mid_ppdu_route_change); 7255 + dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd); 7256 + dst->r0_frags = a_sle32_to_cpu(src->r0_frags); 7257 + dst->r1_frags = a_sle32_to_cpu(src->r1_frags); 7258 + dst->r2_frags = a_sle32_to_cpu(src->r2_frags); 7259 + dst->r3_frags = a_sle32_to_cpu(src->r3_frags); 7260 + dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus); 7261 + dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus); 7262 + dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus); 7263 + dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus); 7264 + dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu); 7265 + dst->phy_errs = a_sle32_to_cpu(src->phy_errs); 7266 + dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop); 7267 + dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs); 7268 + } 7269 + 7270 + static int 
ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab, 7271 + struct wmi_tlv_fw_stats_parse *parse, 7272 + const void *ptr, 7273 + u16 len) 7274 + { 7275 + const struct wmi_stats_event *ev = parse->ev; 7276 + struct ath12k_fw_stats stats = {0}; 7277 + struct ath12k *ar; 7278 + struct ath12k_link_vif *arvif; 7279 + struct ieee80211_sta *sta; 7280 + struct ath12k_sta *ahsta; 7281 + struct ath12k_link_sta *arsta; 7282 + int i, ret = 0; 7283 + const void *data = ptr; 7284 + 7285 + INIT_LIST_HEAD(&stats.vdevs); 7286 + INIT_LIST_HEAD(&stats.bcn); 7287 + INIT_LIST_HEAD(&stats.pdevs); 7288 + 7289 + if (!ev) { 7290 + ath12k_warn(ab, "failed to fetch update stats ev"); 7291 + return -EPROTO; 7292 + } 7293 + 7294 + rcu_read_lock(); 7295 + 7296 + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); 7297 + if (!ar) { 7298 + ath12k_warn(ab, "invalid pdev id %d in update stats event\n", 7299 + le32_to_cpu(ev->pdev_id)); 7300 + ret = -EPROTO; 7301 + goto exit; 7302 + } 7303 + 7304 + for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) { 7305 + const struct wmi_vdev_stats_params *src; 7306 + struct ath12k_fw_stats_vdev *dst; 7307 + 7308 + src = data; 7309 + if (len < sizeof(*src)) { 7310 + ret = -EPROTO; 7311 + goto exit; 7312 + } 7313 + 7314 + arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id)); 7315 + if (arvif) { 7316 + sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 7317 + arvif->bssid, 7318 + NULL); 7319 + if (sta) { 7320 + ahsta = ath12k_sta_to_ahsta(sta); 7321 + arsta = &ahsta->deflink; 7322 + arsta->rssi_beacon = le32_to_cpu(src->beacon_snr); 7323 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7324 + "wmi stats vdev id %d snr %d\n", 7325 + src->vdev_id, src->beacon_snr); 7326 + } else { 7327 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7328 + "not found station bssid %pM for vdev stat\n", 7329 + arvif->bssid); 7330 + } 7331 + } 7332 + 7333 + data += sizeof(*src); 7334 + len -= sizeof(*src); 7335 + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 7336 + if (!dst) 7337 + 
continue; 7338 + ath12k_wmi_pull_vdev_stats(src, dst); 7339 + stats.stats_id = WMI_REQUEST_VDEV_STAT; 7340 + list_add_tail(&dst->list, &stats.vdevs); 7341 + } 7342 + for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) { 7343 + const struct ath12k_wmi_bcn_stats_params *src; 7344 + struct ath12k_fw_stats_bcn *dst; 7345 + 7346 + src = data; 7347 + if (len < sizeof(*src)) { 7348 + ret = -EPROTO; 7349 + goto exit; 7350 + } 7351 + 7352 + data += sizeof(*src); 7353 + len -= sizeof(*src); 7354 + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 7355 + if (!dst) 7356 + continue; 7357 + ath12k_wmi_pull_bcn_stats(src, dst); 7358 + stats.stats_id = WMI_REQUEST_BCN_STAT; 7359 + list_add_tail(&dst->list, &stats.bcn); 7360 + } 7361 + for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) { 7362 + const struct ath12k_wmi_pdev_stats_params *src; 7363 + struct ath12k_fw_stats_pdev *dst; 7364 + 7365 + src = data; 7366 + if (len < sizeof(*src)) { 7367 + ret = -EPROTO; 7368 + goto exit; 7369 + } 7370 + 7371 + stats.stats_id = WMI_REQUEST_PDEV_STAT; 7372 + 7373 + data += sizeof(*src); 7374 + len -= sizeof(*src); 7375 + 7376 + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); 7377 + if (!dst) 7378 + continue; 7379 + 7380 + ath12k_wmi_pull_pdev_stats_base(&src->base, dst); 7381 + ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst); 7382 + ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst); 7383 + list_add_tail(&dst->list, &stats.pdevs); 7384 + } 7385 + 7386 + complete(&ar->fw_stats_complete); 7387 + ath12k_debugfs_fw_stats_process(ar, &stats); 7388 + exit: 7389 + rcu_read_unlock(); 7390 + return ret; 7391 + } 7392 + 7393 + static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab, 7394 + u16 tag, u16 len, 7395 + const void *ptr, void *data) 7396 + { 7397 + struct wmi_tlv_fw_stats_parse *parse = data; 7398 + int ret = 0; 7399 + 7400 + switch (tag) { 7401 + case WMI_TAG_STATS_EVENT: 7402 + parse->ev = ptr; 7403 + break; 7404 + case WMI_TAG_ARRAY_BYTE: 7405 + ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, 
len); 7406 + break; 7407 + default: 7408 + break; 7409 + } 7410 + return ret; 7411 + } 7412 + 6907 7413 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) 6908 7414 { 7415 + int ret; 7416 + struct wmi_tlv_fw_stats_parse parse = {}; 7417 + 7418 + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7419 + ath12k_wmi_tlv_fw_stats_parse, 7420 + &parse); 7421 + if (ret) 7422 + ath12k_warn(ab, "failed to parse fw stats %d\n", ret); 6909 7423 } 6910 7424 6911 7425 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned ··· 7527 6889 const struct ath12k_wmi_pdev_csa_event *ev, 7528 6890 const u32 *vdev_ids) 7529 6891 { 7530 - int i; 6892 + u32 current_switch_count = le32_to_cpu(ev->current_switch_count); 6893 + u32 num_vdevs = le32_to_cpu(ev->num_vdevs); 7531 6894 struct ieee80211_bss_conf *conf; 7532 6895 struct ath12k_link_vif *arvif; 7533 6896 struct ath12k_vif *ahvif; 7534 - 7535 - /* Finish CSA once the switch count becomes NULL */ 7536 - if (ev->current_switch_count) 7537 - return; 6897 + int i; 7538 6898 7539 6899 rcu_read_lock(); 7540 - for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) { 6900 + for (i = 0; i < num_vdevs; i++) { 7541 6901 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); 7542 6902 7543 6903 if (!arvif) { ··· 7558 6922 continue; 7559 6923 } 7560 6924 7561 - if (arvif->is_up && conf->csa_active) 7562 - ieee80211_csa_finish(ahvif->vif, 0); 6925 + if (!arvif->is_up || !conf->csa_active) 6926 + continue; 6927 + 6928 + /* Finish CSA when counter reaches zero */ 6929 + if (!current_switch_count) { 6930 + ieee80211_csa_finish(ahvif->vif, arvif->link_id); 6931 + arvif->current_cntdown_counter = 0; 6932 + } else if (current_switch_count > 1) { 6933 + /* If the count in event is not what we expect, don't update the 6934 + * mac80211 count. 
Since during beacon Tx failure, count in the 6935 + * firmware will not decrement and this event will come with the 6936 + * previous count value again 6937 + */ 6938 + if (current_switch_count != arvif->current_cntdown_counter) 6939 + continue; 6940 + 6941 + arvif->current_cntdown_counter = 6942 + ieee80211_beacon_update_cntdwn(ahvif->vif, 6943 + arvif->link_id); 6944 + } 7563 6945 } 7564 6946 rcu_read_unlock(); 7565 6947 } ··· 7678 7024 rcu_read_unlock(); 7679 7025 7680 7026 kfree(tb); 7027 + } 7028 + 7029 + static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id, 7030 + struct sk_buff *skb) 7031 + { 7032 + const struct ath12k_wmi_ftm_event *ev; 7033 + const void **tb; 7034 + int ret; 7035 + u16 length; 7036 + 7037 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7038 + 7039 + if (IS_ERR(tb)) { 7040 + ret = PTR_ERR(tb); 7041 + ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret); 7042 + return; 7043 + } 7044 + 7045 + ev = tb[WMI_TAG_ARRAY_BYTE]; 7046 + if (!ev) { 7047 + ath12k_warn(ab, "failed to fetch ftm msg\n"); 7048 + kfree(tb); 7049 + return; 7050 + } 7051 + 7052 + length = skb->len - TLV_HDR_SIZE; 7053 + ath12k_tm_process_event(ab, cmd_id, ev, length); 7054 + kfree(tb); 7055 + tb = NULL; 7681 7056 } 7682 7057 7683 7058 static void ··· 8129 7446 kfree(tb); 8130 7447 } 8131 7448 7449 + #ifdef CONFIG_ATH12K_DEBUGFS 7450 + static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab, 7451 + const void *ptr, u16 tag, u16 len, 7452 + struct wmi_tpc_stats_arg *tpc_stats) 7453 + { 7454 + u32 len1, len2, len3, len4; 7455 + s16 *dst_ptr; 7456 + s8 *dst_ptr_ctl; 7457 + 7458 + len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len); 7459 + len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len); 7460 + len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len); 7461 + len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len); 7462 + 7463 + switch 
(tpc_stats->event_count) { 7464 + case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT: 7465 + if (len1 > len) 7466 + return -ENOBUFS; 7467 + 7468 + if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) { 7469 + dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array; 7470 + memcpy(dst_ptr, ptr, len1); 7471 + } 7472 + break; 7473 + case ATH12K_TPC_STATS_RATES_EVENT1: 7474 + if (len2 > len) 7475 + return -ENOBUFS; 7476 + 7477 + if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) { 7478 + dst_ptr = tpc_stats->rates_array1.rate_array; 7479 + memcpy(dst_ptr, ptr, len2); 7480 + } 7481 + break; 7482 + case ATH12K_TPC_STATS_RATES_EVENT2: 7483 + if (len3 > len) 7484 + return -ENOBUFS; 7485 + 7486 + if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) { 7487 + dst_ptr = tpc_stats->rates_array2.rate_array; 7488 + memcpy(dst_ptr, ptr, len3); 7489 + } 7490 + break; 7491 + case ATH12K_TPC_STATS_CTL_TABLE_EVENT: 7492 + if (len4 > len) 7493 + return -ENOBUFS; 7494 + 7495 + if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { 7496 + dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table; 7497 + memcpy(dst_ptr_ctl, ptr, len4); 7498 + } 7499 + break; 7500 + } 7501 + return 0; 7502 + } 7503 + 7504 + static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab, 7505 + struct wmi_tpc_stats_arg *tpc_stats, 7506 + struct wmi_max_reg_power_fixed_params *ev) 7507 + { 7508 + struct wmi_max_reg_power_allowed_arg *reg_pwr; 7509 + u32 total_size; 7510 + 7511 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7512 + "Received reg power array type %d length %d for tpc stats\n", 7513 + ev->reg_power_type, ev->reg_array_len); 7514 + 7515 + switch (le32_to_cpu(ev->reg_power_type)) { 7516 + case TPC_STATS_REG_PWR_ALLOWED_TYPE: 7517 + reg_pwr = &tpc_stats->max_reg_allowed_power; 7518 + break; 7519 + default: 7520 + return -EINVAL; 7521 + } 7522 + 7523 + /* Each entry is 2 byte hence multiplying the indices with 2 */ 7524 + total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * 7525 + le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2; 7526 + if 
(le32_to_cpu(ev->reg_array_len) != total_size) { 7527 + ath12k_warn(ab, 7528 + "Total size and reg_array_len doesn't match for tpc stats\n"); 7529 + return -EINVAL; 7530 + } 7531 + 7532 + memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params)); 7533 + 7534 + reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len), 7535 + GFP_ATOMIC); 7536 + if (!reg_pwr->reg_pwr_array) 7537 + return -ENOMEM; 7538 + 7539 + tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED; 7540 + 7541 + return 0; 7542 + } 7543 + 7544 + static int ath12k_tpc_get_rate_array(struct ath12k_base *ab, 7545 + struct wmi_tpc_stats_arg *tpc_stats, 7546 + struct wmi_tpc_rates_array_fixed_params *ev) 7547 + { 7548 + struct wmi_tpc_rates_array_arg *rates_array; 7549 + u32 flag = 0, rate_array_len; 7550 + 7551 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7552 + "Received rates array type %d length %d for tpc stats\n", 7553 + ev->rate_array_type, ev->rate_array_len); 7554 + 7555 + switch (le32_to_cpu(ev->rate_array_type)) { 7556 + case ATH12K_TPC_STATS_RATES_ARRAY1: 7557 + rates_array = &tpc_stats->rates_array1; 7558 + flag = WMI_TPC_RATES_ARRAY1; 7559 + break; 7560 + case ATH12K_TPC_STATS_RATES_ARRAY2: 7561 + rates_array = &tpc_stats->rates_array2; 7562 + flag = WMI_TPC_RATES_ARRAY2; 7563 + break; 7564 + default: 7565 + ath12k_warn(ab, 7566 + "Received invalid type of rates array for tpc stats\n"); 7567 + return -EINVAL; 7568 + } 7569 + memcpy(&rates_array->tpc_rates_array, ev, 7570 + sizeof(struct wmi_tpc_rates_array_fixed_params)); 7571 + rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len); 7572 + rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC); 7573 + if (!rates_array->rate_array) 7574 + return -ENOMEM; 7575 + 7576 + tpc_stats->tlvs_rcvd |= flag; 7577 + return 0; 7578 + } 7579 + 7580 + static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab, 7581 + struct wmi_tpc_stats_arg *tpc_stats, 7582 + struct wmi_tpc_ctl_pwr_fixed_params *ev) 
7583 + { 7584 + struct wmi_tpc_ctl_pwr_table_arg *ctl_array; 7585 + u32 total_size, ctl_array_len, flag = 0; 7586 + 7587 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7588 + "Received ctl array type %d length %d for tpc stats\n", 7589 + ev->ctl_array_type, ev->ctl_array_len); 7590 + 7591 + switch (le32_to_cpu(ev->ctl_array_type)) { 7592 + case ATH12K_TPC_STATS_CTL_ARRAY: 7593 + ctl_array = &tpc_stats->ctl_array; 7594 + flag = WMI_TPC_CTL_PWR_ARRAY; 7595 + break; 7596 + default: 7597 + ath12k_warn(ab, 7598 + "Received invalid type of ctl pwr table for tpc stats\n"); 7599 + return -EINVAL; 7600 + } 7601 + 7602 + total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * 7603 + le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4); 7604 + if (le32_to_cpu(ev->ctl_array_len) != total_size) { 7605 + ath12k_warn(ab, 7606 + "Total size and ctl_array_len doesn't match for tpc stats\n"); 7607 + return -EINVAL; 7608 + } 7609 + 7610 + memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params)); 7611 + ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len); 7612 + ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC); 7613 + if (!ctl_array->ctl_pwr_table) 7614 + return -ENOMEM; 7615 + 7616 + tpc_stats->tlvs_rcvd |= flag; 7617 + return 0; 7618 + } 7619 + 7620 + static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab, 7621 + u16 tag, u16 len, 7622 + const void *ptr, void *data) 7623 + { 7624 + struct wmi_tpc_rates_array_fixed_params *tpc_rates_array; 7625 + struct wmi_max_reg_power_fixed_params *tpc_reg_pwr; 7626 + struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr; 7627 + struct wmi_tpc_stats_arg *tpc_stats = data; 7628 + struct wmi_tpc_config_params *tpc_config; 7629 + int ret = 0; 7630 + 7631 + if (!tpc_stats) { 7632 + ath12k_warn(ab, "tpc stats memory unavailable\n"); 7633 + return -EINVAL; 7634 + } 7635 + 7636 + switch (tag) { 7637 + case WMI_TAG_TPC_STATS_CONFIG_EVENT: 7638 + tpc_config = (struct wmi_tpc_config_params *)ptr; 7639 + 
memcpy(&tpc_stats->tpc_config, tpc_config, 7640 + sizeof(struct wmi_tpc_config_params)); 7641 + break; 7642 + case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED: 7643 + tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr; 7644 + ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr); 7645 + break; 7646 + case WMI_TAG_TPC_STATS_RATES_ARRAY: 7647 + tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr; 7648 + ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array); 7649 + break; 7650 + case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT: 7651 + tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr; 7652 + ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr); 7653 + break; 7654 + default: 7655 + ath12k_warn(ab, 7656 + "Received invalid tag for tpc stats in subtlvs\n"); 7657 + return -EINVAL; 7658 + } 7659 + return ret; 7660 + } 7661 + 7662 + static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab, 7663 + u16 tag, u16 len, 7664 + const void *ptr, void *data) 7665 + { 7666 + struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data; 7667 + int ret; 7668 + 7669 + switch (tag) { 7670 + case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM: 7671 + ret = 0; 7672 + /* Fixed param is already processed*/ 7673 + break; 7674 + case WMI_TAG_ARRAY_STRUCT: 7675 + /* len 0 is expected for array of struct when there 7676 + * is no content of that type to pack inside that tlv 7677 + */ 7678 + if (len == 0) 7679 + return 0; 7680 + ret = ath12k_wmi_tlv_iter(ab, ptr, len, 7681 + ath12k_wmi_tpc_stats_subtlv_parser, 7682 + tpc_stats); 7683 + break; 7684 + case WMI_TAG_ARRAY_INT16: 7685 + if (len == 0) 7686 + return 0; 7687 + ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 7688 + WMI_TAG_ARRAY_INT16, 7689 + len, tpc_stats); 7690 + break; 7691 + case WMI_TAG_ARRAY_BYTE: 7692 + if (len == 0) 7693 + return 0; 7694 + ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 7695 + WMI_TAG_ARRAY_BYTE, 7696 + len, tpc_stats); 7697 + break; 7698 + default: 7699 + 
ath12k_warn(ab, "Received invalid tag for tpc stats\n"); 7700 + ret = -EINVAL; 7701 + break; 7702 + } 7703 + return ret; 7704 + } 7705 + 7706 + void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar) 7707 + { 7708 + struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; 7709 + 7710 + lockdep_assert_held(&ar->data_lock); 7711 + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n"); 7712 + if (tpc_stats) { 7713 + kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array); 7714 + kfree(tpc_stats->rates_array1.rate_array); 7715 + kfree(tpc_stats->rates_array2.rate_array); 7716 + kfree(tpc_stats->ctl_array.ctl_pwr_table); 7717 + kfree(tpc_stats); 7718 + ar->debug.tpc_stats = NULL; 7719 + } 7720 + } 7721 + 7722 + static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, 7723 + struct sk_buff *skb) 7724 + { 7725 + struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param; 7726 + struct wmi_tpc_stats_arg *tpc_stats; 7727 + const struct wmi_tlv *tlv; 7728 + void *ptr = skb->data; 7729 + struct ath12k *ar; 7730 + u16 tlv_tag; 7731 + u32 event_count; 7732 + int ret; 7733 + 7734 + if (!skb->data) { 7735 + ath12k_warn(ab, "No data present in tpc stats event\n"); 7736 + return; 7737 + } 7738 + 7739 + if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) { 7740 + ath12k_warn(ab, "TPC stats event size invalid\n"); 7741 + return; 7742 + } 7743 + 7744 + tlv = (struct wmi_tlv *)ptr; 7745 + tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG); 7746 + ptr += sizeof(*tlv); 7747 + 7748 + if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) { 7749 + ath12k_warn(ab, "TPC stats without fixed param tlv at start\n"); 7750 + return; 7751 + } 7752 + 7753 + fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr; 7754 + rcu_read_lock(); 7755 + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1); 7756 + if (!ar) { 7757 + ath12k_warn(ab, "Failed to get ar for tpc stats\n"); 7758 + rcu_read_unlock(); 7759 + return; 7760 + } 7761 + 
spin_lock_bh(&ar->data_lock); 7762 + if (!ar->debug.tpc_request) { 7763 + /* Event is received either without request or the 7764 + * timeout, if memory is already allocated free it 7765 + */ 7766 + if (ar->debug.tpc_stats) { 7767 + ath12k_warn(ab, "Freeing memory for tpc_stats\n"); 7768 + ath12k_wmi_free_tpc_stats_mem(ar); 7769 + } 7770 + goto unlock; 7771 + } 7772 + 7773 + event_count = le32_to_cpu(fixed_param->event_count); 7774 + if (event_count == 0) { 7775 + if (ar->debug.tpc_stats) { 7776 + ath12k_warn(ab, 7777 + "Invalid tpc memory present\n"); 7778 + goto unlock; 7779 + } 7780 + ar->debug.tpc_stats = 7781 + kzalloc(sizeof(struct wmi_tpc_stats_arg), 7782 + GFP_ATOMIC); 7783 + if (!ar->debug.tpc_stats) { 7784 + ath12k_warn(ab, 7785 + "Failed to allocate memory for tpc stats\n"); 7786 + goto unlock; 7787 + } 7788 + } 7789 + 7790 + tpc_stats = ar->debug.tpc_stats; 7791 + if (!tpc_stats) { 7792 + ath12k_warn(ab, "tpc stats memory unavailable\n"); 7793 + goto unlock; 7794 + } 7795 + 7796 + if (!(event_count == 0)) { 7797 + if (event_count != tpc_stats->event_count + 1) { 7798 + ath12k_warn(ab, 7799 + "Invalid tpc event received\n"); 7800 + goto unlock; 7801 + } 7802 + } 7803 + tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id); 7804 + tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event); 7805 + tpc_stats->event_count = le32_to_cpu(fixed_param->event_count); 7806 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7807 + "tpc stats event_count %d\n", 7808 + tpc_stats->event_count); 7809 + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7810 + ath12k_wmi_tpc_stats_event_parser, 7811 + tpc_stats); 7812 + if (ret) { 7813 + ath12k_wmi_free_tpc_stats_mem(ar); 7814 + ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret); 7815 + goto unlock; 7816 + } 7817 + 7818 + if (tpc_stats->end_of_event) 7819 + complete(&ar->debug.tpc_complete); 7820 + 7821 + unlock: 7822 + spin_unlock_bh(&ar->data_lock); 7823 + rcu_read_unlock(); 7824 + } 7825 + #else 7826 + static void 
ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, 7827 + struct sk_buff *skb) 7828 + { 7829 + } 7830 + #endif 7831 + 8132 7832 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 8133 7833 { 8134 7834 struct wmi_cmd_hdr *cmd_hdr; ··· 8637 7571 case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: 8638 7572 ath12k_wmi_event_teardown_complete(ab, skb); 8639 7573 break; 7574 + case WMI_HALPHY_STATS_CTRL_PATH_EVENTID: 7575 + ath12k_wmi_process_tpc_stats(ab, skb); 7576 + break; 8640 7577 /* add Unsupported events (rare) here */ 8641 7578 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 8642 7579 case WMI_PEER_OPER_MODE_CHANGE_EVENTID: ··· 8653 7584 case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: 8654 7585 /* debug might flood hence silently ignore (no-op) */ 8655 7586 break; 8656 - /* TODO: Add remaining events */ 7587 + case WMI_PDEV_UTF_EVENTID: 7588 + if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags)) 7589 + ath12k_tm_wmi_event_segmented(ab, id, skb); 7590 + else 7591 + ath12k_tm_wmi_event_unsegmented(ab, id, skb); 7592 + break; 8657 7593 default: 8658 7594 ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); 8659 7595 break; ··· 8793 7719 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 8794 7720 8795 7721 return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); 7722 + } 7723 + 7724 + int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, 7725 + enum wmi_halphy_ctrl_path_stats_id tpc_stats_type) 7726 + { 7727 + struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd; 7728 + struct ath12k_wmi_pdev *wmi = ar->wmi; 7729 + struct sk_buff *skb; 7730 + struct wmi_tlv *tlv; 7731 + __le32 *pdev_id; 7732 + u32 buf_len; 7733 + void *ptr; 7734 + int ret; 7735 + 7736 + buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE; 7737 + 7738 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 7739 + if (!skb) 7740 + return -ENOMEM; 7741 + cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params 
*)skb->data; 7742 + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM, 7743 + sizeof(*cmd)); 7744 + 7745 + cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT); 7746 + cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET); 7747 + cmd->subid = cpu_to_le32(tpc_stats_type); 7748 + 7749 + ptr = skb->data + sizeof(*cmd); 7750 + 7751 + /* The below TLV arrays optionally follow this fixed param TLV structure 7752 + * 1. ARRAY_UINT32 pdev_ids[] 7753 + * If this array is present and non-zero length, stats should only 7754 + * be provided from the pdevs identified in the array. 7755 + * 2. ARRAY_UNIT32 vdev_ids[] 7756 + * If this array is present and non-zero length, stats should only 7757 + * be provided from the vdevs identified in the array. 7758 + * 3. ath12k_wmi_mac_addr_params peer_macaddr[]; 7759 + * If this array is present and non-zero length, stats should only 7760 + * be provided from the peers with the MAC addresses specified 7761 + * in the array 7762 + */ 7763 + tlv = ptr; 7764 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); 7765 + ptr += TLV_HDR_SIZE; 7766 + 7767 + pdev_id = ptr; 7768 + *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar)); 7769 + ptr += sizeof(*pdev_id); 7770 + 7771 + tlv = ptr; 7772 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 7773 + ptr += TLV_HDR_SIZE; 7774 + 7775 + tlv = ptr; 7776 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0); 7777 + ptr += TLV_HDR_SIZE; 7778 + 7779 + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID); 7780 + if (ret) { 7781 + ath12k_warn(ar->ab, 7782 + "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n"); 7783 + dev_kfree_skb(skb); 7784 + return ret; 7785 + } 7786 + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n", 7787 + ar->pdev->pdev_id); 7788 + 7789 + return ret; 8796 7790 } 8797 7791 8798 7792 int ath12k_wmi_connect(struct ath12k_base *ab)
+288 -2
drivers/net/wireless/ath/ath12k/wmi.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_WMI_H ··· 25 25 struct ath12k_base; 26 26 struct ath12k; 27 27 struct ath12k_link_vif; 28 + struct ath12k_fw_stats; 28 29 29 30 /* There is no signed version of __le32, so for a temporary solution come 30 31 * up with our own version. The idea is from fs/ntfs/endian.h. ··· 517 516 WMI_REQUEST_RCPI_CMDID, 518 517 WMI_REQUEST_PEER_STATS_INFO_CMDID, 519 518 WMI_REQUEST_RADIO_CHAN_STATS_CMDID, 519 + WMI_REQUEST_WLM_STATS_CMDID, 520 + WMI_REQUEST_CTRL_PATH_STATS_CMDID, 521 + WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3, 520 522 WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL), 521 523 WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, 522 524 WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, ··· 789 785 WMI_UPDATE_RCPI_EVENTID, 790 786 WMI_PEER_STATS_INFO_EVENTID, 791 787 WMI_RADIO_CHAN_STATS_EVENTID, 788 + WMI_WLM_STATS_EVENTID, 789 + WMI_CTRL_PATH_STATS_EVENTID, 790 + WMI_HALPHY_STATS_CTRL_PATH_EVENTID, 792 791 WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL), 793 792 WMI_NLO_SCAN_COMPLETE_EVENTID, 794 793 WMI_APFIND_EVENTID, ··· 1198 1191 WMI_TAG_ARRAY_BYTE, 1199 1192 WMI_TAG_ARRAY_STRUCT, 1200 1193 WMI_TAG_ARRAY_FIXED_STRUCT, 1194 + WMI_TAG_ARRAY_INT16, 1201 1195 WMI_TAG_LAST_ARRAY_ENUM = 31, 1202 1196 WMI_TAG_SERVICE_READY_EVENT, 1203 1197 WMI_TAG_HAL_REG_CAPABILITIES, ··· 1949 1941 WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F, 1950 1942 WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, 1951 1943 WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, 1944 + WMI_TAG_TPC_STATS_GET_CMD = 0x38B, 1945 + WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM, 1946 + WMI_TAG_TPC_STATS_CONFIG_EVENT, 1947 + WMI_TAG_TPC_STATS_REG_PWR_ALLOWED, 1948 + 
WMI_TAG_TPC_STATS_RATES_ARRAY, 1949 + WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT, 1952 1950 WMI_TAG_EHT_RATE_SET = 0x3C4, 1953 1951 WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, 1954 1952 WMI_TAG_MLO_TX_SEND_PARAMS, ··· 1972 1958 WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, 1973 1959 WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9, 1974 1960 WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB, 1961 + WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442, 1962 + WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM, 1975 1963 WMI_TAG_MAX 1976 1964 }; 1977 1965 ··· 3640 3624 struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS]; 3641 3625 } __packed; 3642 3626 3627 + #define MAX_WMI_UTF_LEN 252 3628 + 3629 + struct ath12k_wmi_ftm_seg_hdr_params { 3630 + __le32 len; 3631 + __le32 msgref; 3632 + __le32 segmentinfo; 3633 + __le32 pdev_id; 3634 + } __packed; 3635 + 3636 + struct ath12k_wmi_ftm_cmd { 3637 + __le32 tlv_header; 3638 + struct ath12k_wmi_ftm_seg_hdr_params seg_hdr; 3639 + u8 data[]; 3640 + } __packed; 3641 + 3642 + struct ath12k_wmi_ftm_event { 3643 + struct ath12k_wmi_ftm_seg_hdr_params seg_hdr; 3644 + u8 data[]; 3645 + } __packed; 3646 + 3643 3647 #define WMI_BEACON_TX_BUFFER_SIZE 512 3644 3648 3645 3649 #define WMI_EMA_BEACON_CNT GENMASK(7, 0) ··· 4640 4604 WMI_RATE_PREAMBLE_HT, 4641 4605 WMI_RATE_PREAMBLE_VHT, 4642 4606 WMI_RATE_PREAMBLE_HE, 4607 + WMI_RATE_PREAMBLE_EHT, 4643 4608 }; 4644 4609 4645 4610 /** ··· 5665 5628 #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 5666 5629 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 5667 5630 5631 + struct wmi_stats_event { 5632 + __le32 stats_id; 5633 + __le32 num_pdev_stats; 5634 + __le32 num_vdev_stats; 5635 + __le32 num_peer_stats; 5636 + __le32 num_bcnflt_stats; 5637 + __le32 num_chan_stats; 5638 + __le32 num_mib_stats; 5639 + __le32 pdev_id; 5640 + __le32 num_bcn_stats; 5641 + __le32 num_peer_extd_stats; 5642 + __le32 num_peer_extd2_stats; 5643 + } __packed; 5644 + 5645 + enum wmi_stats_id { 5646 + WMI_REQUEST_PDEV_STAT = BIT(2), 
5647 + WMI_REQUEST_VDEV_STAT = BIT(3), 5648 + WMI_REQUEST_BCN_STAT = BIT(11), 5649 + }; 5650 + 5651 + struct wmi_request_stats_cmd { 5652 + __le32 tlv_header; 5653 + __le32 stats_id; 5654 + __le32 vdev_id; 5655 + struct ath12k_wmi_mac_addr_params peer_macaddr; 5656 + __le32 pdev_id; 5657 + } __packed; 5658 + 5659 + #define WLAN_MAX_AC 4 5660 + #define MAX_TX_RATE_VALUES 10 5661 + 5662 + struct wmi_vdev_stats_params { 5663 + __le32 vdev_id; 5664 + __le32 beacon_snr; 5665 + __le32 data_snr; 5666 + __le32 num_tx_frames[WLAN_MAX_AC]; 5667 + __le32 num_rx_frames; 5668 + __le32 num_tx_frames_retries[WLAN_MAX_AC]; 5669 + __le32 num_tx_frames_failures[WLAN_MAX_AC]; 5670 + __le32 num_rts_fail; 5671 + __le32 num_rts_success; 5672 + __le32 num_rx_err; 5673 + __le32 num_rx_discard; 5674 + __le32 num_tx_not_acked; 5675 + __le32 tx_rate_history[MAX_TX_RATE_VALUES]; 5676 + __le32 beacon_rssi_history[MAX_TX_RATE_VALUES]; 5677 + } __packed; 5678 + 5679 + struct ath12k_wmi_bcn_stats_params { 5680 + __le32 vdev_id; 5681 + __le32 tx_bcn_succ_cnt; 5682 + __le32 tx_bcn_outage_cnt; 5683 + } __packed; 5684 + 5685 + struct ath12k_wmi_pdev_base_stats_params { 5686 + a_sle32 chan_nf; 5687 + __le32 tx_frame_count; /* Cycles spent transmitting frames */ 5688 + __le32 rx_frame_count; /* Cycles spent receiving frames */ 5689 + __le32 rx_clear_count; /* Total channel busy time, evidently */ 5690 + __le32 cycle_count; /* Total on-channel time */ 5691 + __le32 phy_err_count; 5692 + __le32 chan_tx_pwr; 5693 + } __packed; 5694 + 5695 + struct ath12k_wmi_pdev_tx_stats_params { 5696 + a_sle32 comp_queued; 5697 + a_sle32 comp_delivered; 5698 + a_sle32 msdu_enqued; 5699 + a_sle32 mpdu_enqued; 5700 + a_sle32 wmm_drop; 5701 + a_sle32 local_enqued; 5702 + a_sle32 local_freed; 5703 + a_sle32 hw_queued; 5704 + a_sle32 hw_reaped; 5705 + a_sle32 underrun; 5706 + a_sle32 tx_abort; 5707 + a_sle32 mpdus_requed; 5708 + __le32 tx_ko; 5709 + __le32 data_rc; 5710 + __le32 self_triggers; 5711 + __le32 sw_retry_failure; 
5712 + __le32 illgl_rate_phy_err; 5713 + __le32 pdev_cont_xretry; 5714 + __le32 pdev_tx_timeout; 5715 + __le32 pdev_resets; 5716 + __le32 stateless_tid_alloc_failure; 5717 + __le32 phy_underrun; 5718 + __le32 txop_ovf; 5719 + } __packed; 5720 + 5721 + struct ath12k_wmi_pdev_rx_stats_params { 5722 + a_sle32 mid_ppdu_route_change; 5723 + a_sle32 status_rcvd; 5724 + a_sle32 r0_frags; 5725 + a_sle32 r1_frags; 5726 + a_sle32 r2_frags; 5727 + a_sle32 r3_frags; 5728 + a_sle32 htt_msdus; 5729 + a_sle32 htt_mpdus; 5730 + a_sle32 loc_msdus; 5731 + a_sle32 loc_mpdus; 5732 + a_sle32 oversize_amsdu; 5733 + a_sle32 phy_errs; 5734 + a_sle32 phy_err_drop; 5735 + a_sle32 mpdu_errs; 5736 + } __packed; 5737 + 5738 + struct ath12k_wmi_pdev_stats_params { 5739 + struct ath12k_wmi_pdev_base_stats_params base; 5740 + struct ath12k_wmi_pdev_tx_stats_params tx; 5741 + struct ath12k_wmi_pdev_rx_stats_params rx; 5742 + } __packed; 5743 + 5744 + struct ath12k_fw_stats_req_params { 5745 + u32 stats_id; 5746 + u32 vdev_id; 5747 + u32 pdev_id; 5748 + }; 5749 + 5750 + #define WMI_REQ_CTRL_PATH_PDEV_TX_STAT 1 5751 + #define WMI_REQUEST_CTRL_PATH_STAT_GET 1 5752 + 5753 + #define WMI_TPC_CONFIG BIT(1) 5754 + #define WMI_TPC_REG_PWR_ALLOWED BIT(2) 5755 + #define WMI_TPC_RATES_ARRAY1 BIT(3) 5756 + #define WMI_TPC_RATES_ARRAY2 BIT(4) 5757 + #define WMI_TPC_RATES_DL_OFDMA_ARRAY BIT(5) 5758 + #define WMI_TPC_CTL_PWR_ARRAY BIT(6) 5759 + #define WMI_TPC_CONFIG_PARAM 0x1 5760 + #define ATH12K_TPC_RATE_ARRAY_MU GENMASK(15, 8) 5761 + #define ATH12K_TPC_RATE_ARRAY_SU GENMASK(7, 0) 5762 + #define TPC_STATS_REG_PWR_ALLOWED_TYPE 0 5763 + 5764 + enum wmi_halphy_ctrl_path_stats_id { 5765 + WMI_HALPHY_PDEV_TX_SU_STATS = 0, 5766 + WMI_HALPHY_PDEV_TX_SUTXBF_STATS, 5767 + WMI_HALPHY_PDEV_TX_MU_STATS, 5768 + WMI_HALPHY_PDEV_TX_MUTXBF_STATS, 5769 + WMI_HALPHY_PDEV_TX_STATS_MAX, 5770 + }; 5771 + 5772 + enum ath12k_wmi_tpc_stats_rates_array { 5773 + ATH12K_TPC_STATS_RATES_ARRAY1, 5774 + ATH12K_TPC_STATS_RATES_ARRAY2, 5775 
+ }; 5776 + 5777 + enum ath12k_wmi_tpc_stats_ctl_array { 5778 + ATH12K_TPC_STATS_CTL_ARRAY, 5779 + ATH12K_TPC_STATS_CTL_160ARRAY, 5780 + }; 5781 + 5782 + enum ath12k_wmi_tpc_stats_events { 5783 + ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT, 5784 + ATH12K_TPC_STATS_RATES_EVENT1, 5785 + ATH12K_TPC_STATS_RATES_EVENT2, 5786 + ATH12K_TPC_STATS_CTL_TABLE_EVENT 5787 + }; 5788 + 5789 + struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params { 5790 + __le32 tlv_header; 5791 + __le32 stats_id_mask; 5792 + __le32 request_id; 5793 + __le32 action; 5794 + __le32 subid; 5795 + } __packed; 5796 + 5797 + struct ath12k_wmi_pdev_tpc_stats_event_fixed_params { 5798 + __le32 pdev_id; 5799 + __le32 end_of_event; 5800 + __le32 event_count; 5801 + } __packed; 5802 + 5803 + struct wmi_tpc_config_params { 5804 + __le32 reg_domain; 5805 + __le32 chan_freq; 5806 + __le32 phy_mode; 5807 + __le32 twice_antenna_reduction; 5808 + __le32 twice_max_reg_power; 5809 + __le32 twice_antenna_gain; 5810 + __le32 power_limit; 5811 + __le32 rate_max; 5812 + __le32 num_tx_chain; 5813 + __le32 ctl; 5814 + __le32 flags; 5815 + __le32 caps; 5816 + } __packed; 5817 + 5818 + struct wmi_max_reg_power_fixed_params { 5819 + __le32 reg_power_type; 5820 + __le32 reg_array_len; 5821 + __le32 d1; 5822 + __le32 d2; 5823 + __le32 d3; 5824 + __le32 d4; 5825 + } __packed; 5826 + 5827 + struct wmi_max_reg_power_allowed_arg { 5828 + struct wmi_max_reg_power_fixed_params tpc_reg_pwr; 5829 + s16 *reg_pwr_array; 5830 + }; 5831 + 5832 + struct wmi_tpc_rates_array_fixed_params { 5833 + __le32 rate_array_type; 5834 + __le32 rate_array_len; 5835 + } __packed; 5836 + 5837 + struct wmi_tpc_rates_array_arg { 5838 + struct wmi_tpc_rates_array_fixed_params tpc_rates_array; 5839 + s16 *rate_array; 5840 + }; 5841 + 5842 + struct wmi_tpc_ctl_pwr_fixed_params { 5843 + __le32 ctl_array_type; 5844 + __le32 ctl_array_len; 5845 + __le32 end_of_ctl_pwr; 5846 + __le32 ctl_pwr_count; 5847 + __le32 d1; 5848 + __le32 d2; 5849 + __le32 d3; 5850 + 
__le32 d4; 5851 + } __packed; 5852 + 5853 + struct wmi_tpc_ctl_pwr_table_arg { 5854 + struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr; 5855 + s8 *ctl_pwr_table; 5856 + }; 5857 + 5858 + struct wmi_tpc_stats_arg { 5859 + u32 pdev_id; 5860 + u32 event_count; 5861 + u32 end_of_event; 5862 + u32 tlvs_rcvd; 5863 + struct wmi_max_reg_power_allowed_arg max_reg_allowed_power; 5864 + struct wmi_tpc_rates_array_arg rates_array1; 5865 + struct wmi_tpc_rates_array_arg rates_array2; 5866 + struct wmi_tpc_config_params tpc_config; 5867 + struct wmi_tpc_ctl_pwr_table_arg ctl_array; 5868 + }; 5869 + 5668 5870 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, 5669 5871 struct ath12k_wmi_resource_config_arg *config); 5670 5872 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, ··· 5915 5639 struct sk_buff *frame); 5916 5640 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, 5917 5641 const u8 *p2p_ie); 5918 - int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, 5642 + int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif, 5919 5643 struct ieee80211_mutable_offsets *offs, 5920 5644 struct sk_buff *bcn, 5921 5645 struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args); ··· 6029 5753 const u8 *buf, size_t buf_len); 6030 5754 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table); 6031 5755 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table); 5756 + int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, 5757 + u32 vdev_id, u32 pdev_id); 5758 + __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len); 5759 + 5760 + int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, 5761 + enum wmi_halphy_ctrl_path_stats_id tpc_stats_type); 5762 + void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar); 6032 5763 6033 5764 static inline u32 6034 5765 ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param) ··· 6089 5806 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params); 6090 5807 int 
ath12k_wmi_mlo_ready(struct ath12k *ar); 6091 5808 int ath12k_wmi_mlo_teardown(struct ath12k *ar); 5809 + void ath12k_wmi_fw_stats_dump(struct ath12k *ar, 5810 + struct ath12k_fw_stats *fw_stats, u32 stats_id, 5811 + char *buf); 6092 5812 6093 5813 #endif
+2 -1
drivers/net/wireless/ath/ath12k/wow.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2020 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/delay.h> ··· 990 990 case ATH12K_HW_STATE_RESTARTING: 991 991 case ATH12K_HW_STATE_RESTARTED: 992 992 case ATH12K_HW_STATE_WEDGED: 993 + case ATH12K_HW_STATE_TM: 993 994 ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", 994 995 ah->state); 995 996 ret = -EIO;
+1 -2
drivers/net/wireless/ath/ath9k/ath9k.h
··· 274 274 275 275 struct ath_tx_control { 276 276 struct ath_txq *txq; 277 - struct ath_node *an; 278 277 struct ieee80211_sta *sta; 279 278 u8 paprd; 280 279 }; ··· 1017 1018 1018 1019 u8 gtt_cnt; 1019 1020 u32 intrstatus; 1020 - u32 rx_active_check_time; 1021 + unsigned long rx_active_check_time; 1021 1022 u32 rx_active_count; 1022 1023 u16 ps_flags; /* PS_* */ 1023 1024 bool ps_enabled;
+2 -2
drivers/net/wireless/ath/ath9k/common-spectral.c
··· 628 628 else 629 629 RX_STAT_INC(sc, rx_spectral_sample_err); 630 630 631 - memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); 632 - 633 631 /* Mix the received bins to the /dev/random 634 632 * pool 635 633 */ 636 634 add_device_randomness(sample_buf, num_bins); 635 + 636 + memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); 637 637 } 638 638 639 639 /* Process a normal frame */
+3 -1
drivers/net/wireless/ath/ath9k/init.c
··· 647 647 ah->ah_flags |= AH_NO_EEP_SWAP; 648 648 } 649 649 650 - of_get_mac_address(np, common->macaddr); 650 + ret = of_get_mac_address(np, common->macaddr); 651 + if (ret == -EPROBE_DEFER) 652 + return ret; 651 653 652 654 return 0; 653 655 }
-9
drivers/net/wireless/ath/ath9k/xmit.c
··· 2291 2291 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2292 2292 struct ieee80211_sta *sta = txctl->sta; 2293 2293 struct ieee80211_vif *vif = info->control.vif; 2294 - struct ath_vif *avp; 2295 2294 struct ath_softc *sc = hw->priv; 2296 2295 int frmlen = skb->len + FCS_LEN; 2297 2296 int padpos, padsize; 2298 - 2299 - /* NOTE: sta can be NULL according to net/mac80211.h */ 2300 - if (sta) 2301 - txctl->an = (struct ath_node *)sta->drv_priv; 2302 - else if (vif && ieee80211_is_data(hdr->frame_control)) { 2303 - avp = (void *)vif->drv_priv; 2304 - txctl->an = &avp->mcast_node; 2305 - } 2306 2297 2307 2298 if (info->control.hw_key) 2308 2299 frmlen += info->control.hw_key->icv_len;
+66
drivers/net/wireless/ath/testmode_i.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 + /* 3 + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 + */ 6 + 7 + /* "API" level of the ath testmode interface. Bump it after every 8 + * incompatible interface change. 9 + */ 10 + #define ATH_TESTMODE_VERSION_MAJOR 1 11 + 12 + /* Bump this after every _compatible_ interface change, for example 13 + * addition of a new command or an attribute. 14 + */ 15 + #define ATH_TESTMODE_VERSION_MINOR 1 16 + 17 + #define ATH_TM_DATA_MAX_LEN 5000 18 + #define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048 19 + 20 + enum ath_tm_attr { 21 + __ATH_TM_ATTR_INVALID = 0, 22 + ATH_TM_ATTR_CMD = 1, 23 + ATH_TM_ATTR_DATA = 2, 24 + ATH_TM_ATTR_WMI_CMDID = 3, 25 + ATH_TM_ATTR_VERSION_MAJOR = 4, 26 + ATH_TM_ATTR_VERSION_MINOR = 5, 27 + ATH_TM_ATTR_WMI_OP_VERSION = 6, 28 + 29 + /* keep last */ 30 + __ATH_TM_ATTR_AFTER_LAST, 31 + ATH_TM_ATTR_MAX = __ATH_TM_ATTR_AFTER_LAST - 1, 32 + }; 33 + 34 + /* All ath testmode interface commands specified in 35 + * ATH_TM_ATTR_CMD 36 + */ 37 + enum ath_tm_cmd { 38 + /* Returns the supported ath testmode interface version in 39 + * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space 40 + * uses this to verify it's using the correct version of the 41 + * testmode interface 42 + */ 43 + ATH_TM_CMD_GET_VERSION = 0, 44 + 45 + /* The command used to transmit a WMI command to the firmware and 46 + * the event to receive WMI events from the firmware. Without 47 + * struct wmi_cmd_hdr header, only the WMI payload. Command id is 48 + * provided with ATH_TM_ATTR_WMI_CMDID and payload in 49 + * ATH_TM_ATTR_DATA. 50 + */ 51 + ATH_TM_CMD_WMI = 1, 52 + 53 + /* Boots the UTF firmware, the netdev interface must be down at the 54 + * time. 
55 + */ 56 + ATH_TM_CMD_TESTMODE_START = 2, 57 + 58 + /* The command used to transmit a FTM WMI command to the firmware 59 + * and the event to receive WMI events from the firmware. The data 60 + * received only contain the payload, need to add the tlv header 61 + * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID. 62 + * The data payload size could be large and the driver needs to 63 + * send segmented data to firmware. 64 + */ 65 + ATH_TM_CMD_WMI_FTM = 3, 66 + };