Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-2020-09-25' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.10

Second set of patches for v5.10. Biggest change here is wcn3680
support to wcn36xx driver, otherwise smaller features. And naturally
the usual fixes and cleanups.

Major changes:

brcmfmac

* support 4-way handshake offloading for WPA/WPA2-PSK in AP mode

* support SAE authentication offload in AP mode

mt76

* mt7663 runtime power management improvements

* mt7915 A-MSDU offload

wcn36xx

* add support for wcn3680 Wi-Fi 5 devices

ath11k

* spectral scan support for ipq6018
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+4208 -3460
+13 -5
drivers/net/wireless/ath/ath10k/core.c
··· 1022 1022 return 0; 1023 1023 } 1024 1024 1025 - static int ath10k_core_check_dt(struct ath10k *ar) 1025 + int ath10k_core_check_dt(struct ath10k *ar) 1026 1026 { 1027 1027 struct device_node *node; 1028 1028 const char *variant = NULL; ··· 1043 1043 1044 1044 return 0; 1045 1045 } 1046 + EXPORT_SYMBOL(ath10k_core_check_dt); 1046 1047 1047 1048 static int ath10k_download_fw(struct ath10k *ar) 1048 1049 { ··· 1438 1437 } 1439 1438 1440 1439 if (ar->id.qmi_ids_valid) { 1441 - scnprintf(name, name_len, 1442 - "bus=%s,qmi-board-id=%x", 1443 - ath10k_bus_str(ar->hif.bus), 1444 - ar->id.qmi_board_id); 1440 + if (with_variant && ar->id.bdf_ext[0] != '\0') 1441 + scnprintf(name, name_len, 1442 + "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s", 1443 + ath10k_bus_str(ar->hif.bus), 1444 + ar->id.qmi_board_id, ar->id.qmi_chip_id, 1445 + variant); 1446 + else 1447 + scnprintf(name, name_len, 1448 + "bus=%s,qmi-board-id=%x", 1449 + ath10k_bus_str(ar->hif.bus), 1450 + ar->id.qmi_board_id); 1445 1451 goto out; 1446 1452 } 1447 1453
+2
drivers/net/wireless/ath/ath10k/core.h
··· 1076 1076 bool bmi_ids_valid; 1077 1077 bool qmi_ids_valid; 1078 1078 u32 qmi_board_id; 1079 + u32 qmi_chip_id; 1079 1080 u8 bmi_board_id; 1080 1081 u8 bmi_eboard_id; 1081 1082 u8 bmi_chip_id; ··· 1316 1315 const struct ath10k_bus_params *bus_params); 1317 1316 void ath10k_core_unregister(struct ath10k *ar); 1318 1317 int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type); 1318 + int ath10k_core_check_dt(struct ath10k *ar); 1319 1319 void ath10k_core_free_board_files(struct ath10k *ar); 1320 1320 1321 1321 #endif /* _CORE_H_ */
+8
drivers/net/wireless/ath/ath10k/qmi.c
··· 576 576 if (resp->chip_info_valid) { 577 577 qmi->chip_info.chip_id = resp->chip_info.chip_id; 578 578 qmi->chip_info.chip_family = resp->chip_info.chip_family; 579 + } else { 580 + qmi->chip_info.chip_id = 0xFF; 579 581 } 580 582 581 583 if (resp->board_info_valid) ··· 819 817 static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi) 820 818 { 821 819 struct ath10k *ar = qmi->ar; 820 + int ret; 822 821 823 822 ar->hif.bus = ATH10K_BUS_SNOC; 824 823 ar->id.qmi_ids_valid = true; 825 824 ar->id.qmi_board_id = qmi->board_info.board_id; 825 + ar->id.qmi_chip_id = qmi->chip_info.chip_id; 826 826 ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR; 827 + 828 + ret = ath10k_core_check_dt(ar); 829 + if (ret) 830 + ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n"); 827 831 828 832 return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD); 829 833 }
+1 -1
drivers/net/wireless/ath/ath11k/Makefile
··· 18 18 dbring.o \ 19 19 hw.o 20 20 21 - ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o 21 + ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o 22 22 ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o 23 23 ath11k-$(CONFIG_ATH11K_TRACING) += trace.o 24 24 ath11k-$(CONFIG_THERMAL) += thermal.o
+43 -6
drivers/net/wireless/ath/ath11k/ahb.c
··· 323 323 324 324 static int ath11k_ahb_power_up(struct ath11k_base *ab) 325 325 { 326 + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 326 327 int ret; 327 328 328 - ret = rproc_boot(ab->tgt_rproc); 329 + ret = rproc_boot(ab_ahb->tgt_rproc); 329 330 if (ret) 330 331 ath11k_err(ab, "failed to boot the remote processor Q6\n"); 331 332 ··· 335 334 336 335 static void ath11k_ahb_power_down(struct ath11k_base *ab) 337 336 { 338 - rproc_shutdown(ab->tgt_rproc); 337 + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 338 + 339 + rproc_shutdown(ab_ahb->tgt_rproc); 339 340 } 340 341 341 342 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) ··· 603 600 .power_up = ath11k_ahb_power_up, 604 601 }; 605 602 603 + static int ath11k_core_get_rproc(struct ath11k_base *ab) 604 + { 605 + struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 606 + struct device *dev = ab->dev; 607 + struct rproc *prproc; 608 + phandle rproc_phandle; 609 + 610 + if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { 611 + ath11k_err(ab, "failed to get q6_rproc handle\n"); 612 + return -ENOENT; 613 + } 614 + 615 + prproc = rproc_get_by_phandle(rproc_phandle); 616 + if (!prproc) { 617 + ath11k_err(ab, "failed to get rproc\n"); 618 + return -EINVAL; 619 + } 620 + ab_ahb->tgt_rproc = prproc; 621 + 622 + return 0; 623 + } 624 + 606 625 static int ath11k_ahb_probe(struct platform_device *pdev) 607 626 { 608 627 struct ath11k_base *ab; ··· 651 626 return ret; 652 627 } 653 628 654 - ab = ath11k_core_alloc(&pdev->dev, 0, ATH11K_BUS_AHB, &ath11k_ahb_bus_params); 629 + ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb), 630 + ATH11K_BUS_AHB, 631 + &ath11k_ahb_bus_params); 655 632 if (!ab) { 656 633 dev_err(&pdev->dev, "failed to allocate ath11k base\n"); 657 634 return -ENOMEM; ··· 681 654 } 682 655 683 656 ath11k_ahb_init_qmi_ce_config(ab); 657 + 658 + ret = ath11k_core_get_rproc(ab); 659 + if (ret) { 660 + ath11k_err(ab, "failed to get rproc: %d\n", ret); 661 + goto 
err_ce_free; 662 + } 684 663 685 664 ret = ath11k_core_init(ab); 686 665 if (ret) { ··· 718 685 static int ath11k_ahb_remove(struct platform_device *pdev) 719 686 { 720 687 struct ath11k_base *ab = platform_get_drvdata(pdev); 688 + unsigned long left; 721 689 722 690 reinit_completion(&ab->driver_recovery); 723 691 724 - if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) 725 - wait_for_completion_timeout(&ab->driver_recovery, 726 - ATH11K_AHB_RECOVERY_TIMEOUT); 692 + if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) { 693 + left = wait_for_completion_timeout(&ab->driver_recovery, 694 + ATH11K_AHB_RECOVERY_TIMEOUT); 695 + if (!left) 696 + ath11k_warn(ab, "failed to receive recovery response completion\n"); 697 + } 727 698 728 699 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); 729 700 cancel_work_sync(&ab->restart_work);
+8
drivers/net/wireless/ath/ath11k/ahb.h
··· 10 10 #define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ) 11 11 struct ath11k_base; 12 12 13 + struct ath11k_ahb { 14 + struct rproc *tgt_rproc; 15 + }; 16 + 17 + static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab) 18 + { 19 + return (struct ath11k_ahb *)ab->drv_priv; 20 + } 13 21 #endif
+9 -39
drivers/net/wireless/ath/ath11k/core.c
··· 57 57 .vdev_start_delay = false, 58 58 .htt_peer_map_v2 = true, 59 59 .tcl_0_only = false, 60 + .spectral_fft_sz = 2, 60 61 }, 61 62 { 62 63 .hw_rev = ATH11K_HW_IPQ6018_HW10, ··· 87 86 .vdev_start_delay = false, 88 87 .htt_peer_map_v2 = true, 89 88 .tcl_0_only = false, 89 + .spectral_fft_sz = 4, 90 90 }, 91 91 { 92 92 .name = "qca6390 hw2.0", ··· 117 115 .vdev_start_delay = true, 118 116 .htt_peer_map_v2 = false, 119 117 .tcl_0_only = true, 118 + .spectral_fft_sz = 0, 120 119 }, 121 120 }; 122 121 ··· 415 412 return ret; 416 413 } 417 414 418 - ret = ath11k_debug_soc_create(ab); 415 + ret = ath11k_debugfs_soc_create(ab); 419 416 if (ret) { 420 417 ath11k_err(ab, "failed to create ath11k debugfs\n"); 421 418 goto err_qmi_deinit; ··· 430 427 return 0; 431 428 432 429 err_debugfs_reg: 433 - ath11k_debug_soc_destroy(ab); 430 + ath11k_debugfs_soc_destroy(ab); 434 431 err_qmi_deinit: 435 432 ath11k_qmi_deinit_service(ab); 436 433 return ret; ··· 438 435 439 436 static void ath11k_core_soc_destroy(struct ath11k_base *ab) 440 437 { 441 - ath11k_debug_soc_destroy(ab); 438 + ath11k_debugfs_soc_destroy(ab); 442 439 ath11k_dp_free(ab); 443 440 ath11k_reg_free(ab); 444 441 ath11k_qmi_deinit_service(ab); ··· 448 445 { 449 446 int ret; 450 447 451 - ret = ath11k_debug_pdev_create(ab); 448 + ret = ath11k_debugfs_pdev_create(ab); 452 449 if (ret) { 453 450 ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret); 454 451 return ret; ··· 488 485 err_mac_unregister: 489 486 ath11k_mac_unregister(ab); 490 487 err_pdev_debug: 491 - ath11k_debug_pdev_destroy(ab); 488 + ath11k_debugfs_pdev_destroy(ab); 492 489 493 490 return ret; 494 491 } ··· 500 497 ath11k_mac_unregister(ab); 501 498 ath11k_hif_irq_disable(ab); 502 499 ath11k_dp_pdev_free(ab); 503 - ath11k_debug_pdev_destroy(ab); 500 + ath11k_debugfs_pdev_destroy(ab); 504 501 } 505 502 506 503 static int ath11k_core_start(struct ath11k_base *ab, ··· 845 842 } 846 843 EXPORT_SYMBOL(ath11k_core_pre_init); 847 844 848 - static 
int ath11k_core_get_rproc(struct ath11k_base *ab) 849 - { 850 - struct device *dev = ab->dev; 851 - struct rproc *prproc; 852 - phandle rproc_phandle; 853 - 854 - if (!IS_ENABLED(CONFIG_REMOTEPROC)) 855 - return 0; 856 - 857 - if (ab->bus_params.mhi_support) 858 - return 0; 859 - 860 - if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { 861 - ath11k_err(ab, "failed to get q6_rproc handle\n"); 862 - return -ENOENT; 863 - } 864 - 865 - prproc = rproc_get_by_phandle(rproc_phandle); 866 - if (!prproc) { 867 - ath11k_err(ab, "failed to get rproc\n"); 868 - return -EINVAL; 869 - } 870 - ab->tgt_rproc = prproc; 871 - 872 - return 0; 873 - } 874 - 875 845 int ath11k_core_init(struct ath11k_base *ab) 876 846 { 877 847 int ret; 878 - 879 - ret = ath11k_core_get_rproc(ab); 880 - if (ret) { 881 - ath11k_err(ab, "failed to get rproc: %d\n", ret); 882 - return ret; 883 - } 884 848 885 849 ret = ath11k_core_soc_create(ab); 886 850 if (ret) {
-1
drivers/net/wireless/ath/ath11k/core.h
··· 648 648 struct ath11k_qmi qmi; 649 649 struct ath11k_wmi_base wmi_ab; 650 650 struct completion fw_ready; 651 - struct rproc *tgt_rproc; 652 651 int num_radios; 653 652 /* HW channel counters frequency value in hertz common to all MACs */ 654 653 u32 cc_freq_hz;
+2 -1106
drivers/net/wireless/ath/ath11k/debug.c
··· 6 6 #include <linux/vmalloc.h> 7 7 #include "core.h" 8 8 #include "debug.h" 9 - #include "wmi.h" 10 - #include "hal_rx.h" 11 - #include "dp_tx.h" 12 - #include "debug_htt_stats.h" 13 - #include "peer.h" 14 - 15 - static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = { 16 - "REO2SW1_RING", 17 - "REO2SW2_RING", 18 - "REO2SW3_RING", 19 - "REO2SW4_RING", 20 - "WBM2REO_LINK_RING", 21 - "REO2TCL_RING", 22 - "REO2FW_RING", 23 - "RELEASE_RING", 24 - "PPE_RELEASE_RING", 25 - "TCL2TQM_RING", 26 - "TQM_RELEASE_RING", 27 - "REO_RELEASE_RING", 28 - "WBM2SW0_RELEASE_RING", 29 - "WBM2SW1_RELEASE_RING", 30 - "WBM2SW2_RELEASE_RING", 31 - "WBM2SW3_RELEASE_RING", 32 - "REO_CMD_RING", 33 - "REO_STATUS_RING", 34 - }; 35 - 36 - static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = { 37 - "FW2RXDMA_BUF_RING", 38 - "FW2RXDMA_STATUS_RING", 39 - "FW2RXDMA_LINK_RING", 40 - "SW2RXDMA_BUF_RING", 41 - "WBM2RXDMA_LINK_RING", 42 - "RXDMA2FW_RING", 43 - "RXDMA2SW_RING", 44 - "RXDMA2RELEASE_RING", 45 - "RXDMA2REO_RING", 46 - "MONITOR_STATUS_RING", 47 - "MONITOR_BUF_RING", 48 - "MONITOR_DESC_RING", 49 - "MONITOR_DEST_RING", 50 - }; 51 9 52 10 void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) 53 11 { ··· 53 95 EXPORT_SYMBOL(ath11k_warn); 54 96 55 97 #ifdef CONFIG_ATH11K_DEBUG 98 + 56 99 void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask, 57 100 const char *fmt, ...) 
58 101 { ··· 103 144 } 104 145 EXPORT_SYMBOL(ath11k_dbg_dump); 105 146 106 - #endif 107 - 108 - #ifdef CONFIG_ATH11K_DEBUGFS 109 - static void ath11k_fw_stats_pdevs_free(struct list_head *head) 110 - { 111 - struct ath11k_fw_stats_pdev *i, *tmp; 112 - 113 - list_for_each_entry_safe(i, tmp, head, list) { 114 - list_del(&i->list); 115 - kfree(i); 116 - } 117 - } 118 - 119 - static void ath11k_fw_stats_vdevs_free(struct list_head *head) 120 - { 121 - struct ath11k_fw_stats_vdev *i, *tmp; 122 - 123 - list_for_each_entry_safe(i, tmp, head, list) { 124 - list_del(&i->list); 125 - kfree(i); 126 - } 127 - } 128 - 129 - static void ath11k_fw_stats_bcn_free(struct list_head *head) 130 - { 131 - struct ath11k_fw_stats_bcn *i, *tmp; 132 - 133 - list_for_each_entry_safe(i, tmp, head, list) { 134 - list_del(&i->list); 135 - kfree(i); 136 - } 137 - } 138 - 139 - static void ath11k_debug_fw_stats_reset(struct ath11k *ar) 140 - { 141 - spin_lock_bh(&ar->data_lock); 142 - ar->debug.fw_stats_done = false; 143 - ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); 144 - ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); 145 - spin_unlock_bh(&ar->data_lock); 146 - } 147 - 148 - void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb) 149 - { 150 - struct ath11k_fw_stats stats = {}; 151 - struct ath11k *ar; 152 - struct ath11k_pdev *pdev; 153 - bool is_end; 154 - static unsigned int num_vdev, num_bcn; 155 - size_t total_vdevs_started = 0; 156 - int i, ret; 157 - 158 - INIT_LIST_HEAD(&stats.pdevs); 159 - INIT_LIST_HEAD(&stats.vdevs); 160 - INIT_LIST_HEAD(&stats.bcn); 161 - 162 - ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); 163 - if (ret) { 164 - ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); 165 - goto free; 166 - } 167 - 168 - rcu_read_lock(); 169 - ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); 170 - if (!ar) { 171 - rcu_read_unlock(); 172 - ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n", 173 - stats.pdev_id, ret); 174 - goto 
free; 175 - } 176 - 177 - spin_lock_bh(&ar->data_lock); 178 - 179 - if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { 180 - list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); 181 - ar->debug.fw_stats_done = true; 182 - goto complete; 183 - } 184 - 185 - if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { 186 - if (list_empty(&stats.vdevs)) { 187 - ath11k_warn(ab, "empty vdev stats"); 188 - goto complete; 189 - } 190 - /* FW sends all the active VDEV stats irrespective of PDEV, 191 - * hence limit until the count of all VDEVs started 192 - */ 193 - for (i = 0; i < ab->num_radios; i++) { 194 - pdev = rcu_dereference(ab->pdevs_active[i]); 195 - if (pdev && pdev->ar) 196 - total_vdevs_started += ar->num_started_vdevs; 197 - } 198 - 199 - is_end = ((++num_vdev) == total_vdevs_started); 200 - 201 - list_splice_tail_init(&stats.vdevs, 202 - &ar->debug.fw_stats.vdevs); 203 - 204 - if (is_end) { 205 - ar->debug.fw_stats_done = true; 206 - num_vdev = 0; 207 - } 208 - goto complete; 209 - } 210 - 211 - if (stats.stats_id == WMI_REQUEST_BCN_STAT) { 212 - if (list_empty(&stats.bcn)) { 213 - ath11k_warn(ab, "empty bcn stats"); 214 - goto complete; 215 - } 216 - /* Mark end until we reached the count of all started VDEVs 217 - * within the PDEV 218 - */ 219 - is_end = ((++num_bcn) == ar->num_started_vdevs); 220 - 221 - list_splice_tail_init(&stats.bcn, 222 - &ar->debug.fw_stats.bcn); 223 - 224 - if (is_end) { 225 - ar->debug.fw_stats_done = true; 226 - num_bcn = 0; 227 - } 228 - } 229 - complete: 230 - complete(&ar->debug.fw_stats_complete); 231 - rcu_read_unlock(); 232 - spin_unlock_bh(&ar->data_lock); 233 - 234 - free: 235 - ath11k_fw_stats_pdevs_free(&stats.pdevs); 236 - ath11k_fw_stats_vdevs_free(&stats.vdevs); 237 - ath11k_fw_stats_bcn_free(&stats.bcn); 238 - } 239 - 240 - static int ath11k_debug_fw_stats_request(struct ath11k *ar, 241 - struct stats_request_params *req_param) 242 - { 243 - struct ath11k_base *ab = ar->ab; 244 - unsigned long timeout, time_left; 245 - 
int ret; 246 - 247 - lockdep_assert_held(&ar->conf_mutex); 248 - 249 - /* FW stats can get split when exceeding the stats data buffer limit. 250 - * In that case, since there is no end marking for the back-to-back 251 - * received 'update stats' event, we keep a 3 seconds timeout in case, 252 - * fw_stats_done is not marked yet 253 - */ 254 - timeout = jiffies + msecs_to_jiffies(3 * HZ); 255 - 256 - ath11k_debug_fw_stats_reset(ar); 257 - 258 - reinit_completion(&ar->debug.fw_stats_complete); 259 - 260 - ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); 261 - 262 - if (ret) { 263 - ath11k_warn(ab, "could not request fw stats (%d)\n", 264 - ret); 265 - return ret; 266 - } 267 - 268 - time_left = 269 - wait_for_completion_timeout(&ar->debug.fw_stats_complete, 270 - 1 * HZ); 271 - if (!time_left) 272 - return -ETIMEDOUT; 273 - 274 - for (;;) { 275 - if (time_after(jiffies, timeout)) 276 - break; 277 - 278 - spin_lock_bh(&ar->data_lock); 279 - if (ar->debug.fw_stats_done) { 280 - spin_unlock_bh(&ar->data_lock); 281 - break; 282 - } 283 - spin_unlock_bh(&ar->data_lock); 284 - } 285 - return 0; 286 - } 287 - 288 - static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) 289 - { 290 - struct ath11k *ar = inode->i_private; 291 - struct ath11k_base *ab = ar->ab; 292 - struct stats_request_params req_param; 293 - void *buf = NULL; 294 - int ret; 295 - 296 - mutex_lock(&ar->conf_mutex); 297 - 298 - if (ar->state != ATH11K_STATE_ON) { 299 - ret = -ENETDOWN; 300 - goto err_unlock; 301 - } 302 - 303 - buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 304 - if (!buf) { 305 - ret = -ENOMEM; 306 - goto err_unlock; 307 - } 308 - 309 - req_param.pdev_id = ar->pdev->pdev_id; 310 - req_param.vdev_id = 0; 311 - req_param.stats_id = WMI_REQUEST_PDEV_STAT; 312 - 313 - ret = ath11k_debug_fw_stats_request(ar, &req_param); 314 - if (ret) { 315 - ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); 316 - goto err_free; 317 - } 318 - 319 - ath11k_wmi_fw_stats_fill(ar, 
&ar->debug.fw_stats, req_param.stats_id, 320 - buf); 321 - 322 - file->private_data = buf; 323 - 324 - mutex_unlock(&ar->conf_mutex); 325 - return 0; 326 - 327 - err_free: 328 - vfree(buf); 329 - 330 - err_unlock: 331 - mutex_unlock(&ar->conf_mutex); 332 - return ret; 333 - } 334 - 335 - static int ath11k_release_pdev_stats(struct inode *inode, struct file *file) 336 - { 337 - vfree(file->private_data); 338 - 339 - return 0; 340 - } 341 - 342 - static ssize_t ath11k_read_pdev_stats(struct file *file, 343 - char __user *user_buf, 344 - size_t count, loff_t *ppos) 345 - { 346 - const char *buf = file->private_data; 347 - size_t len = strlen(buf); 348 - 349 - return simple_read_from_buffer(user_buf, count, ppos, buf, len); 350 - } 351 - 352 - static const struct file_operations fops_pdev_stats = { 353 - .open = ath11k_open_pdev_stats, 354 - .release = ath11k_release_pdev_stats, 355 - .read = ath11k_read_pdev_stats, 356 - .owner = THIS_MODULE, 357 - .llseek = default_llseek, 358 - }; 359 - 360 - static int ath11k_open_vdev_stats(struct inode *inode, struct file *file) 361 - { 362 - struct ath11k *ar = inode->i_private; 363 - struct stats_request_params req_param; 364 - void *buf = NULL; 365 - int ret; 366 - 367 - mutex_lock(&ar->conf_mutex); 368 - 369 - if (ar->state != ATH11K_STATE_ON) { 370 - ret = -ENETDOWN; 371 - goto err_unlock; 372 - } 373 - 374 - buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 375 - if (!buf) { 376 - ret = -ENOMEM; 377 - goto err_unlock; 378 - } 379 - 380 - req_param.pdev_id = ar->pdev->pdev_id; 381 - /* VDEV stats is always sent for all active VDEVs from FW */ 382 - req_param.vdev_id = 0; 383 - req_param.stats_id = WMI_REQUEST_VDEV_STAT; 384 - 385 - ret = ath11k_debug_fw_stats_request(ar, &req_param); 386 - if (ret) { 387 - ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); 388 - goto err_free; 389 - } 390 - 391 - ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, 392 - buf); 393 - 394 - file->private_data = buf; 395 
- 396 - mutex_unlock(&ar->conf_mutex); 397 - return 0; 398 - 399 - err_free: 400 - vfree(buf); 401 - 402 - err_unlock: 403 - mutex_unlock(&ar->conf_mutex); 404 - return ret; 405 - } 406 - 407 - static int ath11k_release_vdev_stats(struct inode *inode, struct file *file) 408 - { 409 - vfree(file->private_data); 410 - 411 - return 0; 412 - } 413 - 414 - static ssize_t ath11k_read_vdev_stats(struct file *file, 415 - char __user *user_buf, 416 - size_t count, loff_t *ppos) 417 - { 418 - const char *buf = file->private_data; 419 - size_t len = strlen(buf); 420 - 421 - return simple_read_from_buffer(user_buf, count, ppos, buf, len); 422 - } 423 - 424 - static const struct file_operations fops_vdev_stats = { 425 - .open = ath11k_open_vdev_stats, 426 - .release = ath11k_release_vdev_stats, 427 - .read = ath11k_read_vdev_stats, 428 - .owner = THIS_MODULE, 429 - .llseek = default_llseek, 430 - }; 431 - 432 - static int ath11k_open_bcn_stats(struct inode *inode, struct file *file) 433 - { 434 - struct ath11k *ar = inode->i_private; 435 - struct ath11k_vif *arvif; 436 - struct stats_request_params req_param; 437 - void *buf = NULL; 438 - int ret; 439 - 440 - mutex_lock(&ar->conf_mutex); 441 - 442 - if (ar->state != ATH11K_STATE_ON) { 443 - ret = -ENETDOWN; 444 - goto err_unlock; 445 - } 446 - 447 - buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 448 - if (!buf) { 449 - ret = -ENOMEM; 450 - goto err_unlock; 451 - } 452 - 453 - req_param.stats_id = WMI_REQUEST_BCN_STAT; 454 - req_param.pdev_id = ar->pdev->pdev_id; 455 - 456 - /* loop all active VDEVs for bcn stats */ 457 - list_for_each_entry(arvif, &ar->arvifs, list) { 458 - if (!arvif->is_up) 459 - continue; 460 - 461 - req_param.vdev_id = arvif->vdev_id; 462 - ret = ath11k_debug_fw_stats_request(ar, &req_param); 463 - if (ret) { 464 - ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret); 465 - goto err_free; 466 - } 467 - } 468 - 469 - ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, 470 - buf); 471 
- 472 - /* since beacon stats request is looped for all active VDEVs, saved fw 473 - * stats is not freed for each request until done for all active VDEVs 474 - */ 475 - spin_lock_bh(&ar->data_lock); 476 - ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn); 477 - spin_unlock_bh(&ar->data_lock); 478 - 479 - file->private_data = buf; 480 - 481 - mutex_unlock(&ar->conf_mutex); 482 - return 0; 483 - 484 - err_free: 485 - vfree(buf); 486 - 487 - err_unlock: 488 - mutex_unlock(&ar->conf_mutex); 489 - return ret; 490 - } 491 - 492 - static int ath11k_release_bcn_stats(struct inode *inode, struct file *file) 493 - { 494 - vfree(file->private_data); 495 - 496 - return 0; 497 - } 498 - 499 - static ssize_t ath11k_read_bcn_stats(struct file *file, 500 - char __user *user_buf, 501 - size_t count, loff_t *ppos) 502 - { 503 - const char *buf = file->private_data; 504 - size_t len = strlen(buf); 505 - 506 - return simple_read_from_buffer(user_buf, count, ppos, buf, len); 507 - } 508 - 509 - static const struct file_operations fops_bcn_stats = { 510 - .open = ath11k_open_bcn_stats, 511 - .release = ath11k_release_bcn_stats, 512 - .read = ath11k_read_bcn_stats, 513 - .owner = THIS_MODULE, 514 - .llseek = default_llseek, 515 - }; 516 - 517 - static ssize_t ath11k_read_simulate_fw_crash(struct file *file, 518 - char __user *user_buf, 519 - size_t count, loff_t *ppos) 520 - { 521 - const char buf[] = 522 - "To simulate firmware crash write one of the keywords to this file:\n" 523 - "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n" 524 - "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; 525 - 526 - return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 527 - } 528 - 529 - /* Simulate firmware crash: 530 - * 'soft': Call wmi command causing firmware hang. This firmware hang is 531 - * recoverable by warm firmware reset. 
532 - * 'hard': Force firmware crash by setting any vdev parameter for not allowed 533 - * vdev id. This is hard firmware crash because it is recoverable only by cold 534 - * firmware reset. 535 - */ 536 - static ssize_t ath11k_write_simulate_fw_crash(struct file *file, 537 - const char __user *user_buf, 538 - size_t count, loff_t *ppos) 539 - { 540 - struct ath11k_base *ab = file->private_data; 541 - struct ath11k_pdev *pdev; 542 - struct ath11k *ar = ab->pdevs[0].ar; 543 - char buf[32] = {0}; 544 - ssize_t rc; 545 - int i, ret, radioup = 0; 546 - 547 - for (i = 0; i < ab->num_radios; i++) { 548 - pdev = &ab->pdevs[i]; 549 - ar = pdev->ar; 550 - if (ar && ar->state == ATH11K_STATE_ON) { 551 - radioup = 1; 552 - break; 553 - } 554 - } 555 - /* filter partial writes and invalid commands */ 556 - if (*ppos != 0 || count >= sizeof(buf) || count == 0) 557 - return -EINVAL; 558 - 559 - rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); 560 - if (rc < 0) 561 - return rc; 562 - 563 - /* drop the possible '\n' from the end */ 564 - if (buf[*ppos - 1] == '\n') 565 - buf[*ppos - 1] = '\0'; 566 - 567 - if (radioup == 0) { 568 - ret = -ENETDOWN; 569 - goto exit; 570 - } 571 - 572 - if (!strcmp(buf, "assert")) { 573 - ath11k_info(ab, "simulating firmware assert crash\n"); 574 - ret = ath11k_wmi_force_fw_hang_cmd(ar, 575 - ATH11K_WMI_FW_HANG_ASSERT_TYPE, 576 - ATH11K_WMI_FW_HANG_DELAY); 577 - } else { 578 - ret = -EINVAL; 579 - goto exit; 580 - } 581 - 582 - if (ret) { 583 - ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret); 584 - goto exit; 585 - } 586 - 587 - ret = count; 588 - 589 - exit: 590 - return ret; 591 - } 592 - 593 - static const struct file_operations fops_simulate_fw_crash = { 594 - .read = ath11k_read_simulate_fw_crash, 595 - .write = ath11k_write_simulate_fw_crash, 596 - .open = simple_open, 597 - .owner = THIS_MODULE, 598 - .llseek = default_llseek, 599 - }; 600 - 601 - static ssize_t ath11k_write_enable_extd_tx_stats(struct 
file *file, 602 - const char __user *ubuf, 603 - size_t count, loff_t *ppos) 604 - { 605 - struct ath11k *ar = file->private_data; 606 - u32 filter; 607 - int ret; 608 - 609 - if (kstrtouint_from_user(ubuf, count, 0, &filter)) 610 - return -EINVAL; 611 - 612 - mutex_lock(&ar->conf_mutex); 613 - 614 - if (ar->state != ATH11K_STATE_ON) { 615 - ret = -ENETDOWN; 616 - goto out; 617 - } 618 - 619 - if (filter == ar->debug.extd_tx_stats) { 620 - ret = count; 621 - goto out; 622 - } 623 - 624 - ar->debug.extd_tx_stats = filter; 625 - ret = count; 626 - 627 - out: 628 - mutex_unlock(&ar->conf_mutex); 629 - return ret; 630 - } 631 - 632 - static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file, 633 - char __user *ubuf, 634 - size_t count, loff_t *ppos) 635 - 636 - { 637 - char buf[32] = {0}; 638 - struct ath11k *ar = file->private_data; 639 - int len = 0; 640 - 641 - mutex_lock(&ar->conf_mutex); 642 - len = scnprintf(buf, sizeof(buf) - len, "%08x\n", 643 - ar->debug.extd_tx_stats); 644 - mutex_unlock(&ar->conf_mutex); 645 - 646 - return simple_read_from_buffer(ubuf, count, ppos, buf, len); 647 - } 648 - 649 - static const struct file_operations fops_extd_tx_stats = { 650 - .read = ath11k_read_enable_extd_tx_stats, 651 - .write = ath11k_write_enable_extd_tx_stats, 652 - .open = simple_open 653 - }; 654 - 655 - static ssize_t ath11k_write_extd_rx_stats(struct file *file, 656 - const char __user *ubuf, 657 - size_t count, loff_t *ppos) 658 - { 659 - struct ath11k *ar = file->private_data; 660 - struct ath11k_base *ab = ar->ab; 661 - struct htt_rx_ring_tlv_filter tlv_filter = {0}; 662 - u32 enable, rx_filter = 0, ring_id; 663 - int i; 664 - int ret; 665 - 666 - if (kstrtouint_from_user(ubuf, count, 0, &enable)) 667 - return -EINVAL; 668 - 669 - mutex_lock(&ar->conf_mutex); 670 - 671 - if (ar->state != ATH11K_STATE_ON) { 672 - ret = -ENETDOWN; 673 - goto exit; 674 - } 675 - 676 - if (enable > 1) { 677 - ret = -EINVAL; 678 - goto exit; 679 - } 680 - 681 - if (enable == 
ar->debug.extd_rx_stats) { 682 - ret = count; 683 - goto exit; 684 - } 685 - 686 - if (enable) { 687 - rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START; 688 - rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START; 689 - rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END; 690 - rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS; 691 - rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT; 692 - rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE; 693 - 694 - tlv_filter.rx_filter = rx_filter; 695 - tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; 696 - tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; 697 - tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; 698 - tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | 699 - HTT_RX_FP_DATA_FILTER_FLASG3; 700 - } else { 701 - tlv_filter = ath11k_mac_mon_status_filter_default; 702 - } 703 - 704 - ar->debug.rx_filter = tlv_filter.rx_filter; 705 - 706 - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 707 - ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 708 - ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, 709 - HAL_RXDMA_MONITOR_STATUS, 710 - DP_RX_BUFFER_SIZE, &tlv_filter); 711 - 712 - if (ret) { 713 - ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); 714 - goto exit; 715 - } 716 - } 717 - 718 - ar->debug.extd_rx_stats = enable; 719 - ret = count; 720 - exit: 721 - mutex_unlock(&ar->conf_mutex); 722 - return ret; 723 - } 724 - 725 - static ssize_t ath11k_read_extd_rx_stats(struct file *file, 726 - char __user *ubuf, 727 - size_t count, loff_t *ppos) 728 - { 729 - struct ath11k *ar = file->private_data; 730 - char buf[32]; 731 - int len = 0; 732 - 733 - mutex_lock(&ar->conf_mutex); 734 - len = scnprintf(buf, sizeof(buf) - len, "%d\n", 735 - ar->debug.extd_rx_stats); 736 - mutex_unlock(&ar->conf_mutex); 737 - 738 - return simple_read_from_buffer(ubuf, count, ppos, buf, len); 739 - } 740 - 741 - 
static const struct file_operations fops_extd_rx_stats = { 742 - .read = ath11k_read_extd_rx_stats, 743 - .write = ath11k_write_extd_rx_stats, 744 - .open = simple_open, 745 - }; 746 - 747 - static int ath11k_fill_bp_stats(struct ath11k_base *ab, 748 - struct ath11k_bp_stats *bp_stats, 749 - char *buf, int len, int size) 750 - { 751 - lockdep_assert_held(&ab->base_lock); 752 - 753 - len += scnprintf(buf + len, size - len, "count: %u\n", 754 - bp_stats->count); 755 - len += scnprintf(buf + len, size - len, "hp: %u\n", 756 - bp_stats->hp); 757 - len += scnprintf(buf + len, size - len, "tp: %u\n", 758 - bp_stats->tp); 759 - len += scnprintf(buf + len, size - len, "seen before: %ums\n\n", 760 - jiffies_to_msecs(jiffies - bp_stats->jiffies)); 761 - return len; 762 - } 763 - 764 - static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab, 765 - char *buf, int size) 766 - { 767 - struct ath11k_bp_stats *bp_stats; 768 - bool stats_rxd = false; 769 - u8 i, pdev_idx; 770 - int len = 0; 771 - 772 - len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n"); 773 - len += scnprintf(buf + len, size - len, "==================\n"); 774 - 775 - spin_lock_bh(&ab->base_lock); 776 - for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) { 777 - bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i]; 778 - 779 - if (!bp_stats->count) 780 - continue; 781 - 782 - len += scnprintf(buf + len, size - len, "Ring: %s\n", 783 - htt_bp_umac_ring[i]); 784 - len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); 785 - stats_rxd = true; 786 - } 787 - 788 - for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) { 789 - for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) { 790 - bp_stats = 791 - &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx]; 792 - 793 - if (!bp_stats->count) 794 - continue; 795 - 796 - len += scnprintf(buf + len, size - len, "Ring: %s\n", 797 - htt_bp_lmac_ring[i]); 798 - len += scnprintf(buf + len, size - len, "pdev: %d\n", 799 - pdev_idx); 800 - len = 
ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); 801 - stats_rxd = true; 802 - } 803 - } 804 - spin_unlock_bh(&ab->base_lock); 805 - 806 - if (!stats_rxd) 807 - len += scnprintf(buf + len, size - len, 808 - "No Ring Backpressure stats received\n\n"); 809 - 810 - return len; 811 - } 812 - 813 - static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file, 814 - char __user *user_buf, 815 - size_t count, loff_t *ppos) 816 - { 817 - struct ath11k_base *ab = file->private_data; 818 - struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats; 819 - int len = 0, i, retval; 820 - const int size = 4096; 821 - static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = { 822 - "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC", 823 - "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse", 824 - "AMSDU parse", "SA timeout", "DA timeout", 825 - "Flow timeout", "Flush req"}; 826 - static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = { 827 - "Desc addr zero", "Desc inval", "AMPDU in non BA", 828 - "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump", 829 - "Frame OOR", "BAR OOR", "No BA session", 830 - "Frame SN equal SSN", "PN check fail", "2k err", 831 - "PN err", "Desc blocked"}; 832 - 833 - char *buf; 834 - 835 - buf = kzalloc(size, GFP_KERNEL); 836 - if (!buf) 837 - return -ENOMEM; 838 - 839 - len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n"); 840 - len += scnprintf(buf + len, size - len, "err ring pkts: %u\n", 841 - soc_stats->err_ring_pkts); 842 - len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n", 843 - soc_stats->invalid_rbm); 844 - len += scnprintf(buf + len, size - len, "RXDMA errors:\n"); 845 - for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++) 846 - len += scnprintf(buf + len, size - len, "%s: %u\n", 847 - rxdma_err[i], soc_stats->rxdma_error[i]); 848 - 849 - len += scnprintf(buf + len, size - len, "\nREO errors:\n"); 850 - for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++) 851 - len += scnprintf(buf + len, size - 
len, "%s: %u\n", 852 - reo_err[i], soc_stats->reo_error[i]); 853 - 854 - len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n"); 855 - len += scnprintf(buf + len, size - len, 856 - "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n", 857 - soc_stats->hal_reo_error[0], 858 - soc_stats->hal_reo_error[1], 859 - soc_stats->hal_reo_error[2], 860 - soc_stats->hal_reo_error[3]); 861 - 862 - len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n"); 863 - len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n"); 864 - 865 - for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) 866 - len += scnprintf(buf + len, size - len, "ring%d: %u\n", 867 - i, soc_stats->tx_err.desc_na[i]); 868 - 869 - len += scnprintf(buf + len, size - len, 870 - "\nMisc Transmit Failures: %d\n", 871 - atomic_read(&soc_stats->tx_err.misc_fail)); 872 - 873 - len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len); 874 - 875 - if (len > size) 876 - len = size; 877 - retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); 878 - kfree(buf); 879 - 880 - return retval; 881 - } 882 - 883 - static const struct file_operations fops_soc_dp_stats = { 884 - .read = ath11k_debug_dump_soc_dp_stats, 885 - .open = simple_open, 886 - .owner = THIS_MODULE, 887 - .llseek = default_llseek, 888 - }; 889 - 890 - int ath11k_debug_pdev_create(struct ath11k_base *ab) 891 - { 892 - if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) 893 - return 0; 894 - 895 - ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); 896 - 897 - if (IS_ERR_OR_NULL(ab->debugfs_soc)) { 898 - if (IS_ERR(ab->debugfs_soc)) 899 - return PTR_ERR(ab->debugfs_soc); 900 - return -ENOMEM; 901 - } 902 - 903 - debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, 904 - &fops_simulate_fw_crash); 905 - 906 - debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab, 907 - &fops_soc_dp_stats); 908 - 909 - return 0; 910 - } 911 - 912 - void ath11k_debug_pdev_destroy(struct ath11k_base *ab) 
913 - { 914 - debugfs_remove_recursive(ab->debugfs_ath11k); 915 - ab->debugfs_ath11k = NULL; 916 - } 917 - 918 - int ath11k_debug_soc_create(struct ath11k_base *ab) 919 - { 920 - ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL); 921 - 922 - if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) { 923 - if (IS_ERR(ab->debugfs_ath11k)) 924 - return PTR_ERR(ab->debugfs_ath11k); 925 - return -ENOMEM; 926 - } 927 - 928 - return 0; 929 - } 930 - 931 - void ath11k_debug_soc_destroy(struct ath11k_base *ab) 932 - { 933 - debugfs_remove_recursive(ab->debugfs_soc); 934 - ab->debugfs_soc = NULL; 935 - } 936 - 937 - void ath11k_debug_fw_stats_init(struct ath11k *ar) 938 - { 939 - struct dentry *fwstats_dir = debugfs_create_dir("fw_stats", 940 - ar->debug.debugfs_pdev); 941 - 942 - ar->debug.fw_stats.debugfs_fwstats = fwstats_dir; 943 - 944 - /* all stats debugfs files created are under "fw_stats" directory 945 - * created per PDEV 946 - */ 947 - debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar, 948 - &fops_pdev_stats); 949 - debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar, 950 - &fops_vdev_stats); 951 - debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar, 952 - &fops_bcn_stats); 953 - 954 - INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); 955 - INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); 956 - INIT_LIST_HEAD(&ar->debug.fw_stats.bcn); 957 - 958 - init_completion(&ar->debug.fw_stats_complete); 959 - } 960 - 961 - static ssize_t ath11k_write_pktlog_filter(struct file *file, 962 - const char __user *ubuf, 963 - size_t count, loff_t *ppos) 964 - { 965 - struct ath11k *ar = file->private_data; 966 - struct ath11k_base *ab = ar->ab; 967 - struct htt_rx_ring_tlv_filter tlv_filter = {0}; 968 - u32 rx_filter = 0, ring_id, filter, mode; 969 - u8 buf[128] = {0}; 970 - int i, ret; 971 - ssize_t rc; 972 - 973 - mutex_lock(&ar->conf_mutex); 974 - if (ar->state != ATH11K_STATE_ON) { 975 - ret = -ENETDOWN; 976 - goto out; 977 - } 978 - 979 - rc = simple_write_to_buffer(buf, sizeof(buf) - 
1, ppos, ubuf, count); 980 - if (rc < 0) { 981 - ret = rc; 982 - goto out; 983 - } 984 - buf[rc] = '\0'; 985 - 986 - ret = sscanf(buf, "0x%x %u", &filter, &mode); 987 - if (ret != 2) { 988 - ret = -EINVAL; 989 - goto out; 990 - } 991 - 992 - if (filter) { 993 - ret = ath11k_wmi_pdev_pktlog_enable(ar, filter); 994 - if (ret) { 995 - ath11k_warn(ar->ab, 996 - "failed to enable pktlog filter %x: %d\n", 997 - ar->debug.pktlog_filter, ret); 998 - goto out; 999 - } 1000 - } else { 1001 - ret = ath11k_wmi_pdev_pktlog_disable(ar); 1002 - if (ret) { 1003 - ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret); 1004 - goto out; 1005 - } 1006 - } 1007 - 1008 - #define HTT_RX_FILTER_TLV_LITE_MODE \ 1009 - (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ 1010 - HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ 1011 - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ 1012 - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ 1013 - HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \ 1014 - HTT_RX_FILTER_TLV_FLAGS_MPDU_START) 1015 - 1016 - if (mode == ATH11K_PKTLOG_MODE_FULL) { 1017 - rx_filter = HTT_RX_FILTER_TLV_LITE_MODE | 1018 - HTT_RX_FILTER_TLV_FLAGS_MSDU_START | 1019 - HTT_RX_FILTER_TLV_FLAGS_MSDU_END | 1020 - HTT_RX_FILTER_TLV_FLAGS_MPDU_END | 1021 - HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | 1022 - HTT_RX_FILTER_TLV_FLAGS_ATTENTION; 1023 - } else if (mode == ATH11K_PKTLOG_MODE_LITE) { 1024 - ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, 1025 - HTT_PPDU_STATS_TAG_PKTLOG); 1026 - if (ret) { 1027 - ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret); 1028 - goto out; 1029 - } 1030 - 1031 - rx_filter = HTT_RX_FILTER_TLV_LITE_MODE; 1032 - } else { 1033 - ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, 1034 - HTT_PPDU_STATS_TAG_DEFAULT); 1035 - if (ret) { 1036 - ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n", 1037 - ret); 1038 - goto out; 1039 - } 1040 - } 1041 - 1042 - tlv_filter.rx_filter = rx_filter; 1043 - if (rx_filter) { 1044 - tlv_filter.pkt_filter_flags0 = 
HTT_RX_FP_MGMT_FILTER_FLAGS0; 1045 - tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; 1046 - tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; 1047 - tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | 1048 - HTT_RX_FP_DATA_FILTER_FLASG3; 1049 - } 1050 - 1051 - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 1052 - ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 1053 - ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, 1054 - ar->dp.mac_id + i, 1055 - HAL_RXDMA_MONITOR_STATUS, 1056 - DP_RX_BUFFER_SIZE, &tlv_filter); 1057 - 1058 - if (ret) { 1059 - ath11k_warn(ab, "failed to set rx filter for monitor status ring\n"); 1060 - goto out; 1061 - } 1062 - } 1063 - 1064 - ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n", 1065 - filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite")); 1066 - 1067 - ar->debug.pktlog_filter = filter; 1068 - ar->debug.pktlog_mode = mode; 1069 - ret = count; 1070 - 1071 - out: 1072 - mutex_unlock(&ar->conf_mutex); 1073 - return ret; 1074 - } 1075 - 1076 - static ssize_t ath11k_read_pktlog_filter(struct file *file, 1077 - char __user *ubuf, 1078 - size_t count, loff_t *ppos) 1079 - 1080 - { 1081 - char buf[32] = {0}; 1082 - struct ath11k *ar = file->private_data; 1083 - int len = 0; 1084 - 1085 - mutex_lock(&ar->conf_mutex); 1086 - len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n", 1087 - ar->debug.pktlog_filter, 1088 - ar->debug.pktlog_mode); 1089 - mutex_unlock(&ar->conf_mutex); 1090 - 1091 - return simple_read_from_buffer(ubuf, count, ppos, buf, len); 1092 - } 1093 - 1094 - static const struct file_operations fops_pktlog_filter = { 1095 - .read = ath11k_read_pktlog_filter, 1096 - .write = ath11k_write_pktlog_filter, 1097 - .open = simple_open 1098 - }; 1099 - 1100 - static ssize_t ath11k_write_simulate_radar(struct file *file, 1101 - const char __user *user_buf, 1102 - size_t count, loff_t *ppos) 1103 - { 1104 - struct ath11k *ar = file->private_data; 1105 
- int ret; 1106 - 1107 - ret = ath11k_wmi_simulate_radar(ar); 1108 - if (ret) 1109 - return ret; 1110 - 1111 - return count; 1112 - } 1113 - 1114 - static const struct file_operations fops_simulate_radar = { 1115 - .write = ath11k_write_simulate_radar, 1116 - .open = simple_open 1117 - }; 1118 - 1119 - int ath11k_debug_register(struct ath11k *ar) 1120 - { 1121 - struct ath11k_base *ab = ar->ab; 1122 - char pdev_name[5]; 1123 - char buf[100] = {0}; 1124 - 1125 - snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); 1126 - 1127 - ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); 1128 - 1129 - if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) { 1130 - if (IS_ERR(ar->debug.debugfs_pdev)) 1131 - return PTR_ERR(ar->debug.debugfs_pdev); 1132 - 1133 - return -ENOMEM; 1134 - } 1135 - 1136 - /* Create a symlink under ieee80211/phy* */ 1137 - snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev); 1138 - debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf); 1139 - 1140 - ath11k_debug_htt_stats_init(ar); 1141 - 1142 - ath11k_debug_fw_stats_init(ar); 1143 - 1144 - debugfs_create_file("ext_tx_stats", 0644, 1145 - ar->debug.debugfs_pdev, ar, 1146 - &fops_extd_tx_stats); 1147 - debugfs_create_file("ext_rx_stats", 0644, 1148 - ar->debug.debugfs_pdev, ar, 1149 - &fops_extd_rx_stats); 1150 - debugfs_create_file("pktlog_filter", 0644, 1151 - ar->debug.debugfs_pdev, ar, 1152 - &fops_pktlog_filter); 1153 - 1154 - if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) { 1155 - debugfs_create_file("dfs_simulate_radar", 0200, 1156 - ar->debug.debugfs_pdev, ar, 1157 - &fops_simulate_radar); 1158 - debugfs_create_bool("dfs_block_radar_events", 0200, 1159 - ar->debug.debugfs_pdev, 1160 - &ar->dfs_block_radar_events); 1161 - } 1162 - 1163 - return 0; 1164 - } 1165 - 1166 - void ath11k_debug_unregister(struct ath11k *ar) 1167 - { 1168 - } 1169 - #endif /* CONFIG_ATH11K_DEBUGFS */ 147 + #endif /* CONFIG_ATH11K_DEBUG */
+1 -243
drivers/net/wireless/ath/ath11k/debug.h
··· 6 6 #ifndef _ATH11K_DEBUG_H_ 7 7 #define _ATH11K_DEBUG_H_ 8 8 9 - #include "hal_tx.h" 10 9 #include "trace.h" 11 - 12 - #define ATH11K_TX_POWER_MAX_VAL 70 13 - #define ATH11K_TX_POWER_MIN_VAL 0 10 + #include "debugfs.h" 14 11 15 12 enum ath11k_debug_mask { 16 13 ATH11K_DBG_AHB = 0x00000001, ··· 26 29 ATH11K_DBG_DP_TX = 0x00001000, 27 30 ATH11K_DBG_DP_RX = 0x00002000, 28 31 ATH11K_DBG_ANY = 0xffffffff, 29 - }; 30 - 31 - /* htt_dbg_ext_stats_type */ 32 - enum ath11k_dbg_htt_ext_stats_type { 33 - ATH11K_DBG_HTT_EXT_STATS_RESET = 0, 34 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1, 35 - ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2, 36 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3, 37 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, 38 - ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, 39 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, 40 - ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7, 41 - ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, 42 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9, 43 - ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10, 44 - ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11, 45 - ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, 46 - ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13, 47 - ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14, 48 - ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, 49 - ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16, 50 - ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, 51 - ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18, 52 - ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, 53 - ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20, 54 - ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21, 55 - ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22, 56 - ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, 57 - ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24, 58 - 59 - /* keep this last */ 60 - ATH11K_DBG_HTT_NUM_EXT_STATS, 61 - }; 62 - 63 - struct debug_htt_stats_req { 64 - bool done; 65 - u8 pdev_id; 66 - u8 type; 67 - u8 peer_addr[ETH_ALEN]; 68 - struct completion cmpln; 69 - u32 buf_len; 70 - u8 buf[]; 71 - }; 72 - 73 - struct ath_pktlog_hdr { 
74 - u16 flags; 75 - u16 missed_cnt; 76 - u16 log_type; 77 - u16 size; 78 - u32 timestamp; 79 - u32 type_specific_data; 80 - u8 payload[]; 81 - }; 82 - 83 - #define ATH11K_HTT_PEER_STATS_RESET BIT(16) 84 - 85 - #define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512) 86 - #define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024) 87 - 88 - enum ath11k_pktlog_filter { 89 - ATH11K_PKTLOG_RX = 0x000000001, 90 - ATH11K_PKTLOG_TX = 0x000000002, 91 - ATH11K_PKTLOG_RCFIND = 0x000000004, 92 - ATH11K_PKTLOG_RCUPDATE = 0x000000008, 93 - ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020, 94 - ATH11K_PKTLOG_EVENT_SW = 0x000000040, 95 - ATH11K_PKTLOG_ANY = 0x00000006f, 96 - }; 97 - 98 - enum ath11k_pktlog_mode { 99 - ATH11K_PKTLOG_MODE_LITE = 1, 100 - ATH11K_PKTLOG_MODE_FULL = 2, 101 - }; 102 - 103 - enum ath11k_pktlog_enum { 104 - ATH11K_PKTLOG_TYPE_TX_CTRL = 1, 105 - ATH11K_PKTLOG_TYPE_TX_STAT = 2, 106 - ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3, 107 - ATH11K_PKTLOG_TYPE_RX_STAT = 5, 108 - ATH11K_PKTLOG_TYPE_RC_FIND = 6, 109 - ATH11K_PKTLOG_TYPE_RC_UPDATE = 7, 110 - ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8, 111 - ATH11K_PKTLOG_TYPE_RX_CBF = 10, 112 - ATH11K_PKTLOG_TYPE_RX_STATBUF = 22, 113 - ATH11K_PKTLOG_TYPE_PPDU_STATS = 23, 114 - ATH11K_PKTLOG_TYPE_LITE_RX = 24, 115 - }; 116 - 117 - enum ath11k_dbg_aggr_mode { 118 - ATH11K_DBG_AGGR_MODE_AUTO, 119 - ATH11K_DBG_AGGR_MODE_MANUAL, 120 - ATH11K_DBG_AGGR_MODE_MAX, 121 32 }; 122 33 123 34 __printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...); ··· 57 152 { 58 153 } 59 154 #endif /* CONFIG_ATH11K_DEBUG */ 60 - 61 - #ifdef CONFIG_ATH11K_DEBUGFS 62 - int ath11k_debug_soc_create(struct ath11k_base *ab); 63 - void ath11k_debug_soc_destroy(struct ath11k_base *ab); 64 - int ath11k_debug_pdev_create(struct ath11k_base *ab); 65 - void ath11k_debug_pdev_destroy(struct ath11k_base *ab); 66 - int ath11k_debug_register(struct ath11k *ar); 67 - void ath11k_debug_unregister(struct ath11k *ar); 68 - void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, 69 
- struct sk_buff *skb); 70 - void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb); 71 - 72 - void ath11k_debug_fw_stats_init(struct ath11k *ar); 73 - int ath11k_dbg_htt_stats_req(struct ath11k *ar); 74 - 75 - static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar) 76 - { 77 - return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE); 78 - } 79 - 80 - static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar) 81 - { 82 - return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode); 83 - } 84 - 85 - static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr) 86 - { 87 - return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode && 88 - ether_addr_equal(addr, ar->debug.pktlog_peer_addr)); 89 - } 90 - 91 - static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar) 92 - { 93 - return ar->debug.extd_tx_stats; 94 - } 95 - 96 - static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar) 97 - { 98 - return ar->debug.extd_rx_stats; 99 - } 100 - 101 - static inline int ath11k_debug_rx_filter(struct ath11k *ar) 102 - { 103 - return ar->debug.rx_filter; 104 - } 105 - 106 - void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 107 - struct ieee80211_sta *sta, struct dentry *dir); 108 - void 109 - ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, 110 - struct ath11k_per_peer_tx_stats *peer_stats, 111 - u8 legacy_rate_idx); 112 - void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, 113 - struct sk_buff *msdu, 114 - struct hal_tx_status *ts); 115 - #else 116 - static inline int ath11k_debug_soc_create(struct ath11k_base *ab) 117 - { 118 - return 0; 119 - } 120 - 121 - static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab) 122 - { 123 - } 124 - 125 - static inline int ath11k_debug_pdev_create(struct ath11k_base *ab) 126 - { 127 - return 0; 128 - } 129 - 130 - static inline void 
ath11k_debug_pdev_destroy(struct ath11k_base *ab) 131 - { 132 - } 133 - 134 - static inline int ath11k_debug_register(struct ath11k *ar) 135 - { 136 - return 0; 137 - } 138 - 139 - static inline void ath11k_debug_unregister(struct ath11k *ar) 140 - { 141 - } 142 - 143 - static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, 144 - struct sk_buff *skb) 145 - { 146 - } 147 - 148 - static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab, 149 - struct sk_buff *skb) 150 - { 151 - } 152 - 153 - static inline void ath11k_debug_fw_stats_init(struct ath11k *ar) 154 - { 155 - } 156 - 157 - static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar) 158 - { 159 - return 0; 160 - } 161 - 162 - static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar) 163 - { 164 - return 0; 165 - } 166 - 167 - static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar) 168 - { 169 - return 0; 170 - } 171 - 172 - static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar) 173 - { 174 - return false; 175 - } 176 - 177 - static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar) 178 - { 179 - return false; 180 - } 181 - 182 - static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr) 183 - { 184 - return false; 185 - } 186 - 187 - static inline int ath11k_debug_rx_filter(struct ath11k *ar) 188 - { 189 - return 0; 190 - } 191 - 192 - static inline void 193 - ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, 194 - struct ath11k_per_peer_tx_stats *peer_stats, 195 - u8 legacy_rate_idx) 196 - { 197 - } 198 - 199 - static inline void 200 - ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, 201 - struct sk_buff *msdu, 202 - struct hal_tx_status *ts) 203 - { 204 - } 205 - 206 - #endif /* CONFIG_MAC80211_DEBUGFS*/ 207 155 208 156 #define ath11k_dbg(ar, dbg_mask, fmt, ...) \ 209 157 do { \
+6 -6
drivers/net/wireless/ath/ath11k/debug_htt_stats.c drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
··· 8 8 #include "dp_tx.h" 9 9 #include "dp_rx.h" 10 10 #include "debug.h" 11 - #include "debug_htt_stats.h" 11 + #include "debugfs_htt_stats.h" 12 12 13 13 #define HTT_DBG_OUT(buf, len, fmt, ...) \ 14 14 scnprintf(buf, len, fmt "\n", ##__VA_ARGS__) ··· 4253 4253 return 0; 4254 4254 } 4255 4255 4256 - void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, 4257 - struct sk_buff *skb) 4256 + void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab, 4257 + struct sk_buff *skb) 4258 4258 { 4259 4259 struct ath11k_htt_extd_stats_msg *msg; 4260 4260 struct debug_htt_stats_req *stats_req; ··· 4402 4402 return 0; 4403 4403 } 4404 4404 4405 - int ath11k_dbg_htt_stats_req(struct ath11k *ar) 4405 + int ath11k_debugfs_htt_stats_req(struct ath11k *ar) 4406 4406 { 4407 4407 struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req; 4408 4408 u8 type = stats_req->type; ··· 4476 4476 ar->debug.htt_stats.stats_req = stats_req; 4477 4477 stats_req->type = type; 4478 4478 4479 - ret = ath11k_dbg_htt_stats_req(ar); 4479 + ret = ath11k_debugfs_htt_stats_req(ar); 4480 4480 if (ret < 0) 4481 4481 goto out; 4482 4482 ··· 4586 4586 .llseek = default_llseek, 4587 4587 }; 4588 4588 4589 - void ath11k_debug_htt_stats_init(struct ath11k *ar) 4589 + void ath11k_debugfs_htt_stats_init(struct ath11k *ar) 4590 4590 { 4591 4591 spin_lock_init(&ar->debug.htt_stats.lock); 4592 4592 debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+25 -2
drivers/net/wireless/ath/ath11k/debug_htt_stats.h drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
··· 1660 1660 u32 num_obss_tx_ppdu_failure; 1661 1661 }; 1662 1662 1663 - void ath11k_debug_htt_stats_init(struct ath11k *ar); 1664 - 1665 1663 struct htt_ring_backpressure_stats_tlv { 1666 1664 u32 pdev_id; 1667 1665 u32 current_head_idx; ··· 1684 1686 */ 1685 1687 u32 backpressure_hist[5]; 1686 1688 }; 1689 + 1690 + #ifdef CONFIG_ATH11K_DEBUGFS 1691 + 1692 + void ath11k_debugfs_htt_stats_init(struct ath11k *ar); 1693 + void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab, 1694 + struct sk_buff *skb); 1695 + int ath11k_debugfs_htt_stats_req(struct ath11k *ar); 1696 + 1697 + #else /* CONFIG_ATH11K_DEBUGFS */ 1698 + 1699 + static inline void ath11k_debugfs_htt_stats_init(struct ath11k *ar) 1700 + { 1701 + } 1702 + 1703 + static inline void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab, 1704 + struct sk_buff *skb) 1705 + { 1706 + } 1707 + 1708 + static inline int ath11k_debugfs_htt_stats_req(struct ath11k *ar) 1709 + { 1710 + return 0; 1711 + } 1712 + 1713 + #endif /* CONFIG_ATH11K_DEBUGFS */ 1687 1714 1688 1715 #endif
+1112
drivers/net/wireless/ath/ath11k/debugfs.c
··· 1 + // SPDX-License-Identifier: BSD-3-Clause-Clear 2 + /* 3 + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #include "debugfs.h" 7 + 8 + #include "core.h" 9 + #include "debug.h" 10 + #include "wmi.h" 11 + #include "hal_rx.h" 12 + #include "dp_tx.h" 13 + #include "debugfs_htt_stats.h" 14 + #include "peer.h" 15 + 16 + static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = { 17 + "REO2SW1_RING", 18 + "REO2SW2_RING", 19 + "REO2SW3_RING", 20 + "REO2SW4_RING", 21 + "WBM2REO_LINK_RING", 22 + "REO2TCL_RING", 23 + "REO2FW_RING", 24 + "RELEASE_RING", 25 + "PPE_RELEASE_RING", 26 + "TCL2TQM_RING", 27 + "TQM_RELEASE_RING", 28 + "REO_RELEASE_RING", 29 + "WBM2SW0_RELEASE_RING", 30 + "WBM2SW1_RELEASE_RING", 31 + "WBM2SW2_RELEASE_RING", 32 + "WBM2SW3_RELEASE_RING", 33 + "REO_CMD_RING", 34 + "REO_STATUS_RING", 35 + }; 36 + 37 + static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = { 38 + "FW2RXDMA_BUF_RING", 39 + "FW2RXDMA_STATUS_RING", 40 + "FW2RXDMA_LINK_RING", 41 + "SW2RXDMA_BUF_RING", 42 + "WBM2RXDMA_LINK_RING", 43 + "RXDMA2FW_RING", 44 + "RXDMA2SW_RING", 45 + "RXDMA2RELEASE_RING", 46 + "RXDMA2REO_RING", 47 + "MONITOR_STATUS_RING", 48 + "MONITOR_BUF_RING", 49 + "MONITOR_DESC_RING", 50 + "MONITOR_DEST_RING", 51 + }; 52 + 53 + static void ath11k_fw_stats_pdevs_free(struct list_head *head) 54 + { 55 + struct ath11k_fw_stats_pdev *i, *tmp; 56 + 57 + list_for_each_entry_safe(i, tmp, head, list) { 58 + list_del(&i->list); 59 + kfree(i); 60 + } 61 + } 62 + 63 + static void ath11k_fw_stats_vdevs_free(struct list_head *head) 64 + { 65 + struct ath11k_fw_stats_vdev *i, *tmp; 66 + 67 + list_for_each_entry_safe(i, tmp, head, list) { 68 + list_del(&i->list); 69 + kfree(i); 70 + } 71 + } 72 + 73 + static void ath11k_fw_stats_bcn_free(struct list_head *head) 74 + { 75 + struct ath11k_fw_stats_bcn *i, *tmp; 76 + 77 + list_for_each_entry_safe(i, tmp, head, list) { 78 + list_del(&i->list); 79 + kfree(i); 80 + } 81 + } 82 + 83 + static 
void ath11k_debugfs_fw_stats_reset(struct ath11k *ar) 84 + { 85 + spin_lock_bh(&ar->data_lock); 86 + ar->debug.fw_stats_done = false; 87 + ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); 88 + ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); 89 + spin_unlock_bh(&ar->data_lock); 90 + } 91 + 92 + void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb) 93 + { 94 + struct ath11k_fw_stats stats = {}; 95 + struct ath11k *ar; 96 + struct ath11k_pdev *pdev; 97 + bool is_end; 98 + static unsigned int num_vdev, num_bcn; 99 + size_t total_vdevs_started = 0; 100 + int i, ret; 101 + 102 + INIT_LIST_HEAD(&stats.pdevs); 103 + INIT_LIST_HEAD(&stats.vdevs); 104 + INIT_LIST_HEAD(&stats.bcn); 105 + 106 + ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); 107 + if (ret) { 108 + ath11k_warn(ab, "failed to pull fw stats: %d\n", ret); 109 + goto free; 110 + } 111 + 112 + rcu_read_lock(); 113 + ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); 114 + if (!ar) { 115 + rcu_read_unlock(); 116 + ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n", 117 + stats.pdev_id, ret); 118 + goto free; 119 + } 120 + 121 + spin_lock_bh(&ar->data_lock); 122 + 123 + if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { 124 + list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); 125 + ar->debug.fw_stats_done = true; 126 + goto complete; 127 + } 128 + 129 + if (stats.stats_id == WMI_REQUEST_VDEV_STAT) { 130 + if (list_empty(&stats.vdevs)) { 131 + ath11k_warn(ab, "empty vdev stats"); 132 + goto complete; 133 + } 134 + /* FW sends all the active VDEV stats irrespective of PDEV, 135 + * hence limit until the count of all VDEVs started 136 + */ 137 + for (i = 0; i < ab->num_radios; i++) { 138 + pdev = rcu_dereference(ab->pdevs_active[i]); 139 + if (pdev && pdev->ar) 140 + total_vdevs_started += ar->num_started_vdevs; 141 + } 142 + 143 + is_end = ((++num_vdev) == total_vdevs_started); 144 + 145 + list_splice_tail_init(&stats.vdevs, 146 + &ar->debug.fw_stats.vdevs); 147 
+ 148 + if (is_end) { 149 + ar->debug.fw_stats_done = true; 150 + num_vdev = 0; 151 + } 152 + goto complete; 153 + } 154 + 155 + if (stats.stats_id == WMI_REQUEST_BCN_STAT) { 156 + if (list_empty(&stats.bcn)) { 157 + ath11k_warn(ab, "empty bcn stats"); 158 + goto complete; 159 + } 160 + /* Mark end until we reached the count of all started VDEVs 161 + * within the PDEV 162 + */ 163 + is_end = ((++num_bcn) == ar->num_started_vdevs); 164 + 165 + list_splice_tail_init(&stats.bcn, 166 + &ar->debug.fw_stats.bcn); 167 + 168 + if (is_end) { 169 + ar->debug.fw_stats_done = true; 170 + num_bcn = 0; 171 + } 172 + } 173 + complete: 174 + complete(&ar->debug.fw_stats_complete); 175 + rcu_read_unlock(); 176 + spin_unlock_bh(&ar->data_lock); 177 + 178 + free: 179 + ath11k_fw_stats_pdevs_free(&stats.pdevs); 180 + ath11k_fw_stats_vdevs_free(&stats.vdevs); 181 + ath11k_fw_stats_bcn_free(&stats.bcn); 182 + } 183 + 184 + static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, 185 + struct stats_request_params *req_param) 186 + { 187 + struct ath11k_base *ab = ar->ab; 188 + unsigned long timeout, time_left; 189 + int ret; 190 + 191 + lockdep_assert_held(&ar->conf_mutex); 192 + 193 + /* FW stats can get split when exceeding the stats data buffer limit. 
194 + * In that case, since there is no end marking for the back-to-back 195 + * received 'update stats' event, we keep a 3 seconds timeout in case, 196 + * fw_stats_done is not marked yet 197 + */ 198 + timeout = jiffies + msecs_to_jiffies(3 * HZ); 199 + 200 + ath11k_debugfs_fw_stats_reset(ar); 201 + 202 + reinit_completion(&ar->debug.fw_stats_complete); 203 + 204 + ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); 205 + 206 + if (ret) { 207 + ath11k_warn(ab, "could not request fw stats (%d)\n", 208 + ret); 209 + return ret; 210 + } 211 + 212 + time_left = 213 + wait_for_completion_timeout(&ar->debug.fw_stats_complete, 214 + 1 * HZ); 215 + if (!time_left) 216 + return -ETIMEDOUT; 217 + 218 + for (;;) { 219 + if (time_after(jiffies, timeout)) 220 + break; 221 + 222 + spin_lock_bh(&ar->data_lock); 223 + if (ar->debug.fw_stats_done) { 224 + spin_unlock_bh(&ar->data_lock); 225 + break; 226 + } 227 + spin_unlock_bh(&ar->data_lock); 228 + } 229 + return 0; 230 + } 231 + 232 + static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) 233 + { 234 + struct ath11k *ar = inode->i_private; 235 + struct ath11k_base *ab = ar->ab; 236 + struct stats_request_params req_param; 237 + void *buf = NULL; 238 + int ret; 239 + 240 + mutex_lock(&ar->conf_mutex); 241 + 242 + if (ar->state != ATH11K_STATE_ON) { 243 + ret = -ENETDOWN; 244 + goto err_unlock; 245 + } 246 + 247 + buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 248 + if (!buf) { 249 + ret = -ENOMEM; 250 + goto err_unlock; 251 + } 252 + 253 + req_param.pdev_id = ar->pdev->pdev_id; 254 + req_param.vdev_id = 0; 255 + req_param.stats_id = WMI_REQUEST_PDEV_STAT; 256 + 257 + ret = ath11k_debugfs_fw_stats_request(ar, &req_param); 258 + if (ret) { 259 + ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret); 260 + goto err_free; 261 + } 262 + 263 + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, 264 + buf); 265 + 266 + file->private_data = buf; 267 + 268 + mutex_unlock(&ar->conf_mutex); 269 + 
return 0; 270 + 271 + err_free: 272 + vfree(buf); 273 + 274 + err_unlock: 275 + mutex_unlock(&ar->conf_mutex); 276 + return ret; 277 + } 278 + 279 + static int ath11k_release_pdev_stats(struct inode *inode, struct file *file) 280 + { 281 + vfree(file->private_data); 282 + 283 + return 0; 284 + } 285 + 286 + static ssize_t ath11k_read_pdev_stats(struct file *file, 287 + char __user *user_buf, 288 + size_t count, loff_t *ppos) 289 + { 290 + const char *buf = file->private_data; 291 + size_t len = strlen(buf); 292 + 293 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 294 + } 295 + 296 + static const struct file_operations fops_pdev_stats = { 297 + .open = ath11k_open_pdev_stats, 298 + .release = ath11k_release_pdev_stats, 299 + .read = ath11k_read_pdev_stats, 300 + .owner = THIS_MODULE, 301 + .llseek = default_llseek, 302 + }; 303 + 304 + static int ath11k_open_vdev_stats(struct inode *inode, struct file *file) 305 + { 306 + struct ath11k *ar = inode->i_private; 307 + struct stats_request_params req_param; 308 + void *buf = NULL; 309 + int ret; 310 + 311 + mutex_lock(&ar->conf_mutex); 312 + 313 + if (ar->state != ATH11K_STATE_ON) { 314 + ret = -ENETDOWN; 315 + goto err_unlock; 316 + } 317 + 318 + buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 319 + if (!buf) { 320 + ret = -ENOMEM; 321 + goto err_unlock; 322 + } 323 + 324 + req_param.pdev_id = ar->pdev->pdev_id; 325 + /* VDEV stats is always sent for all active VDEVs from FW */ 326 + req_param.vdev_id = 0; 327 + req_param.stats_id = WMI_REQUEST_VDEV_STAT; 328 + 329 + ret = ath11k_debugfs_fw_stats_request(ar, &req_param); 330 + if (ret) { 331 + ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); 332 + goto err_free; 333 + } 334 + 335 + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, 336 + buf); 337 + 338 + file->private_data = buf; 339 + 340 + mutex_unlock(&ar->conf_mutex); 341 + return 0; 342 + 343 + err_free: 344 + vfree(buf); 345 + 346 + err_unlock: 347 + 
mutex_unlock(&ar->conf_mutex); 348 + return ret; 349 + } 350 + 351 + static int ath11k_release_vdev_stats(struct inode *inode, struct file *file) 352 + { 353 + vfree(file->private_data); 354 + 355 + return 0; 356 + } 357 + 358 + static ssize_t ath11k_read_vdev_stats(struct file *file, 359 + char __user *user_buf, 360 + size_t count, loff_t *ppos) 361 + { 362 + const char *buf = file->private_data; 363 + size_t len = strlen(buf); 364 + 365 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 366 + } 367 + 368 + static const struct file_operations fops_vdev_stats = { 369 + .open = ath11k_open_vdev_stats, 370 + .release = ath11k_release_vdev_stats, 371 + .read = ath11k_read_vdev_stats, 372 + .owner = THIS_MODULE, 373 + .llseek = default_llseek, 374 + }; 375 + 376 + static int ath11k_open_bcn_stats(struct inode *inode, struct file *file) 377 + { 378 + struct ath11k *ar = inode->i_private; 379 + struct ath11k_vif *arvif; 380 + struct stats_request_params req_param; 381 + void *buf = NULL; 382 + int ret; 383 + 384 + mutex_lock(&ar->conf_mutex); 385 + 386 + if (ar->state != ATH11K_STATE_ON) { 387 + ret = -ENETDOWN; 388 + goto err_unlock; 389 + } 390 + 391 + buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE); 392 + if (!buf) { 393 + ret = -ENOMEM; 394 + goto err_unlock; 395 + } 396 + 397 + req_param.stats_id = WMI_REQUEST_BCN_STAT; 398 + req_param.pdev_id = ar->pdev->pdev_id; 399 + 400 + /* loop all active VDEVs for bcn stats */ 401 + list_for_each_entry(arvif, &ar->arvifs, list) { 402 + if (!arvif->is_up) 403 + continue; 404 + 405 + req_param.vdev_id = arvif->vdev_id; 406 + ret = ath11k_debugfs_fw_stats_request(ar, &req_param); 407 + if (ret) { 408 + ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret); 409 + goto err_free; 410 + } 411 + } 412 + 413 + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, 414 + buf); 415 + 416 + /* since beacon stats request is looped for all active VDEVs, saved fw 417 + * stats is not freed for each request 
until done for all active VDEVs 418 + */ 419 + spin_lock_bh(&ar->data_lock); 420 + ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn); 421 + spin_unlock_bh(&ar->data_lock); 422 + 423 + file->private_data = buf; 424 + 425 + mutex_unlock(&ar->conf_mutex); 426 + return 0; 427 + 428 + err_free: 429 + vfree(buf); 430 + 431 + err_unlock: 432 + mutex_unlock(&ar->conf_mutex); 433 + return ret; 434 + } 435 + 436 + static int ath11k_release_bcn_stats(struct inode *inode, struct file *file) 437 + { 438 + vfree(file->private_data); 439 + 440 + return 0; 441 + } 442 + 443 + static ssize_t ath11k_read_bcn_stats(struct file *file, 444 + char __user *user_buf, 445 + size_t count, loff_t *ppos) 446 + { 447 + const char *buf = file->private_data; 448 + size_t len = strlen(buf); 449 + 450 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 451 + } 452 + 453 + static const struct file_operations fops_bcn_stats = { 454 + .open = ath11k_open_bcn_stats, 455 + .release = ath11k_release_bcn_stats, 456 + .read = ath11k_read_bcn_stats, 457 + .owner = THIS_MODULE, 458 + .llseek = default_llseek, 459 + }; 460 + 461 + static ssize_t ath11k_read_simulate_fw_crash(struct file *file, 462 + char __user *user_buf, 463 + size_t count, loff_t *ppos) 464 + { 465 + const char buf[] = 466 + "To simulate firmware crash write one of the keywords to this file:\n" 467 + "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n" 468 + "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n"; 469 + 470 + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 471 + } 472 + 473 + /* Simulate firmware crash: 474 + * 'soft': Call wmi command causing firmware hang. This firmware hang is 475 + * recoverable by warm firmware reset. 476 + * 'hard': Force firmware crash by setting any vdev parameter for not allowed 477 + * vdev id. This is hard firmware crash because it is recoverable only by cold 478 + * firmware reset. 
479 + */ 480 + static ssize_t ath11k_write_simulate_fw_crash(struct file *file, 481 + const char __user *user_buf, 482 + size_t count, loff_t *ppos) 483 + { 484 + struct ath11k_base *ab = file->private_data; 485 + struct ath11k_pdev *pdev; 486 + struct ath11k *ar = ab->pdevs[0].ar; 487 + char buf[32] = {0}; 488 + ssize_t rc; 489 + int i, ret, radioup = 0; 490 + 491 + for (i = 0; i < ab->num_radios; i++) { 492 + pdev = &ab->pdevs[i]; 493 + ar = pdev->ar; 494 + if (ar && ar->state == ATH11K_STATE_ON) { 495 + radioup = 1; 496 + break; 497 + } 498 + } 499 + /* filter partial writes and invalid commands */ 500 + if (*ppos != 0 || count >= sizeof(buf) || count == 0) 501 + return -EINVAL; 502 + 503 + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); 504 + if (rc < 0) 505 + return rc; 506 + 507 + /* drop the possible '\n' from the end */ 508 + if (buf[*ppos - 1] == '\n') 509 + buf[*ppos - 1] = '\0'; 510 + 511 + if (radioup == 0) { 512 + ret = -ENETDOWN; 513 + goto exit; 514 + } 515 + 516 + if (!strcmp(buf, "assert")) { 517 + ath11k_info(ab, "simulating firmware assert crash\n"); 518 + ret = ath11k_wmi_force_fw_hang_cmd(ar, 519 + ATH11K_WMI_FW_HANG_ASSERT_TYPE, 520 + ATH11K_WMI_FW_HANG_DELAY); 521 + } else { 522 + ret = -EINVAL; 523 + goto exit; 524 + } 525 + 526 + if (ret) { 527 + ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret); 528 + goto exit; 529 + } 530 + 531 + ret = count; 532 + 533 + exit: 534 + return ret; 535 + } 536 + 537 + static const struct file_operations fops_simulate_fw_crash = { 538 + .read = ath11k_read_simulate_fw_crash, 539 + .write = ath11k_write_simulate_fw_crash, 540 + .open = simple_open, 541 + .owner = THIS_MODULE, 542 + .llseek = default_llseek, 543 + }; 544 + 545 + static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file, 546 + const char __user *ubuf, 547 + size_t count, loff_t *ppos) 548 + { 549 + struct ath11k *ar = file->private_data; 550 + u32 filter; 551 + int ret; 552 + 553 + if 
(kstrtouint_from_user(ubuf, count, 0, &filter)) 554 + return -EINVAL; 555 + 556 + mutex_lock(&ar->conf_mutex); 557 + 558 + if (ar->state != ATH11K_STATE_ON) { 559 + ret = -ENETDOWN; 560 + goto out; 561 + } 562 + 563 + if (filter == ar->debug.extd_tx_stats) { 564 + ret = count; 565 + goto out; 566 + } 567 + 568 + ar->debug.extd_tx_stats = filter; 569 + ret = count; 570 + 571 + out: 572 + mutex_unlock(&ar->conf_mutex); 573 + return ret; 574 + } 575 + 576 + static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file, 577 + char __user *ubuf, 578 + size_t count, loff_t *ppos) 579 + 580 + { 581 + char buf[32] = {0}; 582 + struct ath11k *ar = file->private_data; 583 + int len = 0; 584 + 585 + mutex_lock(&ar->conf_mutex); 586 + len = scnprintf(buf, sizeof(buf) - len, "%08x\n", 587 + ar->debug.extd_tx_stats); 588 + mutex_unlock(&ar->conf_mutex); 589 + 590 + return simple_read_from_buffer(ubuf, count, ppos, buf, len); 591 + } 592 + 593 + static const struct file_operations fops_extd_tx_stats = { 594 + .read = ath11k_read_enable_extd_tx_stats, 595 + .write = ath11k_write_enable_extd_tx_stats, 596 + .open = simple_open 597 + }; 598 + 599 + static ssize_t ath11k_write_extd_rx_stats(struct file *file, 600 + const char __user *ubuf, 601 + size_t count, loff_t *ppos) 602 + { 603 + struct ath11k *ar = file->private_data; 604 + struct ath11k_base *ab = ar->ab; 605 + struct htt_rx_ring_tlv_filter tlv_filter = {0}; 606 + u32 enable, rx_filter = 0, ring_id; 607 + int i; 608 + int ret; 609 + 610 + if (kstrtouint_from_user(ubuf, count, 0, &enable)) 611 + return -EINVAL; 612 + 613 + mutex_lock(&ar->conf_mutex); 614 + 615 + if (ar->state != ATH11K_STATE_ON) { 616 + ret = -ENETDOWN; 617 + goto exit; 618 + } 619 + 620 + if (enable > 1) { 621 + ret = -EINVAL; 622 + goto exit; 623 + } 624 + 625 + if (enable == ar->debug.extd_rx_stats) { 626 + ret = count; 627 + goto exit; 628 + } 629 + 630 + if (enable) { 631 + rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START; 632 + rx_filter |= 
HTT_RX_FILTER_TLV_FLAGS_PPDU_START; 633 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END; 634 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS; 635 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT; 636 + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE; 637 + 638 + tlv_filter.rx_filter = rx_filter; 639 + tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; 640 + tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; 641 + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; 642 + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | 643 + HTT_RX_FP_DATA_FILTER_FLASG3; 644 + } else { 645 + tlv_filter = ath11k_mac_mon_status_filter_default; 646 + } 647 + 648 + ar->debug.rx_filter = tlv_filter.rx_filter; 649 + 650 + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { 651 + ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; 652 + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, 653 + HAL_RXDMA_MONITOR_STATUS, 654 + DP_RX_BUFFER_SIZE, &tlv_filter); 655 + 656 + if (ret) { 657 + ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); 658 + goto exit; 659 + } 660 + } 661 + 662 + ar->debug.extd_rx_stats = enable; 663 + ret = count; 664 + exit: 665 + mutex_unlock(&ar->conf_mutex); 666 + return ret; 667 + } 668 + 669 + static ssize_t ath11k_read_extd_rx_stats(struct file *file, 670 + char __user *ubuf, 671 + size_t count, loff_t *ppos) 672 + { 673 + struct ath11k *ar = file->private_data; 674 + char buf[32]; 675 + int len = 0; 676 + 677 + mutex_lock(&ar->conf_mutex); 678 + len = scnprintf(buf, sizeof(buf) - len, "%d\n", 679 + ar->debug.extd_rx_stats); 680 + mutex_unlock(&ar->conf_mutex); 681 + 682 + return simple_read_from_buffer(ubuf, count, ppos, buf, len); 683 + } 684 + 685 + static const struct file_operations fops_extd_rx_stats = { 686 + .read = ath11k_read_extd_rx_stats, 687 + .write = ath11k_write_extd_rx_stats, 688 + .open = simple_open, 689 + 
};

/* Append one backpressure stats record to @buf; returns the new length.
 * Caller must hold ab->base_lock (asserted below).
 */
static int ath11k_fill_bp_stats(struct ath11k_base *ab,
				struct ath11k_bp_stats *bp_stats,
				char *buf, int len, int size)
{
	lockdep_assert_held(&ab->base_lock);

	len += scnprintf(buf + len, size - len, "count: %u\n",
			 bp_stats->count);
	len += scnprintf(buf + len, size - len, "hp: %u\n",
			 bp_stats->hp);
	len += scnprintf(buf + len, size - len, "tp: %u\n",
			 bp_stats->tp);
	len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
			 jiffies_to_msecs(jiffies - bp_stats->jiffies));
	return len;
}

/* Render all UMAC and per-pdev LMAC ring backpressure stats into @buf. */
static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
						     char *buf, int size)
{
	struct ath11k_bp_stats *bp_stats;
	bool stats_rxd = false;
	u8 i, pdev_idx;
	int len = 0;

	len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
	len += scnprintf(buf + len, size - len, "==================\n");

	spin_lock_bh(&ab->base_lock);
	for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];

		/* rings that never reported backpressure are skipped */
		if (!bp_stats->count)
			continue;

		len += scnprintf(buf + len, size - len, "Ring: %s\n",
				 htt_bp_umac_ring[i]);
		len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
		stats_rxd = true;
	}

	for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
		for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
			bp_stats =
				&ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];

			if (!bp_stats->count)
				continue;

			len += scnprintf(buf + len, size - len, "Ring: %s\n",
					 htt_bp_lmac_ring[i]);
			len += scnprintf(buf + len, size - len, "pdev: %d\n",
					 pdev_idx);
			len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
			stats_rxd = true;
		}
	}
	spin_unlock_bh(&ab->base_lock);

	if (!stats_rxd)
		len += scnprintf(buf + len, size - len,
				 "No Ring Backpressure stats received\n\n");

	return len;
}

/* debugfs read: dump SOC-level datapath error counters and backpressure
 * stats as text.
 */
static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos)
{
	struct ath11k_base *ab = file->private_data;
	struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
	int len = 0, i, retval;
	const int size = 4096;
	static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
			"Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
			"Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
			"AMSDU parse", "SA timeout", "DA timeout",
			"Flow timeout", "Flush req"};
	static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
			"Desc addr zero", "Desc inval", "AMPDU in non BA",
			"Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
			"Frame OOR", "BAR OOR", "No BA session",
			"Frame SN equal SSN", "PN check fail", "2k err",
			"PN err", "Desc blocked"};

	char *buf;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
	len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
			 soc_stats->err_ring_pkts);
	len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
			 soc_stats->invalid_rbm);
	len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
	for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
		len += scnprintf(buf + len, size - len, "%s: %u\n",
				 rxdma_err[i], soc_stats->rxdma_error[i]);

	len += scnprintf(buf + len, size - len, "\nREO errors:\n");
	for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
		len += scnprintf(buf + len, size - len, "%s: %u\n",
				 reo_err[i], soc_stats->reo_error[i]);

	len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
	len += scnprintf(buf + len, size - len,
			 "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
			 soc_stats->hal_reo_error[0],
			 soc_stats->hal_reo_error[1],
			 soc_stats->hal_reo_error[2],
			 soc_stats->hal_reo_error[3]);

	len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
	len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");

	for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
		len += scnprintf(buf + len, size - len, "ring%d: %u\n",
				 i, soc_stats->tx_err.desc_na[i]);

	len += scnprintf(buf + len, size - len,
			 "\nMisc Transmit Failures: %d\n",
			 atomic_read(&soc_stats->tx_err.misc_fail));

	len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);

	/* scnprintf never overruns, but clamp defensively before copyout */
	if (len > size)
		len = size;
	retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);

	return retval;
}

static const struct file_operations fops_soc_dp_stats = {
	.read = ath11k_debugfs_dump_soc_dp_stats,
	.open = simple_open,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};

/* Create the per-SOC hw-params directory (e.g. ath11k/ipq8074) and the
 * SOC-level debugfs files. Skipped on re-registration after hw restart.
 */
int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
	if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
		return 0;

	ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);

	if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
		if (IS_ERR(ab->debugfs_soc))
			return PTR_ERR(ab->debugfs_soc);
		return -ENOMEM;
	}

	debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
			    &fops_simulate_fw_crash);

	debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
			    &fops_soc_dp_stats);

	return 0;
}

/* Tear down the whole ath11k debugfs tree (the top-level dir removal is
 * recursive, so per-SOC and per-pdev entries go with it).
 */
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
{
	debugfs_remove_recursive(ab->debugfs_ath11k);
	ab->debugfs_ath11k = NULL;
}

/* Create the top-level "ath11k" debugfs directory. */
int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
	ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);

	if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
		if (IS_ERR(ab->debugfs_ath11k))
			return PTR_ERR(ab->debugfs_ath11k);
		return -ENOMEM;
	}

	return 0;
}

void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
	debugfs_remove_recursive(ab->debugfs_soc);
	ab->debugfs_soc = NULL;
}

/* Create the per-pdev "fw_stats" directory and its stats files, and
 * initialise the accumulation lists and completion used by the fw stats
 * request path.
 */
void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
{
	struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
							ar->debug.debugfs_pdev);

	ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;

	/* all stats debugfs files created are under "fw_stats" directory
	 * created per PDEV
	 */
	debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
			    &fops_pdev_stats);
	debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
			    &fops_vdev_stats);
	debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
			    &fops_bcn_stats);

	INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
	INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
	INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);

	init_completion(&ar->debug.fw_stats_complete);
}

/* debugfs write: "<filter-hex> <mode>" — program the firmware pktlog
 * filter and the rx monitor status ring filters accordingly.
 */
static ssize_t ath11k_write_pktlog_filter(struct file *file,
					  const char __user *ubuf,
					  size_t count, loff_t *ppos)
{
	struct ath11k *ar = file->private_data;
	struct ath11k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 rx_filter = 0, ring_id, filter, mode;
	u8 buf[128] = {0};
	int i, ret;
	ssize_t rc;

	mutex_lock(&ar->conf_mutex);
	if (ar->state != ATH11K_STATE_ON) {
		ret = -ENETDOWN;
		goto out;
	}

	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
	if (rc < 0) {
		ret = rc;
		goto out;
	}
	buf[rc] = '\0';

	ret = sscanf(buf, "0x%x %u", &filter, &mode);
	if (ret != 2) {
		ret = -EINVAL;
		goto out;
	}

	if (filter) {
		ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
		if (ret) {
			ath11k_warn(ar->ab,
				    "failed to enable pktlog filter %x: %d\n",
				    ar->debug.pktlog_filter, ret);
			goto out;
		}
	} else {
		ret = ath11k_wmi_pdev_pktlog_disable(ar);
		if (ret) {
			ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
			goto out;
		}
	}

#define HTT_RX_FILTER_TLV_LITE_MODE \
			(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
			HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
			HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
			HTT_RX_FILTER_TLV_FLAGS_MPDU_START)

	if (mode == ATH11K_PKTLOG_MODE_FULL) {
		rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
			    HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
			    HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
			    HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
			    HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
			    HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
	} else if (mode == ATH11K_PKTLOG_MODE_LITE) {
		ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
							  HTT_PPDU_STATS_TAG_PKTLOG);
		if (ret) {
			ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
			goto out;
		}

		rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
	} else {
		ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
							  HTT_PPDU_STATS_TAG_DEFAULT);
		if (ret) {
			ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
				   ret);
			goto out;
		}
	}

	tlv_filter.rx_filter = rx_filter;
	if (rx_filter) {
		tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
					       HTT_RX_FP_DATA_FILTER_FLASG3;
	}

	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
		ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       ar->dp.mac_id + i,
						       HAL_RXDMA_MONITOR_STATUS,
						       DP_RX_BUFFER_SIZE, &tlv_filter);

		if (ret) {
			ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
			goto out;
		}
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
		   filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));

	ar->debug.pktlog_filter = filter;
	ar->debug.pktlog_mode = mode;
	ret = count;

out:
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static ssize_t ath11k_read_pktlog_filter(struct file *file,
					 char __user *ubuf,
					 size_t count, loff_t *ppos)

{
	char buf[32] = {0};
	struct ath11k *ar = file->private_data;
	int len = 0;

	mutex_lock(&ar->conf_mutex);
	len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
			ar->debug.pktlog_filter,
			ar->debug.pktlog_mode);
	mutex_unlock(&ar->conf_mutex);

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations fops_pktlog_filter = {
	.read = ath11k_read_pktlog_filter,
	.write = ath11k_write_pktlog_filter,
	.open = simple_open
};

/* debugfs write: ask firmware to inject a simulated radar event (DFS). */
static ssize_t ath11k_write_simulate_radar(struct file *file,
					   const char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct ath11k *ar = file->private_data;
	int ret;

	ret = ath11k_wmi_simulate_radar(ar);
	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_simulate_radar = {
	.write = ath11k_write_simulate_radar,
	.open = simple_open
};

/* Create the per-pdev ("mac<N>") debugfs directory, its symlink under
 * ieee80211/phy*, and all per-pdev debug files.
 */
int ath11k_debugfs_register(struct ath11k *ar)
{
	struct ath11k_base *ab = ar->ab;
	char pdev_name[5];
	char buf[100] = {0};

	snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);

	ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);

	if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
		if (IS_ERR(ar->debug.debugfs_pdev))
			return PTR_ERR(ar->debug.debugfs_pdev);

		return -ENOMEM;
	}

	/* Create a symlink under ieee80211/phy* */
	snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev);
	debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);

	ath11k_debugfs_htt_stats_init(ar);

	ath11k_debugfs_fw_stats_init(ar);

	debugfs_create_file("ext_tx_stats", 0644,
			    ar->debug.debugfs_pdev, ar,
			    &fops_extd_tx_stats);
	debugfs_create_file("ext_rx_stats", 0644,
			    ar->debug.debugfs_pdev, ar,
			    &fops_extd_rx_stats);
	debugfs_create_file("pktlog_filter", 0644,
			    ar->debug.debugfs_pdev, ar,
			    &fops_pktlog_filter);

	/* DFS controls only make sense when a 5 GHz band is present */
	if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
		debugfs_create_file("dfs_simulate_radar", 0200,
				    ar->debug.debugfs_pdev, ar,
				    &fops_simulate_radar);
		debugfs_create_bool("dfs_block_radar_events", 0200,
				    ar->debug.debugfs_pdev,
				    &ar->dfs_block_radar_events);
	}

	return 0;
}

/* Per-pdev entries live under debugfs_soc and are removed recursively by
 * ath11k_debugfs_pdev_destroy(), so nothing to do here.
 */
void ath11k_debugfs_unregister(struct ath11k *ar)
{
}
+217
drivers/net/wireless/ath/ath11k/debugfs.h
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#ifndef _ATH11K_DEBUGFS_H_
#define _ATH11K_DEBUGFS_H_

#include "hal_tx.h"

#define ATH11K_TX_POWER_MAX_VAL	70
#define ATH11K_TX_POWER_MIN_VAL	0

/* htt_dbg_ext_stats_type */
enum ath11k_dbg_htt_ext_stats_type {
	ATH11K_DBG_HTT_EXT_STATS_RESET                      =  0,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TX                    =  1,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_RX                    =  2,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ                =  3,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED              =  4,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR                 =  5,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM                   =  6,
	ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ                   =  7,
	ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO                 =  8,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE               =  9,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE               = 10,
	ATH11K_DBG_HTT_EXT_STATS_PEER_INFO                  = 11,
	ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO            = 12,
	ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ                  = 13,
	ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO               = 14,
	ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO                  = 15,
	ATH11K_DBG_HTT_EXT_STATS_SFM_INFO                   = 16,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU                 = 17,
	ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST          = 18,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS             = 19,
	ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS               = 20,
	ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS         = 21,
	ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO           = 22,
	ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS         = 23,
	ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS    = 24,

	/* keep this last */
	ATH11K_DBG_HTT_NUM_EXT_STATS,
};

/* One in-flight HTT ext-stats debugfs request; buf[] is a flexible array
 * sized by the allocation.
 */
struct debug_htt_stats_req {
	bool done;
	u8 pdev_id;
	u8 type;
	u8 peer_addr[ETH_ALEN];
	struct completion cmpln;
	u32 buf_len;
	u8 buf[];
};

/* On-the-wire pktlog record header followed by a variable payload. */
struct ath_pktlog_hdr {
	u16 flags;
	u16 missed_cnt;
	u16 log_type;
	u16 size;
	u32 timestamp;
	u32 type_specific_data;
	u8 payload[];
};

#define ATH11K_HTT_PEER_STATS_RESET BIT(16)

#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)

enum ath11k_pktlog_filter {
	ATH11K_PKTLOG_RX		= 0x000000001,
	ATH11K_PKTLOG_TX		= 0x000000002,
	ATH11K_PKTLOG_RCFIND		= 0x000000004,
	ATH11K_PKTLOG_RCUPDATE		= 0x000000008,
	ATH11K_PKTLOG_EVENT_SMART_ANT	= 0x000000020,
	ATH11K_PKTLOG_EVENT_SW		= 0x000000040,
	ATH11K_PKTLOG_ANY		= 0x00000006f,
};

enum ath11k_pktlog_mode {
	ATH11K_PKTLOG_MODE_LITE = 1,
	ATH11K_PKTLOG_MODE_FULL = 2,
};

enum ath11k_pktlog_enum {
	ATH11K_PKTLOG_TYPE_TX_CTRL      = 1,
	ATH11K_PKTLOG_TYPE_TX_STAT      = 2,
	ATH11K_PKTLOG_TYPE_TX_MSDU_ID   = 3,
	ATH11K_PKTLOG_TYPE_RX_STAT      = 5,
	ATH11K_PKTLOG_TYPE_RC_FIND      = 6,
	ATH11K_PKTLOG_TYPE_RC_UPDATE    = 7,
	ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
	ATH11K_PKTLOG_TYPE_RX_CBF       = 10,
	ATH11K_PKTLOG_TYPE_RX_STATBUF   = 22,
	ATH11K_PKTLOG_TYPE_PPDU_STATS   = 23,
	ATH11K_PKTLOG_TYPE_LITE_RX      = 24,
};

enum ath11k_dbg_aggr_mode {
	ATH11K_DBG_AGGR_MODE_AUTO,
	ATH11K_DBG_AGGR_MODE_MANUAL,
	ATH11K_DBG_AGGR_MODE_MAX,
};

#ifdef CONFIG_ATH11K_DEBUGFS
int ath11k_debugfs_soc_create(struct ath11k_base *ab);
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
int ath11k_debugfs_register(struct ath11k *ar);
void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);

void ath11k_debugfs_fw_stats_init(struct ath11k *ar);

static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
	return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
}

static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
{
	return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
}

static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
{
	return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
		ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
}

static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
{
	return ar->debug.extd_tx_stats;
}

static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
{
	return ar->debug.extd_rx_stats;
}

static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
{
	return ar->debug.rx_filter;
}

#else
/* CONFIG_ATH11K_DEBUGFS disabled: all entry points are no-op stubs. */
static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
	return 0;
}

static inline void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
}

static inline int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
{
	return 0;
}

static inline void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
{
}

static inline int ath11k_debugfs_register(struct ath11k *ar)
{
	return 0;
}

static inline void ath11k_debugfs_unregister(struct ath11k *ar)
{
}

static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
						   struct sk_buff *skb)
{
}

static inline void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
{
}

static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
{
	return 0;
}

static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
{
	return 0;
}

static inline bool
ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar) 196 + { 197 + return false; 198 + } 199 + 200 + static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar) 201 + { 202 + return false; 203 + } 204 + 205 + static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr) 206 + { 207 + return false; 208 + } 209 + 210 + static inline int ath11k_debugfs_rx_filter(struct ath11k *ar) 211 + { 212 + return 0; 213 + } 214 + 215 + #endif /* CONFIG_MAC80211_DEBUGFS*/ 216 + 217 + #endif /* _ATH11K_DEBUGFS_H_ */
+15 -14
drivers/net/wireless/ath/ath11k/debugfs_sta.c
··· 5 5 6 6 #include <linux/vmalloc.h> 7 7 8 + #include "debugfs_sta.h" 8 9 #include "core.h" 9 10 #include "peer.h" 10 11 #include "debug.h" 11 12 #include "dp_tx.h" 12 - #include "debug_htt_stats.h" 13 + #include "debugfs_htt_stats.h" 13 14 14 - void 15 - ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, 16 - struct ath11k_per_peer_tx_stats *peer_stats, 17 - u8 legacy_rate_idx) 15 + void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, 16 + struct ath11k_per_peer_tx_stats *peer_stats, 17 + u8 legacy_rate_idx) 18 18 { 19 19 struct rate_info *txrate = &arsta->txrate; 20 20 struct ath11k_htt_tx_stats *tx_stats; ··· 125 125 tx_stats->tx_duration += peer_stats->duration; 126 126 } 127 127 128 - void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, 129 - struct sk_buff *msdu, 130 - struct hal_tx_status *ts) 128 + void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, 129 + struct sk_buff *msdu, 130 + struct hal_tx_status *ts) 131 131 { 132 132 struct ath11k_base *ab = ar->ab; 133 133 struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; ··· 200 200 arsta->txrate.nss = arsta->last_txrate.nss; 201 201 arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw); 202 202 203 - ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx); 203 + ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 204 + 204 205 err_out: 205 206 spin_unlock_bh(&ab->base_lock); 206 207 rcu_read_unlock(); ··· 429 428 ar->debug.htt_stats.stats_req = stats_req; 430 429 stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO; 431 430 memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN); 432 - ret = ath11k_dbg_htt_stats_req(ar); 431 + ret = ath11k_debugfs_htt_stats_req(ar); 433 432 mutex_unlock(&ar->conf_mutex); 434 433 if (ret < 0) 435 434 goto out; ··· 821 820 .llseek = default_llseek, 822 821 }; 823 822 824 - void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 825 - struct ieee80211_sta *sta, struct dentry *dir) 823 + void 
ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 824 + struct ieee80211_sta *sta, struct dentry *dir) 826 825 { 827 826 struct ath11k *ar = hw->priv; 828 827 829 - if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 828 + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 830 829 debugfs_create_file("tx_stats", 0400, dir, sta, 831 830 &fops_tx_stats); 832 - if (ath11k_debug_is_extd_rx_stats_enabled(ar)) 831 + if (ath11k_debugfs_is_extd_rx_stats_enabled(ar)) 833 832 debugfs_create_file("rx_stats", 0400, dir, sta, 834 833 &fops_rx_stats); 835 834
+44
drivers/net/wireless/ath/ath11k/debugfs_sta.h
··· 1 + /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 + /* 3 + * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved. 4 + */ 5 + 6 + #ifndef _ATH11K_DEBUGFS_STA_H_ 7 + #define _ATH11K_DEBUGFS_STA_H_ 8 + 9 + #include <net/mac80211.h> 10 + 11 + #include "core.h" 12 + #include "hal_tx.h" 13 + 14 + #ifdef CONFIG_ATH11K_DEBUGFS 15 + 16 + void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 17 + struct ieee80211_sta *sta, struct dentry *dir); 18 + void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, 19 + struct ath11k_per_peer_tx_stats *peer_stats, 20 + u8 legacy_rate_idx); 21 + void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, 22 + struct sk_buff *msdu, 23 + struct hal_tx_status *ts); 24 + 25 + #else /* CONFIG_ATH11K_DEBUGFS */ 26 + 27 + #define ath11k_debugfs_sta_op_add NULL 28 + 29 + static inline void 30 + ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta, 31 + struct ath11k_per_peer_tx_stats *peer_stats, 32 + u8 legacy_rate_idx) 33 + { 34 + } 35 + 36 + static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar, 37 + struct sk_buff *msdu, 38 + struct hal_tx_status *ts) 39 + { 40 + } 41 + 42 + #endif /* CONFIG_ATH11K_DEBUGFS */ 43 + 44 + #endif /* _ATH11K_DEBUGFS_STA_H_ */
+1 -1
drivers/net/wireless/ath/ath11k/dp.c
··· 832 832 for (i = 0; i < ab->num_radios; i++) { 833 833 ar = ab->pdevs[i].ar; 834 834 ath11k_dp_rx_pdev_free(ab, i); 835 - ath11k_debug_unregister(ar); 835 + ath11k_debugfs_unregister(ar); 836 836 ath11k_dp_rx_pdev_mon_detach(ar); 837 837 } 838 838 }
+8 -7
drivers/net/wireless/ath/ath11k/dp_rx.c
··· 9 9 #include <crypto/hash.h> 10 10 #include "core.h" 11 11 #include "debug.h" 12 + #include "debugfs_htt_stats.h" 13 + #include "debugfs_sta.h" 12 14 #include "hal_desc.h" 13 15 #include "hw.h" 14 16 #include "dp_rx.h" ··· 1435 1433 HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) + 1436 1434 HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags); 1437 1435 1438 - if (ath11k_debug_is_extd_tx_stats_enabled(ar)) 1439 - ath11k_accumulate_per_peer_tx_stats(arsta, 1440 - peer_stats, rate_idx); 1436 + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) 1437 + ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx); 1441 1438 } 1442 1439 1443 1440 spin_unlock_bh(&ab->base_lock); ··· 1512 1511 goto exit; 1513 1512 } 1514 1513 1515 - if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) 1514 + if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) 1516 1515 trace_ath11k_htt_ppdu_stats(ar, skb->data, len); 1517 1516 1518 1517 ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); ··· 1659 1658 ath11k_htt_pull_ppdu_stats(ab, skb); 1660 1659 break; 1661 1660 case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: 1662 - ath11k_dbg_htt_ext_stats_handler(ab, skb); 1661 + ath11k_debugfs_htt_ext_stats_handler(ab, skb); 1663 1662 break; 1664 1663 case HTT_T2H_MSG_TYPE_PKTLOG: 1665 1664 ath11k_htt_pktlog(ab, skb); ··· 2910 2909 memset(&ppdu_info, 0, sizeof(ppdu_info)); 2911 2910 ppdu_info.peer_id = HAL_INVALID_PEERID; 2912 2911 2913 - if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) 2912 + if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) 2914 2913 trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE); 2915 2914 2916 2915 hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); ··· 2938 2937 arsta = (struct ath11k_sta *)peer->sta->drv_priv; 2939 2938 ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); 2940 2939 2941 - if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) 2940 + if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr)) 2942 2941 trace_ath11k_htt_rxdesc(ar, skb->data, 
DP_RX_BUFFER_SIZE); 2943 2942 2944 2943 spin_unlock_bh(&ab->base_lock);
+4 -3
drivers/net/wireless/ath/ath11k/dp_tx.c
··· 6 6 #include "core.h" 7 7 #include "dp_tx.h" 8 8 #include "debug.h" 9 + #include "debugfs_sta.h" 9 10 #include "hw.h" 10 11 #include "peer.h" 11 12 ··· 458 457 (info->flags & IEEE80211_TX_CTL_NO_ACK)) 459 458 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 460 459 461 - if (ath11k_debug_is_extd_tx_stats_enabled(ar)) { 460 + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { 462 461 if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) { 463 462 if (ar->last_ppdu_id == 0) { 464 463 ar->last_ppdu_id = ts->ppdu_id; ··· 466 465 ar->cached_ppdu_id == ar->last_ppdu_id) { 467 466 ar->cached_ppdu_id = ar->last_ppdu_id; 468 467 ar->cached_stats.is_ampdu = true; 469 - ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts); 468 + ath11k_debugfs_sta_update_txcompl(ar, msdu, ts); 470 469 memset(&ar->cached_stats, 0, 471 470 sizeof(struct ath11k_per_peer_tx_stats)); 472 471 } else { 473 472 ar->cached_stats.is_ampdu = false; 474 - ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts); 473 + ath11k_debugfs_sta_update_txcompl(ar, msdu, ts); 475 474 memset(&ar->cached_stats, 0, 476 475 sizeof(struct ath11k_per_peer_tx_stats)); 477 476 }
-9
drivers/net/wireless/ath/ath11k/htc.c
··· 50 50 return skb; 51 51 } 52 52 53 - static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc, 54 - struct sk_buff *skb) 55 - { 56 - struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); 57 - 58 - dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); 59 - skb_pull(skb, sizeof(struct ath11k_htc_hdr)); 60 - } 61 - 62 53 static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep, 63 54 struct sk_buff *skb) 64 55 {
+6 -2
drivers/net/wireless/ath/ath11k/hw.c
··· 74 74 config->beacon_tx_offload_max_vdev = 0x2; 75 75 config->num_multicast_filter_entries = 0x20; 76 76 config->num_wow_filters = 0x16; 77 - config->num_keep_alive_pattern = 0x1; 78 77 config->num_keep_alive_pattern = 0; 79 78 } 80 79 ··· 103 104 config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI; 104 105 config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI; 105 106 config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI; 106 - config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI; 107 + 108 + if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) 109 + config->rx_decap_mode = TARGET_DECAP_MODE_RAW; 110 + else 111 + config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI; 112 + 107 113 config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS; 108 114 config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV; 109 115 config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+1
drivers/net/wireless/ath/ath11k/hw.h
··· 155 155 bool vdev_start_delay; 156 156 bool htt_peer_map_v2; 157 157 bool tcl_0_only; 158 + u8 spectral_fft_sz; 158 159 }; 159 160 160 161 struct ath11k_hw_ops {
+5 -4
drivers/net/wireless/ath/ath11k/mac.c
··· 14 14 #include "dp_rx.h" 15 15 #include "testmode.h" 16 16 #include "peer.h" 17 + #include "debugfs_sta.h" 17 18 18 19 #define CHAN2G(_channel, _freq, _flags) { \ 19 20 .band = NL80211_BAND_2GHZ, \ ··· 2968 2967 ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 2969 2968 sta->addr, arvif->vdev_id); 2970 2969 2971 - if (ath11k_debug_is_extd_tx_stats_enabled(ar)) { 2970 + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { 2972 2971 arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL); 2973 2972 if (!arsta->tx_stats) { 2974 2973 ret = -ENOMEM; ··· 4102 4101 4103 4102 if (enable) { 4104 4103 tlv_filter = ath11k_mac_mon_status_filter_default; 4105 - tlv_filter.rx_filter = ath11k_debug_rx_filter(ar); 4104 + tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); 4106 4105 } 4107 4106 4108 4107 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { ··· 5874 5873 .sta_statistics = ath11k_mac_op_sta_statistics, 5875 5874 CFG80211_TESTMODE_CMD(ath11k_tm_cmd) 5876 5875 #ifdef CONFIG_ATH11K_DEBUGFS 5877 - .sta_add_debugfs = ath11k_sta_add_debugfs, 5876 + .sta_add_debugfs = ath11k_debugfs_sta_op_add, 5878 5877 #endif 5879 5878 }; 5880 5879 ··· 6234 6233 goto err_free; 6235 6234 } 6236 6235 6237 - ret = ath11k_debug_register(ar); 6236 + ret = ath11k_debugfs_register(ar); 6238 6237 if (ret) { 6239 6238 ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret); 6240 6239 goto err_free;
+16 -10
drivers/net/wireless/ath/ath11k/spectral.c
··· 17 17 #define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32 18 18 #define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256 19 19 20 - #define ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK 0xFF 21 - 22 20 #define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095 23 21 24 22 /* Max channel computed by sum of 2g and 5g band channels */ ··· 555 557 return max_exp; 556 558 } 557 559 558 - static void ath11k_spectral_parse_16bit_fft(u8 *outbins, u8 *inbins, int num_bins) 560 + static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz) 559 561 { 560 - int i; 561 - __le16 *data = (__le16 *)inbins; 562 + int i, j; 562 563 563 564 i = 0; 565 + j = 0; 564 566 while (i < num_bins) { 565 - outbins[i] = (__le16_to_cpu(data[i])) & 566 - ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK; 567 + outbins[i] = inbins[j]; 567 568 i++; 569 + j += fft_sz; 568 570 } 569 571 } 570 572 ··· 585 587 int ret; 586 588 587 589 lockdep_assert_held(&ar->spectral.lock); 590 + 591 + if (!ab->hw_params.spectral_fft_sz) { 592 + ath11k_warn(ab, "invalid bin size type for hw rev %d\n", 593 + ab->hw_rev); 594 + return -EINVAL; 595 + } 588 596 589 597 tlv = (struct spectral_tlv *)data; 590 598 tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); ··· 653 649 freq = summary->meta.freq2; 654 650 fft_sample->freq2 = __cpu_to_be16(freq); 655 651 656 - ath11k_spectral_parse_16bit_fft(fft_sample->data, 657 - fft_report->bins, 658 - num_bins); 652 + ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins, 653 + ab->hw_params.spectral_fft_sz); 659 654 660 655 fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index, 661 656 search.peak_mag, ··· 960 957 961 958 if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA, 962 959 ab->wmi_ab.svc_map)) 960 + return 0; 961 + 962 + if (!ab->hw_params.spectral_fft_sz) 963 963 return 0; 964 964 965 965 for (i = 0; i < ab->num_radios; i++) {
+1 -50
drivers/net/wireless/ath/ath11k/wmi.c
··· 3342 3342 memset(&init_param, 0, sizeof(init_param)); 3343 3343 memset(&config, 0, sizeof(config)); 3344 3344 3345 - config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS; 3346 - 3347 - if (ab->num_radios == 2) { 3348 - config.num_peers = TARGET_NUM_PEERS(DBS); 3349 - config.num_tids = TARGET_NUM_TIDS(DBS); 3350 - } else if (ab->num_radios == 3) { 3351 - config.num_peers = TARGET_NUM_PEERS(DBS_SBS); 3352 - config.num_tids = TARGET_NUM_TIDS(DBS_SBS); 3353 - } else { 3354 - /* Control should not reach here */ 3355 - config.num_peers = TARGET_NUM_PEERS(SINGLE); 3356 - config.num_tids = TARGET_NUM_TIDS(SINGLE); 3357 - } 3358 - config.num_offload_peers = TARGET_NUM_OFFLD_PEERS; 3359 - config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS; 3360 - config.num_peer_keys = TARGET_NUM_PEER_KEYS; 3361 - config.ast_skid_limit = TARGET_AST_SKID_LIMIT; 3362 - config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; 3363 - config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; 3364 - config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI; 3365 - config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI; 3366 - config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI; 3367 - config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI; 3368 - config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI; 3369 - 3370 - if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) 3371 - config.rx_decap_mode = TARGET_DECAP_MODE_RAW; 3372 - 3373 - config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS; 3374 - config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV; 3375 - config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV; 3376 - config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES; 3377 - config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS; 3378 - config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS; 3379 - config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE; 3380 - config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE; 3381 - config.num_wds_entries = 
TARGET_NUM_WDS_ENTRIES; 3382 - config.dma_burst_size = TARGET_DMA_BURST_SIZE; 3383 - config.rx_skip_defrag_timeout_dup_detection_check = 3384 - TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; 3385 - config.vow_config = TARGET_VOW_CONFIG; 3386 - config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV; 3387 - config.num_msdu_desc = TARGET_NUM_MSDU_DESC; 3388 - config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD; 3389 - config.rx_batchmode = TARGET_RX_BATCHMODE; 3390 - config.peer_map_unmap_v2_support = 1; 3391 - config.twt_ap_pdev_count = ab->num_radios; 3392 - config.twt_ap_sta_count = 1000; 3393 - 3394 3345 ab->hw_params.hw_ops->wmi_init_config(ab, &config); 3395 3346 3396 3347 memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config)); ··· 6252 6301 6253 6302 static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) 6254 6303 { 6255 - ath11k_debug_fw_stats_process(ab, skb); 6304 + ath11k_debugfs_fw_stats_process(ab, skb); 6256 6305 } 6257 6306 6258 6307 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+3 -22
drivers/net/wireless/ath/ath5k/debug.c
··· 161 161 return 0; 162 162 } 163 163 164 - static const struct seq_operations register_seq_ops = { 164 + static const struct seq_operations registers_sops = { 165 165 .start = reg_start, 166 166 .next = reg_next, 167 167 .stop = reg_stop, 168 168 .show = reg_show 169 169 }; 170 170 171 - static int open_file_registers(struct inode *inode, struct file *file) 172 - { 173 - struct seq_file *s; 174 - int res; 175 - res = seq_open(file, &register_seq_ops); 176 - if (res == 0) { 177 - s = file->private_data; 178 - s->private = inode->i_private; 179 - } 180 - return res; 181 - } 182 - 183 - static const struct file_operations fops_registers = { 184 - .open = open_file_registers, 185 - .read = seq_read, 186 - .llseek = seq_lseek, 187 - .release = seq_release, 188 - .owner = THIS_MODULE, 189 - }; 190 - 171 + DEFINE_SEQ_ATTRIBUTE(registers); 191 172 192 173 /* debugfs: beacons */ 193 174 ··· 986 1005 return; 987 1006 988 1007 debugfs_create_file("debug", 0600, phydir, ah, &fops_debug); 989 - debugfs_create_file("registers", 0400, phydir, ah, &fops_registers); 1008 + debugfs_create_file("registers", 0400, phydir, ah, &registers_fops); 990 1009 debugfs_create_file("beacon", 0600, phydir, ah, &fops_beacon); 991 1010 debugfs_create_file("reset", 0200, phydir, ah, &fops_reset); 992 1011 debugfs_create_file("antenna", 0600, phydir, ah, &fops_antenna);
+5
drivers/net/wireless/ath/ath6kl/wmi.c
··· 2639 2639 return -EINVAL; 2640 2640 } 2641 2641 2642 + if (tsid >= 16) { 2643 + ath6kl_err("invalid tsid: %d\n", tsid); 2644 + return -EINVAL; 2645 + } 2646 + 2642 2647 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); 2643 2648 if (!skb) 2644 2649 return -ENOMEM;
+19
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 449 449 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 450 450 451 451 /* The pending URBs have to be canceled. */ 452 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 452 453 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 453 454 &hif_dev->tx.tx_pending, list) { 455 + usb_get_urb(tx_buf->urb); 456 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 454 457 usb_kill_urb(tx_buf->urb); 458 + list_del(&tx_buf->list); 459 + usb_free_urb(tx_buf->urb); 460 + kfree(tx_buf->buf); 461 + kfree(tx_buf); 462 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 455 463 } 464 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 456 465 457 466 usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); 458 467 } ··· 771 762 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; 772 763 unsigned long flags; 773 764 765 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 774 766 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 775 767 &hif_dev->tx.tx_buf, list) { 768 + usb_get_urb(tx_buf->urb); 769 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 776 770 usb_kill_urb(tx_buf->urb); 777 771 list_del(&tx_buf->list); 778 772 usb_free_urb(tx_buf->urb); 779 773 kfree(tx_buf->buf); 780 774 kfree(tx_buf); 775 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 781 776 } 777 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 782 778 783 779 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 784 780 hif_dev->tx.flags |= HIF_USB_TX_FLUSH; 785 781 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 786 782 783 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 787 784 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 788 785 &hif_dev->tx.tx_pending, list) { 786 + usb_get_urb(tx_buf->urb); 787 + spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 789 788 usb_kill_urb(tx_buf->urb); 790 789 list_del(&tx_buf->list); 791 790 usb_free_urb(tx_buf->urb); 792 791 kfree(tx_buf->buf); 793 792 kfree(tx_buf); 793 + spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 794 794 } 795 + 
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 795 796 796 797 usb_kill_anchored_urbs(&hif_dev->mgmt_submitted); 797 798 }
+136 -2
drivers/net/wireless/ath/wcn36xx/hal.h
··· 726 726 #define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102 727 727 #define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103 728 728 #define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104 729 - #define WCN36XX_HAL_CFG_MAX_PARAMS 105 729 + #define WCN36XX_HAL_CFG_ENABLE_NAT_KEEP_ALIVE_FILTER 105 730 + #define WCN36XX_HAL_CFG_ENABLE_SAP_OBSS_PROT 106 731 + #define WCN36XX_HAL_CFG_PSPOLL_DATA_RECEP_TIMEOUT 107 732 + #define WCN36XX_HAL_CFG_TDLS_PUAPSD_BUFFER_STA_CAPABLE 108 733 + #define WCN36XX_HAL_CFG_TDLS_PUAPSD_MASK 109 734 + #define WCN36XX_HAL_CFG_TDLS_PUAPSD_INACTIVITY_TIME 110 735 + #define WCN36XX_HAL_CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD 111 736 + #define WCN36XX_HAL_CFG_ANTENNA_DIVERSITY 112 737 + #define WCN36XX_HAL_CFG_ATH_DISABLE 113 738 + #define WCN36XX_HAL_CFG_FLEXCONNECT_POWER_FACTOR 114 739 + #define WCN36XX_HAL_CFG_ENABLE_ADAPTIVE_RX_DRAIN 115 740 + #define WCN36XX_HAL_CFG_TDLS_OFF_CHANNEL_CAPABLE 116 741 + #define WCN36XX_HAL_CFG_MWS_COEX_V1_WAN_FREQ 117 742 + #define WCN36XX_HAL_CFG_MWS_COEX_V1_WLAN_FREQ 118 743 + #define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG 119 744 + #define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG2 120 745 + #define WCN36XX_HAL_CFG_MWS_COEX_V2_WAN_FREQ 121 746 + #define WCN36XX_HAL_CFG_MWS_COEX_V2_WLAN_FREQ 122 747 + #define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG 123 748 + #define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG2 124 749 + #define WCN36XX_HAL_CFG_MWS_COEX_V3_WAN_FREQ 125 750 + #define WCN36XX_HAL_CFG_MWS_COEX_V3_WLAN_FREQ 126 751 + #define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG 127 752 + #define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG2 128 753 + #define WCN36XX_HAL_CFG_MWS_COEX_V4_WAN_FREQ 129 754 + #define WCN36XX_HAL_CFG_MWS_COEX_V4_WLAN_FREQ 130 755 + #define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG 131 756 + #define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG2 132 757 + #define WCN36XX_HAL_CFG_MWS_COEX_V5_WAN_FREQ 133 758 + #define WCN36XX_HAL_CFG_MWS_COEX_V5_WLAN_FREQ 134 759 + #define WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG 135 760 + #define 
WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG2 136 761 + #define WCN36XX_HAL_CFG_MWS_COEX_V6_WAN_FREQ 137 762 + #define WCN36XX_HAL_CFG_MWS_COEX_V6_WLAN_FREQ 138 763 + #define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG 139 764 + #define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG2 140 765 + #define WCN36XX_HAL_CFG_MWS_COEX_V7_WAN_FREQ 141 766 + #define WCN36XX_HAL_CFG_MWS_COEX_V7_WLAN_FREQ 142 767 + #define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG 143 768 + #define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG2 144 769 + #define WCN36XX_HAL_CFG_MWS_COEX_V8_WAN_FREQ 145 770 + #define WCN36XX_HAL_CFG_MWS_COEX_V8_WLAN_FREQ 146 771 + #define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG 147 772 + #define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG2 148 773 + #define WCN36XX_HAL_CFG_MWS_COEX_V9_WAN_FREQ 149 774 + #define WCN36XX_HAL_CFG_MWS_COEX_V9_WLAN_FREQ 150 775 + #define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG 151 776 + #define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG2 152 777 + #define WCN36XX_HAL_CFG_MWS_COEX_V10_WAN_FREQ 153 778 + #define WCN36XX_HAL_CFG_MWS_COEX_V10_WLAN_FREQ 154 779 + #define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG 155 780 + #define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG2 156 781 + #define WCN36XX_HAL_CFG_MWS_COEX_MODEM_BACKOFF 157 782 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG1 158 783 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG2 159 784 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG3 160 785 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG4 161 786 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG5 162 787 + #define WCN36XX_HAL_CFG_MWS_COEX_CONFIG6 163 788 + #define WCN36XX_HAL_CFG_SAR_POWER_BACKOFF 164 789 + #define WCN36XX_HAL_CFG_GO_LINK_MONITOR_TIMEOUT 165 790 + #define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN 166 791 + #define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN 167 792 + #define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN 168 793 + #define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN 169 794 + #define WCN36XX_HAL_CFG_RMC_FIXED_RATE 170 795 + #define WCN36XX_HAL_CFG_ASD_PROBE_INTERVAL 171 796 + #define 
WCN36XX_HAL_CFG_ASD_TRIGGER_THRESHOLD 172 797 + #define WCN36XX_HAL_CFG_ASD_RTT_RSSI_HYST_THRESHOLD 173 798 + #define WCN36XX_HAL_CFG_BTC_CTS2S_ON_STA_DURING_SCO 174 799 + #define WCN36XX_HAL_CFG_SHORT_PREAMBLE 175 800 + #define WCN36XX_HAL_CFG_SHORT_SLOT_TIME 176 801 + #define WCN36XX_HAL_CFG_DELAYED_BA 177 802 + #define WCN36XX_HAL_CFG_IMMEDIATE_BA 178 803 + #define WCN36XX_HAL_CFG_DOT11_MODE 179 804 + #define WCN36XX_HAL_CFG_HT_CAPS 180 805 + #define WCN36XX_HAL_CFG_AMPDU_PARAMS 181 806 + #define WCN36XX_HAL_CFG_TX_BF_INFO 182 807 + #define WCN36XX_HAL_CFG_ASC_CAP_INFO 183 808 + #define WCN36XX_HAL_CFG_EXT_HT_CAPS 184 809 + #define WCN36XX_HAL_CFG_QOS_ENABLED 185 810 + #define WCN36XX_HAL_CFG_WME_ENABLED 186 811 + #define WCN36XX_HAL_CFG_WSM_ENABLED 187 812 + #define WCN36XX_HAL_CFG_WMM_ENABLED 188 813 + #define WCN36XX_HAL_CFG_UAPSD_PER_AC_BITMASK 189 814 + #define WCN36XX_HAL_CFG_MCS_RATES 190 815 + #define WCN36XX_HAL_CFG_VHT_CAPS 191 816 + #define WCN36XX_HAL_CFG_VHT_RX_SUPP_MCS 192 817 + #define WCN36XX_HAL_CFG_VHT_TX_SUPP_MCS 193 818 + #define WCN36XX_HAL_CFG_RA_FILTER_ENABLE 194 819 + #define WCN36XX_HAL_CFG_RA_RATE_LIMIT_INTERVAL 195 820 + #define WCN36XX_HAL_CFG_BTC_FATAL_HID_NSNIFF_BLK 196 821 + #define WCN36XX_HAL_CFG_BTC_CRITICAL_HID_NSNIFF_BLK 197 822 + #define WCN36XX_HAL_CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD 198 823 + #define WCN36XX_HAL_CFG_BTC_DYN_OPP_TX_QUEUE_THOLD 199 824 + #define WCN36XX_HAL_CFG_LINK_FAIL_TIMEOUT 200 825 + #define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_SP 201 826 + #define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT 202 827 + #define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT 203 828 + #define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW 204 829 + #define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW 205 830 + #define WCN36XX_HAL_CFG_MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE 206 831 + #define WCN36XX_HAL_CFG_MAX_UAPSD_INACTIVITY_INTERVALS 207 832 + #define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_WMMPS 208 833 + #define 
WCN36XX_HAL_CFG_BURST_MODE_BE_TXOP_VALUE 209 834 + #define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_RA_START_RATE 210 835 + #define WCN36XX_HAL_CFG_BTC_FAST_WLAN_CONN_PREF 211 836 + #define WCN36XX_HAL_CFG_ENABLE_RTSCTS_HTVHT 212 837 + #define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN 213 838 + #define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN 214 839 + #define WCN36XX_HAL_CFG_LINK_FAIL_TX_CNT 215 840 + #define WCN36XX_HAL_CFG_TOGGLE_ARP_BDRATES 216 841 + #define WCN36XX_HAL_CFG_OPTIMIZE_CA_EVENT 217 842 + #define WCN36XX_HAL_CFG_EXT_SCAN_CONC_MODE 218 843 + #define WCN36XX_HAL_CFG_BAR_WAKEUP_HOST_DISABLE 219 844 + #define WCN36XX_HAL_CFG_SAR_BOFFSET_CORRECTION_ENABLE 220 845 + #define WCN36XX_HAL_CFG_UNITS_OF_BCN_WAIT_TIME 221 846 + #define WCN36XX_HAL_CFG_CONS_BCNMISS_COUNT 222 847 + #define WCN36XX_HAL_CFG_BTC_DISABLE_WLAN_LINK_CRITICAL 223 848 + #define WCN36XX_HAL_CFG_DISABLE_SCAN_DURING_SCO 224 849 + #define WCN36XX_HAL_CFG_TRIGGER_NULLFRAME_BEFORE_HB 225 850 + #define WCN36XX_HAL_CFG_ENABLE_POWERSAVE_OFFLOAD 226 851 + #define WCN36XX_HAL_CFG_MAX_PARAMS 227 730 852 731 853 /* Specify the starting bitrate, 11B and 11A/G rates can be specified in 732 854 * multiples of 0.5 So for 5.5 mbps => 11. for MCS 0 - 7 rates, Bit 7 should ··· 1714 1592 u8 reserved:4; 1715 1593 1716 1594 /* These rates are the intersection of peer and self capabilities. 
*/ 1717 - struct wcn36xx_hal_supported_rates supported_rates; 1595 + struct wcn36xx_hal_supported_rates_v1 supported_rates; 1596 + 1597 + u8 vht_capable; 1598 + u8 vht_tx_channel_width_set; 1599 + 1718 1600 } __packed; 1601 + 1602 + #define WCN36XX_DIFF_STA_PARAMS_V1_NOVHT 10 1719 1603 1720 1604 struct wcn36xx_hal_config_sta_req_msg_v1 { 1721 1605 struct wcn36xx_hal_msg_header header; ··· 2143 2015 * "STA context" 2144 2016 */ 2145 2017 struct wcn36xx_hal_config_sta_params_v1 sta; 2018 + 2019 + u8 vht_capable; 2020 + u8 vht_tx_channel_width_set; 2021 + 2146 2022 } __packed; 2023 + 2024 + #define WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT (WCN36XX_DIFF_STA_PARAMS_V1_NOVHT + 2) 2147 2025 2148 2026 struct wcn36xx_hal_config_bss_req_msg_v1 { 2149 2027 struct wcn36xx_hal_msg_header header;
+74 -25
drivers/net/wireless/ath/wcn36xx/main.c
··· 39 39 .max_power = 25, \ 40 40 } 41 41 42 - #define CHAN5G(_freq, _idx) { \ 42 + #define CHAN5G(_freq, _idx, _phy_val) { \ 43 43 .band = NL80211_BAND_5GHZ, \ 44 44 .center_freq = (_freq), \ 45 - .hw_value = (_idx), \ 45 + .hw_value = (_phy_val) << HW_VALUE_PHY_SHIFT | HW_VALUE_CHANNEL(_idx), \ 46 46 .max_power = 25, \ 47 47 } 48 48 ··· 67 67 }; 68 68 69 69 static struct ieee80211_channel wcn_5ghz_channels[] = { 70 - CHAN5G(5180, 36), 71 - CHAN5G(5200, 40), 72 - CHAN5G(5220, 44), 73 - CHAN5G(5240, 48), 74 - CHAN5G(5260, 52), 75 - CHAN5G(5280, 56), 76 - CHAN5G(5300, 60), 77 - CHAN5G(5320, 64), 78 - CHAN5G(5500, 100), 79 - CHAN5G(5520, 104), 80 - CHAN5G(5540, 108), 81 - CHAN5G(5560, 112), 82 - CHAN5G(5580, 116), 83 - CHAN5G(5600, 120), 84 - CHAN5G(5620, 124), 85 - CHAN5G(5640, 128), 86 - CHAN5G(5660, 132), 87 - CHAN5G(5700, 140), 88 - CHAN5G(5745, 149), 89 - CHAN5G(5765, 153), 90 - CHAN5G(5785, 157), 91 - CHAN5G(5805, 161), 92 - CHAN5G(5825, 165) 70 + CHAN5G(5180, 36, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 71 + CHAN5G(5200, 40, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), 72 + CHAN5G(5220, 44, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 73 + CHAN5G(5240, 48, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), 74 + CHAN5G(5260, 52, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 75 + CHAN5G(5280, 56, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), 76 + CHAN5G(5300, 60, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 77 + CHAN5G(5320, 64, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), 78 + CHAN5G(5500, 100, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 79 + CHAN5G(5520, 104, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), 80 + CHAN5G(5540, 108, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 81 + CHAN5G(5560, 112, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), 82 + CHAN5G(5580, 116, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 83 + CHAN5G(5600, 120, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), 84 + CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 85 + CHAN5G(5640, 128, 
PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), 86 + CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 87 + CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 88 + CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW), 89 + CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW), 90 + CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH), 91 + CHAN5G(5805, 161, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH), 92 + CHAN5G(5825, 165, 0) 93 93 }; 94 94 95 95 #define RATE(_bitrate, _hw_rate, _flags) { \ ··· 766 766 sta->ht_cap.mcs.rx_mask, 767 767 sizeof(sta->ht_cap.mcs.rx_mask)); 768 768 } 769 + 770 + if (sta->vht_cap.vht_supported) { 771 + sta_priv->supported_rates.op_rate_mode = STA_11ac; 772 + sta_priv->supported_rates.vht_rx_mcs_map = 773 + sta->vht_cap.vht_mcs.rx_mcs_map; 774 + sta_priv->supported_rates.vht_tx_mcs_map = 775 + sta->vht_cap.vht_mcs.tx_mcs_map; 776 + } 769 777 } 778 + 770 779 void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates) 771 780 { 772 781 u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = { ··· 802 793 sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES); 803 794 rates->supported_mcs_set[0] = 0xFF; 804 795 } 796 + 797 + void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates) 798 + { 799 + rates->op_rate_mode = STA_11ac; 800 + rates->vht_rx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; 801 + rates->vht_tx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9; 802 + } 803 + 805 804 static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, 806 805 struct ieee80211_vif *vif, 807 806 struct ieee80211_bss_conf *bss_conf, ··· 1201 1184 CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd) 1202 1185 }; 1203 1186 1187 + static void 1188 + wcn36xx_set_ieee80211_vht_caps(struct ieee80211_sta_vht_cap *vht_cap) 1189 + { 1190 + vht_cap->vht_supported = true; 1191 + 1192 + vht_cap->cap = (IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | 1193 + IEEE80211_VHT_CAP_SHORT_GI_80 | 1194 + IEEE80211_VHT_CAP_RXSTBC_1 | 1195 + 
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 1196 + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 1197 + 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT | 1198 + 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); 1199 + 1200 + vht_cap->vht_mcs.rx_mcs_map = 1201 + cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 | 1202 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | 1203 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | 1204 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | 1205 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | 1206 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | 1207 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | 1208 + IEEE80211_VHT_MCS_NOT_SUPPORTED << 14); 1209 + 1210 + vht_cap->vht_mcs.rx_highest = cpu_to_le16(433); 1211 + vht_cap->vht_mcs.tx_highest = vht_cap->vht_mcs.rx_highest; 1212 + 1213 + vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; 1214 + } 1215 + 1204 1216 static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) 1205 1217 { 1206 1218 static const u32 cipher_suites[] = { ··· 1255 1209 wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; 1256 1210 if (wcn->rf_id != RF_IRIS_WCN3620) 1257 1211 wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; 1212 + 1213 + if (wcn->rf_id == RF_IRIS_WCN3680) 1214 + wcn36xx_set_ieee80211_vht_caps(&wcn_band_5ghz.vht_cap); 1258 1215 1259 1216 wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS; 1260 1217 wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
+473 -206
drivers/net/wireless/ath/wcn36xx/smd.c
··· 80 80 WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */ 81 81 }; 82 82 83 + static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = { 84 + WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1), 85 + WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1), 86 + WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0), 87 + WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785), 88 + WCN36XX_CFG_VAL(CAL_PERIOD, 5), 89 + WCN36XX_CFG_VAL(CAL_CONTROL, 1), 90 + WCN36XX_CFG_VAL(PROXIMITY, 0), 91 + WCN36XX_CFG_VAL(NETWORK_DENSITY, 3), 92 + WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 4096), 93 + WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64), 94 + WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347), 95 + WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15), 96 + WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15), 97 + WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000), 98 + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5), 99 + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10), 100 + WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15), 101 + WCN36XX_CFG_VAL(FIXED_RATE, 0), 102 + WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4), 103 + WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0), 104 + WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0), 105 + WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5), 106 + WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1), 107 + WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5), 108 + WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_24GHZ, 1), 109 + WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5), 110 + WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40), 111 + WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200), 112 + WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1), 113 + WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1), 114 + WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20), 115 + WCN36XX_CFG_VAL(STATS_PERIOD, 10), 116 + WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000), 117 + WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0), 118 + WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128), 119 + WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560), 120 + WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0), 121 + WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1), 122 + WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1), 123 + 
WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0), 124 + WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000), 125 + WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000), 126 + WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10), 127 + WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0), 128 + WCN36XX_CFG_VAL(TDLS_PUAPSD_MASK, 0), 129 + WCN36XX_CFG_VAL(TDLS_PUAPSD_BUFFER_STA_CAPABLE, 1), 130 + WCN36XX_CFG_VAL(TDLS_PUAPSD_INACTIVITY_TIME, 0), 131 + WCN36XX_CFG_VAL(TDLS_PUAPSD_RX_FRAME_THRESHOLD, 10), 132 + WCN36XX_CFG_VAL(TDLS_OFF_CHANNEL_CAPABLE, 1), 133 + WCN36XX_CFG_VAL(ENABLE_ADAPTIVE_RX_DRAIN, 1), 134 + WCN36XX_CFG_VAL(FLEXCONNECT_POWER_FACTOR, 0), 135 + WCN36XX_CFG_VAL(ANTENNA_DIVERSITY, 3), 136 + WCN36XX_CFG_VAL(ATH_DISABLE, 0), 137 + WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN, 60000), 138 + WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN, 90000), 139 + WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN, 30000), 140 + WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN, 30000), 141 + WCN36XX_CFG_VAL(ASD_PROBE_INTERVAL, 50), 142 + WCN36XX_CFG_VAL(ASD_TRIGGER_THRESHOLD, -60), 143 + WCN36XX_CFG_VAL(ASD_RTT_RSSI_HYST_THRESHOLD, 3), 144 + WCN36XX_CFG_VAL(BTC_CTS2S_ON_STA_DURING_SCO, 0), 145 + WCN36XX_CFG_VAL(RA_FILTER_ENABLE, 0), 146 + WCN36XX_CFG_VAL(RA_RATE_LIMIT_INTERVAL, 60), 147 + WCN36XX_CFG_VAL(BTC_FATAL_HID_NSNIFF_BLK, 2), 148 + WCN36XX_CFG_VAL(BTC_CRITICAL_HID_NSNIFF_BLK, 1), 149 + WCN36XX_CFG_VAL(BTC_DYN_A2DP_TX_QUEUE_THOLD, 0), 150 + WCN36XX_CFG_VAL(BTC_DYN_OPP_TX_QUEUE_THOLD, 1), 151 + WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_SP, 10), 152 + WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT, 50), 153 + WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT, 50), 154 + WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW, 500), 155 + WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW, 500), 156 + WCN36XX_CFG_VAL(MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE, 0), 157 + WCN36XX_CFG_VAL(MAX_UAPSD_INACTIVITY_INTERVALS, 10), 158 + WCN36XX_CFG_VAL(ENABLE_DYNAMIC_WMMPS, 1), 159 + WCN36XX_CFG_VAL(BURST_MODE_BE_TXOP_VALUE, 0), 160 + 
WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 136), 161 + WCN36XX_CFG_VAL(BTC_FAST_WLAN_CONN_PREF, 1), 162 + WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0), 163 + WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000), 164 + WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000), 165 + WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 200), 166 + WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0), 167 + WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0), 168 + WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0), 169 + WCN36XX_CFG_VAL(BAR_WAKEUP_HOST_DISABLE, 0), 170 + WCN36XX_CFG_VAL(SAR_BOFFSET_CORRECTION_ENABLE, 0), 171 + WCN36XX_CFG_VAL(BTC_DISABLE_WLAN_LINK_CRITICAL, 5), 172 + WCN36XX_CFG_VAL(DISABLE_SCAN_DURING_SCO, 2), 173 + WCN36XX_CFG_VAL(CONS_BCNMISS_COUNT, 0), 174 + WCN36XX_CFG_VAL(UNITS_OF_BCN_WAIT_TIME, 0), 175 + WCN36XX_CFG_VAL(TRIGGER_NULLFRAME_BEFORE_HB, 0), 176 + WCN36XX_CFG_VAL(ENABLE_POWERSAVE_OFFLOAD, 0), 177 + }; 178 + 83 179 static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value) 84 180 { 85 181 struct wcn36xx_hal_cfg *entry; ··· 218 122 { 219 123 return caps & flag ? 
1 : 0; 220 124 } 125 + 221 126 static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif, 222 127 struct ieee80211_sta *sta, 223 128 struct wcn36xx_hal_config_bss_params *bss_params) ··· 241 144 /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */ 242 145 bss_params->ht20_coexist = 0; 243 146 } 147 + } 148 + 149 + static void 150 + wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif, 151 + struct ieee80211_sta *sta, 152 + struct wcn36xx_hal_config_bss_params_v1 *bss) 153 + { 154 + if (sta && sta->vht_cap.vht_supported) 155 + bss->vht_capable = 1; 244 156 } 245 157 246 158 static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta, ··· 280 174 } 281 175 } 282 176 177 + static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn, 178 + struct ieee80211_sta *sta, 179 + struct wcn36xx_hal_config_sta_params_v1 *sta_params) 180 + { 181 + if (sta->vht_cap.vht_supported) { 182 + unsigned long caps = sta->vht_cap.cap; 183 + 184 + sta_params->vht_capable = sta->vht_cap.vht_supported; 185 + sta_params->vht_ldpc_enabled = 186 + is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC); 187 + if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) { 188 + sta_params->vht_tx_mu_beamformee_capable = 189 + is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE); 190 + if (sta_params->vht_tx_mu_beamformee_capable) 191 + sta_params->vht_tx_bf_enabled = 1; 192 + } else { 193 + sta_params->vht_tx_mu_beamformee_capable = 0; 194 + } 195 + sta_params->vht_tx_channel_width_set = 0; 196 + } 197 + } 198 + 199 + static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta, 200 + struct wcn36xx_hal_config_sta_params_v1 *sta_params) 201 + { 202 + if (sta->ht_cap.ht_supported) { 203 + sta_params->ht_ldpc_enabled = 204 + is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING); 205 + } 206 + } 207 + 283 208 static void wcn36xx_smd_set_sta_default_ht_params( 284 209 struct wcn36xx_hal_config_sta_params *sta_params) 285 210 { ··· 325 188 sta_params->green_field_capable = 
1; 326 189 sta_params->delayed_ba_support = 0; 327 190 sta_params->dsss_cck_mode_40mhz = 1; 191 + } 192 + 193 + static void wcn36xx_smd_set_sta_default_vht_params(struct wcn36xx *wcn, 194 + struct wcn36xx_hal_config_sta_params_v1 *sta_params) 195 + { 196 + if (wcn->rf_id == RF_IRIS_WCN3680) { 197 + sta_params->vht_capable = 1; 198 + sta_params->vht_tx_mu_beamformee_capable = 1; 199 + } else { 200 + sta_params->vht_capable = 0; 201 + sta_params->vht_tx_mu_beamformee_capable = 0; 202 + } 203 + 204 + sta_params->vht_ldpc_enabled = 0; 205 + sta_params->vht_tx_channel_width_set = 0; 206 + sta_params->vht_tx_bf_enabled = 0; 207 + } 208 + 209 + static void wcn36xx_smd_set_sta_default_ht_ldpc_params(struct wcn36xx *wcn, 210 + struct wcn36xx_hal_config_sta_params_v1 *sta_params) 211 + { 212 + if (wcn->rf_id == RF_IRIS_WCN3680) 213 + sta_params->ht_ldpc_enabled = 1; 214 + else 215 + sta_params->ht_ldpc_enabled = 0; 328 216 } 329 217 330 218 static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn, ··· 404 242 sta_params->aid = sta_priv->aid; 405 243 wcn36xx_smd_set_sta_ht_params(sta, sta_params); 406 244 memcpy(&sta_params->supported_rates, &sta_priv->supported_rates, 407 - sizeof(sta_priv->supported_rates)); 245 + sizeof(struct wcn36xx_hal_supported_rates)); 408 246 } else { 409 - wcn36xx_set_default_rates(&sta_params->supported_rates); 247 + wcn36xx_set_default_rates((struct wcn36xx_hal_supported_rates *) 248 + &sta_params->supported_rates); 410 249 wcn36xx_smd_set_sta_default_ht_params(sta_params); 411 250 } 412 251 } ··· 454 291 hdr->len = msg_size + sizeof(*hdr); 455 292 } 456 293 457 - #define INIT_HAL_MSG(msg_body, type) \ 294 + #define __INIT_HAL_MSG(msg_body, type, version) \ 458 295 do { \ 459 296 memset(&msg_body, 0, sizeof(msg_body)); \ 460 297 msg_body.header.msg_type = type; \ 461 - msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \ 298 + msg_body.header.msg_version = version; \ 462 299 msg_body.header.len = sizeof(msg_body); \ 463 300 } while (0) \ 
301 + 302 + #define INIT_HAL_MSG(msg_body, type) \ 303 + __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION0) 304 + 305 + #define INIT_HAL_MSG_V1(msg_body, type) \ 306 + __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION1) 464 307 465 308 #define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \ 466 309 do { \ ··· 619 450 int ret; 620 451 int i; 621 452 size_t len; 453 + int cfg_elements; 454 + static struct wcn36xx_cfg_val *cfg_vals; 622 455 623 456 mutex_lock(&wcn->hal_mutex); 624 457 INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ); ··· 633 462 body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf; 634 463 len = body->header.len; 635 464 636 - for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) { 637 - ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id, 638 - wcn36xx_cfg_vals[i].value); 465 + if (wcn->rf_id == RF_IRIS_WCN3680) { 466 + cfg_vals = wcn3680_cfg_vals; 467 + cfg_elements = ARRAY_SIZE(wcn3680_cfg_vals); 468 + } else { 469 + cfg_vals = wcn36xx_cfg_vals; 470 + cfg_elements = ARRAY_SIZE(wcn36xx_cfg_vals); 471 + } 472 + 473 + for (i = 0; i < cfg_elements; i++) { 474 + ret = put_cfg_tlv_u32(wcn, &len, cfg_vals[i].cfg_id, 475 + cfg_vals[i].value); 639 476 if (ret) 640 477 goto out; 641 478 } ··· 873 694 874 695 msg_body->num_channel = min_t(u8, req->n_channels, 875 696 sizeof(msg_body->channels)); 876 - for (i = 0; i < msg_body->num_channel; i++) 877 - msg_body->channels[i] = req->channels[i]->hw_value; 697 + for (i = 0; i < msg_body->num_channel; i++) { 698 + msg_body->channels[i] = 699 + HW_VALUE_CHANNEL(req->channels[i]->hw_value); 700 + } 878 701 879 702 msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN; 880 703 ··· 1364 1183 v1->p2p = orig->p2p; 1365 1184 } 1366 1185 1186 + static void 1187 + wcn36xx_smd_set_sta_params_v1(struct wcn36xx *wcn, 1188 + struct ieee80211_vif *vif, 1189 + struct ieee80211_sta *sta, 1190 + struct wcn36xx_hal_config_sta_params_v1 *sta_par) 1191 + { 1192 + struct wcn36xx_sta *sta_priv = NULL; 1193 + struct 
wcn36xx_hal_config_sta_params sta_par_v0; 1194 + 1195 + wcn36xx_smd_set_sta_params(wcn, vif, sta, &sta_par_v0); 1196 + wcn36xx_smd_convert_sta_to_v1(wcn, &sta_par_v0, sta_par); 1197 + 1198 + if (sta) { 1199 + sta_priv = wcn36xx_sta_to_priv(sta); 1200 + wcn36xx_smd_set_sta_vht_params(wcn, sta, sta_par); 1201 + wcn36xx_smd_set_sta_ht_ldpc_params(sta, sta_par); 1202 + memcpy(&sta_par->supported_rates, &sta_priv->supported_rates, 1203 + sizeof(sta_par->supported_rates)); 1204 + } else { 1205 + wcn36xx_set_default_rates_v1(&sta_par->supported_rates); 1206 + wcn36xx_smd_set_sta_default_vht_params(wcn, sta_par); 1207 + wcn36xx_smd_set_sta_default_ht_ldpc_params(wcn, sta_par); 1208 + } 1209 + } 1210 + 1367 1211 static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn, 1368 1212 struct ieee80211_sta *sta, 1369 1213 void *buf, ··· 1423 1217 } 1424 1218 1425 1219 static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn, 1426 - const struct wcn36xx_hal_config_sta_req_msg *orig) 1220 + struct ieee80211_vif *vif, 1221 + struct ieee80211_sta *sta) 1427 1222 { 1428 1223 struct wcn36xx_hal_config_sta_req_msg_v1 msg_body; 1429 - struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params; 1224 + struct wcn36xx_hal_config_sta_params_v1 *sta_params; 1430 1225 1431 - INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ); 1226 + if (wcn->rf_id == RF_IRIS_WCN3680) { 1227 + INIT_HAL_MSG_V1(msg_body, WCN36XX_HAL_CONFIG_STA_REQ); 1228 + } else { 1229 + INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ); 1230 + msg_body.header.len -= WCN36XX_DIFF_STA_PARAMS_V1_NOVHT; 1231 + } 1432 1232 1433 - wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params, 1434 - &msg_body.sta_params); 1233 + sta_params = &msg_body.sta_params; 1234 + 1235 + wcn36xx_smd_set_sta_params_v1(wcn, vif, sta, sta_params); 1435 1236 1436 1237 PREPARE_HAL_BUF(wcn->hal_buf, msg_body); 1437 1238 1438 1239 wcn36xx_dbg(WCN36XX_DBG_HAL, 1439 1240 "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM 
aid %d\n", 1440 - sta->action, sta->sta_index, sta->bssid_index, 1441 - sta->bssid, sta->type, sta->mac, sta->aid); 1241 + sta_params->action, sta_params->sta_index, sta_params->bssid_index, 1242 + sta_params->bssid, sta_params->type, sta_params->mac, sta_params->aid); 1442 1243 1443 1244 return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len); 1444 1245 } 1445 1246 1446 - int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif, 1447 - struct ieee80211_sta *sta) 1247 + static int wcn36xx_smd_config_sta_v0(struct wcn36xx *wcn, 1248 + struct ieee80211_vif *vif, 1249 + struct ieee80211_sta *sta) 1448 1250 { 1449 1251 struct wcn36xx_hal_config_sta_req_msg msg; 1450 1252 struct wcn36xx_hal_config_sta_params *sta_params; 1451 - int ret; 1452 1253 1453 - mutex_lock(&wcn->hal_mutex); 1454 1254 INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ); 1455 1255 1456 1256 sta_params = &msg.sta_params; 1457 1257 1458 1258 wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params); 1459 1259 1460 - if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) { 1461 - ret = wcn36xx_smd_config_sta_v1(wcn, &msg); 1462 - } else { 1463 - PREPARE_HAL_BUF(wcn->hal_buf, msg); 1260 + PREPARE_HAL_BUF(wcn->hal_buf, msg); 1464 1261 1465 - wcn36xx_dbg(WCN36XX_DBG_HAL, 1466 - "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n", 1467 - sta_params->action, sta_params->sta_index, 1468 - sta_params->bssid_index, sta_params->bssid, 1469 - sta_params->type, sta_params->mac, sta_params->aid); 1262 + wcn36xx_dbg(WCN36XX_DBG_HAL, 1263 + "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n", 1264 + sta_params->action, sta_params->sta_index, 1265 + sta_params->bssid_index, sta_params->bssid, 1266 + sta_params->type, sta_params->mac, sta_params->aid); 1470 1267 1471 - ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len); 1472 - } 1268 + return wcn36xx_smd_send_and_wait(wcn, msg.header.len); 1269 + } 1270 + 1271 + int 
wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif, 1272 + struct ieee80211_sta *sta) 1273 + { 1274 + int ret; 1275 + 1276 + mutex_lock(&wcn->hal_mutex); 1277 + 1278 + if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) 1279 + ret = wcn36xx_smd_config_sta_v1(wcn, vif, sta); 1280 + else 1281 + ret = wcn36xx_smd_config_sta_v0(wcn, vif, sta); 1282 + 1473 1283 if (ret) { 1474 1284 wcn36xx_err("Sending hal_config_sta failed\n"); 1475 1285 goto out; ··· 1503 1281 return ret; 1504 1282 } 1505 1283 1506 - static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn, 1507 - const struct wcn36xx_hal_config_bss_req_msg *orig) 1284 + static void wcn36xx_smd_set_bss_params(struct wcn36xx *wcn, 1285 + struct ieee80211_vif *vif, 1286 + struct ieee80211_sta *sta, 1287 + const u8 *bssid, 1288 + bool update, 1289 + struct wcn36xx_hal_config_bss_params *bss) 1508 1290 { 1509 - struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body; 1510 - struct wcn36xx_hal_config_bss_params_v1 *bss; 1511 - struct wcn36xx_hal_config_sta_params_v1 *sta; 1512 - int ret; 1513 - 1514 - msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL); 1515 - if (!msg_body) 1516 - return -ENOMEM; 1517 - 1518 - INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ); 1519 - 1520 - bss = &msg_body->bss_params; 1521 - sta = &bss->sta; 1522 - 1523 - /* convert orig to v1 */ 1524 - memcpy(bss->bssid, &orig->bss_params.bssid, ETH_ALEN); 1525 - memcpy(bss->self_mac_addr, &orig->bss_params.self_mac_addr, ETH_ALEN); 1526 - 1527 - bss->bss_type = orig->bss_params.bss_type; 1528 - bss->oper_mode = orig->bss_params.oper_mode; 1529 - bss->nw_type = orig->bss_params.nw_type; 1530 - 1531 - bss->short_slot_time_supported = 1532 - orig->bss_params.short_slot_time_supported; 1533 - bss->lla_coexist = orig->bss_params.lla_coexist; 1534 - bss->llb_coexist = orig->bss_params.llb_coexist; 1535 - bss->llg_coexist = orig->bss_params.llg_coexist; 1536 - bss->ht20_coexist = orig->bss_params.ht20_coexist; 1537 - bss->lln_non_gf_coexist = 
orig->bss_params.lln_non_gf_coexist; 1538 - 1539 - bss->lsig_tx_op_protection_full_support = 1540 - orig->bss_params.lsig_tx_op_protection_full_support; 1541 - bss->rifs_mode = orig->bss_params.rifs_mode; 1542 - bss->beacon_interval = orig->bss_params.beacon_interval; 1543 - bss->dtim_period = orig->bss_params.dtim_period; 1544 - bss->tx_channel_width_set = orig->bss_params.tx_channel_width_set; 1545 - bss->oper_channel = orig->bss_params.oper_channel; 1546 - bss->ext_channel = orig->bss_params.ext_channel; 1547 - 1548 - bss->reserved = orig->bss_params.reserved; 1549 - 1550 - memcpy(&bss->ssid, &orig->bss_params.ssid, 1551 - sizeof(orig->bss_params.ssid)); 1552 - 1553 - bss->action = orig->bss_params.action; 1554 - bss->rateset = orig->bss_params.rateset; 1555 - bss->ht = orig->bss_params.ht; 1556 - bss->obss_prot_enabled = orig->bss_params.obss_prot_enabled; 1557 - bss->rmf = orig->bss_params.rmf; 1558 - bss->ht_oper_mode = orig->bss_params.ht_oper_mode; 1559 - bss->dual_cts_protection = orig->bss_params.dual_cts_protection; 1560 - 1561 - bss->max_probe_resp_retry_limit = 1562 - orig->bss_params.max_probe_resp_retry_limit; 1563 - bss->hidden_ssid = orig->bss_params.hidden_ssid; 1564 - bss->proxy_probe_resp = orig->bss_params.proxy_probe_resp; 1565 - bss->edca_params_valid = orig->bss_params.edca_params_valid; 1566 - 1567 - memcpy(&bss->acbe, &orig->bss_params.acbe, 1568 - sizeof(orig->bss_params.acbe)); 1569 - memcpy(&bss->acbk, &orig->bss_params.acbk, 1570 - sizeof(orig->bss_params.acbk)); 1571 - memcpy(&bss->acvi, &orig->bss_params.acvi, 1572 - sizeof(orig->bss_params.acvi)); 1573 - memcpy(&bss->acvo, &orig->bss_params.acvo, 1574 - sizeof(orig->bss_params.acvo)); 1575 - 1576 - bss->ext_set_sta_key_param_valid = 1577 - orig->bss_params.ext_set_sta_key_param_valid; 1578 - 1579 - memcpy(&bss->ext_set_sta_key_param, 1580 - &orig->bss_params.ext_set_sta_key_param, 1581 - sizeof(orig->bss_params.acvo)); 1582 - 1583 - bss->wcn36xx_hal_persona = 
orig->bss_params.wcn36xx_hal_persona; 1584 - bss->spectrum_mgt_enable = orig->bss_params.spectrum_mgt_enable; 1585 - bss->tx_mgmt_power = orig->bss_params.tx_mgmt_power; 1586 - bss->max_tx_power = orig->bss_params.max_tx_power; 1587 - 1588 - wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta, sta); 1589 - 1590 - PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body)); 1591 - 1592 - wcn36xx_dbg(WCN36XX_DBG_HAL, 1593 - "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", 1594 - bss->bssid, bss->self_mac_addr, bss->bss_type, 1595 - bss->oper_mode, bss->nw_type); 1596 - 1597 - wcn36xx_dbg(WCN36XX_DBG_HAL, 1598 - "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n", 1599 - sta->bssid, sta->action, sta->sta_index, 1600 - sta->bssid_index, sta->aid, sta->type, sta->mac); 1601 - 1602 - ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len); 1603 - kfree(msg_body); 1604 - 1605 - return ret; 1606 - } 1607 - 1608 - 1609 - static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, 1610 - struct ieee80211_vif *vif, 1611 - struct ieee80211_sta *sta, 1612 - void *buf, 1613 - size_t len) 1614 - { 1615 - struct wcn36xx_hal_config_bss_rsp_msg *rsp; 1616 - struct wcn36xx_hal_config_bss_rsp_params *params; 1617 1291 struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1618 - 1619 - if (len < sizeof(*rsp)) 1620 - return -EINVAL; 1621 - 1622 - rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf; 1623 - params = &rsp->bss_rsp_params; 1624 - 1625 - if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { 1626 - wcn36xx_warn("hal config bss response failure: %d\n", 1627 - params->status); 1628 - return -EIO; 1629 - } 1630 - 1631 - wcn36xx_dbg(WCN36XX_DBG_HAL, 1632 - "hal config bss rsp status %d bss_idx %d dpu_desc_index %d" 1633 - " sta_idx %d self_idx %d bcast_idx %d mac %pM" 1634 - " power %d ucast_dpu_signature %d\n", 1635 - params->status, params->bss_index, params->dpu_desc_index, 1636 - params->bss_sta_index, 
params->bss_self_sta_index, 1637 - params->bss_bcast_sta_idx, params->mac, 1638 - params->tx_mgmt_power, params->ucast_dpu_signature); 1639 - 1640 - vif_priv->bss_index = params->bss_index; 1641 - 1642 - if (sta) { 1643 - struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); 1644 - sta_priv->bss_sta_index = params->bss_sta_index; 1645 - sta_priv->bss_dpu_desc_index = params->dpu_desc_index; 1646 - } 1647 - 1648 - vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature; 1649 - 1650 - return 0; 1651 - } 1652 - 1653 - int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, 1654 - struct ieee80211_sta *sta, const u8 *bssid, 1655 - bool update) 1656 - { 1657 - struct wcn36xx_hal_config_bss_req_msg *msg; 1658 - struct wcn36xx_hal_config_bss_params *bss; 1659 - struct wcn36xx_hal_config_sta_params *sta_params; 1660 - struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1661 - int ret; 1662 - 1663 - mutex_lock(&wcn->hal_mutex); 1664 - msg = kzalloc(sizeof(*msg), GFP_KERNEL); 1665 - if (!msg) { 1666 - ret = -ENOMEM; 1667 - goto out; 1668 - } 1669 - INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ); 1670 - 1671 - bss = &msg->bss_params; 1672 - sta_params = &bss->sta; 1673 1292 1674 1293 WARN_ON(is_zero_ether_addr(bssid)); 1675 1294 ··· 1565 1502 bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE; 1566 1503 1567 1504 bss->reserved = 0; 1568 - wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params); 1569 1505 1570 1506 /* wcn->ssid is only valid in AP and IBSS mode */ 1571 1507 bss->ssid.length = vif_priv->ssid.length; ··· 1589 1527 bss->action = update; 1590 1528 1591 1529 vif_priv->bss_type = bss->bss_type; 1530 + } 1531 + 1532 + static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn, 1533 + struct ieee80211_vif *vif, 1534 + struct ieee80211_sta *sta_80211, 1535 + const u8 *bssid, 1536 + bool update) 1537 + { 1538 + struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body; 1539 + struct wcn36xx_hal_config_bss_params_v1 *bss; 1540 + struct 
wcn36xx_hal_config_bss_params bss_v0; 1541 + struct wcn36xx_hal_config_sta_params_v1 *sta; 1542 + struct cfg80211_chan_def *chandef; 1543 + int ret; 1544 + 1545 + msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL); 1546 + if (!msg_body) 1547 + return -ENOMEM; 1548 + 1549 + if (wcn->rf_id == RF_IRIS_WCN3680) { 1550 + INIT_HAL_MSG_V1((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ); 1551 + } else { 1552 + INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ); 1553 + msg_body->header.len -= WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT; 1554 + } 1555 + 1556 + bss = &msg_body->bss_params; 1557 + sta = &bss->sta; 1558 + 1559 + memset(&bss_v0, 0x00, sizeof(bss_v0)); 1560 + wcn36xx_smd_set_bss_params(wcn, vif, sta_80211, bssid, update, &bss_v0); 1561 + wcn36xx_smd_set_sta_params_v1(wcn, vif, sta_80211, sta); 1562 + 1563 + /* convert orig to v1 */ 1564 + memcpy(bss->bssid, &bss_v0.bssid, ETH_ALEN); 1565 + memcpy(bss->self_mac_addr, &bss_v0.self_mac_addr, ETH_ALEN); 1566 + 1567 + bss->bss_type = bss_v0.bss_type; 1568 + bss->oper_mode = bss_v0.oper_mode; 1569 + bss->nw_type = bss_v0.nw_type; 1570 + 1571 + bss->short_slot_time_supported = 1572 + bss_v0.short_slot_time_supported; 1573 + bss->lla_coexist = bss_v0.lla_coexist; 1574 + bss->llb_coexist = bss_v0.llb_coexist; 1575 + bss->llg_coexist = bss_v0.llg_coexist; 1576 + bss->ht20_coexist = bss_v0.ht20_coexist; 1577 + bss->lln_non_gf_coexist = bss_v0.lln_non_gf_coexist; 1578 + 1579 + bss->lsig_tx_op_protection_full_support = 1580 + bss_v0.lsig_tx_op_protection_full_support; 1581 + bss->rifs_mode = bss_v0.rifs_mode; 1582 + bss->beacon_interval = bss_v0.beacon_interval; 1583 + bss->dtim_period = bss_v0.dtim_period; 1584 + bss->tx_channel_width_set = bss_v0.tx_channel_width_set; 1585 + bss->oper_channel = bss_v0.oper_channel; 1586 + 1587 + if (wcn->hw->conf.chandef.width == NL80211_CHAN_WIDTH_80) { 1588 + chandef = &wcn->hw->conf.chandef; 1589 + bss->ext_channel = HW_VALUE_PHY(chandef->chan->hw_value); 1590 + } else { 1591 + bss->ext_channel = 
bss_v0.ext_channel; 1592 + } 1593 + 1594 + bss->reserved = bss_v0.reserved; 1595 + 1596 + memcpy(&bss->ssid, &bss_v0.ssid, 1597 + sizeof(bss_v0.ssid)); 1598 + 1599 + bss->action = bss_v0.action; 1600 + bss->rateset = bss_v0.rateset; 1601 + bss->ht = bss_v0.ht; 1602 + bss->obss_prot_enabled = bss_v0.obss_prot_enabled; 1603 + bss->rmf = bss_v0.rmf; 1604 + bss->ht_oper_mode = bss_v0.ht_oper_mode; 1605 + bss->dual_cts_protection = bss_v0.dual_cts_protection; 1606 + 1607 + bss->max_probe_resp_retry_limit = 1608 + bss_v0.max_probe_resp_retry_limit; 1609 + bss->hidden_ssid = bss_v0.hidden_ssid; 1610 + bss->proxy_probe_resp = bss_v0.proxy_probe_resp; 1611 + bss->edca_params_valid = bss_v0.edca_params_valid; 1612 + 1613 + memcpy(&bss->acbe, &bss_v0.acbe, 1614 + sizeof(bss_v0.acbe)); 1615 + memcpy(&bss->acbk, &bss_v0.acbk, 1616 + sizeof(bss_v0.acbk)); 1617 + memcpy(&bss->acvi, &bss_v0.acvi, 1618 + sizeof(bss_v0.acvi)); 1619 + memcpy(&bss->acvo, &bss_v0.acvo, 1620 + sizeof(bss_v0.acvo)); 1621 + 1622 + bss->ext_set_sta_key_param_valid = 1623 + bss_v0.ext_set_sta_key_param_valid; 1624 + 1625 + memcpy(&bss->ext_set_sta_key_param, 1626 + &bss_v0.ext_set_sta_key_param, 1627 + sizeof(bss_v0.acvo)); 1628 + 1629 + bss->wcn36xx_hal_persona = bss_v0.wcn36xx_hal_persona; 1630 + bss->spectrum_mgt_enable = bss_v0.spectrum_mgt_enable; 1631 + bss->tx_mgmt_power = bss_v0.tx_mgmt_power; 1632 + bss->max_tx_power = bss_v0.max_tx_power; 1633 + 1634 + wcn36xx_smd_set_bss_vht_params(vif, sta_80211, bss); 1635 + 1636 + PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body)); 1637 + 1638 + wcn36xx_dbg(WCN36XX_DBG_HAL, 1639 + "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", 1640 + bss->bssid, bss->self_mac_addr, bss->bss_type, 1641 + bss->oper_mode, bss->nw_type); 1642 + 1643 + wcn36xx_dbg(WCN36XX_DBG_HAL, 1644 + "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n", 1645 + sta->bssid, sta->action, sta->sta_index, 1646 + sta->bssid_index, sta->aid, 
sta->type, sta->mac); 1647 + 1648 + ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len); 1649 + kfree(msg_body); 1650 + 1651 + return ret; 1652 + } 1653 + 1654 + static int wcn36xx_smd_config_bss_v0(struct wcn36xx *wcn, 1655 + struct ieee80211_vif *vif, 1656 + struct ieee80211_sta *sta, 1657 + const u8 *bssid, 1658 + bool update) 1659 + { 1660 + struct wcn36xx_hal_config_bss_req_msg *msg; 1661 + struct wcn36xx_hal_config_bss_params *bss; 1662 + struct wcn36xx_hal_config_sta_params *sta_params; 1663 + int ret; 1664 + 1665 + msg = kzalloc(sizeof(*msg), GFP_KERNEL); 1666 + if (!msg) 1667 + return -ENOMEM; 1668 + 1669 + INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ); 1670 + 1671 + bss = &msg->bss_params; 1672 + sta_params = &bss->sta; 1673 + 1674 + wcn36xx_smd_set_bss_params(wcn, vif, sta, bssid, update, bss); 1675 + wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params); 1676 + 1677 + PREPARE_HAL_BUF(wcn->hal_buf, (*msg)); 1592 1678 1593 1679 wcn36xx_dbg(WCN36XX_DBG_HAL, 1594 1680 "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", ··· 1750 1540 sta_params->aid, sta_params->type, 1751 1541 sta_params->mac); 1752 1542 1753 - if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) { 1754 - ret = wcn36xx_smd_config_bss_v1(wcn, msg); 1755 - } else { 1756 - PREPARE_HAL_BUF(wcn->hal_buf, (*msg)); 1543 + ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len); 1544 + kfree(msg); 1757 1545 1758 - ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len); 1546 + return ret; 1547 + } 1548 + 1549 + static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn, 1550 + struct ieee80211_vif *vif, 1551 + struct ieee80211_sta *sta, 1552 + void *buf, 1553 + size_t len) 1554 + { 1555 + struct wcn36xx_hal_config_bss_rsp_msg *rsp; 1556 + struct wcn36xx_hal_config_bss_rsp_params *params; 1557 + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); 1558 + 1559 + if (len < sizeof(*rsp)) 1560 + return -EINVAL; 1561 + 1562 + rsp = (struct wcn36xx_hal_config_bss_rsp_msg 
*)buf; 1563 + params = &rsp->bss_rsp_params; 1564 + 1565 + if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) { 1566 + wcn36xx_warn("hal config bss response failure: %d\n", 1567 + params->status); 1568 + return -EIO; 1759 1569 } 1570 + 1571 + wcn36xx_dbg(WCN36XX_DBG_HAL, 1572 + "hal config bss rsp status %d bss_idx %d dpu_desc_index %d" 1573 + " sta_idx %d self_idx %d bcast_idx %d mac %pM" 1574 + " power %d ucast_dpu_signature %d\n", 1575 + params->status, params->bss_index, params->dpu_desc_index, 1576 + params->bss_sta_index, params->bss_self_sta_index, 1577 + params->bss_bcast_sta_idx, params->mac, 1578 + params->tx_mgmt_power, params->ucast_dpu_signature); 1579 + 1580 + vif_priv->bss_index = params->bss_index; 1581 + 1582 + if (sta) { 1583 + struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta); 1584 + sta_priv->bss_sta_index = params->bss_sta_index; 1585 + sta_priv->bss_dpu_desc_index = params->dpu_desc_index; 1586 + } 1587 + 1588 + vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature; 1589 + 1590 + return 0; 1591 + } 1592 + 1593 + int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, 1594 + struct ieee80211_sta *sta, const u8 *bssid, 1595 + bool update) 1596 + { 1597 + int ret; 1598 + 1599 + mutex_lock(&wcn->hal_mutex); 1600 + 1601 + if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) 1602 + ret = wcn36xx_smd_config_bss_v1(wcn, vif, sta, bssid, update); 1603 + else 1604 + ret = wcn36xx_smd_config_bss_v0(wcn, vif, sta, bssid, update); 1605 + 1760 1606 if (ret) { 1761 1607 wcn36xx_err("Sending hal_config_bss failed\n"); 1762 1608 goto out; ··· 1822 1556 sta, 1823 1557 wcn->hal_buf, 1824 1558 wcn->hal_rsp_len); 1825 - if (ret) { 1559 + if (ret) 1826 1560 wcn36xx_err("hal_config_bss response failed err=%d\n", ret); 1827 - goto out; 1828 - } 1561 + 1829 1562 out: 1830 - kfree(msg); 1831 1563 mutex_unlock(&wcn->hal_mutex); 1832 1564 return ret; 1833 1565 } ··· 2192 1928 mutex_unlock(&wcn->hal_mutex); 2193 1929 return ret; 2194 1930 } 1931 
+ 2195 1932 int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim) 2196 1933 { 2197 1934 struct wcn36xx_hal_set_power_params_req_msg msg_body; ··· 2222 1957 mutex_unlock(&wcn->hal_mutex); 2223 1958 return ret; 2224 1959 } 1960 + 2225 1961 /* Notice: This function should be called after associated, or else it 2226 1962 * will be invalid 2227 1963 */ ··· 2902 2636 kfree(hal_ind_msg); 2903 2637 } 2904 2638 } 2639 + 2905 2640 int wcn36xx_smd_open(struct wcn36xx *wcn) 2906 2641 { 2907 2642 wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+7 -2
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
··· 83 83 WCN36XX_AMPDU_OPERATIONAL, 84 84 }; 85 85 86 - #define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value) 86 + #define HW_VALUE_PHY_SHIFT 8 87 + #define HW_VALUE_PHY(hw_value) ((hw_value) >> HW_VALUE_PHY_SHIFT) 88 + #define HW_VALUE_CHANNEL(hw_value) ((hw_value) & 0xFF) 89 + #define WCN36XX_HW_CHANNEL(__wcn)\ 90 + HW_VALUE_CHANNEL(__wcn->hw->conf.chandef.chan->hw_value) 87 91 #define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band) 88 92 #define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq) 89 93 #define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval) ··· 173 169 u8 bss_dpu_desc_index; 174 170 bool is_data_encrypted; 175 171 /* Rates */ 176 - struct wcn36xx_hal_supported_rates supported_rates; 172 + struct wcn36xx_hal_supported_rates_v1 supported_rates; 177 173 178 174 spinlock_t ampdu_lock; /* protects next two fields */ 179 175 enum wcn36xx_ampdu_state ampdu_state[16]; ··· 275 271 wcn->fw_revision == revision); 276 272 } 277 273 void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates); 274 + void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates); 278 275 279 276 static inline 280 277 struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
+46 -3
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 56 56 #define RSN_AKM_PSK 2 /* Pre-shared Key */ 57 57 #define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */ 58 58 #define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */ 59 + #define RSN_AKM_SAE 8 /* SAE */ 59 60 #define RSN_CAP_LEN 2 /* Length of RSN capabilities */ 60 61 #define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3)) 61 62 #define RSN_CAP_MFPR_MASK BIT(6) ··· 4243 4242 brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n"); 4244 4243 wpa_auth |= WPA2_AUTH_1X_SHA256; 4245 4244 break; 4245 + case RSN_AKM_SAE: 4246 + brcmf_dbg(TRACE, "RSN_AKM_SAE\n"); 4247 + wpa_auth |= WPA3_AUTH_SAE_PSK; 4248 + break; 4246 4249 default: 4247 4250 bphy_err(drvr, "Invalid key mgmt info\n"); 4248 4251 } ··· 4264 4259 brcmf_dbg(TRACE, "MFP Required\n"); 4265 4260 mfp = BRCMF_MFP_REQUIRED; 4266 4261 /* Firmware only supports mfp required in 4267 - * combination with WPA2_AUTH_PSK_SHA256 or 4268 - * WPA2_AUTH_1X_SHA256. 4262 + * combination with WPA2_AUTH_PSK_SHA256, 4263 + * WPA2_AUTH_1X_SHA256, or WPA3_AUTH_SAE_PSK. 
4269 4264 */ 4270 4265 if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 | 4271 - WPA2_AUTH_1X_SHA256))) { 4266 + WPA2_AUTH_1X_SHA256 | 4267 + WPA3_AUTH_SAE_PSK))) { 4272 4268 err = -EINVAL; 4273 4269 goto exit; 4274 4270 } ··· 4685 4679 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 4686 4680 struct brcmf_if *ifp = netdev_priv(ndev); 4687 4681 struct brcmf_pub *drvr = cfg->pub; 4682 + struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 4683 + struct cfg80211_crypto_settings *crypto = &settings->crypto; 4688 4684 const struct brcmf_tlv *ssid_ie; 4689 4685 const struct brcmf_tlv *country_ie; 4690 4686 struct brcmf_ssid_le ssid_le; ··· 4826 4818 goto exit; 4827 4819 } 4828 4820 4821 + if (crypto->psk) { 4822 + brcmf_dbg(INFO, "using PSK offload\n"); 4823 + profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK); 4824 + err = brcmf_set_pmk(ifp, crypto->psk, 4825 + BRCMF_WSEC_MAX_PSK_LEN); 4826 + if (err < 0) 4827 + goto exit; 4828 + } 4829 + if (crypto->sae_pwd) { 4830 + brcmf_dbg(INFO, "using SAE offload\n"); 4831 + profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE); 4832 + err = brcmf_set_sae_password(ifp, crypto->sae_pwd, 4833 + crypto->sae_pwd_len); 4834 + if (err < 0) 4835 + goto exit; 4836 + } 4837 + if (profile->use_fwauth == 0) 4838 + profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE); 4839 + 4829 4840 err = brcmf_parse_configure_security(ifp, settings, 4830 4841 NL80211_IFTYPE_AP); 4831 4842 if (err < 0) { ··· 4931 4904 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 4932 4905 struct brcmf_if *ifp = netdev_priv(ndev); 4933 4906 struct brcmf_pub *drvr = cfg->pub; 4907 + struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; 4934 4908 s32 err; 4935 4909 struct brcmf_fil_bss_enable_le bss_enable; 4936 4910 struct brcmf_join_params join_params; ··· 4942 4914 /* Due to most likely deauths outstanding we sleep */ 4943 4915 /* first to make sure they get processed by fw. 
*/ 4944 4916 msleep(400); 4917 + 4918 + if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) { 4919 + if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK)) 4920 + brcmf_set_pmk(ifp, NULL, 0); 4921 + if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE)) 4922 + brcmf_set_sae_password(ifp, NULL, 0); 4923 + profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE); 4924 + } 4945 4925 4946 4926 if (ifp->vif->mbss) { 4947 4927 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); ··· 7098 7062 if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE)) 7099 7063 wiphy_ext_feature_set(wiphy, 7100 7064 NL80211_EXT_FEATURE_SAE_OFFLOAD); 7065 + } 7066 + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWAUTH)) { 7067 + wiphy_ext_feature_set(wiphy, 7068 + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK); 7069 + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE)) 7070 + wiphy_ext_feature_set(wiphy, 7071 + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP); 7101 7072 } 7102 7073 wiphy->mgmt_stypes = brcmf_txrx_stypes; 7103 7074 wiphy->max_remain_on_channel_duration = 5000;
+14
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
··· 129 129 }; 130 130 131 131 /** 132 + * enum brcmf_profile_fwauth - firmware authenticator profile 133 + * 134 + * @BRCMF_PROFILE_FWAUTH_NONE: no firmware authenticator 135 + * @BRCMF_PROFILE_FWAUTH_PSK: authenticator for WPA/WPA2-PSK 136 + * @BRCMF_PROFILE_FWAUTH_SAE: authenticator for SAE 137 + */ 138 + enum brcmf_profile_fwauth { 139 + BRCMF_PROFILE_FWAUTH_NONE, 140 + BRCMF_PROFILE_FWAUTH_PSK, 141 + BRCMF_PROFILE_FWAUTH_SAE 142 + }; 143 + 144 + /** 132 145 * struct brcmf_cfg80211_profile - profile information. 133 146 * 134 147 * @bssid: bssid of joined/joining ibss. ··· 153 140 struct brcmf_cfg80211_security sec; 154 141 struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS]; 155 142 enum brcmf_profile_fwsup use_fwsup; 143 + u16 use_fwauth; 156 144 bool is_ft; 157 145 }; 158 146
+1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
··· 42 42 { BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, 43 43 { BRCMF_FEAT_DOT11H, "802.11h" }, 44 44 { BRCMF_FEAT_SAE, "sae" }, 45 + { BRCMF_FEAT_FWAUTH, "idauth" }, 45 46 }; 46 47 47 48 #ifdef DEBUG
+3 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
··· 28 28 * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header 29 29 * DOT11H: firmware supports 802.11h 30 30 * SAE: simultaneous authentication of equals 31 + * FWAUTH: Firmware authenticator 31 32 */ 32 33 #define BRCMF_FEAT_LIST \ 33 34 BRCMF_FEAT_DEF(MBSS) \ ··· 50 49 BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \ 51 50 BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \ 52 51 BRCMF_FEAT_DEF(DOT11H) \ 53 - BRCMF_FEAT_DEF(SAE) 52 + BRCMF_FEAT_DEF(SAE) \ 53 + BRCMF_FEAT_DEF(FWAUTH) 54 54 55 55 /* 56 56 * Quirks:
+3
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
··· 1578 1578 brcmf_dbg(USB, "Enter\n"); 1579 1579 ret = driver_for_each_device(drv, NULL, NULL, 1580 1580 brcmf_usb_reset_device); 1581 + if (ret) 1582 + brcmf_err("failed to reset all usb devices %d\n", ret); 1583 + 1581 1584 usb_deregister(&brcmf_usbdrvr); 1582 1585 } 1583 1586
-9
drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
··· 5085 5085 return 0; 5086 5086 } 5087 5087 5088 - static uint brcms_c_down_del_timer(struct brcms_c_info *wlc) 5089 - { 5090 - uint callbacks = 0; 5091 - 5092 - return callbacks; 5093 - } 5094 - 5095 5088 static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw) 5096 5089 { 5097 5090 bool dev_gone; ··· 5194 5201 callbacks++; 5195 5202 wlc->WDarmed = false; 5196 5203 } 5197 - /* cancel all other timers */ 5198 - callbacks += brcms_c_down_del_timer(wlc); 5199 5204 5200 5205 wlc->pub->up = false; 5201 5206
-55
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
··· 357 357 RADIO_2064_REG12A, 358 358 }; 359 359 360 - static const 361 - struct lcnphy_rx_iqcomp lcnphy_rx_iqcomp_table_rev0[] = { 362 - {1, 0, 0}, 363 - {2, 0, 0}, 364 - {3, 0, 0}, 365 - {4, 0, 0}, 366 - {5, 0, 0}, 367 - {6, 0, 0}, 368 - {7, 0, 0}, 369 - {8, 0, 0}, 370 - {9, 0, 0}, 371 - {10, 0, 0}, 372 - {11, 0, 0}, 373 - {12, 0, 0}, 374 - {13, 0, 0}, 375 - {14, 0, 0}, 376 - {34, 0, 0}, 377 - {38, 0, 0}, 378 - {42, 0, 0}, 379 - {46, 0, 0}, 380 - {36, 0, 0}, 381 - {40, 0, 0}, 382 - {44, 0, 0}, 383 - {48, 0, 0}, 384 - {52, 0, 0}, 385 - {56, 0, 0}, 386 - {60, 0, 0}, 387 - {64, 0, 0}, 388 - {100, 0, 0}, 389 - {104, 0, 0}, 390 - {108, 0, 0}, 391 - {112, 0, 0}, 392 - {116, 0, 0}, 393 - {120, 0, 0}, 394 - {124, 0, 0}, 395 - {128, 0, 0}, 396 - {132, 0, 0}, 397 - {136, 0, 0}, 398 - {140, 0, 0}, 399 - {149, 0, 0}, 400 - {153, 0, 0}, 401 - {157, 0, 0}, 402 - {161, 0, 0}, 403 - {165, 0, 0}, 404 - {184, 0, 0}, 405 - {188, 0, 0}, 406 - {192, 0, 0}, 407 - {196, 0, 0}, 408 - {200, 0, 0}, 409 - {204, 0, 0}, 410 - {208, 0, 0}, 411 - {212, 0, 0}, 412 - {216, 0, 0}, 413 - }; 414 - 415 360 static const u32 lcnphy_23bitgaincode_table[] = { 416 361 0x200100, 417 362 0x200200,
-99
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
··· 105 105 0x00000000, 106 106 }; 107 107 108 - static const u32 dot11lcn_gain_tbl_rev1[] = { 109 - 0x00000000, 110 - 0x00000000, 111 - 0x00000000, 112 - 0x00000000, 113 - 0x00000000, 114 - 0x00000000, 115 - 0x00000000, 116 - 0x00000000, 117 - 0x00000008, 118 - 0x00000004, 119 - 0x00000008, 120 - 0x00000001, 121 - 0x00000005, 122 - 0x00000009, 123 - 0x0000000D, 124 - 0x00000011, 125 - 0x00000051, 126 - 0x00000091, 127 - 0x00000011, 128 - 0x00000051, 129 - 0x00000091, 130 - 0x000000d1, 131 - 0x00000053, 132 - 0x00000093, 133 - 0x000000d3, 134 - 0x000000d7, 135 - 0x00000117, 136 - 0x00000517, 137 - 0x00000917, 138 - 0x00000957, 139 - 0x00000d57, 140 - 0x00001157, 141 - 0x00001197, 142 - 0x00005197, 143 - 0x00009197, 144 - 0x0000d197, 145 - 0x00011197, 146 - 0x00000000, 147 - 0x00000000, 148 - 0x00000000, 149 - 0x00000000, 150 - 0x00000000, 151 - 0x00000000, 152 - 0x00000008, 153 - 0x00000004, 154 - 0x00000008, 155 - 0x00000001, 156 - 0x00000005, 157 - 0x00000009, 158 - 0x0000000D, 159 - 0x00000011, 160 - 0x00000051, 161 - 0x00000091, 162 - 0x00000011, 163 - 0x00000051, 164 - 0x00000091, 165 - 0x000000d1, 166 - 0x00000053, 167 - 0x00000093, 168 - 0x000000d3, 169 - 0x000000d7, 170 - 0x00000117, 171 - 0x00000517, 172 - 0x00000917, 173 - 0x00000957, 174 - 0x00000d57, 175 - 0x00001157, 176 - 0x00005157, 177 - 0x00009157, 178 - 0x0000d157, 179 - 0x00011157, 180 - 0x00015157, 181 - 0x00019157, 182 - 0x0001d157, 183 - 0x00000000, 184 - 0x00000000, 185 - 0x00000000, 186 - 0x00000000, 187 - 0x00000000, 188 - 0x00000000, 189 - 0x00000000, 190 - 0x00000000, 191 - 0x00000000, 192 - 0x00000000, 193 - 0x00000000, 194 - 0x00000000, 195 - 0x00000000, 196 - 0x00000000, 197 - 0x00000000, 198 - 0x00000000, 199 - 0x00000000, 200 - 0x00000000, 201 - 0x00000000, 202 - 0x00000000, 203 - 0x00000000, 204 - 0x00000000, 205 - }; 206 - 207 108 static const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = { 208 109 0x0401, 209 110 0x0402,
+9 -6
drivers/net/wireless/cisco/airo.c
··· 2430 2430 iounmap(ai->pcimem); 2431 2431 if (ai->pciaux) 2432 2432 iounmap(ai->pciaux); 2433 - pci_free_consistent(ai->pci, PCI_SHARED_LEN, 2434 - ai->shared, ai->shared_dma); 2433 + dma_free_coherent(&ai->pci->dev, PCI_SHARED_LEN, 2434 + ai->shared, ai->shared_dma); 2435 2435 } 2436 2436 } 2437 2437 crypto_free_sync_skcipher(ai->tfm); ··· 2581 2581 } 2582 2582 2583 2583 /* Reserve PKTSIZE for each fid and 2K for the Rids */ 2584 - ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); 2584 + ai->shared = dma_alloc_coherent(&pci->dev, PCI_SHARED_LEN, 2585 + &ai->shared_dma, GFP_KERNEL); 2585 2586 if (!ai->shared) { 2586 - airo_print_err("", "Couldn't alloc_consistent %d", 2587 + airo_print_err("", "Couldn't alloc_coherent %d", 2587 2588 PCI_SHARED_LEN); 2588 2589 goto free_auxmap; 2589 2590 } ··· 2644 2643 2645 2644 return 0; 2646 2645 free_shared: 2647 - pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); 2646 + dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared, 2647 + ai->shared_dma); 2648 2648 free_auxmap: 2649 2649 iounmap(ai->pciaux); 2650 2650 free_memmap: ··· 2932 2930 unregister_netdev(dev); 2933 2931 err_out_map: 2934 2932 if (test_bit(FLAG_MPI,&ai->flags) && pci) { 2935 - pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma); 2933 + dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared, 2934 + ai->shared_dma); 2936 2935 iounmap(ai->pciaux); 2937 2936 iounmap(ai->pcimem); 2938 2937 mpi_unmap_card(ai->pci);
+4 -5
drivers/net/wireless/mediatek/mt76/debugfs.c
··· 31 31 int i; 32 32 33 33 for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { 34 - struct mt76_sw_queue *q = &dev->q_tx[i]; 34 + struct mt76_queue *q = dev->q_tx[i]; 35 35 36 - if (!q->q) 36 + if (!q) 37 37 continue; 38 38 39 39 seq_printf(s, 40 - "%d: queued=%d head=%d tail=%d swq_queued=%d\n", 41 - i, q->q->queued, q->q->head, q->q->tail, 42 - q->swq_queued); 40 + "%d: queued=%d head=%d tail=%d\n", 41 + i, q->queued, q->head, q->tail); 43 42 } 44 43 45 44 return 0;
+106 -56
drivers/net/wireless/mediatek/mt76/dma.c
··· 7 7 #include "mt76.h" 8 8 #include "dma.h" 9 9 10 + static struct mt76_txwi_cache * 11 + mt76_alloc_txwi(struct mt76_dev *dev) 12 + { 13 + struct mt76_txwi_cache *t; 14 + dma_addr_t addr; 15 + u8 *txwi; 16 + int size; 17 + 18 + size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); 19 + txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC); 20 + if (!txwi) 21 + return NULL; 22 + 23 + addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size, 24 + DMA_TO_DEVICE); 25 + t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); 26 + t->dma_addr = addr; 27 + 28 + return t; 29 + } 30 + 31 + static struct mt76_txwi_cache * 32 + __mt76_get_txwi(struct mt76_dev *dev) 33 + { 34 + struct mt76_txwi_cache *t = NULL; 35 + 36 + spin_lock(&dev->lock); 37 + if (!list_empty(&dev->txwi_cache)) { 38 + t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, 39 + list); 40 + list_del(&t->list); 41 + } 42 + spin_unlock(&dev->lock); 43 + 44 + return t; 45 + } 46 + 47 + static struct mt76_txwi_cache * 48 + mt76_get_txwi(struct mt76_dev *dev) 49 + { 50 + struct mt76_txwi_cache *t = __mt76_get_txwi(dev); 51 + 52 + if (t) 53 + return t; 54 + 55 + return mt76_alloc_txwi(dev); 56 + } 57 + 58 + void 59 + mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) 60 + { 61 + if (!t) 62 + return; 63 + 64 + spin_lock(&dev->lock); 65 + list_add(&t->list, &dev->txwi_cache); 66 + spin_unlock(&dev->lock); 67 + } 68 + EXPORT_SYMBOL_GPL(mt76_put_txwi); 69 + 70 + static void 71 + mt76_free_pending_txwi(struct mt76_dev *dev) 72 + { 73 + struct mt76_txwi_cache *t; 74 + 75 + while ((t = __mt76_get_txwi(dev)) != NULL) 76 + dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, 77 + DMA_TO_DEVICE); 78 + } 79 + 10 80 static int 11 81 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, 12 82 int idx, int n_desc, int bufsize, ··· 119 49 struct mt76_queue_buf *buf, int nbufs, u32 info, 120 50 struct sk_buff *skb, void *txwi) 121 51 { 52 + struct mt76_queue_entry *entry; 122 53 
struct mt76_desc *desc; 123 54 u32 ctrl; 124 55 int i, idx = -1; ··· 132 61 for (i = 0; i < nbufs; i += 2, buf += 2) { 133 62 u32 buf0 = buf[0].addr, buf1 = 0; 134 63 64 + idx = q->head; 65 + q->head = (q->head + 1) % q->ndesc; 66 + 67 + desc = &q->desc[idx]; 68 + entry = &q->entry[idx]; 69 + 70 + if (buf[0].skip_unmap) 71 + entry->skip_buf0 = true; 72 + entry->skip_buf1 = i == nbufs - 1; 73 + 74 + entry->dma_addr[0] = buf[0].addr; 75 + entry->dma_len[0] = buf[0].len; 76 + 135 77 ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 136 78 if (i < nbufs - 1) { 79 + entry->dma_addr[1] = buf[1].addr; 80 + entry->dma_len[1] = buf[1].len; 137 81 buf1 = buf[1].addr; 138 82 ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); 83 + if (buf[1].skip_unmap) 84 + entry->skip_buf1 = true; 139 85 } 140 86 141 87 if (i == nbufs - 1) 142 88 ctrl |= MT_DMA_CTL_LAST_SEC0; 143 89 else if (i == nbufs - 2) 144 90 ctrl |= MT_DMA_CTL_LAST_SEC1; 145 - 146 - idx = q->head; 147 - q->head = (q->head + 1) % q->ndesc; 148 - 149 - desc = &q->desc[idx]; 150 91 151 92 WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); 152 93 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); ··· 179 96 struct mt76_queue_entry *prev_e) 180 97 { 181 98 struct mt76_queue_entry *e = &q->entry[idx]; 182 - __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl); 183 - u32 ctrl = le32_to_cpu(__ctrl); 184 99 185 - if (!e->skip_buf0) { 186 - __le32 addr = READ_ONCE(q->desc[idx].buf0); 187 - u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl); 188 - 189 - dma_unmap_single(dev->dev, le32_to_cpu(addr), len, 100 + if (!e->skip_buf0) 101 + dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0], 190 102 DMA_TO_DEVICE); 191 - } 192 103 193 - if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) { 194 - __le32 addr = READ_ONCE(q->desc[idx].buf1); 195 - u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl); 196 - 197 - dma_unmap_single(dev->dev, le32_to_cpu(addr), len, 104 + if (!e->skip_buf1) 105 + dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1], 198 106 DMA_TO_DEVICE); 199 - } 
200 107 201 108 if (e->txwi == DMA_DUMMY_DATA) 202 109 e->txwi = NULL; ··· 210 137 static void 211 138 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) 212 139 { 140 + wmb(); 213 141 writel(q->head, &q->regs->cpu_idx); 214 142 } 215 143 216 144 static void 217 145 mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) 218 146 { 219 - struct mt76_sw_queue *sq = &dev->q_tx[qid]; 220 - struct mt76_queue *q = sq->q; 147 + struct mt76_queue *q = dev->q_tx[qid]; 221 148 struct mt76_queue_entry entry; 222 - unsigned int n_swq_queued[8] = {}; 223 - unsigned int n_queued = 0; 224 149 bool wake = false; 225 - int i, last; 150 + int last; 226 151 227 152 if (!q) 228 153 return; ··· 230 159 else 231 160 last = readl(&q->regs->dma_idx); 232 161 233 - while ((q->queued > n_queued) && q->tail != last) { 162 + while (q->queued > 0 && q->tail != last) { 234 163 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); 235 - if (entry.schedule) 236 - n_swq_queued[entry.qid]++; 237 - 238 - q->tail = (q->tail + 1) % q->ndesc; 239 - n_queued++; 240 - 241 - if (entry.skb) 242 - dev->drv->tx_complete_skb(dev, qid, &entry); 164 + mt76_queue_tx_complete(dev, q, &entry); 243 165 244 166 if (entry.txwi) { 245 167 if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) ··· 242 178 243 179 if (!flush && q->tail == last) 244 180 last = readl(&q->regs->dma_idx); 245 - } 246 181 247 - spin_lock_bh(&q->lock); 248 - 249 - q->queued -= n_queued; 250 - for (i = 0; i < 4; i++) { 251 - if (!n_swq_queued[i]) 252 - continue; 253 - 254 - dev->q_tx[i].swq_queued -= n_swq_queued[i]; 255 - } 256 - 257 - /* ext PHY */ 258 - for (i = 0; i < 4; i++) { 259 - if (!n_swq_queued[i]) 260 - continue; 261 - 262 - dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i]; 263 182 } 264 183 265 184 if (flush) { 185 + spin_lock_bh(&q->lock); 266 186 mt76_dma_sync_idx(dev, q); 267 187 mt76_dma_kick_queue(dev, q); 188 + spin_unlock_bh(&q->lock); 268 189 } 269 190 270 191 wake = wake && q->stopped 
&& ··· 259 210 260 211 if (!q->queued) 261 212 wake_up(&dev->tx_wait); 262 - 263 - spin_unlock_bh(&q->lock); 264 213 265 214 if (wake) 266 215 ieee80211_wake_queue(dev->hw, qid); ··· 274 227 void *buf = e->buf; 275 228 int buf_len = SKB_WITH_OVERHEAD(q->buf_size); 276 229 277 - buf_addr = le32_to_cpu(READ_ONCE(desc->buf0)); 230 + buf_addr = e->dma_addr[0]; 278 231 if (len) { 279 232 u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl)); 280 233 *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl); ··· 315 268 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, 316 269 struct sk_buff *skb, u32 tx_info) 317 270 { 318 - struct mt76_queue *q = dev->q_tx[qid].q; 271 + struct mt76_queue *q = dev->q_tx[qid]; 319 272 struct mt76_queue_buf buf; 320 273 dma_addr_t addr; 321 274 ··· 347 300 struct sk_buff *skb, struct mt76_wcid *wcid, 348 301 struct ieee80211_sta *sta) 349 302 { 350 - struct mt76_queue *q = dev->q_tx[qid].q; 303 + struct mt76_queue *q = dev->q_tx[qid]; 351 304 struct mt76_tx_info tx_info = { 352 305 .skb = skb, 353 306 }; ··· 425 378 426 379 e.skb = tx_info.skb; 427 380 e.txwi = t; 428 - dev->drv->tx_complete_skb(dev, qid, &e); 381 + dev->drv->tx_complete_skb(dev, &e); 429 382 mt76_put_txwi(dev, t); 430 383 return ret; 431 384 } ··· 659 612 { 660 613 int i; 661 614 615 + mt76_worker_disable(&dev->tx_worker); 662 616 netif_napi_del(&dev->tx_napi); 663 617 for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) 664 618 mt76_dma_tx_cleanup(dev, i, true); ··· 668 620 netif_napi_del(&dev->napi[i]); 669 621 mt76_dma_rx_cleanup(dev, &dev->q_rx[i]); 670 622 } 623 + 624 + mt76_free_pending_txwi(dev); 671 625 } 672 626 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
+15 -28
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 2 2 /* 3 3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> 4 4 */ 5 + #include <linux/sched.h> 5 6 #include <linux/of.h> 6 7 #include "mt76.h" 7 8 ··· 305 304 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 306 305 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 307 306 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 308 - ieee80211_hw_set(hw, TX_AMSDU); 309 307 310 - /* TODO: avoid linearization for SDIO */ 311 - if (!mt76_is_sdio(dev)) 308 + if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { 309 + ieee80211_hw_set(hw, TX_AMSDU); 312 310 ieee80211_hw_set(hw, TX_FRAG_LIST); 311 + } 313 312 314 313 ieee80211_hw_set(hw, MFP_CAPABLE); 315 314 ieee80211_hw_set(hw, AP_LINK_PS); ··· 434 433 skb_queue_head_init(&dev->mcu.res_q); 435 434 init_waitqueue_head(&dev->mcu.wait); 436 435 mutex_init(&dev->mcu.mutex); 436 + dev->tx_worker.fn = mt76_tx_worker; 437 437 438 438 INIT_LIST_HEAD(&dev->txwi_cache); 439 439 440 440 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) 441 441 skb_queue_head_init(&dev->rx_skb[i]); 442 - 443 - tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev); 444 442 445 443 dev->wq = alloc_ordered_workqueue("mt76", 0); 446 444 if (!dev->wq) { ··· 483 483 return ret; 484 484 } 485 485 486 - return ieee80211_register_hw(hw); 486 + ret = ieee80211_register_hw(hw); 487 + if (ret) 488 + return ret; 489 + 490 + WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx")); 491 + sched_set_fifo_low(dev->tx_worker.task); 492 + 493 + return 0; 487 494 } 488 495 EXPORT_SYMBOL_GPL(mt76_register_device); 489 496 ··· 507 500 508 501 void mt76_free_device(struct mt76_dev *dev) 509 502 { 503 + mt76_worker_teardown(&dev->tx_worker); 510 504 if (dev->wq) { 511 505 destroy_workqueue(dev->wq); 512 506 dev->wq = NULL; 513 507 } 514 - if (mt76_is_mmio(dev)) 515 - mt76_tx_free(dev); 516 508 ieee80211_free_hw(dev->hw); 517 509 } 518 510 EXPORT_SYMBOL_GPL(mt76_free_device); ··· 546 540 offset = __MT_TXQ_MAX * (phy != &dev->phy); 547 541 548 542 for (i = 0; i < __MT_TXQ_MAX; 
i++) { 549 - q = dev->q_tx[offset + i].q; 543 + q = dev->q_tx[offset + i]; 550 544 if (q && q->queued) 551 545 return true; 552 546 } ··· 876 870 struct ieee80211_hw *hw; 877 871 struct mt76_wcid *wcid = status->wcid; 878 872 bool ps; 879 - int i; 880 873 881 874 hw = mt76_phy_hw(dev, status->ext_phy); 882 875 if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) { ··· 925 920 926 921 dev->drv->sta_ps(dev, sta, ps); 927 922 ieee80211_sta_ps_transition(sta, ps); 928 - 929 - if (ps) 930 - return; 931 - 932 - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { 933 - struct mt76_txq *mtxq; 934 - 935 - if (!sta->txq[i]) 936 - continue; 937 - 938 - mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; 939 - if (!skb_queue_empty(&mtxq->retry_q)) 940 - ieee80211_schedule_txq(hw, sta->txq[i]); 941 - } 942 923 } 943 924 944 925 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, ··· 986 995 987 996 mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; 988 997 mtxq->wcid = wcid; 989 - 990 - mt76_txq_init(dev, sta->txq[i]); 991 998 } 992 999 993 1000 ewma_signal_init(&wcid->rssi); ··· 1013 1024 dev->drv->sta_remove(dev, vif, sta); 1014 1025 1015 1026 mt76_tx_status_check(dev, wcid, true); 1016 - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 1017 - mt76_txq_remove(dev, sta->txq[i]); 1018 1027 mt76_wcid_mask_clear(dev->wcid_mask, idx); 1019 1028 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx); 1020 1029 }
+34 -27
drivers/net/wireless/mediatek/mt76/mt76.h
··· 17 17 #include "util.h" 18 18 #include "testmode.h" 19 19 20 - #define MT_TX_RING_SIZE 256 21 20 #define MT_MCU_RING_SIZE 32 22 21 #define MT_RX_BUF_SIZE 2048 23 22 #define MT_SKB_HEAD_LEN 128 23 + 24 + #define MT_MAX_NON_AQL_PKT 16 25 + #define MT_TXQ_FREE_THR 32 24 26 25 27 struct mt76_dev; 26 28 struct mt76_phy; ··· 81 79 82 80 struct mt76_queue_buf { 83 81 dma_addr_t addr; 84 - int len; 82 + u16 len; 83 + bool skip_unmap; 85 84 }; 86 85 87 86 struct mt76_tx_info { ··· 102 99 struct urb *urb; 103 100 int buf_sz; 104 101 }; 105 - enum mt76_txq_id qid; 102 + u32 dma_addr[2]; 103 + u16 dma_len[2]; 104 + u16 wcid; 106 105 bool skip_buf0:1; 107 - bool schedule:1; 106 + bool skip_buf1:1; 108 107 bool done:1; 109 108 }; 110 109 ··· 138 133 dma_addr_t desc_dma; 139 134 struct sk_buff *rx_head; 140 135 struct page_frag_cache rx_page; 141 - }; 142 - 143 - struct mt76_sw_queue { 144 - struct mt76_queue *q; 145 - 146 - struct list_head swq; 147 - int swq_queued; 148 136 }; 149 137 150 138 struct mt76_mcu_ops { ··· 202 204 struct mt76_wcid { 203 205 struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; 204 206 207 + atomic_t non_aql_packets; 205 208 unsigned long flags; 206 209 207 210 struct ewma_signal rssi; ··· 213 214 214 215 u8 sta:1; 215 216 u8 ext_phy:1; 217 + u8 amsdu:1; 216 218 217 219 u8 rx_check_pn; 218 220 u8 rx_key_pn[IEEE80211_NUM_TIDS][6]; ··· 226 226 }; 227 227 228 228 struct mt76_txq { 229 - struct mt76_sw_queue *swq; 230 229 struct mt76_wcid *wcid; 231 - 232 - struct sk_buff_head retry_q; 233 230 234 231 u16 agg_ssn; 235 232 bool send_bar; ··· 306 309 #define MT_DRV_SW_RX_AIRTIME BIT(2) 307 310 #define MT_DRV_RX_DMA_HDR BIT(3) 308 311 #define MT_DRV_HW_MGMT_TXQ BIT(4) 312 + #define MT_DRV_AMSDU_OFFLOAD BIT(5) 309 313 310 314 struct mt76_driver_ops { 311 315 u32 drv_flags; ··· 320 322 struct ieee80211_sta *sta, 321 323 struct mt76_tx_info *tx_info); 322 324 323 - void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid, 325 + void 
(*tx_complete_skb)(struct mt76_dev *dev, 324 326 struct mt76_queue_entry *e); 325 327 326 328 bool (*tx_status_data)(struct mt76_dev *dev, u8 *update); ··· 443 445 } mcu; 444 446 }; 445 447 448 + #define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE) 446 449 struct mt76_sdio { 447 - struct task_struct *tx_kthread; 448 - struct task_struct *kthread; 450 + struct workqueue_struct *txrx_wq; 451 + struct { 452 + struct work_struct xmit_work; 453 + struct work_struct status_work; 454 + } tx; 455 + struct { 456 + struct work_struct recv_work; 457 + struct work_struct net_work; 458 + } rx; 459 + 449 460 struct work_struct stat_work; 450 461 451 - unsigned long state; 462 + u8 *xmit_buf[MT_TXQ_MCU_WA]; 452 463 453 464 struct sdio_func *func; 465 + void *intr_data; 454 466 455 467 struct { 456 468 struct mutex lock; ··· 601 593 struct sk_buff_head rx_skb[__MT_RXQ_MAX]; 602 594 603 595 struct list_head txwi_cache; 604 - struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX]; 596 + struct mt76_queue *q_tx[2 * __MT_TXQ_MAX]; 605 597 struct mt76_queue q_rx[__MT_RXQ_MAX]; 606 598 const struct mt76_queue_ops *queue_ops; 607 599 int tx_dma_idx[4]; 608 600 609 - struct tasklet_struct tx_tasklet; 601 + struct mt76_worker tx_worker; 610 602 struct napi_struct tx_napi; 611 603 struct delayed_work mac_work; 612 604 ··· 900 892 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); 901 893 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta, 902 894 struct mt76_wcid *wcid, struct sk_buff *skb); 903 - void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); 904 - void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq); 905 895 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); 906 896 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, 907 897 bool send_bar); 898 + void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb); 908 899 void mt76_txq_schedule(struct mt76_phy *phy, enum 
mt76_txq_id qid); 909 900 void mt76_txq_schedule_all(struct mt76_phy *phy); 910 - void mt76_tx_tasklet(unsigned long data); 901 + void mt76_tx_worker(struct mt76_worker *w); 911 902 void mt76_release_buffered_frames(struct ieee80211_hw *hw, 912 903 struct ieee80211_sta *sta, 913 904 u16 tids, int nframes, ··· 939 932 struct sk_buff_head *list); 940 933 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, 941 934 struct sk_buff_head *list); 942 - void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb); 935 + void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb); 943 936 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, 944 937 bool flush); 945 938 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ··· 1003 996 return hw; 1004 997 } 1005 998 1006 - void mt76_tx_free(struct mt76_dev *dev); 1007 - struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev); 1008 999 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t); 1009 1000 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, 1010 1001 struct napi_struct *napi); ··· 1010 1005 struct napi_struct *napi); 1011 1006 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); 1012 1007 void mt76_testmode_tx_pending(struct mt76_dev *dev); 1008 + void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q, 1009 + struct mt76_queue_entry *e); 1013 1010 1014 1011 /* usb */ 1015 1012 static inline bool mt76u_urb_error(struct urb *urb) ··· 1046 1039 return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); 1047 1040 } 1048 1041 1049 - int mt76_skb_adjust_pad(struct sk_buff *skb); 1042 + int mt76_skb_adjust_pad(struct sk_buff *skb, int pad); 1050 1043 int mt76u_vendor_request(struct mt76_dev *dev, u8 req, 1051 1044 u8 req_type, u16 val, u16 offset, 1052 1045 void *buf, size_t len);
+4 -4
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
··· 29 29 mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | 30 30 FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | 31 31 FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, 32 - dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) | 32 + dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) | 33 33 FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | 34 34 FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); 35 35 ··· 78 78 data.dev = dev; 79 79 __skb_queue_head_init(&data.q); 80 80 81 - q = dev->mt76.q_tx[MT_TXQ_BEACON].q; 81 + q = dev->mt76.q_tx[MT_TXQ_BEACON]; 82 82 spin_lock_bh(&q->lock); 83 83 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), 84 84 IEEE80211_IFACE_ITER_RESUME_ALL, ··· 95 95 if (dev->mt76.csa_complete) 96 96 goto out; 97 97 98 - q = dev->mt76.q_tx[MT_TXQ_CAB].q; 98 + q = dev->mt76.q_tx[MT_TXQ_CAB]; 99 99 do { 100 100 nframes = skb_queue_len(&data.q); 101 101 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), ··· 136 136 137 137 out: 138 138 mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); 139 - if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued > 139 + if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued > 140 140 hweight8(dev->mt76.beacon_mask)) 141 141 dev->beacon_check++; 142 142 }
+4 -14
drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
··· 70 70 mt7603_edcca_set, "%lld\n"); 71 71 72 72 static int 73 - mt7603_ampdu_stat_read(struct seq_file *file, void *data) 73 + mt7603_ampdu_stat_show(struct seq_file *file, void *data) 74 74 { 75 75 struct mt7603_dev *dev = file->private; 76 76 int bound[3], i, range; ··· 91 91 return 0; 92 92 } 93 93 94 - static int 95 - mt7603_ampdu_stat_open(struct inode *inode, struct file *f) 96 - { 97 - return single_open(f, mt7603_ampdu_stat_read, inode->i_private); 98 - } 99 - 100 - static const struct file_operations fops_ampdu_stat = { 101 - .open = mt7603_ampdu_stat_open, 102 - .read = seq_read, 103 - .llseek = seq_lseek, 104 - .release = single_release, 105 - }; 94 + DEFINE_SHOW_ATTRIBUTE(mt7603_ampdu_stat); 106 95 107 96 void mt7603_init_debugfs(struct mt7603_dev *dev) 108 97 { ··· 101 112 if (!dir) 102 113 return; 103 114 104 - debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 115 + debugfs_create_file("ampdu_stat", 0400, dir, dev, 116 + &mt7603_ampdu_stat_fops); 105 117 debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir, 106 118 mt76_queues_read); 107 119 debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
+11 -15
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
··· 5 5 #include "../dma.h" 6 6 7 7 static int 8 - mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q, 9 - int idx, int n_desc) 8 + mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc) 10 9 { 11 10 struct mt76_queue *hwq; 12 11 int err; ··· 18 19 if (err < 0) 19 20 return err; 20 21 21 - INIT_LIST_HEAD(&q->swq); 22 - q->q = hwq; 22 + dev->mt76.q_tx[qid] = hwq; 23 23 24 24 mt7603_irq_enable(dev, MT_INT_TX_DONE(idx)); 25 25 ··· 121 123 mt76_rx(&dev->mt76, q, skb); 122 124 return; 123 125 } 124 - /* fall through */ 126 + fallthrough; 125 127 default: 126 128 dev_kfree_skb(skb); 127 129 break; ··· 163 165 164 166 mt7603_mac_sta_poll(dev); 165 167 166 - tasklet_schedule(&dev->mt76.tx_tasklet); 168 + mt76_worker_schedule(&dev->mt76.tx_worker); 167 169 168 170 return 0; 169 171 } ··· 191 193 mt7603_pse_client_reset(dev); 192 194 193 195 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { 194 - ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i], 195 - wmm_queue_map[i], 196 - MT_TX_RING_SIZE); 196 + ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i], 197 + MT7603_TX_RING_SIZE); 197 198 if (ret) 198 199 return ret; 199 200 } 200 201 201 - ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], 202 - MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); 202 + ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD, 203 + MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE); 203 204 if (ret) 204 205 return ret; 205 206 206 - ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], 207 + ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU, 207 208 MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); 208 209 if (ret) 209 210 return ret; 210 211 211 - ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON], 212 + ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON, 212 213 MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE); 213 214 if (ret) 214 215 return ret; 215 216 216 - ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB], 217 + ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB, 217 218 
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE); 218 219 if (ret) 219 220 return ret; ··· 246 249 MT_WPDMA_GLO_CFG_RX_DMA_EN | 247 250 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 248 251 249 - tasklet_kill(&dev->mt76.tx_tasklet); 250 252 mt76_dma_cleanup(&dev->mt76); 251 253 }
+15 -2
drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
··· 147 147 } 148 148 } 149 149 150 + static inline bool is_mt7688(struct mt7603_dev *dev) 151 + { 152 + return mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4); 153 + } 154 + 150 155 int mt7603_eeprom_init(struct mt7603_dev *dev) 151 156 { 157 + u8 *eeprom; 152 158 int ret; 153 159 154 160 ret = mt7603_eeprom_load(dev); ··· 169 163 MT7603_EEPROM_SIZE); 170 164 } 171 165 166 + eeprom = (u8 *)dev->mt76.eeprom.data; 172 167 dev->mt76.cap.has_2ghz = true; 173 - memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, 174 - ETH_ALEN); 168 + memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN); 169 + 170 + /* Check for 1SS devices */ 171 + dev->mphy.antenna_mask = 3; 172 + if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 || 173 + FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 || 174 + is_mt7688(dev)) 175 + dev->mphy.antenna_mask = 1; 175 176 176 177 mt76_eeprom_override(&dev->mt76); 177 178
+3
drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
··· 85 85 MT_EE_SRC_FLASH, 86 86 }; 87 87 88 + #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) 89 + #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) 90 + 88 91 #endif
-5
drivers/net/wireless/mediatek/mt76/mt7603/init.c
··· 536 536 tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, 537 537 (unsigned long)dev); 538 538 539 - /* Check for 7688, which only has 1SS */ 540 - dev->mphy.antenna_mask = 3; 541 - if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4)) 542 - dev->mphy.antenna_mask = 1; 543 - 544 539 dev->slottime = 9; 545 540 dev->sensitivity_limit = 28; 546 541 dev->dynamic_sensitivity = true;
+12 -13
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
··· 445 445 446 446 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 447 447 for (i = 0; i < 4; i++) { 448 - struct mt76_queue *q = dev->mt76.q_tx[i].q; 448 + struct mt76_queue *q = dev->mt76.q_tx[i]; 449 449 u8 qidx = q->hw_idx; 450 450 u8 tid = ac_to_tid[i]; 451 451 u32 txtime = airtime[qidx]; ··· 592 592 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { 593 593 case MT_PHY_TYPE_CCK: 594 594 cck = true; 595 - /* fall through */ 595 + fallthrough; 596 596 case MT_PHY_TYPE_OFDM: 597 597 i = mt76_get_rate(&dev->mt76, sband, i, cck); 598 598 break; ··· 896 896 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 897 897 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 898 898 struct ieee80211_vif *vif = info->control.vif; 899 - struct mt76_queue *q = dev->mt76.q_tx[qid].q; 899 + struct mt76_queue *q = dev->mt76.q_tx[qid]; 900 900 struct mt7603_vif *mvif; 901 901 int wlan_idx; 902 902 int hdr_len = ieee80211_get_hdrlen_from_skb(skb); ··· 1036 1036 IEEE80211_TX_CTL_CLEAR_PS_FILT)) || 1037 1037 (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) 1038 1038 mt7603_wtbl_set_ps(dev, msta, false); 1039 + 1040 + mt76_tx_check_agg_ssn(sta, tx_info->skb); 1039 1041 } 1040 1042 1041 1043 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb); ··· 1163 1161 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { 1164 1162 case MT_PHY_TYPE_CCK: 1165 1163 cck = true; 1166 - /* fall through */ 1164 + fallthrough; 1167 1165 case MT_PHY_TYPE_OFDM: 1168 1166 if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ) 1169 1167 sband = &dev->mphy.sband_5g.sband; ··· 1271 1269 rcu_read_unlock(); 1272 1270 } 1273 1271 1274 - void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 1275 - struct mt76_queue_entry *e) 1272 + void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 1276 1273 { 1277 1274 struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 1278 1275 struct sk_buff *skb = e->skb; ··· 1281 1280 
return; 1282 1281 } 1283 1282 1284 - if (qid < 4) 1285 - dev->tx_hang_check = 0; 1286 - 1287 - mt76_tx_complete_skb(mdev, skb); 1283 + dev->tx_hang_check = 0; 1284 + mt76_tx_complete_skb(mdev, e->wcid, skb); 1288 1285 } 1289 1286 1290 1287 static bool ··· 1402 1403 /* lock/unlock all queues to ensure that no tx is pending */ 1403 1404 mt76_txq_schedule_all(&dev->mphy); 1404 1405 1405 - tasklet_disable(&dev->mt76.tx_tasklet); 1406 + mt76_worker_disable(&dev->mt76.tx_worker); 1406 1407 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); 1407 1408 napi_disable(&dev->mt76.napi[0]); 1408 1409 napi_disable(&dev->mt76.napi[1]); ··· 1451 1452 clear_bit(MT76_RESET, &dev->mphy.state); 1452 1453 mutex_unlock(&dev->mt76.mutex); 1453 1454 1454 - tasklet_enable(&dev->mt76.tx_tasklet); 1455 + mt76_worker_enable(&dev->mt76.tx_worker); 1455 1456 napi_enable(&dev->mt76.tx_napi); 1456 1457 napi_schedule(&dev->mt76.tx_napi); 1457 1458 ··· 1514 1515 int i; 1515 1516 1516 1517 for (i = 0; i < 4; i++) { 1517 - q = dev->mt76.q_tx[i].q; 1518 + q = dev->mt76.q_tx[i]; 1518 1519 1519 1520 if (!q->queued) 1520 1521 continue;
+1 -3
drivers/net/wireless/mediatek/mt76/mt7603/main.c
··· 75 75 76 76 mtxq = (struct mt76_txq *)vif->txq->drv_priv; 77 77 mtxq->wcid = &mvif->sta.wcid; 78 - mt76_txq_init(&dev->mt76, vif->txq); 79 78 rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); 80 79 81 80 out: ··· 98 99 mt7603_beacon_set_timer(dev, mvif->idx, 0); 99 100 100 101 rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 101 - mt76_txq_remove(&dev->mt76, vif->txq); 102 102 103 103 spin_lock_bh(&dev->sta_poll_lock); 104 104 if (!list_empty(&msta->poll_list)) ··· 512 514 u16 cw_max = (1 << 10) - 1; 513 515 u32 val; 514 516 515 - queue = dev->mt76.q_tx[queue].q->hw_idx; 517 + queue = dev->mt76.q_tx[queue]->hw_idx; 516 518 517 519 if (params->cw_min) 518 520 cw_min = params->cw_min;
+3 -2
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
··· 17 17 18 18 #define MT7603_MCU_RX_RING_SIZE 64 19 19 #define MT7603_RX_RING_SIZE 128 20 + #define MT7603_TX_RING_SIZE 256 21 + #define MT7603_PSD_RING_SIZE 128 20 22 21 23 #define MT7603_FIRMWARE_E1 "mt7603_e1.bin" 22 24 #define MT7603_FIRMWARE_E2 "mt7603_e2.bin" ··· 243 241 struct ieee80211_sta *sta, 244 242 struct mt76_tx_info *tx_info); 245 243 246 - void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 247 - struct mt76_queue_entry *e); 244 + void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 248 245 249 246 void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 250 247 struct sk_buff *skb);
+2
drivers/net/wireless/mediatek/mt76/mt7603/pci.c
··· 44 44 (mt76_rr(dev, MT_HW_REV) & 0xff); 45 45 dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 46 46 47 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 48 + 47 49 ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler, 48 50 IRQF_SHARED, KBUILD_MODNAME, dev); 49 51 if (ret)
+2
drivers/net/wireless/mediatek/mt76/mt7603/soc.c
··· 35 35 (mt76_rr(dev, MT_HW_REV) & 0xff); 36 36 dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 37 37 38 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 39 + 38 40 ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler, 39 41 IRQF_SHARED, KBUILD_MODNAME, dev); 40 42 if (ret)
+9 -21
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
··· 165 165 if (!mt7615_wait_for_mcu_init(dev)) 166 166 return 0; 167 167 168 - mt7615_mutex_acquire(dev); 169 - 170 168 skb = alloc_skb(1, GFP_KERNEL); 171 169 if (!skb) 172 170 return -ENOMEM; 173 171 174 172 skb_put(skb, 1); 175 - mt76_tx_queue_skb_raw(dev, 0, skb, 0); 176 173 174 + mt7615_mutex_acquire(dev); 175 + mt76_tx_queue_skb_raw(dev, 0, skb, 0); 177 176 mt7615_mutex_release(dev); 178 177 179 178 return 0; ··· 220 221 } 221 222 222 223 static int 223 - mt7615_ampdu_stat_read(struct seq_file *file, void *data) 224 + mt7615_ampdu_stat_show(struct seq_file *file, void *data) 224 225 { 225 226 struct mt7615_dev *dev = file->private; 226 227 ··· 234 235 return 0; 235 236 } 236 237 237 - static int 238 - mt7615_ampdu_stat_open(struct inode *inode, struct file *f) 239 - { 240 - return single_open(f, mt7615_ampdu_stat_read, inode->i_private); 241 - } 242 - 243 - static const struct file_operations fops_ampdu_stat = { 244 - .open = mt7615_ampdu_stat_open, 245 - .read = seq_read, 246 - .llseek = seq_lseek, 247 - .release = single_release, 248 - }; 238 + DEFINE_SHOW_ATTRIBUTE(mt7615_ampdu_stat); 249 239 250 240 static void 251 241 mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s) ··· 328 340 int i; 329 341 330 342 for (i = 0; i < ARRAY_SIZE(queue_map); i++) { 331 - struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id]; 343 + struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id]; 332 344 333 - if (!q->q) 345 + if (!q) 334 346 continue; 335 347 336 348 seq_printf(s, 337 349 "%s: queued=%d head=%d tail=%d\n", 338 - queue_map[i].queue, q->q->queued, q->q->head, 339 - q->q->tail); 350 + queue_map[i].queue, q->queued, q->head, 351 + q->tail); 340 352 } 341 353 342 354 return 0; ··· 381 393 mt76_queues_read); 382 394 debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, 383 395 mt7615_queues_acq); 384 - debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 396 + debugfs_create_file("ampdu_stat", 0400, dir, dev, 
&mt7615_ampdu_stat_fops); 385 397 debugfs_create_file("scs", 0600, dir, dev, &fops_scs); 386 398 debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); 387 399 debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+13 -42
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
··· 12 12 #include "mac.h" 13 13 14 14 static int 15 - mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q, 16 - int idx, int n_desc) 15 + mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc) 17 16 { 18 17 struct mt76_queue *hwq; 19 18 int err; ··· 25 26 if (err < 0) 26 27 return err; 27 28 28 - INIT_LIST_HEAD(&q->swq); 29 - q->q = hwq; 29 + dev->mt76.q_tx[qid] = hwq; 30 30 31 31 return 0; 32 32 } ··· 43 45 int i; 44 46 45 47 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { 46 - ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i], 47 - wmm_queue_map[i], 48 + ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i], 48 49 MT7615_TX_RING_SIZE / 2); 49 50 if (ret) 50 51 return ret; 51 52 } 52 53 53 - ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], 54 + ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD, 54 55 MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE); 55 56 if (ret) 56 57 return ret; 57 58 58 - ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], 59 + ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, 59 60 MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE); 60 61 return ret; 61 62 } ··· 62 65 static int 63 66 mt7615_init_tx_queues(struct mt7615_dev *dev) 64 67 { 65 - struct mt76_sw_queue *q; 66 68 int ret, i; 67 69 68 - ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL], 70 + ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL, 69 71 MT7615_TXQ_FWDL, 70 72 MT7615_TX_FWDL_RING_SIZE); 71 73 if (ret) ··· 73 77 if (!is_mt7615(&dev->mt76)) 74 78 return mt7622_init_tx_queues_multi(dev); 75 79 76 - ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0, 77 - MT7615_TX_RING_SIZE); 80 + ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE); 78 81 if (ret) 79 82 return ret; 80 83 81 - for (i = 1; i < MT_TXQ_MCU; i++) { 82 - q = &dev->mt76.q_tx[i]; 83 - INIT_LIST_HEAD(&q->swq); 84 - q->q = dev->mt76.q_tx[0].q; 85 - } 84 + for (i = 1; i < MT_TXQ_MCU; i++) 85 + dev->mt76.q_tx[i] = dev->mt76.q_tx[0]; 86 86 87 - ret = mt7615_init_tx_queue(dev, 
&dev->mt76.q_tx[MT_TXQ_MCU], 88 - MT7615_TXQ_MCU, 87 + ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU, 89 88 MT7615_TX_MCU_RING_SIZE); 90 89 return 0; 91 - } 92 - 93 - static void 94 - mt7615_tx_cleanup(struct mt7615_dev *dev) 95 - { 96 - int i; 97 - 98 - mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); 99 - mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 100 - if (is_mt7615(&dev->mt76)) { 101 - mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 102 - } else { 103 - for (i = 0; i < IEEE80211_NUM_ACS; i++) 104 - mt76_queue_tx_cleanup(dev, i, false); 105 - } 106 90 } 107 91 108 92 static int mt7615_poll_tx(struct napi_struct *napi, int budget) ··· 91 115 92 116 dev = container_of(napi, struct mt7615_dev, mt76.tx_napi); 93 117 94 - mt7615_tx_cleanup(dev); 118 + mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); 95 119 96 120 if (napi_complete_done(napi, 0)) 97 - mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL); 98 - 99 - mt7615_tx_cleanup(dev); 100 - 101 - tasklet_schedule(&dev->mt76.tx_tasklet); 121 + mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev)); 102 122 103 123 return 0; 104 124 } ··· 278 306 MT_WPDMA_GLO_CFG_RX_DMA_EN); 279 307 280 308 /* enable interrupts for TX/RX rings */ 281 - mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 309 + mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) | 282 310 MT_INT_MCU_CMD); 283 311 284 312 if (is_mt7622(&dev->mt76)) ··· 297 325 MT_WPDMA_GLO_CFG_RX_DMA_EN); 298 326 mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET); 299 327 300 - tasklet_kill(&dev->mt76.tx_tasklet); 301 328 mt76_dma_cleanup(&dev->mt76); 302 329 }
+3
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
··· 125 125 case MT_EE_2GHZ: 126 126 dev->mt76.cap.has_2ghz = true; 127 127 break; 128 + case MT_EE_DBDC: 129 + dev->dbdc_support = true; 130 + /* fall through */ 128 131 default: 129 132 dev->mt76.cap.has_2ghz = true; 130 133 dev->mt76.cap.has_5ghz = true;
+23 -2
drivers/net/wireless/mediatek/mt76/mt7615/init.c
··· 217 217 } 218 218 }; 219 219 220 + static const struct ieee80211_iface_combination if_comb_radar[] = { 221 + { 222 + .limits = if_limits, 223 + .n_limits = ARRAY_SIZE(if_limits), 224 + .max_interfaces = 4, 225 + .num_different_channels = 1, 226 + .beacon_int_infra_match = true, 227 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 228 + BIT(NL80211_CHAN_WIDTH_20) | 229 + BIT(NL80211_CHAN_WIDTH_40) | 230 + BIT(NL80211_CHAN_WIDTH_80) | 231 + BIT(NL80211_CHAN_WIDTH_160) | 232 + BIT(NL80211_CHAN_WIDTH_80P80), 233 + } 234 + }; 235 + 220 236 static const struct ieee80211_iface_combination if_comb[] = { 221 237 { 222 238 .limits = if_limits, ··· 322 306 hw->sta_data_size = sizeof(struct mt7615_sta); 323 307 hw->vif_data_size = sizeof(struct mt7615_vif); 324 308 325 - wiphy->iface_combinations = if_comb; 326 - wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 309 + if (is_mt7663(&phy->dev->mt76)) { 310 + wiphy->iface_combinations = if_comb; 311 + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 312 + } else { 313 + wiphy->iface_combinations = if_comb_radar; 314 + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_radar); 315 + } 327 316 wiphy->reg_notifier = mt7615_regd_notifier; 328 317 329 318 wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
+31 -11
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 378 378 switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { 379 379 case MT_PHY_TYPE_CCK: 380 380 cck = true; 381 - /* fall through */ 381 + fallthrough; 382 382 case MT_PHY_TYPE_OFDM: 383 383 i = mt76_get_rate(&dev->mt76, sband, i, cck); 384 384 break; ··· 1271 1271 switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { 1272 1272 case MT_PHY_TYPE_CCK: 1273 1273 cck = true; 1274 - /* fall through */ 1274 + fallthrough; 1275 1275 case MT_PHY_TYPE_OFDM: 1276 1276 mphy = &dev->mphy; 1277 1277 if (sta->wcid.ext_phy && dev->mt76.phy2) ··· 1400 1400 { 1401 1401 struct mt76_dev *mdev = &dev->mt76; 1402 1402 struct mt76_txwi_cache *txwi; 1403 + __le32 *txwi_data; 1404 + u32 val; 1405 + u8 wcid; 1403 1406 1404 1407 trace_mac_tx_free(dev, token); 1405 1408 ··· 1413 1410 if (!txwi) 1414 1411 return; 1415 1412 1413 + txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi); 1414 + val = le32_to_cpu(txwi_data[1]); 1415 + wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val); 1416 + 1416 1417 mt7615_txp_skb_unmap(mdev, txwi); 1417 1418 if (txwi->skb) { 1418 - mt76_tx_complete_skb(mdev, txwi->skb); 1419 + mt76_tx_complete_skb(mdev, wcid, txwi->skb); 1419 1420 txwi->skb = NULL; 1420 1421 } 1421 1422 ··· 1430 1423 { 1431 1424 struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data; 1432 1425 u8 i, count; 1426 + 1427 + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 1428 + if (is_mt7615(&dev->mt76)) { 1429 + mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 1430 + } else { 1431 + for (i = 0; i < IEEE80211_NUM_ACS; i++) 1432 + mt76_queue_tx_cleanup(dev, i, false); 1433 + } 1433 1434 1434 1435 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl)); 1435 1436 if (is_mt7615(&dev->mt76)) { ··· 1454 1439 1455 1440 dev_kfree_skb(skb); 1456 1441 1442 + if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) 1443 + return; 1444 + 1457 1445 rcu_read_lock(); 1458 1446 mt7615_mac_sta_poll(dev); 1459 1447 rcu_read_unlock(); 1460 1448 1461 - tasklet_schedule(&dev->mt76.tx_tasklet); 1449 + 
mt7615_pm_power_save_sched(dev); 1450 + mt76_worker_schedule(&dev->mt76.tx_worker); 1462 1451 } 1463 1452 1464 1453 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, ··· 1497 1478 mt76_rx(&dev->mt76, q, skb); 1498 1479 return; 1499 1480 } 1500 - /* fall through */ 1481 + fallthrough; 1501 1482 default: 1502 1483 dev_kfree_skb(skb); 1503 1484 break; ··· 1864 1845 pm.wake_work); 1865 1846 mphy = dev->phy.mt76; 1866 1847 1867 - if (mt7615_driver_own(dev)) { 1848 + if (mt7615_mcu_set_drv_ctrl(dev)) { 1868 1849 dev_err(mphy->dev->dev, "failed to wake device\n"); 1869 1850 goto out; 1870 1851 } ··· 1872 1853 spin_lock_bh(&dev->pm.txq_lock); 1873 1854 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1874 1855 struct mt7615_sta *msta = dev->pm.tx_q[i].msta; 1875 - struct mt76_wcid *wcid = msta ? &msta->wcid : NULL; 1876 1856 struct ieee80211_sta *sta = NULL; 1857 + struct mt76_wcid *wcid; 1877 1858 1878 1859 if (!dev->pm.tx_q[i].skb) 1879 1860 continue; 1880 1861 1862 + wcid = msta ? 
&msta->wcid : &dev->mt76.global_wcid; 1881 1863 if (msta && wcid->sta) 1882 1864 sta = container_of((void *)msta, struct ieee80211_sta, 1883 1865 drv_priv); ··· 1888 1868 } 1889 1869 spin_unlock_bh(&dev->pm.txq_lock); 1890 1870 1891 - tasklet_schedule(&dev->mt76.tx_tasklet); 1871 + mt76_worker_schedule(&dev->mt76.tx_worker); 1892 1872 1893 1873 out: 1894 1874 ieee80211_wake_queues(mphy->hw); ··· 1963 1943 goto out; 1964 1944 } 1965 1945 1966 - if (!mt7615_firmware_own(dev)) 1946 + if (!mt7615_mcu_set_fw_ctrl(dev)) 1967 1947 return; 1968 1948 out: 1969 1949 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); ··· 2130 2110 if (ext_phy) 2131 2111 mt76_txq_schedule_all(ext_phy); 2132 2112 2133 - tasklet_disable(&dev->mt76.tx_tasklet); 2113 + mt76_worker_disable(&dev->mt76.tx_worker); 2134 2114 napi_disable(&dev->mt76.napi[0]); 2135 2115 napi_disable(&dev->mt76.napi[1]); 2136 2116 napi_disable(&dev->mt76.tx_napi); ··· 2151 2131 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 2152 2132 clear_bit(MT76_RESET, &dev->mphy.state); 2153 2133 2154 - tasklet_enable(&dev->mt76.tx_tasklet); 2134 + mt76_worker_enable(&dev->mt76.tx_worker); 2155 2135 napi_enable(&dev->mt76.tx_napi); 2156 2136 napi_schedule(&dev->mt76.tx_napi); 2157 2137
+6 -5
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 205 205 if (vif->txq) { 206 206 mtxq = (struct mt76_txq *)vif->txq->drv_priv; 207 207 mtxq->wcid = &mvif->sta.wcid; 208 - mt76_txq_init(&dev->mt76, vif->txq); 209 208 } 210 209 211 210 ret = mt7615_mcu_add_dev_info(dev, vif, true); ··· 255 256 mt7615_mcu_add_dev_info(dev, vif, false); 256 257 257 258 rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 258 - if (vif->txq) 259 - mt76_txq_remove(&dev->mt76, vif->txq); 260 259 261 260 dev->mphy.vif_mask &= ~BIT(mvif->idx); 262 261 dev->omac_mask &= ~BIT(mvif->omac_idx); ··· 358 361 wd->key.keylen = key->keylen; 359 362 wd->key.cmd = cmd; 360 363 364 + spin_lock_bh(&dev->mt76.lock); 361 365 list_add_tail(&wd->node, &dev->wd_head); 366 + spin_unlock_bh(&dev->mt76.lock); 367 + 362 368 queue_work(dev->mt76.wq, &dev->wtbl_work); 363 369 364 370 return 0; ··· 703 703 return; 704 704 } 705 705 706 - tasklet_schedule(&dev->mt76.tx_tasklet); 706 + dev->pm.last_activity = jiffies; 707 + mt76_worker_schedule(&dev->mt76.tx_worker); 707 708 } 708 709 709 710 static void mt7615_tx(struct ieee80211_hw *hw, ··· 733 732 } 734 733 735 734 if (!test_bit(MT76_STATE_PM, &mphy->state)) { 735 + dev->pm.last_activity = jiffies; 736 736 mt76_tx(mphy, control->sta, wcid, skb); 737 737 return; 738 738 } ··· 815 813 case IEEE80211_AMPDU_TX_START: 816 814 ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); 817 815 params->ssn = ssn; 818 - mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); 819 816 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; 820 817 break; 821 818 case IEEE80211_AMPDU_TX_STOP_CONT:
+102 -88
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
··· 324 324 sizeof(req), false); 325 325 } 326 326 327 + static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) 328 + { 329 + if (!is_mt7622(&dev->mt76)) 330 + return; 331 + 332 + regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC, 333 + MT_INFRACFG_MISC_AP2CONN_WAKE, 334 + !en * MT_INFRACFG_MISC_AP2CONN_WAKE); 335 + } 336 + 337 + static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) 338 + { 339 + struct mt76_phy *mphy = &dev->mt76.phy; 340 + struct mt76_dev *mdev = &dev->mt76; 341 + u32 addr; 342 + int err; 343 + 344 + addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; 345 + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); 346 + 347 + mt7622_trigger_hif_int(dev, true); 348 + 349 + addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 350 + err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); 351 + 352 + mt7622_trigger_hif_int(dev, false); 353 + 354 + if (err) { 355 + dev_err(mdev->dev, "driver own failed\n"); 356 + return -ETIMEDOUT; 357 + } 358 + 359 + clear_bit(MT76_STATE_PM, &mphy->state); 360 + 361 + return 0; 362 + } 363 + 364 + static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev) 365 + { 366 + struct mt76_phy *mphy = &dev->mt76.phy; 367 + int i; 368 + 369 + if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 370 + goto out; 371 + 372 + for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { 373 + mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN); 374 + if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL, 375 + MT_CFG_LPCR_HOST_FW_OWN, 0, 50)) 376 + break; 377 + } 378 + 379 + if (i == MT7615_DRV_OWN_RETRY_COUNT) { 380 + dev_err(dev->mt76.dev, "driver own failed\n"); 381 + set_bit(MT76_STATE_PM, &mphy->state); 382 + return -EIO; 383 + } 384 + 385 + out: 386 + dev->pm.last_activity = jiffies; 387 + 388 + return 0; 389 + } 390 + 391 + static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev) 392 + { 393 + struct mt76_phy *mphy = &dev->mt76.phy; 394 + int err = 0; 395 + u32 addr; 396 + 397 + if 
(test_and_set_bit(MT76_STATE_PM, &mphy->state)) 398 + return 0; 399 + 400 + mt7622_trigger_hif_int(dev, true); 401 + 402 + addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 403 + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); 404 + 405 + if (is_mt7622(&dev->mt76) && 406 + !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 407 + MT_CFG_LPCR_HOST_FW_OWN, 3000)) { 408 + dev_err(dev->mt76.dev, "Timeout for firmware own\n"); 409 + clear_bit(MT76_STATE_PM, &mphy->state); 410 + err = -EIO; 411 + } 412 + 413 + mt7622_trigger_hif_int(dev, false); 414 + 415 + return err; 416 + } 417 + 327 418 static void 328 419 mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 329 420 { ··· 1197 1106 tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), 1198 1107 wtbl_tlv, sta_wtbl); 1199 1108 ht = (struct wtbl_ht *)tlv; 1200 - ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING; 1109 + ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); 1201 1110 ht->af = sta->ht_cap.ampdu_factor; 1202 1111 ht->mm = sta->ht_cap.ampdu_density; 1203 1112 ht->ht = 1; ··· 1215 1124 tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht), 1216 1125 wtbl_tlv, sta_wtbl); 1217 1126 vht = (struct wtbl_vht *)tlv; 1218 - vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC, 1127 + vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); 1219 1128 vht->vht = 1; 1220 1129 1221 1130 af = (sta->vht_cap.cap & ··· 1405 1314 .add_tx_ba = mt7615_mcu_wtbl_tx_ba, 1406 1315 .add_rx_ba = mt7615_mcu_wtbl_rx_ba, 1407 1316 .sta_add = mt7615_mcu_wtbl_sta_add, 1317 + .set_drv_ctrl = mt7615_mcu_drv_pmctrl, 1318 + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, 1408 1319 }; 1409 1320 1410 1321 static int ··· 1503 1410 .add_tx_ba = mt7615_mcu_sta_tx_ba, 1504 1411 .add_rx_ba = mt7615_mcu_sta_rx_ba, 1505 1412 .sta_add = mt7615_mcu_add_sta, 1413 + .set_drv_ctrl = mt7615_mcu_drv_pmctrl, 1414 + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, 1506 1415 }; 1507 1416 1508 1417 static int ··· 
1918 1823 .add_tx_ba = mt7615_mcu_uni_tx_ba, 1919 1824 .add_rx_ba = mt7615_mcu_uni_rx_ba, 1920 1825 .sta_add = mt7615_mcu_uni_add_sta, 1826 + .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl, 1827 + .set_fw_ctrl = mt7615_mcu_fw_pmctrl, 1921 1828 }; 1922 1829 1923 1830 static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data, ··· 1991 1894 return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ, 1992 1895 &req, sizeof(req), true); 1993 1896 } 1994 - 1995 - static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) 1996 - { 1997 - if (!is_mt7622(&dev->mt76)) 1998 - return; 1999 - 2000 - regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC, 2001 - MT_INFRACFG_MISC_AP2CONN_WAKE, 2002 - !en * MT_INFRACFG_MISC_AP2CONN_WAKE); 2003 - } 2004 - 2005 - int mt7615_driver_own(struct mt7615_dev *dev) 2006 - { 2007 - struct mt76_phy *mphy = &dev->mt76.phy; 2008 - struct mt76_dev *mdev = &dev->mt76; 2009 - int i; 2010 - 2011 - if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 2012 - goto out; 2013 - 2014 - mt7622_trigger_hif_int(dev, true); 2015 - 2016 - for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { 2017 - u32 addr; 2018 - 2019 - addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; 2020 - mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); 2021 - 2022 - addr = is_mt7663(mdev) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 2023 - if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50)) 2024 - break; 2025 - } 2026 - 2027 - mt7622_trigger_hif_int(dev, false); 2028 - 2029 - if (i == MT7615_DRV_OWN_RETRY_COUNT) { 2030 - dev_err(mdev->dev, "driver own failed\n"); 2031 - set_bit(MT76_STATE_PM, &mphy->state); 2032 - return -EIO; 2033 - } 2034 - 2035 - out: 2036 - dev->pm.last_activity = jiffies; 2037 - 2038 - return 0; 2039 - } 2040 - EXPORT_SYMBOL_GPL(mt7615_driver_own); 2041 - 2042 - int mt7615_firmware_own(struct mt7615_dev *dev) 2043 - { 2044 - struct mt76_phy *mphy = &dev->mt76.phy; 2045 - int err = 0; 2046 - u32 addr; 2047 - 2048 - if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) 2049 - return 0; 2050 - 2051 - mt7622_trigger_hif_int(dev, true); 2052 - 2053 - addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 2054 - mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); 2055 - 2056 - if (is_mt7622(&dev->mt76) && 2057 - !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 2058 - MT_CFG_LPCR_HOST_FW_OWN, 300)) { 2059 - dev_err(dev->mt76.dev, "Timeout for firmware own\n"); 2060 - clear_bit(MT76_STATE_PM, &mphy->state); 2061 - err = -EIO; 2062 - } 2063 - 2064 - mt7622_trigger_hif_int(dev, false); 2065 - 2066 - return err; 2067 - } 2068 - EXPORT_SYMBOL_GPL(mt7615_firmware_own); 2069 1897 2070 1898 static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name) 2071 1899 { ··· 2474 2452 2475 2453 dev->mt76.mcu_ops = &mt7615_mcu_ops, 2476 2454 2477 - ret = mt7615_driver_own(dev); 2455 + ret = mt7615_mcu_drv_pmctrl(dev); 2478 2456 if (ret) 2479 2457 return ret; 2480 2458 ··· 2504 2482 void mt7615_mcu_exit(struct mt7615_dev *dev) 2505 2483 { 2506 2484 __mt76_mcu_restart(&dev->mt76); 2507 - mt7615_firmware_own(dev); 2485 + mt7615_mcu_set_fw_ctrl(dev); 2508 2486 skb_queue_purge(&dev->mt76.mcu.res_q); 2509 2487 } 2510 2488 EXPORT_SYMBOL_GPL(mt7615_mcu_exit); ··· 2868 2846 .rx_streams_mask = phy->chainmask, 2869 2847 
.center_chan2 = ieee80211_frequency_to_channel(freq2), 2870 2848 }; 2871 - 2872 - #ifdef CONFIG_NL80211_TESTMODE 2873 - if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES && 2874 - dev->mt76.test.tx_antenna_mask) { 2875 - req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask); 2876 - req.rx_streams_mask = dev->mt76.test.tx_antenna_mask; 2877 - } 2878 - #endif 2879 2849 2880 2850 if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 2881 2851 req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; ··· 3293 3279 freq = freq_bw40[idx]; 3294 3280 break; 3295 3281 } 3296 - /* fall through */ 3282 + fallthrough; 3297 3283 case NL80211_CHAN_WIDTH_40: 3298 3284 idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40), 3299 3285 freq);
+12 -13
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
··· 101 101 static void mt7615_irq_tasklet(unsigned long data) 102 102 { 103 103 struct mt7615_dev *dev = (struct mt7615_dev *)data; 104 - u32 intr, mask = 0; 104 + u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev); 105 105 106 106 mt76_wr(dev, MT_INT_MASK_CSR, 0); 107 107 108 108 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); 109 + intr &= dev->mt76.mmio.irqmask; 109 110 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); 110 111 111 112 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); 112 - intr &= dev->mt76.mmio.irqmask; 113 113 114 - if (intr & MT_INT_TX_DONE_ALL) { 115 - mask |= MT_INT_TX_DONE_ALL; 114 + mask |= intr & MT_INT_RX_DONE_ALL; 115 + if (intr & tx_mcu_mask) 116 + mask |= tx_mcu_mask; 117 + mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); 118 + 119 + if (intr & tx_mcu_mask) 116 120 napi_schedule(&dev->mt76.tx_napi); 117 - } 118 121 119 - if (intr & MT_INT_RX_DONE(0)) { 120 - mask |= MT_INT_RX_DONE(0); 122 + if (intr & MT_INT_RX_DONE(0)) 121 123 napi_schedule(&dev->mt76.napi[0]); 122 - } 123 124 124 - if (intr & MT_INT_RX_DONE(1)) { 125 - mask |= MT_INT_RX_DONE(1); 125 + if (intr & MT_INT_RX_DONE(1)) 126 126 napi_schedule(&dev->mt76.napi[1]); 127 - } 128 127 129 128 if (intr & MT_INT_MCU_CMD) { 130 129 u32 val = mt76_rr(dev, MT_MCU_CMD); ··· 134 135 wake_up(&dev->reset_wait); 135 136 } 136 137 } 137 - 138 - mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); 139 138 } 140 139 141 140 static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) ··· 223 226 bus_ops->wr = mt7615_wr; 224 227 bus_ops->rmw = mt7615_rmw; 225 228 dev->mt76.bus = bus_ops; 229 + 230 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 226 231 227 232 ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler, 228 233 IRQF_SHARED, KBUILD_MODNAME, dev);
+13 -8
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
··· 220 220 #define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__) 221 221 #define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__) 222 222 #define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__) 223 + #define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev)) 224 + #define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev)) 223 225 struct mt7615_mcu_ops { 224 226 int (*add_tx_ba)(struct mt7615_dev *dev, 225 227 struct ieee80211_ampdu_params *params, ··· 240 238 struct ieee80211_hw *hw, 241 239 struct ieee80211_vif *vif, bool enable); 242 240 int (*set_pm_state)(struct mt7615_dev *dev, int band, int state); 241 + int (*set_drv_ctrl)(struct mt7615_dev *dev); 242 + int (*set_fw_ctrl)(struct mt7615_dev *dev); 243 243 }; 244 244 245 245 struct mt7615_dev { ··· 282 278 283 279 bool fw_debug; 284 280 bool flash_eeprom; 281 + bool dbdc_support; 285 282 286 283 spinlock_t token_lock; 287 284 struct idr token; ··· 540 535 return lmac_queue_map[ac]; 541 536 } 542 537 538 + static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev) 539 + { 540 + return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx); 541 + } 542 + 543 543 void mt7615_dma_reset(struct mt7615_dev *dev); 544 544 void mt7615_scan_work(struct work_struct *work); 545 545 void mt7615_roc_work(struct work_struct *work); ··· 618 608 struct ieee80211_sta *sta, 619 609 struct mt76_tx_info *tx_info); 620 610 621 - void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 622 - struct mt76_queue_entry *e); 611 + void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 623 612 624 613 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 625 614 struct sk_buff *skb); ··· 647 638 struct ieee80211_vif *vif); 648 639 int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, 649 640 struct ieee80211_channel *chan, int 
duration); 650 - int mt7615_firmware_own(struct mt7615_dev *dev); 651 - int mt7615_driver_own(struct mt7615_dev *dev); 652 641 653 642 int mt7615_init_debugfs(struct mt7615_dev *dev); 654 643 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); ··· 673 666 struct mt76_tx_info *tx_info); 674 667 bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); 675 668 void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, 676 - enum mt76_txq_id qid, 677 669 struct mt76_queue_entry *e); 678 670 void mt7663_usb_sdio_wtbl_work(struct work_struct *work); 679 671 int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); ··· 681 675 /* sdio */ 682 676 u32 mt7663s_read_pcr(struct mt7615_dev *dev); 683 677 int mt7663s_mcu_init(struct mt7615_dev *dev); 684 - int mt7663s_driver_own(struct mt7615_dev *dev); 685 - int mt7663s_firmware_own(struct mt7615_dev *dev); 686 - int mt7663s_kthread_run(void *data); 678 + void mt7663s_tx_work(struct work_struct *work); 679 + void mt7663s_rx_work(struct work_struct *work); 687 680 void mt7663s_sdio_irq(struct sdio_func *func); 688 681 689 682 #endif
+4 -3
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
··· 88 88 } 89 89 90 90 napi_disable(&mdev->tx_napi); 91 - tasklet_kill(&mdev->tx_tasklet); 91 + mt76_worker_disable(&mdev->tx_worker); 92 92 93 93 mt76_for_each_q_rx(mdev, i) { 94 94 napi_disable(&mdev->napi[i]); ··· 118 118 if (err) 119 119 goto restore; 120 120 121 - err = mt7615_firmware_own(dev); 121 + err = mt7615_mcu_set_fw_ctrl(dev); 122 122 if (err) 123 123 goto restore; 124 124 ··· 142 142 bool pdma_reset; 143 143 int i, err; 144 144 145 - err = mt7615_driver_own(dev); 145 + err = mt7615_mcu_set_drv_ctrl(dev); 146 146 if (err < 0) 147 147 return err; 148 148 ··· 162 162 if (pdma_reset) 163 163 dev_err(mdev->dev, "PDMA engine must be reinitialized\n"); 164 164 165 + mt76_worker_enable(&mdev->tx_worker); 165 166 mt76_for_each_q_rx(mdev, i) { 166 167 napi_enable(&mdev->napi[i]); 167 168 napi_schedule(&mdev->napi[i]);
+3
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
··· 25 25 mt7615_phy_init(dev); 26 26 mt7615_mcu_del_wtbl_all(dev); 27 27 mt7615_check_offload_capability(dev); 28 + 29 + if (dev->dbdc_support) 30 + mt7615_register_ext_phy(dev); 28 31 } 29 32 30 33 static int mt7615_init_hardware(struct mt7615_dev *dev)
+3 -3
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
··· 14 14 #include "../dma.h" 15 15 #include "mac.h" 16 16 17 - void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 18 - struct mt76_queue_entry *e) 17 + void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 19 18 { 20 19 if (!e->txwi) { 21 20 dev_kfree_skb_any(e->skb); ··· 44 45 } 45 46 46 47 if (e->skb) 47 - mt76_tx_complete_skb(mdev, e->skb); 48 + mt76_tx_complete_skb(mdev, e->wcid, e->skb); 48 49 } 49 50 50 51 static void ··· 106 107 /* pass partial skb header to fw */ 107 108 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp); 108 109 tx_info->buf[1].len = MT_CT_PARSE_LEN; 110 + tx_info->buf[1].skip_unmap = true; 109 111 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 110 112 111 113 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+1 -1
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
··· 575 575 #define MT_MCU_PTA_BASE 0x81060000 576 576 #define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n)) 577 577 578 - #define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8) 578 + #define MT_ANT_SWITCH_CON(_n) MT_MCU_PTA(0x0c8 + ((_n) - 1) * 4) 579 579 #define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8)) 580 580 #define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8)) 581 581
+28 -10
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
··· 323 323 { 324 324 static const struct mt76_driver_ops drv_ops = { 325 325 .txwi_size = MT_USB_TXD_SIZE, 326 - .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, 326 + .drv_flags = MT_DRV_RX_DMA_HDR, 327 327 .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, 328 328 .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, 329 329 .tx_status_data = mt7663_usb_sdio_tx_status_data, ··· 346 346 struct ieee80211_ops *ops; 347 347 struct mt7615_dev *dev; 348 348 struct mt76_dev *mdev; 349 - int ret; 349 + int i, ret; 350 350 351 351 ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops), 352 352 GFP_KERNEL); ··· 364 364 dev->ops = ops; 365 365 sdio_set_drvdata(func, dev); 366 366 367 - mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev, 368 - "mt7663s_tx"); 369 - if (IS_ERR(mdev->sdio.tx_kthread)) 370 - return PTR_ERR(mdev->sdio.tx_kthread); 371 - 372 367 ret = mt76s_init(mdev, func, &mt7663s_ops); 373 368 if (ret < 0) 374 369 goto err_free; 375 370 371 + INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work); 372 + INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work); 373 + 376 374 ret = mt7663s_hw_init(dev, func); 377 375 if (ret) 378 - goto err_free; 376 + goto err_deinit; 379 377 380 378 mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | 381 379 (mt76_rr(dev, MT_HW_REV) & 0xff); 382 380 dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 381 + 382 + mdev->sdio.intr_data = devm_kmalloc(mdev->dev, 383 + sizeof(struct mt76s_intr), 384 + GFP_KERNEL); 385 + if (!mdev->sdio.intr_data) { 386 + ret = -ENOMEM; 387 + goto err_deinit; 388 + } 389 + 390 + for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) { 391 + mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev, 392 + MT76S_XMIT_BUF_SZ, 393 + GFP_KERNEL); 394 + if (!mdev->sdio.xmit_buf[i]) { 395 + ret = -ENOMEM; 396 + goto err_deinit; 397 + } 398 + } 383 399 384 400 ret = mt76s_alloc_queues(&dev->mt76); 385 401 if (ret) ··· 442 426 return err; 443 427 } 444 428 429 + sdio_set_host_pm_flags(func, 
MMC_PM_KEEP_POWER); 430 + 445 431 mt76s_stop_txrx(&mdev->mt76); 446 432 447 - return mt7663s_firmware_own(mdev); 433 + return mt7615_mcu_set_fw_ctrl(mdev); 448 434 } 449 435 450 436 static int mt7663s_resume(struct device *dev) ··· 455 437 struct mt7615_dev *mdev = sdio_get_drvdata(func); 456 438 int err; 457 439 458 - err = mt7663s_driver_own(mdev); 440 + err = mt7615_mcu_set_drv_ctrl(mdev); 459 441 if (err) 460 442 return err; 461 443
+16 -6
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
··· 53 53 if (ret) 54 54 goto out; 55 55 56 - mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q); 56 + mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]); 57 57 if (wait_resp) 58 58 ret = mt7615_mcu_wait_response(dev, cmd, seq); 59 59 ··· 63 63 return ret; 64 64 } 65 65 66 - int mt7663s_driver_own(struct mt7615_dev *dev) 66 + static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev) 67 67 { 68 68 struct sdio_func *func = dev->mt76.sdio.func; 69 69 struct mt76_phy *mphy = &dev->mt76.phy; ··· 75 75 76 76 sdio_claim_host(func); 77 77 78 - sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0); 78 + sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL); 79 79 80 80 ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, 81 81 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); ··· 95 95 return 0; 96 96 } 97 97 98 - int mt7663s_firmware_own(struct mt7615_dev *dev) 98 + static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev) 99 99 { 100 100 struct sdio_func *func = dev->mt76.sdio.func; 101 101 struct mt76_phy *mphy = &dev->mt76.phy; ··· 107 107 108 108 sdio_claim_host(func); 109 109 110 - sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0); 110 + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL); 111 111 112 112 ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, 113 113 !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); ··· 132 132 .mcu_rr = mt7615_mcu_reg_rr, 133 133 .mcu_wr = mt7615_mcu_reg_wr, 134 134 }; 135 + struct mt7615_mcu_ops *mcu_ops; 135 136 int ret; 136 137 137 - ret = mt7663s_driver_own(dev); 138 + ret = mt7663s_mcu_drv_pmctrl(dev); 138 139 if (ret) 139 140 return ret; 140 141 ··· 152 151 ret = __mt7663_load_firmware(dev); 153 152 if (ret) 154 153 return ret; 154 + 155 + mcu_ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*mcu_ops), 156 + GFP_KERNEL); 157 + if (!mcu_ops) 158 + return -ENOMEM; 159 + 160 + mcu_ops->set_drv_ctrl = mt7663s_mcu_drv_pmctrl; 161 + mcu_ops->set_fw_ctrl = mt7663s_mcu_fw_pmctrl; 162 + dev->mcu_ops = mcu_ops; 155 163 
156 164 ret = mt7663s_mcu_init_sched(dev); 157 165 if (ret)
+184 -124
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
··· 19 19 #include "sdio.h" 20 20 #include "mac.h" 21 21 22 - static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data) 22 + static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data) 23 23 { 24 - struct mt76_sdio *sdio = &dev->mt76.sdio; 24 + u32 ple_ac_data_quota[] = { 25 + FIELD_GET(TXQ_CNT_L, data[4]), /* VO */ 26 + FIELD_GET(TXQ_CNT_H, data[3]), /* VI */ 27 + FIELD_GET(TXQ_CNT_L, data[3]), /* BE */ 28 + FIELD_GET(TXQ_CNT_H, data[2]), /* BK */ 29 + }; 30 + u32 pse_ac_data_quota[] = { 31 + FIELD_GET(TXQ_CNT_H, data[1]), /* VO */ 32 + FIELD_GET(TXQ_CNT_L, data[1]), /* VI */ 33 + FIELD_GET(TXQ_CNT_H, data[0]), /* BE */ 34 + FIELD_GET(TXQ_CNT_L, data[0]), /* BK */ 35 + }; 36 + u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]); 37 + u32 pse_data_quota = 0, ple_data_quota = 0; 38 + struct mt76_sdio *sdio = &dev->sdio; 39 + int i; 40 + 41 + for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) { 42 + pse_data_quota += pse_ac_data_quota[i]; 43 + ple_data_quota += ple_ac_data_quota[i]; 44 + } 45 + 46 + if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota) 47 + return 0; 25 48 26 49 mutex_lock(&sdio->sched.lock); 27 - sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */ 28 - FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */ 29 - FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */ 30 - FIELD_GET(TXQ_CNT_H, data[1]); /* VO */ 31 - sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */ 32 - FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */ 33 - FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */ 34 - FIELD_GET(TXQ_CNT_L, data[4]); /* VO */ 35 - sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]); 50 + sdio->sched.pse_mcu_quota += pse_mcu_quota; 51 + sdio->sched.pse_data_quota += pse_data_quota; 52 + sdio->sched.ple_data_quota += ple_data_quota; 36 53 mutex_unlock(&sdio->sched.lock); 54 + 55 + return pse_data_quota + ple_data_quota + pse_mcu_quota; 37 56 } 38 57 39 58 static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len, ··· 80 
61 return skb; 81 62 } 82 63 83 - static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid, 64 + static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, 84 65 struct mt76s_intr *intr) 85 66 { 86 - struct mt76_queue *q = &dev->mt76.q_rx[qid]; 87 - struct mt76_sdio *sdio = &dev->mt76.sdio; 67 + struct mt76_queue *q = &dev->q_rx[qid]; 68 + struct mt76_sdio *sdio = &dev->sdio; 88 69 int len = 0, err, i, order; 89 70 struct page *page; 90 71 u8 *buf; ··· 105 86 106 87 buf = page_address(page); 107 88 89 + sdio_claim_host(sdio->func); 108 90 err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); 91 + sdio_release_host(sdio->func); 92 + 109 93 if (err < 0) { 110 - dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err); 94 + dev_err(dev->dev, "sdio read data failed:%d\n", err); 111 95 __free_pages(page, order); 112 96 return err; 113 97 } 114 98 115 99 for (i = 0; i < intr->rx.num[qid]; i++) { 116 - int index = (q->tail + i) % q->ndesc; 100 + int index = (q->head + i) % q->ndesc; 117 101 struct mt76_queue_entry *e = &q->entry[index]; 118 102 119 103 len = intr->rx.len[qid][i]; ··· 131 109 __free_pages(page, order); 132 110 133 111 spin_lock_bh(&q->lock); 134 - q->tail = (q->tail + i) % q->ndesc; 112 + q->head = (q->head + i) % q->ndesc; 135 113 q->queued += i; 136 114 spin_unlock_bh(&q->lock); 115 + 116 + return i; 117 + } 118 + 119 + static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, 120 + int buf_sz, int *pse_size, int *ple_size) 121 + { 122 + int pse_sz; 123 + 124 + pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); 125 + 126 + if (qid == MT_TXQ_MCU) { 127 + if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) 128 + return -EBUSY; 129 + } else { 130 + if (sdio->sched.pse_data_quota < *pse_size + pse_sz || 131 + sdio->sched.ple_data_quota < *ple_size) 132 + return -EBUSY; 133 + 134 + *ple_size = *ple_size + 1; 135 + } 136 + *pse_size = *pse_size + pse_sz; 137 + 138 + return 0; 139 + 
} 140 + 141 + static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, 142 + int pse_size, int ple_size) 143 + { 144 + mutex_lock(&sdio->sched.lock); 145 + if (qid == MT_TXQ_MCU) { 146 + sdio->sched.pse_mcu_quota -= pse_size; 147 + } else { 148 + sdio->sched.pse_data_quota -= pse_size; 149 + sdio->sched.ple_data_quota -= ple_size; 150 + } 151 + mutex_unlock(&sdio->sched.lock); 152 + } 153 + 154 + static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) 155 + { 156 + struct mt76_sdio *sdio = &dev->sdio; 157 + int err; 158 + 159 + if (len > sdio->func->cur_blksize) 160 + len = roundup(len, sdio->func->cur_blksize); 161 + 162 + sdio_claim_host(sdio->func); 163 + err = sdio_writesb(sdio->func, MCR_WTDR1, data, len); 164 + sdio_release_host(sdio->func); 165 + 166 + if (err) 167 + dev_err(dev->dev, "sdio write failed: %d\n", err); 137 168 138 169 return err; 139 170 } 140 171 141 - static int mt7663s_tx_update_sched(struct mt7615_dev *dev, 142 - struct mt76_queue_entry *e, 143 - bool mcu) 172 + static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid) 144 173 { 145 - struct mt76_sdio *sdio = &dev->mt76.sdio; 146 - struct mt76_phy *mphy = &dev->mt76.phy; 147 - struct ieee80211_hdr *hdr; 148 - int size, ret = -EBUSY; 174 + int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; 175 + struct mt76_queue *q = dev->q_tx[qid]; 176 + struct mt76_sdio *sdio = &dev->sdio; 149 177 150 - size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); 151 - 152 - if (mcu) { 153 - if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state)) 154 - return 0; 155 - 156 - mutex_lock(&sdio->sched.lock); 157 - if (sdio->sched.pse_mcu_quota > size) { 158 - sdio->sched.pse_mcu_quota -= size; 159 - ret = 0; 160 - } 161 - mutex_unlock(&sdio->sched.lock); 162 - 163 - return ret; 164 - } 165 - 166 - hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE); 167 - if (ieee80211_is_ctl(hdr->frame_control)) 168 - return 0; 169 - 170 - 
mutex_lock(&sdio->sched.lock); 171 - if (sdio->sched.pse_data_quota > size && 172 - sdio->sched.ple_data_quota > 0) { 173 - sdio->sched.pse_data_quota -= size; 174 - sdio->sched.ple_data_quota--; 175 - ret = 0; 176 - } 177 - mutex_unlock(&sdio->sched.lock); 178 - 179 - return ret; 180 - } 181 - 182 - static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q) 183 - { 184 - bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q; 185 - struct mt76_sdio *sdio = &dev->mt76.sdio; 186 - int nframes = 0; 187 - 188 - while (q->first != q->tail) { 178 + while (q->first != q->head) { 189 179 struct mt76_queue_entry *e = &q->entry[q->first]; 190 - int err, len = e->skb->len; 180 + struct sk_buff *iter; 191 181 192 - if (mt7663s_tx_update_sched(dev, e, mcu)) 182 + if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) { 183 + __skb_put_zero(e->skb, 4); 184 + err = __mt7663s_xmit_queue(dev, e->skb->data, 185 + e->skb->len); 186 + if (err) 187 + return err; 188 + 189 + goto next; 190 + } 191 + 192 + if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ) 193 193 break; 194 194 195 - if (len > sdio->func->cur_blksize) 196 - len = roundup(len, sdio->func->cur_blksize); 195 + if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz, 196 + &ple_sz)) 197 + break; 197 198 198 - /* TODO: skb_walk_frags and then write to SDIO port */ 199 - err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len); 200 - if (err) { 201 - dev_err(dev->mt76.dev, "sdio write failed: %d\n", err); 202 - return -EIO; 203 - } 204 - 205 - e->done = true; 206 - q->first = (q->first + 1) % q->ndesc; 199 + memcpy(sdio->xmit_buf[qid] + len, e->skb->data, 200 + skb_headlen(e->skb)); 201 + len += skb_headlen(e->skb); 207 202 nframes++; 203 + 204 + skb_walk_frags(e->skb, iter) { 205 + memcpy(sdio->xmit_buf[qid] + len, iter->data, 206 + iter->len); 207 + len += iter->len; 208 + nframes++; 209 + } 210 + next: 211 + q->first = (q->first + 1) % q->ndesc; 212 + e->done = true; 208 213 } 214 + 215 + if (nframes) { 216 + 
memset(sdio->xmit_buf[qid] + len, 0, 4); 217 + err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4); 218 + if (err) 219 + return err; 220 + } 221 + mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz); 209 222 210 223 return nframes; 211 224 } 212 225 213 - static int mt7663s_tx_run_queues(struct mt7615_dev *dev) 226 + void mt7663s_tx_work(struct work_struct *work) 214 227 { 228 + struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, 229 + tx.xmit_work); 230 + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); 215 231 int i, nframes = 0; 216 232 217 233 for (i = 0; i < MT_TXQ_MCU_WA; i++) { 218 234 int ret; 219 235 220 - ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q); 236 + ret = mt7663s_tx_run_queue(dev, i); 221 237 if (ret < 0) 222 - return ret; 238 + break; 223 239 224 240 nframes += ret; 225 241 } 242 + if (nframes) 243 + queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); 226 244 227 - return nframes; 245 + queue_work(sdio->txrx_wq, &sdio->tx.status_work); 228 246 } 229 247 230 - int mt7663s_kthread_run(void *data) 248 + void mt7663s_rx_work(struct work_struct *work) 231 249 { 232 - struct mt7615_dev *dev = data; 233 - struct mt76_phy *mphy = &dev->mt76.phy; 250 + struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, 251 + rx.recv_work); 252 + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); 253 + struct mt76s_intr *intr = sdio->intr_data; 254 + int nframes = 0, ret; 234 255 235 - while (!kthread_should_stop()) { 236 - int ret; 256 + /* disable interrupt */ 257 + sdio_claim_host(sdio->func); 258 + sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL); 259 + ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr)); 260 + sdio_release_host(sdio->func); 237 261 238 - cond_resched(); 262 + if (ret < 0) 263 + goto out; 239 264 240 - sdio_claim_host(dev->mt76.sdio.func); 241 - ret = mt7663s_tx_run_queues(dev); 242 - sdio_release_host(dev->mt76.sdio.func); 265 + trace_dev_irq(dev, intr->isr, 0); 243 
266 244 - if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { 245 - set_current_state(TASK_INTERRUPTIBLE); 246 - schedule(); 247 - } else { 248 - wake_up_process(dev->mt76.sdio.kthread); 267 + if (intr->isr & WHIER_RX0_DONE_INT_EN) { 268 + ret = mt7663s_rx_run_queue(dev, 0, intr); 269 + if (ret > 0) { 270 + queue_work(sdio->txrx_wq, &sdio->rx.net_work); 271 + nframes += ret; 249 272 } 250 273 } 251 274 252 - return 0; 275 + if (intr->isr & WHIER_RX1_DONE_INT_EN) { 276 + ret = mt7663s_rx_run_queue(dev, 1, intr); 277 + if (ret > 0) { 278 + queue_work(sdio->txrx_wq, &sdio->rx.net_work); 279 + nframes += ret; 280 + } 281 + } 282 + 283 + if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr)) 284 + queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); 285 + 286 + if (nframes) { 287 + queue_work(sdio->txrx_wq, &sdio->rx.recv_work); 288 + return; 289 + } 290 + out: 291 + /* enable interrupt */ 292 + sdio_claim_host(sdio->func); 293 + sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL); 294 + sdio_release_host(sdio->func); 253 295 } 254 296 255 297 void mt7663s_sdio_irq(struct sdio_func *func) 256 298 { 257 299 struct mt7615_dev *dev = sdio_get_drvdata(func); 258 300 struct mt76_sdio *sdio = &dev->mt76.sdio; 259 - struct mt76s_intr intr; 260 301 261 - /* disable interrupt */ 262 - sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0); 302 + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) 303 + return; 263 304 264 - do { 265 - sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr)); 266 - trace_dev_irq(&dev->mt76, intr.isr, 0); 267 - 268 - if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) 269 - goto out; 270 - 271 - if (intr.isr & WHIER_RX0_DONE_INT_EN) { 272 - mt7663s_rx_run_queue(dev, 0, &intr); 273 - wake_up_process(sdio->kthread); 274 - } 275 - 276 - if (intr.isr & WHIER_RX1_DONE_INT_EN) { 277 - mt7663s_rx_run_queue(dev, 1, &intr); 278 - wake_up_process(sdio->kthread); 279 - } 280 - 281 - if (intr.isr & WHIER_TX_DONE_INT_EN) { 282 
- mt7663s_refill_sched_quota(dev, intr.tx.wtqcr); 283 - mt7663s_tx_run_queues(dev); 284 - wake_up_process(sdio->kthread); 285 - } 286 - } while (intr.isr); 287 - out: 288 - /* enable interrupt */ 289 - sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0); 305 + queue_work(sdio->txrx_wq, &sdio->rx.recv_work); 290 306 }
+5 -6
drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
··· 70 70 if (dev->mt76.test.state != MT76_TM_STATE_OFF) 71 71 tx_power = dev->mt76.test.tx_power; 72 72 73 - len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0; 73 + len = MT7615_EE_MAX - MT_EE_NIC_CONF_0; 74 74 skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len); 75 75 if (!skb) 76 76 return -ENOMEM; ··· 80 80 81 81 target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains; 82 82 for (i = 0; i < target_chains; i++) { 83 - int index; 84 - 85 83 ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i); 86 - if (ret < 0) 84 + if (ret < 0) { 85 + dev_kfree_skb(skb); 87 86 return -EINVAL; 87 + } 88 88 89 - index = ret - MT_EE_NIC_CONF_0; 90 89 if (tx_power && tx_power[i]) 91 90 data[ret - MT_EE_NIC_CONF_0] = tx_power[i]; 92 91 } ··· 190 191 for (i = 0; i < 4; i++) { 191 192 mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i), 192 193 MT_WF_PHY_RFINTF3_0_ANT, 193 - td->tx_antenna_mask & BIT(i) ? 0 : 0xa); 194 + (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa); 194 195 195 196 } 196 197
-2
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
··· 180 180 } 181 181 182 182 mt76u_stop_rx(&dev->mt76); 183 - 184 183 mt76u_stop_tx(&dev->mt76); 185 - tasklet_kill(&dev->mt76.tx_tasklet); 186 184 187 185 return 0; 188 186 }
+5 -3
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
··· 18 18 int cmd, bool wait_resp) 19 19 { 20 20 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 21 - int ret, seq, ep; 21 + int ret, seq, ep, len, pad; 22 22 23 23 mutex_lock(&mdev->mcu.mutex); 24 24 ··· 28 28 else 29 29 ep = MT_EP_OUT_AC_BE; 30 30 31 - put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); 32 - ret = mt76_skb_adjust_pad(skb); 31 + len = skb->len; 32 + put_unaligned_le32(len, skb_push(skb, sizeof(len))); 33 + pad = round_up(skb->len, 4) + 4 - skb->len; 34 + ret = mt76_skb_adjust_pad(skb, pad); 33 35 if (ret < 0) 34 36 goto out; 35 37
+18 -11
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
··· 226 226 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data); 227 227 228 228 void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, 229 - enum mt76_txq_id qid, 230 229 struct mt76_queue_entry *e) 231 230 { 232 231 unsigned int headroom = MT_USB_TXD_SIZE; ··· 234 235 headroom += MT_USB_HDR_SIZE; 235 236 skb_pull(e->skb, headroom); 236 237 237 - mt76_tx_complete_skb(mdev, e->skb); 238 + mt76_tx_complete_skb(mdev, e->wcid, e->skb); 238 239 } 239 240 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb); 240 241 ··· 247 248 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 248 249 struct sk_buff *skb = tx_info->skb; 249 250 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 251 + int pad; 250 252 251 253 if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && 252 254 !msta->rate_probe) { ··· 259 259 } 260 260 261 261 mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); 262 - if (mt76_is_usb(mdev)) 263 - put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); 262 + if (mt76_is_usb(mdev)) { 263 + u32 len = skb->len; 264 264 265 - return mt76_skb_adjust_pad(skb); 265 + put_unaligned_le32(len, skb_push(skb, sizeof(len))); 266 + pad = round_up(skb->len, 4) + 4 - skb->len; 267 + } else { 268 + pad = round_up(skb->len, 4) - skb->len; 269 + } 270 + 271 + return mt76_skb_adjust_pad(skb, pad); 266 272 } 267 273 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); 268 274 ··· 365 359 if (err) 366 360 return err; 367 361 368 - /* check hw sg support in order to enable AMSDU */ 369 - if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76)) 370 - hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; 371 - else 372 - hw->max_tx_fragments = 1; 373 362 hw->extra_tx_headroom += MT_USB_TXD_SIZE; 374 - if (mt76_is_usb(&dev->mt76)) 363 + if (mt76_is_usb(&dev->mt76)) { 375 364 hw->extra_tx_headroom += MT_USB_HDR_SIZE; 365 + /* check hw sg support in order to enable AMSDU */ 366 + if (dev->mt76.usb.sg_en) 367 + hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; 368 + else 
369 + hw->max_tx_fragments = 1; 370 + } 376 371 377 372 err = mt76_register_device(&dev->mt76, true, mt7615_rates, 378 373 ARRAY_SIZE(mt7615_rates));
+1
drivers/net/wireless/mediatek/mt76/mt76x0/init.c
··· 10 10 #include "eeprom.h" 11 11 #include "mcu.h" 12 12 #include "initvals.h" 13 + #include "initvals_init.h" 13 14 #include "../mt76x02_phy.h" 14 15 15 16 static void
-145
drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
··· 11 11 12 12 #include "phy.h" 13 13 14 - static const struct mt76_reg_pair common_mac_reg_table[] = { 15 - { MT_BCN_OFFSET(0), 0xf8f0e8e0 }, 16 - { MT_BCN_OFFSET(1), 0x6f77d0c8 }, 17 - { MT_LEGACY_BASIC_RATE, 0x0000013f }, 18 - { MT_HT_BASIC_RATE, 0x00008003 }, 19 - { MT_MAC_SYS_CTRL, 0x00000000 }, 20 - { MT_RX_FILTR_CFG, 0x00017f97 }, 21 - { MT_BKOFF_SLOT_CFG, 0x00000209 }, 22 - { MT_TX_SW_CFG0, 0x00000000 }, 23 - { MT_TX_SW_CFG1, 0x00080606 }, 24 - { MT_TX_LINK_CFG, 0x00001020 }, 25 - { MT_TX_TIMEOUT_CFG, 0x000a2090 }, 26 - { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 }, 27 - { MT_LED_CFG, 0x7f031e46 }, 28 - { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f }, 29 - { MT_PBF_RX_MAX_PCNT, 0x0000fe9f }, 30 - { MT_TX_RETRY_CFG, 0x47d01f0f }, 31 - { MT_AUTO_RSP_CFG, 0x00000013 }, 32 - { MT_CCK_PROT_CFG, 0x07f40003 }, 33 - { MT_OFDM_PROT_CFG, 0x07f42004 }, 34 - { MT_PBF_CFG, 0x00f40006 }, 35 - { MT_WPDMA_GLO_CFG, 0x00000030 }, 36 - { MT_GF20_PROT_CFG, 0x01742004 }, 37 - { MT_GF40_PROT_CFG, 0x03f42084 }, 38 - { MT_MM20_PROT_CFG, 0x01742004 }, 39 - { MT_MM40_PROT_CFG, 0x03f42084 }, 40 - { MT_TXOP_CTRL_CFG, 0x0000583f }, 41 - { MT_TX_RTS_CFG, 0x00ffff20 }, 42 - { MT_EXP_ACK_TIME, 0x002400ca }, 43 - { MT_TXOP_HLDR_ET, 0x00000002 }, 44 - { MT_XIFS_TIME_CFG, 0x33a41010 }, 45 - { MT_PWR_PIN_CFG, 0x00000000 }, 46 - }; 47 - 48 - static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { 49 - { MT_IOCFG_6, 0xa0040080 }, 50 - { MT_PBF_SYS_CTRL, 0x00080c00 }, 51 - { MT_PBF_CFG, 0x77723c1f }, 52 - { MT_FCE_PSE_CTRL, 0x00000001 }, 53 - { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 }, 54 - { MT_TX_SW_CFG0, 0x00000601 }, 55 - { MT_TX_SW_CFG1, 0x00040000 }, 56 - { MT_TX_SW_CFG2, 0x00000000 }, 57 - { 0xa44, 0x00000000 }, 58 - { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, 59 - { MT_TSO_CTRL, 0x00000000 }, 60 - { MT_BB_PA_MODE_CFG1, 0x00500055 }, 61 - { MT_RF_PA_MODE_CFG1, 0x00500055 }, 62 - { MT_TX_ALC_CFG_0, 0x2F2F000C }, 63 - { MT_TX0_BB_GAIN_ATTEN, 0x00000000 }, 64 - { MT_TX_PWR_CFG_0, 0x3A3A3A3A }, 65 - { 
MT_TX_PWR_CFG_1, 0x3A3A3A3A }, 66 - { MT_TX_PWR_CFG_2, 0x3A3A3A3A }, 67 - { MT_TX_PWR_CFG_3, 0x3A3A3A3A }, 68 - { MT_TX_PWR_CFG_4, 0x3A3A3A3A }, 69 - { MT_TX_PWR_CFG_7, 0x3A3A3A3A }, 70 - { MT_TX_PWR_CFG_8, 0x0000003A }, 71 - { MT_TX_PWR_CFG_9, 0x0000003A }, 72 - { 0x150C, 0x00000002 }, 73 - { 0x1238, 0x001700C8 }, 74 - { MT_LDO_CTRL_0, 0x00A647B6 }, 75 - { MT_LDO_CTRL_1, 0x6B006464 }, 76 - { MT_HT_BASIC_RATE, 0x00004003 }, 77 - { MT_HT_CTRL_CFG, 0x000001FF }, 78 - { MT_TXOP_HLDR_ET, 0x00000000 }, 79 - { MT_PN_PAD_MODE, 0x00000003 }, 80 - { MT_TX_PROT_CFG6, 0xe3f42004 }, 81 - { MT_TX_PROT_CFG7, 0xe3f42084 }, 82 - { MT_TX_PROT_CFG8, 0xe3f42104 }, 83 - { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, 84 - }; 85 - 86 - static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { 87 - { MT_BBP(CORE, 1), 0x00000002 }, 88 - { MT_BBP(CORE, 4), 0x00000000 }, 89 - { MT_BBP(CORE, 24), 0x00000000 }, 90 - { MT_BBP(CORE, 32), 0x4003000a }, 91 - { MT_BBP(CORE, 42), 0x00000000 }, 92 - { MT_BBP(CORE, 44), 0x00000000 }, 93 - { MT_BBP(IBI, 11), 0x0FDE8081 }, 94 - { MT_BBP(AGC, 0), 0x00021400 }, 95 - { MT_BBP(AGC, 1), 0x00000003 }, 96 - { MT_BBP(AGC, 2), 0x003A6464 }, 97 - { MT_BBP(AGC, 15), 0x88A28CB8 }, 98 - { MT_BBP(AGC, 22), 0x00001E21 }, 99 - { MT_BBP(AGC, 23), 0x0000272C }, 100 - { MT_BBP(AGC, 24), 0x00002F3A }, 101 - { MT_BBP(AGC, 25), 0x8000005A }, 102 - { MT_BBP(AGC, 26), 0x007C2005 }, 103 - { MT_BBP(AGC, 33), 0x00003238 }, 104 - { MT_BBP(AGC, 34), 0x000A0C0C }, 105 - { MT_BBP(AGC, 37), 0x2121262C }, 106 - { MT_BBP(AGC, 41), 0x38383E45 }, 107 - { MT_BBP(AGC, 57), 0x00001010 }, 108 - { MT_BBP(AGC, 59), 0xBAA20E96 }, 109 - { MT_BBP(AGC, 63), 0x00000001 }, 110 - { MT_BBP(TXC, 0), 0x00280403 }, 111 - { MT_BBP(TXC, 1), 0x00000000 }, 112 - { MT_BBP(RXC, 1), 0x00000012 }, 113 - { MT_BBP(RXC, 2), 0x00000011 }, 114 - { MT_BBP(RXC, 3), 0x00000005 }, 115 - { MT_BBP(RXC, 4), 0x00000000 }, 116 - { MT_BBP(RXC, 5), 0xF977C4EC }, 117 - { MT_BBP(RXC, 7), 0x00000090 }, 118 - { MT_BBP(TXO, 8), 0x00000000 }, 
119 - { MT_BBP(TXBE, 0), 0x00000000 }, 120 - { MT_BBP(TXBE, 4), 0x00000004 }, 121 - { MT_BBP(TXBE, 6), 0x00000000 }, 122 - { MT_BBP(TXBE, 8), 0x00000014 }, 123 - { MT_BBP(TXBE, 9), 0x20000000 }, 124 - { MT_BBP(TXBE, 10), 0x00000000 }, 125 - { MT_BBP(TXBE, 12), 0x00000000 }, 126 - { MT_BBP(TXBE, 13), 0x00000000 }, 127 - { MT_BBP(TXBE, 14), 0x00000000 }, 128 - { MT_BBP(TXBE, 15), 0x00000000 }, 129 - { MT_BBP(TXBE, 16), 0x00000000 }, 130 - { MT_BBP(TXBE, 17), 0x00000000 }, 131 - { MT_BBP(RXFE, 1), 0x00008800 }, 132 - { MT_BBP(RXFE, 3), 0x00000000 }, 133 - { MT_BBP(RXFE, 4), 0x00000000 }, 134 - { MT_BBP(RXO, 13), 0x00000192 }, 135 - { MT_BBP(RXO, 14), 0x00060612 }, 136 - { MT_BBP(RXO, 15), 0xC8321B18 }, 137 - { MT_BBP(RXO, 16), 0x0000001E }, 138 - { MT_BBP(RXO, 17), 0x00000000 }, 139 - { MT_BBP(RXO, 18), 0xCC00A993 }, 140 - { MT_BBP(RXO, 19), 0xB9CB9CB9 }, 141 - { MT_BBP(RXO, 20), 0x26c00057 }, 142 - { MT_BBP(RXO, 21), 0x00000001 }, 143 - { MT_BBP(RXO, 24), 0x00000006 }, 144 - { MT_BBP(RXO, 28), 0x0000003F }, 145 - }; 146 - 147 14 static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = { 148 15 { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } }, 149 16 { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } }, ··· 80 213 81 214 { RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(RXFE, 0), 0x3D5000E0 } }, 82 215 { RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } }, 83 - }; 84 - 85 - static const struct mt76_reg_pair mt76x0_dcoc_tab[] = { 86 - { MT_BBP(CAL, 47), 0x000010F0 }, 87 - { MT_BBP(CAL, 48), 0x00008080 }, 88 - { MT_BBP(CAL, 49), 0x00000F07 }, 89 - { MT_BBP(CAL, 50), 0x00000040 }, 90 - { MT_BBP(CAL, 51), 0x00000404 }, 91 - { MT_BBP(CAL, 52), 0x00080803 }, 92 - { MT_BBP(CAL, 53), 0x00000704 }, 93 - { MT_BBP(CAL, 54), 0x00002828 }, 94 - { MT_BBP(CAL, 55), 0x00005050 }, 95 216 }; 96 217 97 218 #endif
+159
drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * (c) Copyright 2002-2010, Ralink Technology, Inc. 4 + * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> 5 + * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> 6 + * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> 7 + */ 8 + 9 + #ifndef __MT76X0U_INITVALS_INIT_H 10 + #define __MT76X0U_INITVALS_INIT_H 11 + 12 + #include "phy.h" 13 + 14 + static const struct mt76_reg_pair common_mac_reg_table[] = { 15 + { MT_BCN_OFFSET(0), 0xf8f0e8e0 }, 16 + { MT_BCN_OFFSET(1), 0x6f77d0c8 }, 17 + { MT_LEGACY_BASIC_RATE, 0x0000013f }, 18 + { MT_HT_BASIC_RATE, 0x00008003 }, 19 + { MT_MAC_SYS_CTRL, 0x00000000 }, 20 + { MT_RX_FILTR_CFG, 0x00017f97 }, 21 + { MT_BKOFF_SLOT_CFG, 0x00000209 }, 22 + { MT_TX_SW_CFG0, 0x00000000 }, 23 + { MT_TX_SW_CFG1, 0x00080606 }, 24 + { MT_TX_LINK_CFG, 0x00001020 }, 25 + { MT_TX_TIMEOUT_CFG, 0x000a2090 }, 26 + { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 }, 27 + { MT_LED_CFG, 0x7f031e46 }, 28 + { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f }, 29 + { MT_PBF_RX_MAX_PCNT, 0x0000fe9f }, 30 + { MT_TX_RETRY_CFG, 0x47d01f0f }, 31 + { MT_AUTO_RSP_CFG, 0x00000013 }, 32 + { MT_CCK_PROT_CFG, 0x07f40003 }, 33 + { MT_OFDM_PROT_CFG, 0x07f42004 }, 34 + { MT_PBF_CFG, 0x00f40006 }, 35 + { MT_WPDMA_GLO_CFG, 0x00000030 }, 36 + { MT_GF20_PROT_CFG, 0x01742004 }, 37 + { MT_GF40_PROT_CFG, 0x03f42084 }, 38 + { MT_MM20_PROT_CFG, 0x01742004 }, 39 + { MT_MM40_PROT_CFG, 0x03f42084 }, 40 + { MT_TXOP_CTRL_CFG, 0x0000583f }, 41 + { MT_TX_RTS_CFG, 0x00ffff20 }, 42 + { MT_EXP_ACK_TIME, 0x002400ca }, 43 + { MT_TXOP_HLDR_ET, 0x00000002 }, 44 + { MT_XIFS_TIME_CFG, 0x33a41010 }, 45 + { MT_PWR_PIN_CFG, 0x00000000 }, 46 + }; 47 + 48 + static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { 49 + { MT_IOCFG_6, 0xa0040080 }, 50 + { MT_PBF_SYS_CTRL, 0x00080c00 }, 51 + { MT_PBF_CFG, 0x77723c1f }, 52 + { MT_FCE_PSE_CTRL, 0x00000001 }, 53 + { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 }, 54 + { MT_TX_SW_CFG0, 0x00000601 }, 55 + { 
MT_TX_SW_CFG1, 0x00040000 }, 56 + { MT_TX_SW_CFG2, 0x00000000 }, 57 + { 0xa44, 0x00000000 }, 58 + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, 59 + { MT_TSO_CTRL, 0x00000000 }, 60 + { MT_BB_PA_MODE_CFG1, 0x00500055 }, 61 + { MT_RF_PA_MODE_CFG1, 0x00500055 }, 62 + { MT_TX_ALC_CFG_0, 0x2F2F000C }, 63 + { MT_TX0_BB_GAIN_ATTEN, 0x00000000 }, 64 + { MT_TX_PWR_CFG_0, 0x3A3A3A3A }, 65 + { MT_TX_PWR_CFG_1, 0x3A3A3A3A }, 66 + { MT_TX_PWR_CFG_2, 0x3A3A3A3A }, 67 + { MT_TX_PWR_CFG_3, 0x3A3A3A3A }, 68 + { MT_TX_PWR_CFG_4, 0x3A3A3A3A }, 69 + { MT_TX_PWR_CFG_7, 0x3A3A3A3A }, 70 + { MT_TX_PWR_CFG_8, 0x0000003A }, 71 + { MT_TX_PWR_CFG_9, 0x0000003A }, 72 + { 0x150C, 0x00000002 }, 73 + { 0x1238, 0x001700C8 }, 74 + { MT_LDO_CTRL_0, 0x00A647B6 }, 75 + { MT_LDO_CTRL_1, 0x6B006464 }, 76 + { MT_HT_BASIC_RATE, 0x00004003 }, 77 + { MT_HT_CTRL_CFG, 0x000001FF }, 78 + { MT_TXOP_HLDR_ET, 0x00000000 }, 79 + { MT_PN_PAD_MODE, 0x00000003 }, 80 + { MT_TX_PROT_CFG6, 0xe3f42004 }, 81 + { MT_TX_PROT_CFG7, 0xe3f42084 }, 82 + { MT_TX_PROT_CFG8, 0xe3f42104 }, 83 + { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, 84 + }; 85 + 86 + static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { 87 + { MT_BBP(CORE, 1), 0x00000002 }, 88 + { MT_BBP(CORE, 4), 0x00000000 }, 89 + { MT_BBP(CORE, 24), 0x00000000 }, 90 + { MT_BBP(CORE, 32), 0x4003000a }, 91 + { MT_BBP(CORE, 42), 0x00000000 }, 92 + { MT_BBP(CORE, 44), 0x00000000 }, 93 + { MT_BBP(IBI, 11), 0x0FDE8081 }, 94 + { MT_BBP(AGC, 0), 0x00021400 }, 95 + { MT_BBP(AGC, 1), 0x00000003 }, 96 + { MT_BBP(AGC, 2), 0x003A6464 }, 97 + { MT_BBP(AGC, 15), 0x88A28CB8 }, 98 + { MT_BBP(AGC, 22), 0x00001E21 }, 99 + { MT_BBP(AGC, 23), 0x0000272C }, 100 + { MT_BBP(AGC, 24), 0x00002F3A }, 101 + { MT_BBP(AGC, 25), 0x8000005A }, 102 + { MT_BBP(AGC, 26), 0x007C2005 }, 103 + { MT_BBP(AGC, 33), 0x00003238 }, 104 + { MT_BBP(AGC, 34), 0x000A0C0C }, 105 + { MT_BBP(AGC, 37), 0x2121262C }, 106 + { MT_BBP(AGC, 41), 0x38383E45 }, 107 + { MT_BBP(AGC, 57), 0x00001010 }, 108 + { MT_BBP(AGC, 59), 0xBAA20E96 
}, 109 + { MT_BBP(AGC, 63), 0x00000001 }, 110 + { MT_BBP(TXC, 0), 0x00280403 }, 111 + { MT_BBP(TXC, 1), 0x00000000 }, 112 + { MT_BBP(RXC, 1), 0x00000012 }, 113 + { MT_BBP(RXC, 2), 0x00000011 }, 114 + { MT_BBP(RXC, 3), 0x00000005 }, 115 + { MT_BBP(RXC, 4), 0x00000000 }, 116 + { MT_BBP(RXC, 5), 0xF977C4EC }, 117 + { MT_BBP(RXC, 7), 0x00000090 }, 118 + { MT_BBP(TXO, 8), 0x00000000 }, 119 + { MT_BBP(TXBE, 0), 0x00000000 }, 120 + { MT_BBP(TXBE, 4), 0x00000004 }, 121 + { MT_BBP(TXBE, 6), 0x00000000 }, 122 + { MT_BBP(TXBE, 8), 0x00000014 }, 123 + { MT_BBP(TXBE, 9), 0x20000000 }, 124 + { MT_BBP(TXBE, 10), 0x00000000 }, 125 + { MT_BBP(TXBE, 12), 0x00000000 }, 126 + { MT_BBP(TXBE, 13), 0x00000000 }, 127 + { MT_BBP(TXBE, 14), 0x00000000 }, 128 + { MT_BBP(TXBE, 15), 0x00000000 }, 129 + { MT_BBP(TXBE, 16), 0x00000000 }, 130 + { MT_BBP(TXBE, 17), 0x00000000 }, 131 + { MT_BBP(RXFE, 1), 0x00008800 }, 132 + { MT_BBP(RXFE, 3), 0x00000000 }, 133 + { MT_BBP(RXFE, 4), 0x00000000 }, 134 + { MT_BBP(RXO, 13), 0x00000192 }, 135 + { MT_BBP(RXO, 14), 0x00060612 }, 136 + { MT_BBP(RXO, 15), 0xC8321B18 }, 137 + { MT_BBP(RXO, 16), 0x0000001E }, 138 + { MT_BBP(RXO, 17), 0x00000000 }, 139 + { MT_BBP(RXO, 18), 0xCC00A993 }, 140 + { MT_BBP(RXO, 19), 0xB9CB9CB9 }, 141 + { MT_BBP(RXO, 20), 0x26c00057 }, 142 + { MT_BBP(RXO, 21), 0x00000001 }, 143 + { MT_BBP(RXO, 24), 0x00000006 }, 144 + { MT_BBP(RXO, 28), 0x0000003F }, 145 + }; 146 + 147 + static const struct mt76_reg_pair mt76x0_dcoc_tab[] = { 148 + { MT_BBP(CAL, 47), 0x000010F0 }, 149 + { MT_BBP(CAL, 48), 0x00008080 }, 150 + { MT_BBP(CAL, 49), 0x00000F07 }, 151 + { MT_BBP(CAL, 50), 0x00000040 }, 152 + { MT_BBP(CAL, 51), 0x00000404 }, 153 + { MT_BBP(CAL, 52), 0x00080803 }, 154 + { MT_BBP(CAL, 53), 0x00000704 }, 155 + { MT_BBP(CAL, 54), 0x00002828 }, 156 + { MT_BBP(CAL, 55), 0x00005050 }, 157 + }; 158 + 159 + #endif
+3 -1
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
··· 180 180 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); 181 181 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); 182 182 183 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 184 + 183 185 ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler, 184 186 IRQF_SHARED, KBUILD_MODNAME, dev); 185 187 if (ret) ··· 204 202 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); 205 203 mt76x0_chip_onoff(dev, false, false); 206 204 mt76x0e_stop_hw(dev); 207 - mt76x02_dma_cleanup(dev); 205 + mt76_dma_cleanup(&dev->mt76); 208 206 mt76x02_mcu_cleanup(dev); 209 207 } 210 208
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
··· 734 734 case 1: 735 735 if (chan->band == NL80211_BAND_2GHZ) 736 736 tssi_target += 29491; /* 3.6 * 8192 */ 737 - /* fall through */ 737 + fallthrough; 738 738 case 0: 739 739 break; 740 740 default:
+2
drivers/net/wireless/mediatek/mt76/mt76x02.h
··· 15 15 #include "mt76x02_dfs.h" 16 16 #include "mt76x02_dma.h" 17 17 18 + #define MT76x02_TX_RING_SIZE 512 19 + #define MT76x02_PSD_RING_SIZE 128 18 20 #define MT76x02_N_WCIDS 128 19 21 #define MT_CALIBRATE_INTERVAL HZ 20 22 #define MT_MAC_WORK_INTERVAL (HZ / 10)
+6 -28
drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
··· 7 7 #include "mt76x02.h" 8 8 9 9 static int 10 - mt76x02_ampdu_stat_read(struct seq_file *file, void *data) 10 + mt76x02_ampdu_stat_show(struct seq_file *file, void *data) 11 11 { 12 12 struct mt76x02_dev *dev = file->private; 13 13 int i, j; ··· 31 31 return 0; 32 32 } 33 33 34 - static int 35 - mt76x02_ampdu_stat_open(struct inode *inode, struct file *f) 36 - { 37 - return single_open(f, mt76x02_ampdu_stat_read, inode->i_private); 38 - } 34 + DEFINE_SHOW_ATTRIBUTE(mt76x02_ampdu_stat); 39 35 40 36 static int read_txpower(struct seq_file *file, void *data) 41 37 { ··· 44 48 return 0; 45 49 } 46 50 47 - static const struct file_operations fops_ampdu_stat = { 48 - .open = mt76x02_ampdu_stat_open, 49 - .read = seq_read, 50 - .llseek = seq_lseek, 51 - .release = single_release, 52 - }; 53 - 54 51 static int 55 - mt76x02_dfs_stat_read(struct seq_file *file, void *data) 52 + mt76x02_dfs_stat_show(struct seq_file *file, void *data) 56 53 { 57 54 struct mt76x02_dev *dev = file->private; 58 55 struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; ··· 70 81 return 0; 71 82 } 72 83 73 - static int 74 - mt76x02_dfs_stat_open(struct inode *inode, struct file *f) 75 - { 76 - return single_open(f, mt76x02_dfs_stat_read, inode->i_private); 77 - } 78 - 79 - static const struct file_operations fops_dfs_stat = { 80 - .open = mt76x02_dfs_stat_open, 81 - .read = seq_read, 82 - .llseek = seq_lseek, 83 - .release = single_release, 84 - }; 84 + DEFINE_SHOW_ATTRIBUTE(mt76x02_dfs_stat); 85 85 86 86 static int read_agc(struct seq_file *file, void *data) 87 87 { ··· 128 150 debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); 129 151 130 152 debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca); 131 - debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 132 - debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); 153 + debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt76x02_ampdu_stat_fops); 154 + debugfs_create_file("dfs_stats", 0400, dir, dev, 
&mt76x02_dfs_stat_fops); 133 155 debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, 134 156 read_txpower); 135 157
+3 -3
drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
··· 429 429 { 430 430 struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; 431 431 struct mt76x02_dfs_sw_detector_params *sw_params; 432 - u32 width_delta, with_sum, factor, cur_pri; 432 + u32 width_delta, with_sum; 433 433 struct mt76x02_dfs_sequence seq, *seq_p; 434 434 struct mt76x02_dfs_event_rb *event_rb; 435 435 struct mt76x02_dfs_event *cur_event; 436 - int i, j, end, pri; 436 + int i, j, end, pri, factor, cur_pri; 437 437 438 438 event_rb = event->engine == 2 ? &dfs_pd->event_rb[1] 439 439 : &dfs_pd->event_rb[0]; ··· 517 517 struct mt76x02_dfs_sw_detector_params *sw_params; 518 518 struct mt76x02_dfs_sequence *seq, *tmp_seq; 519 519 u16 max_seq_len = 0; 520 - u32 factor, pri; 520 + int factor, pri; 521 521 522 522 sw_params = &dfs_pd->sw_dpd_params; 523 523 list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
-1
drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
··· 61 61 62 62 int mt76x02_dma_init(struct mt76x02_dev *dev); 63 63 void mt76x02_dma_disable(struct mt76x02_dev *dev); 64 - void mt76x02_dma_cleanup(struct mt76x02_dev *dev); 65 64 66 65 #endif /* __MT76x02_DMA_H */
+7 -6
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
··· 300 300 return 0; 301 301 case MT_PHY_TYPE_HT_GF: 302 302 txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; 303 - /* fall through */ 303 + fallthrough; 304 304 case MT_PHY_TYPE_HT: 305 305 txrate->flags |= IEEE80211_TX_RC_MCS; 306 306 txrate->idx = idx; ··· 348 348 u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf; 349 349 350 350 memset(txwi, 0, sizeof(*txwi)); 351 + 352 + mt76_tx_check_agg_ssn(sta, skb); 351 353 352 354 if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff && 353 355 ieee80211_has_protected(hdr->frame_control)) { ··· 464 462 rates[1].idx = 0; 465 463 break; 466 464 } 467 - /* fall through */ 465 + fallthrough; 468 466 default: 469 467 rates[1].idx = max_t(int, rates[0].idx - 1, 0); 470 468 break; ··· 679 677 return 0; 680 678 case MT_PHY_TYPE_HT_GF: 681 679 status->enc_flags |= RX_ENC_FLAG_HT_GF; 682 - /* fall through */ 680 + fallthrough; 683 681 case MT_PHY_TYPE_HT: 684 682 status->encoding = RX_ENC_HT; 685 683 status->rate_idx = idx; ··· 900 898 } 901 899 } 902 900 903 - void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 904 - struct mt76_queue_entry *e) 901 + void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 905 902 { 906 903 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 907 904 struct mt76x02_txwi *txwi; ··· 917 916 txwi = (struct mt76x02_txwi *)txwi_ptr; 918 917 trace_mac_txdone(mdev, txwi->wcid, txwi->pktid); 919 918 920 - mt76_tx_complete_skb(mdev, e->skb); 919 + mt76_tx_complete_skb(mdev, e->wcid, e->skb); 921 920 } 922 921 EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); 923 922
+1 -2
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
··· 194 194 struct sk_buff *skb, struct mt76_wcid *wcid, 195 195 struct ieee80211_sta *sta, int len); 196 196 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); 197 - void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 198 - struct mt76_queue_entry *e); 197 + void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 199 198 void mt76x02_update_channel(struct mt76_dev *mdev); 200 199 void mt76x02_mac_work(struct work_struct *work); 201 200
+29 -41
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 14 14 static void mt76x02_pre_tbtt_tasklet(unsigned long arg) 15 15 { 16 16 struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; 17 - struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q; 17 + struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD]; 18 18 struct beacon_bc_data data = {}; 19 19 struct sk_buff *skb; 20 20 int i; ··· 104 104 EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config); 105 105 106 106 static int 107 - mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q, 108 - int idx, int n_desc) 107 + mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc) 109 108 { 110 109 struct mt76_queue *hwq; 111 110 int err; ··· 117 118 if (err < 0) 118 119 return err; 119 120 120 - INIT_LIST_HEAD(&q->swq); 121 - q->q = hwq; 121 + dev->mt76.q_tx[qid] = hwq; 122 122 123 123 mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx)); 124 124 ··· 149 151 mt76x02_send_tx_status(dev, &stat, &update); 150 152 } 151 153 152 - static void mt76x02_tx_tasklet(unsigned long data) 154 + static void mt76x02_tx_worker(struct mt76_worker *w) 153 155 { 154 - struct mt76x02_dev *dev = (struct mt76x02_dev *)data; 156 + struct mt76x02_dev *dev; 157 + 158 + dev = container_of(w, struct mt76x02_dev, mt76.tx_worker); 155 159 156 160 mt76x02_mac_poll_tx_status(dev, false); 157 161 mt76x02_process_tx_status_fifo(dev); ··· 178 178 for (i = MT_TXQ_MCU; i >= 0; i--) 179 179 mt76_queue_tx_cleanup(dev, i, false); 180 180 181 - tasklet_schedule(&dev->mt76.tx_tasklet); 181 + mt76_worker_schedule(&dev->mt76.tx_worker); 182 182 183 183 return 0; 184 184 } ··· 197 197 if (!status_fifo) 198 198 return -ENOMEM; 199 199 200 - tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet, 201 - (unsigned long)dev); 200 + dev->mt76.tx_worker.fn = mt76x02_tx_worker; 202 201 tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet, 203 202 (unsigned long)dev); 204 203 ··· 209 210 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); 210 211 211 212 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 212 - ret = 
mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i], 213 - mt76_ac_to_hwq(i), 214 - MT_TX_RING_SIZE); 213 + ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i), 214 + MT76x02_TX_RING_SIZE); 215 215 if (ret) 216 216 return ret; 217 217 } 218 218 219 - ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], 220 - MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); 219 + ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD, 220 + MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE); 221 221 if (ret) 222 222 return ret; 223 223 224 - ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], 224 + ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU, 225 225 MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); 226 226 if (ret) 227 227 return ret; ··· 261 263 irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) 262 264 { 263 265 struct mt76x02_dev *dev = dev_instance; 264 - u32 intr; 266 + u32 intr, mask; 265 267 266 268 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); 269 + intr &= dev->mt76.mmio.irqmask; 267 270 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); 268 271 269 272 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) ··· 272 273 273 274 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); 274 275 275 - intr &= dev->mt76.mmio.irqmask; 276 + mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER); 277 + if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT)) 278 + mask |= MT_INT_TX_DONE_ALL; 276 279 277 - if (intr & MT_INT_RX_DONE(0)) { 278 - mt76x02_irq_disable(dev, MT_INT_RX_DONE(0)); 280 + mt76x02_irq_disable(dev, mask); 281 + 282 + if (intr & MT_INT_RX_DONE(0)) 279 283 napi_schedule(&dev->mt76.napi[0]); 280 - } 281 284 282 - if (intr & MT_INT_RX_DONE(1)) { 283 - mt76x02_irq_disable(dev, MT_INT_RX_DONE(1)); 285 + if (intr & MT_INT_RX_DONE(1)) 284 286 napi_schedule(&dev->mt76.napi[1]); 285 - } 286 287 287 288 if (intr & MT_INT_PRE_TBTT) 288 289 tasklet_schedule(&dev->mt76.pre_tbtt_tasklet); ··· 292 293 if (dev->mt76.csa_complete) 293 294 mt76_csa_finish(&dev->mt76); 294 295 else 295 - mt76_queue_kick(dev, 
dev->mt76.q_tx[MT_TXQ_PSD].q); 296 + mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]); 296 297 } 297 298 298 299 if (intr & MT_INT_TX_STAT) 299 300 mt76x02_mac_poll_tx_status(dev, true); 300 301 301 - if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) { 302 - mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL); 302 + if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) 303 303 napi_schedule(&dev->mt76.tx_napi); 304 - } 305 304 306 - if (intr & MT_INT_GPTIMER) { 307 - mt76x02_irq_disable(dev, MT_INT_GPTIMER); 305 + if (intr & MT_INT_GPTIMER) 308 306 tasklet_schedule(&dev->dfs_pd.dfs_tasklet); 309 - } 310 307 311 308 return IRQ_HANDLED; 312 309 } ··· 323 328 mt76_clear(dev, MT_WPDMA_GLO_CFG, 324 329 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 325 330 } 326 - 327 - void mt76x02_dma_cleanup(struct mt76x02_dev *dev) 328 - { 329 - tasklet_kill(&dev->mt76.tx_tasklet); 330 - mt76_dma_cleanup(&dev->mt76); 331 - } 332 - EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup); 333 331 334 332 void mt76x02_dma_disable(struct mt76x02_dev *dev) 335 333 { ··· 357 369 int i; 358 370 359 371 for (i = 0; i < 4; i++) { 360 - q = dev->mt76.q_tx[i].q; 372 + q = dev->mt76.q_tx[i]; 361 373 362 374 if (!q->queued) 363 375 continue; ··· 441 453 set_bit(MT76_RESET, &dev->mphy.state); 442 454 443 455 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); 444 - tasklet_disable(&dev->mt76.tx_tasklet); 456 + mt76_worker_disable(&dev->mt76.tx_worker); 445 457 napi_disable(&dev->mt76.tx_napi); 446 458 447 459 mt76_for_each_q_rx(&dev->mt76, i) { ··· 498 510 499 511 clear_bit(MT76_RESET, &dev->mphy.state); 500 512 501 - tasklet_enable(&dev->mt76.tx_tasklet); 513 + mt76_worker_enable(&dev->mt76.tx_worker); 502 514 napi_enable(&dev->mt76.tx_napi); 503 515 napi_schedule(&dev->mt76.tx_napi); 504 516
+1 -2
drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
··· 19 19 enum mt76_txq_id qid, struct mt76_wcid *wcid, 20 20 struct ieee80211_sta *sta, 21 21 struct mt76_tx_info *tx_info); 22 - void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 23 - struct mt76_queue_entry *e); 22 + void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 24 23 void mt76x02u_init_beacon_config(struct mt76x02_dev *dev); 25 24 void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev); 26 25 #endif /* __MT76x02_USB_H */
+6 -6
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
··· 15 15 mt76x02_remove_hdr_pad(skb, 2); 16 16 } 17 17 18 - void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 19 - struct mt76_queue_entry *e) 18 + void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 20 19 { 21 20 mt76x02u_remove_dma_hdr(e->skb); 22 - mt76_tx_complete_skb(mdev, e->skb); 21 + mt76_tx_complete_skb(mdev, e->wcid, e->skb); 23 22 } 24 23 EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); 25 24 ··· 45 46 46 47 int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) 47 48 { 48 - u32 info; 49 + u32 info, pad; 49 50 50 51 /* Buffer layout: 51 52 * | 4B | xfer len | pad | 4B | ··· 57 58 FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; 58 59 put_unaligned_le32(info, skb_push(skb, sizeof(info))); 59 60 60 - return mt76_skb_adjust_pad(skb); 61 + pad = round_up(skb->len, 4) + 4 - skb->len; 62 + return mt76_skb_adjust_pad(skb, pad); 61 63 } 62 64 63 65 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, ··· 67 67 struct mt76_tx_info *tx_info) 68 68 { 69 69 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 70 - int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx); 70 + int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx); 71 71 struct mt76x02_txwi *txwi; 72 72 bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; 73 73 enum mt76_qsel qsel;
+1 -4
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
··· 294 294 mvif->group_wcid.hw_key_idx = -1; 295 295 mtxq = (struct mt76_txq *)vif->txq->drv_priv; 296 296 mtxq->wcid = &mvif->group_wcid; 297 - 298 - mt76_txq_init(&dev->mt76, vif->txq); 299 297 } 300 298 301 299 int ··· 345 347 struct mt76x02_dev *dev = hw->priv; 346 348 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 347 349 348 - mt76_txq_remove(&dev->mt76, vif->txq); 349 350 dev->mphy.vif_mask &= ~BIT(mvif->idx); 350 351 } 351 352 EXPORT_SYMBOL_GPL(mt76x02_remove_interface); ··· 487 490 u8 cw_min = 5, cw_max = 10, qid; 488 491 u32 val; 489 492 490 - qid = dev->mt76.q_tx[queue].q->hw_idx; 493 + qid = dev->mt76.q_tx[queue]->hw_idx; 491 494 492 495 if (params->cw_min) 493 496 cw_min = fls(params->cw_min);
+4 -1
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
··· 63 63 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); 64 64 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); 65 65 66 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 67 + 66 68 ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler, 67 69 IRQF_SHARED, KBUILD_MODNAME, dev); 68 70 if (ret) ··· 113 111 114 112 napi_disable(&mdev->tx_napi); 115 113 tasklet_kill(&mdev->pre_tbtt_tasklet); 116 - tasklet_kill(&mdev->tx_tasklet); 114 + mt76_worker_disable(&mdev->tx_worker); 117 115 118 116 mt76_for_each_q_rx(mdev, i) 119 117 napi_disable(&mdev->napi[i]); ··· 147 145 148 146 pci_restore_state(pdev); 149 147 148 + mt76_worker_enable(&mdev->tx_worker); 150 149 mt76_for_each_q_rx(mdev, i) { 151 150 napi_enable(&mdev->napi[i]); 152 151 napi_schedule(&mdev->napi[i]);
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
··· 283 283 tasklet_disable(&dev->dfs_pd.dfs_tasklet); 284 284 tasklet_disable(&dev->mt76.pre_tbtt_tasklet); 285 285 mt76x2_stop_hardware(dev); 286 - mt76x02_dma_cleanup(dev); 286 + mt76_dma_cleanup(&dev->mt76); 287 287 mt76x02_mcu_cleanup(dev); 288 288 } 289 289
+5 -6
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
··· 21 21 switch (val) { 22 22 case SER_SET_RECOVER_L1: 23 23 case SER_SET_RECOVER_L2: 24 - /* fall through */ 25 24 ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0); 26 25 if (ret) 27 26 return ret; ··· 291 292 int i; 292 293 293 294 for (i = 0; i < ARRAY_SIZE(queue_map); i++) { 294 - struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id]; 295 + struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id]; 295 296 296 - if (!q->q) 297 + if (!q) 297 298 continue; 298 299 299 300 seq_printf(s, 300 301 "%s: queued=%d head=%d tail=%d\n", 301 - queue_map[i].queue, q->q->queued, q->q->head, 302 - q->q->tail); 302 + queue_map[i].queue, q->queued, q->head, 303 + q->tail); 303 304 } 304 305 305 306 return 0; ··· 399 400 struct ieee80211_sta *sta = data; 400 401 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 401 402 402 - return mt7915_mcu_set_fixed_rate(msta->vif->dev, sta, rate); 403 + return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate); 403 404 } 404 405 405 406 DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
+118 -28
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
··· 8 8 static int 9 9 mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc) 10 10 { 11 - struct mt76_sw_queue *q; 12 11 struct mt76_queue *hwq; 13 12 int err, i; 14 13 ··· 20 21 if (err < 0) 21 22 return err; 22 23 23 - for (i = 0; i < MT_TXQ_MCU; i++) { 24 - q = &dev->mt76.q_tx[i]; 25 - INIT_LIST_HEAD(&q->swq); 26 - q->q = hwq; 27 - } 24 + for (i = 0; i < MT_TXQ_MCU; i++) 25 + dev->mt76.q_tx[i] = hwq; 28 26 29 27 return 0; 30 28 } 31 29 32 30 static int 33 - mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q, 34 - int idx, int n_desc) 31 + mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc) 35 32 { 36 33 struct mt76_queue *hwq; 37 34 int err; ··· 40 45 if (err < 0) 41 46 return err; 42 47 43 - INIT_LIST_HEAD(&q->swq); 44 - q->q = hwq; 48 + dev->mt76.q_tx[qid] = hwq; 45 49 46 50 return 0; 47 51 } ··· 66 72 mt76_rx(&dev->mt76, q, skb); 67 73 return; 68 74 } 69 - /* fall through */ 75 + fallthrough; 70 76 default: 71 77 dev_kfree_skb(skb); 72 78 break; ··· 78 84 { 79 85 mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); 80 86 mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false); 81 - mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 82 - mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 83 87 } 84 88 85 89 static int mt7915_poll_tx(struct napi_struct *napi, int budget) ··· 89 97 mt7915_tx_cleanup(dev); 90 98 91 99 if (napi_complete_done(napi, 0)) 92 - mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL); 93 - 94 - mt7915_tx_cleanup(dev); 95 - 96 - mt7915_mac_sta_poll(dev); 97 - 98 - tasklet_schedule(&dev->mt76.tx_tasklet); 100 + mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU); 99 101 100 102 return 0; 101 103 } ··· 124 138 mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0)); 125 139 } 126 140 141 + static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) 142 + { 143 + static const struct { 144 + u32 phys; 145 + u32 mapped; 146 + u32 size; 147 + } fixed_map[] = { 148 + { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */ 149 
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */ 150 + { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */ 151 + { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */ 152 + { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */ 153 + { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */ 154 + { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */ 155 + { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */ 156 + { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */ 157 + { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */ 158 + { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */ 159 + { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */ 160 + { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */ 161 + { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */ 162 + { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */ 163 + { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */ 164 + { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */ 165 + { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */ 166 + { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */ 167 + { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */ 168 + { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */ 169 + { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */ 170 + { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */ 171 + { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */ 172 + { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */ 173 + { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */ 174 + { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */ 175 + { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */ 176 + { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */ 177 + { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */ 178 + { 0x820f4000, 0xa1000, 
0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */ 179 + { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */ 180 + { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */ 181 + { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */ 182 + { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */ 183 + { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */ 184 + { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */ 185 + { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */ 186 + }; 187 + int i; 188 + 189 + if (addr < 0x100000) 190 + return addr; 191 + 192 + for (i = 0; i < ARRAY_SIZE(fixed_map); i++) { 193 + u32 ofs; 194 + 195 + if (addr < fixed_map[i].phys) 196 + continue; 197 + 198 + ofs = addr - fixed_map[i].phys; 199 + if (ofs > fixed_map[i].size) 200 + continue; 201 + 202 + return fixed_map[i].mapped + ofs; 203 + } 204 + 205 + if ((addr >= 0x18000000 && addr < 0x18c00000) || 206 + (addr >= 0x70000000 && addr < 0x78000000) || 207 + (addr >= 0x7c000000 && addr < 0x7c400000)) 208 + return mt7915_reg_map_l1(dev, addr); 209 + 210 + return mt7915_reg_map_l2(dev, addr); 211 + } 212 + 213 + static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset) 214 + { 215 + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 216 + u32 addr = __mt7915_reg_addr(dev, offset); 217 + 218 + return dev->bus_ops->rr(mdev, addr); 219 + } 220 + 221 + static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val) 222 + { 223 + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 224 + u32 addr = __mt7915_reg_addr(dev, offset); 225 + 226 + dev->bus_ops->wr(mdev, addr, val); 227 + } 228 + 229 + static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) 230 + { 231 + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 232 + u32 addr = __mt7915_reg_addr(dev, offset); 233 + 234 + return dev->bus_ops->rmw(mdev, addr, mask, val); 235 + } 236 + 127 237 int 
mt7915_dma_init(struct mt7915_dev *dev) 128 238 { 129 239 /* Increase buffer size to receive large VHT/HE MPDUs */ 240 + struct mt76_bus_ops *bus_ops; 130 241 int rx_buf_size = MT_RX_BUF_SIZE * 2; 131 242 int ret; 243 + 244 + dev->bus_ops = dev->mt76.bus; 245 + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), 246 + GFP_KERNEL); 247 + if (!bus_ops) 248 + return -ENOMEM; 249 + 250 + bus_ops->rr = mt7915_rr; 251 + bus_ops->wr = mt7915_wr; 252 + bus_ops->rmw = mt7915_rmw; 253 + dev->mt76.bus = bus_ops; 132 254 133 255 mt76_dma_attach(&dev->mt76); 134 256 ··· 262 168 return ret; 263 169 264 170 /* command to WM */ 265 - ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], 266 - MT7915_TXQ_MCU_WM, 171 + ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM, 267 172 MT7915_TX_MCU_RING_SIZE); 268 173 if (ret) 269 174 return ret; 270 175 271 176 /* command to WA */ 272 - ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA], 273 - MT7915_TXQ_MCU_WA, 177 + ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA, 274 178 MT7915_TX_MCU_RING_SIZE); 275 179 if (ret) 276 180 return ret; 277 181 278 182 /* firmware download */ 279 - ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL], 280 - MT7915_TXQ_FWDL, 183 + ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL, 281 184 MT7915_TX_FWDL_RING_SIZE); 282 185 if (ret) 283 186 return ret; ··· 339 248 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN); 340 249 341 250 /* enable interrupts for TX/RX rings */ 342 - mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 251 + mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU | 343 252 MT_INT_MCU_CMD); 344 253 345 254 return 0; ··· 372 281 MT_WFDMA0_RST_DMASHDL_ALL_RST | 373 282 MT_WFDMA0_RST_LOGIC_RST); 374 283 375 - tasklet_kill(&dev->mt76.tx_tasklet); 376 284 mt76_dma_cleanup(&dev->mt76); 377 285 }
+10
drivers/net/wireless/mediatek/mt76/mt7915/init.c
··· 135 135 136 136 set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); 137 137 138 + /* 139 + * force firmware operation mode into normal state, 140 + * which should be set before firmware download stage. 141 + */ 142 + mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); 143 + 138 144 ret = mt7915_mcu_init(dev); 139 145 if (ret) 140 146 return ret; ··· 618 612 mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1; 619 613 mt7915_init_wiphy(mphy->hw); 620 614 615 + INIT_LIST_HEAD(&phy->stats_list); 621 616 INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work); 622 617 623 618 /* ··· 659 652 dev->phy.dev = dev; 660 653 dev->phy.mt76 = &dev->mt76.phy; 661 654 dev->mt76.phy.priv = &dev->phy; 655 + INIT_LIST_HEAD(&dev->phy.stats_list); 656 + INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work); 662 657 INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work); 658 + INIT_LIST_HEAD(&dev->sta_rc_list); 663 659 INIT_LIST_HEAD(&dev->sta_poll_list); 664 660 spin_lock_init(&dev->sta_poll_lock); 665 661
+166 -105
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
··· 88 88 0, 5000); 89 89 } 90 90 91 - static u32 mt7915_mac_wtbl_lmac_read(struct mt7915_dev *dev, u16 wcid, 92 - u16 addr) 91 + static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid) 93 92 { 94 93 mt76_wr(dev, MT_WTBLON_TOP_WDUCR, 95 94 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7))); 96 95 97 - return mt76_rr(dev, MT_WTBL_LMAC_OFFS(wcid, addr)); 96 + return MT_WTBL_LMAC_OFFS(wcid, 0); 98 97 } 99 98 100 99 /* TODO: use txfree airtime info to avoid runtime accessing in the long run */ 101 - void mt7915_mac_sta_poll(struct mt7915_dev *dev) 100 + static void mt7915_mac_sta_poll(struct mt7915_dev *dev) 102 101 { 103 102 static const u8 ac_to_tid[] = { 104 103 [IEEE80211_AC_BE] = 0, ··· 105 106 [IEEE80211_AC_VI] = 4, 106 107 [IEEE80211_AC_VO] = 6 107 108 }; 108 - static const u8 hw_queue_map[] = { 109 - [IEEE80211_AC_BK] = 0, 110 - [IEEE80211_AC_BE] = 1, 111 - [IEEE80211_AC_VI] = 2, 112 - [IEEE80211_AC_VO] = 3, 113 - }; 114 109 struct ieee80211_sta *sta; 115 110 struct mt7915_sta *msta; 116 111 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; 112 + LIST_HEAD(sta_poll_list); 117 113 int i; 114 + 115 + spin_lock_bh(&dev->sta_poll_lock); 116 + list_splice_init(&dev->sta_poll_list, &sta_poll_list); 117 + spin_unlock_bh(&dev->sta_poll_lock); 118 118 119 119 rcu_read_lock(); 120 120 121 121 while (true) { 122 122 bool clear = false; 123 + u32 addr; 123 124 u16 idx; 124 125 125 126 spin_lock_bh(&dev->sta_poll_lock); 126 - if (list_empty(&dev->sta_poll_list)) { 127 + if (list_empty(&sta_poll_list)) { 127 128 spin_unlock_bh(&dev->sta_poll_lock); 128 129 break; 129 130 } 130 - msta = list_first_entry(&dev->sta_poll_list, 131 + msta = list_first_entry(&sta_poll_list, 131 132 struct mt7915_sta, poll_list); 132 133 list_del_init(&msta->poll_list); 133 134 spin_unlock_bh(&dev->sta_poll_lock); 134 135 135 - for (i = 0, idx = msta->wcid.idx; i < IEEE80211_NUM_ACS; i++) { 136 - u32 tx_last = msta->airtime_ac[i]; 137 - u32 rx_last = msta->airtime_ac[i + 
IEEE80211_NUM_ACS]; 136 + idx = msta->wcid.idx; 137 + addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4; 138 138 139 - msta->airtime_ac[i] = 140 - mt7915_mac_wtbl_lmac_read(dev, idx, 20 + i); 141 - msta->airtime_ac[i + IEEE80211_NUM_ACS] = 142 - mt7915_mac_wtbl_lmac_read(dev, idx, 21 + i); 139 + for (i = 0; i < IEEE80211_NUM_ACS; i++) { 140 + u32 tx_last = msta->airtime_ac[i]; 141 + u32 rx_last = msta->airtime_ac[i + 4]; 142 + 143 + msta->airtime_ac[i] = mt76_rr(dev, addr); 144 + msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); 145 + 143 146 tx_time[i] = msta->airtime_ac[i] - tx_last; 144 - rx_time[i] = msta->airtime_ac[i + IEEE80211_NUM_ACS] - 145 - rx_last; 147 + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; 146 148 147 149 if ((tx_last | rx_last) & BIT(30)) 148 150 clear = true; 151 + 152 + addr += 8; 149 153 } 150 154 151 155 if (clear) { ··· 163 161 sta = container_of((void *)msta, struct ieee80211_sta, 164 162 drv_priv); 165 163 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 166 - u32 tx_cur = tx_time[i]; 167 - u32 rx_cur = rx_time[hw_queue_map[i]]; 164 + u8 q = mt7915_lmac_mapping(dev, i); 165 + u32 tx_cur = tx_time[q]; 166 + u32 rx_cur = rx_time[q]; 168 167 u8 tid = ac_to_tid[i]; 169 168 170 169 if (!tx_cur && !rx_cur) ··· 471 468 switch (mode) { 472 469 case MT_PHY_TYPE_CCK: 473 470 cck = true; 474 - /* fall through */ 471 + fallthrough; 475 472 case MT_PHY_TYPE_OFDM: 476 473 i = mt76_get_rate(&dev->mt76, sband, i, cck); 477 474 break; ··· 490 487 break; 491 488 case MT_PHY_TYPE_HE_MU: 492 489 status->flag |= RX_FLAG_RADIOTAP_HE_MU; 493 - /* fall through */ 490 + fallthrough; 494 491 case MT_PHY_TYPE_HE_SU: 495 492 case MT_PHY_TYPE_HE_EXT_SU: 496 493 case MT_PHY_TYPE_HE_TB: ··· 568 565 { 569 566 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 570 567 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 568 + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 571 569 bool multicast = is_multicast_ether_addr(hdr->addr1); 572 
570 struct ieee80211_vif *vif = info->control.vif; 573 571 struct mt76_phy *mphy = &dev->mphy; 574 572 bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 575 573 u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 576 574 __le16 fc = hdr->frame_control; 577 - u16 tx_count = 4, seqno = 0; 575 + u16 tx_count = 15, seqno = 0; 576 + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 578 577 u32 val; 579 578 580 579 if (vif) { ··· 592 587 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 593 588 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 594 589 590 + txwi[4] = 0; 591 + txwi[5] = 0; 592 + txwi[6] = 0; 593 + 595 594 if (beacon) { 596 595 p_fmt = MT_TX_TYPE_FW; 597 596 q_idx = MT_LMAC_BCN0; ··· 608 599 mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); 609 600 } 610 601 602 + if (ieee80211_is_action(fc) && 603 + mgmt->u.action.category == WLAN_CATEGORY_BACK && 604 + mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { 605 + u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); 606 + 607 + txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA); 608 + tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK; 609 + } else if (ieee80211_is_back_req(hdr->frame_control)) { 610 + struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr; 611 + u16 control = le16_to_cpu(bar->control); 612 + 613 + tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control); 614 + } 615 + 611 616 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | 612 617 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | 613 618 FIELD_PREP(MT_TXD0_Q_IDX, q_idx); ··· 632 609 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | 633 610 FIELD_PREP(MT_TXD1_HDR_INFO, 634 611 ieee80211_get_hdrlen_from_skb(skb) / 2) | 635 - FIELD_PREP(MT_TXD1_TID, 636 - skb->priority & IEEE80211_QOS_CTL_TID_MASK) | 612 + FIELD_PREP(MT_TXD1_TID, tid) | 637 613 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 638 614 639 615 if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ··· 655 633 txwi[3] = 0; 656 
634 } 657 635 txwi[2] = cpu_to_le32(val); 658 - 659 - txwi[4] = 0; 660 - txwi[5] = 0; 661 - txwi[6] = 0; 662 636 663 637 if (!ieee80211_is_data(fc) || multicast) { 664 638 u16 rate; ··· 683 665 684 666 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 685 667 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); 668 + if (wcid->amsdu) 669 + val |= MT_TXD7_HW_AMSDU; 686 670 txwi[7] = cpu_to_le32(val); 687 671 688 672 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 689 - if (ieee80211_is_data_qos(fc)) { 690 - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 691 - val |= MT_TXD3_SN_VALID; 692 - } else if (ieee80211_is_back_req(fc)) { 693 - struct ieee80211_bar *bar; 673 + if (info->flags & IEEE80211_TX_CTL_INJECTED) { 674 + seqno = le16_to_cpu(hdr->seq_ctrl); 694 675 695 - bar = (struct ieee80211_bar *)skb->data; 696 - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); 697 - val |= MT_TXD3_SN_VALID; 676 + if (ieee80211_is_back_req(hdr->frame_control)) { 677 + struct ieee80211_bar *bar; 678 + 679 + bar = (struct ieee80211_bar *)skb->data; 680 + seqno = le16_to_cpu(bar->start_seq_num); 681 + } 682 + 683 + val |= MT_TXD3_SN_VALID | 684 + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 698 685 } 699 - val |= FIELD_PREP(MT_TXD3_SEQ, seqno); 700 686 txwi[3] |= cpu_to_le32(val); 701 687 } 702 688 ··· 737 715 738 716 /* pass partial skb header to fw */ 739 717 tx_info->buf[1].len = MT_CT_PARSE_LEN; 718 + tx_info->buf[1].skip_unmap = true; 740 719 tx_info->nbuf = MT_CT_DMA_BUF_NUM; 741 720 742 721 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); ··· 770 747 return 0; 771 748 } 772 749 773 - static inline bool 774 - mt7915_tx_check_aggr_tid(struct mt7915_sta *msta, u8 tid) 775 - { 776 - bool ret = false; 777 - 778 - spin_lock_bh(&msta->ampdu_lock); 779 - if (msta->ampdu_state[tid] == MT7915_AGGR_STOP) 780 - ret = true; 781 - spin_unlock_bh(&msta->ampdu_lock); 782 - 783 - return ret; 784 - } 785 - 786 750 static void 787 - mt7915_tx_check_aggr(struct ieee80211_sta *sta, 
struct sk_buff *skb) 751 + mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) 788 752 { 789 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 790 753 struct mt7915_sta *msta; 791 - u16 tid; 754 + u16 fc, tid; 755 + u32 val; 792 756 793 - if (!sta->ht_cap.ht_supported) 757 + if (!sta || !sta->ht_cap.ht_supported) 794 758 return; 795 759 796 - if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO) 760 + tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1])); 761 + if (tid >= 6) /* skip VO queue */ 797 762 return; 798 763 799 - if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) 800 - return; 801 - 802 - if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) 764 + val = le32_to_cpu(txwi[2]); 765 + fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | 766 + FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; 767 + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) 803 768 return; 804 769 805 770 msta = (struct mt7915_sta *)sta->drv_priv; 806 - tid = ieee80211_get_tid(hdr); 807 - 808 - if (mt7915_tx_check_aggr_tid(msta, tid)) { 771 + if (!test_and_set_bit(tid, &msta->ampdu_state)) 809 772 ieee80211_start_tx_ba_session(sta, tid, 0); 810 - mt7915_set_aggr_state(msta, tid, MT7915_AGGR_PROGRESS); 811 - } 812 773 } 813 774 814 775 static inline void ··· 829 822 830 823 if (info->flags & IEEE80211_TX_CTL_AMPDU) 831 824 info->flags |= IEEE80211_TX_STAT_AMPDU; 832 - else if (sta) 833 - mt7915_tx_check_aggr(sta, skb); 834 825 835 826 if (stat) 836 827 ieee80211_tx_info_clear_status(info); ··· 869 864 struct ieee80211_sta *sta = NULL; 870 865 u8 i, count; 871 866 867 + /* clean DMA queues and unmap buffers first */ 868 + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 869 + mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 870 + 872 871 /* 873 872 * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE, 874 873 * to the time ack is received or dropped by hw (air + hw queue time). 
··· 889 880 */ 890 881 if (info & MT_TX_FREE_PAIR) { 891 882 struct mt7915_sta *msta; 883 + struct mt7915_phy *phy; 892 884 struct mt76_wcid *wcid; 893 885 u16 idx; 894 886 ··· 901 891 continue; 902 892 903 893 msta = container_of(wcid, struct mt7915_sta, wcid); 904 - ieee80211_queue_work(mt76_hw(dev), &msta->stats_work); 905 - continue; 894 + phy = msta->vif->phy; 895 + spin_lock_bh(&dev->sta_poll_lock); 896 + if (list_empty(&msta->stats_list)) 897 + list_add_tail(&msta->stats_list, &phy->stats_list); 898 + if (list_empty(&msta->poll_list)) 899 + list_add_tail(&msta->poll_list, &dev->sta_poll_list); 900 + spin_unlock_bh(&dev->sta_poll_lock); 906 901 } 907 902 908 903 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); ··· 922 907 923 908 mt7915_txp_skb_unmap(mdev, txwi); 924 909 if (txwi->skb) { 910 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb); 911 + void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi); 912 + 913 + if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE))) 914 + mt7915_tx_check_aggr(sta, txwi_ptr); 915 + 916 + if (sta && !info->tx_time_est) { 917 + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; 918 + int pending; 919 + 920 + pending = atomic_dec_return(&wcid->non_aql_packets); 921 + if (pending < 0) 922 + atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); 923 + } 924 + 925 925 mt7915_tx_complete_status(mdev, txwi->skb, sta, stat); 926 926 txwi->skb = NULL; 927 927 } ··· 944 914 mt76_put_txwi(mdev, txwi); 945 915 } 946 916 dev_kfree_skb(skb); 917 + 918 + mt7915_mac_sta_poll(dev); 919 + mt76_worker_schedule(&dev->mt76.tx_worker); 947 920 } 948 921 949 - void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 950 - struct mt76_queue_entry *e) 922 + void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) 951 923 { 952 924 struct mt7915_dev *dev; 953 925 ··· 1218 1186 if (ext_phy) 1219 1187 mt76_txq_schedule_all(ext_phy); 1220 1188 1221 - tasklet_disable(&dev->mt76.tx_tasklet); 1189 + 
mt76_worker_disable(&dev->mt76.tx_worker); 1222 1190 napi_disable(&dev->mt76.napi[0]); 1223 1191 napi_disable(&dev->mt76.napi[1]); 1224 1192 napi_disable(&dev->mt76.napi[2]); ··· 1238 1206 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1239 1207 clear_bit(MT76_RESET, &dev->mphy.state); 1240 1208 1241 - tasklet_enable(&dev->mt76.tx_tasklet); 1209 + mt76_worker_enable(&dev->mt76.tx_worker); 1242 1210 napi_enable(&dev->mt76.tx_napi); 1243 1211 napi_schedule(&dev->mt76.tx_napi); 1244 1212 ··· 1313 1281 } 1314 1282 } 1315 1283 1316 - void mt7915_mac_sta_stats_work(struct work_struct *work) 1284 + static void 1285 + mt7915_mac_sta_stats_work(struct mt7915_phy *phy) 1317 1286 { 1318 - struct ieee80211_sta *sta; 1319 - struct ieee80211_vif *vif; 1320 - struct mt7915_sta_stats *stats; 1287 + struct mt7915_dev *dev = phy->dev; 1321 1288 struct mt7915_sta *msta; 1322 - struct mt7915_dev *dev; 1323 - 1324 - msta = container_of(work, struct mt7915_sta, stats_work); 1325 - sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 1326 - vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 1327 - dev = msta->vif->dev; 1328 - stats = &msta->stats; 1329 - 1330 - /* use MT_TX_FREE_RATE to report Tx rate for further devices */ 1331 - if (time_after(jiffies, stats->jiffies + HZ)) { 1332 - mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, 1333 - msta->wcid.idx); 1334 - 1335 - stats->jiffies = jiffies; 1336 - } 1337 - 1338 - if (test_and_clear_bit(IEEE80211_RC_SUPP_RATES_CHANGED | 1339 - IEEE80211_RC_NSS_CHANGED | 1340 - IEEE80211_RC_BW_CHANGED, &stats->changed)) 1341 - mt7915_mcu_add_rate_ctrl(dev, vif, sta); 1342 - 1343 - if (test_and_clear_bit(IEEE80211_RC_SMPS_CHANGED, &stats->changed)) 1344 - mt7915_mcu_add_smps(dev, vif, sta); 1289 + LIST_HEAD(list); 1345 1290 1346 1291 spin_lock_bh(&dev->sta_poll_lock); 1347 - if (list_empty(&msta->poll_list)) 1348 - list_add_tail(&msta->poll_list, &dev->sta_poll_list); 1292 + list_splice_init(&phy->stats_list, &list); 1293 
+ 1294 + while (!list_empty(&list)) { 1295 + msta = list_first_entry(&list, struct mt7915_sta, stats_list); 1296 + list_del_init(&msta->stats_list); 1297 + spin_unlock_bh(&dev->sta_poll_lock); 1298 + 1299 + /* use MT_TX_FREE_RATE to report Tx rate for further devices */ 1300 + mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx); 1301 + 1302 + spin_lock_bh(&dev->sta_poll_lock); 1303 + } 1304 + 1305 + spin_unlock_bh(&dev->sta_poll_lock); 1306 + } 1307 + 1308 + void mt7915_mac_sta_rc_work(struct work_struct *work) 1309 + { 1310 + struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work); 1311 + struct ieee80211_sta *sta; 1312 + struct ieee80211_vif *vif; 1313 + struct mt7915_sta *msta; 1314 + u32 changed; 1315 + LIST_HEAD(list); 1316 + 1317 + spin_lock_bh(&dev->sta_poll_lock); 1318 + list_splice_init(&dev->sta_rc_list, &list); 1319 + 1320 + while (!list_empty(&list)) { 1321 + msta = list_first_entry(&list, struct mt7915_sta, rc_list); 1322 + list_del_init(&msta->rc_list); 1323 + changed = msta->stats.changed; 1324 + msta->stats.changed = 0; 1325 + spin_unlock_bh(&dev->sta_poll_lock); 1326 + 1327 + sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); 1328 + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); 1329 + 1330 + if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED | 1331 + IEEE80211_RC_NSS_CHANGED | 1332 + IEEE80211_RC_BW_CHANGED)) 1333 + mt7915_mcu_add_rate_ctrl(dev, vif, sta); 1334 + 1335 + if (changed & IEEE80211_RC_SMPS_CHANGED) 1336 + mt7915_mcu_add_smps(dev, vif, sta); 1337 + 1338 + spin_lock_bh(&dev->sta_poll_lock); 1339 + } 1340 + 1349 1341 spin_unlock_bh(&dev->sta_poll_lock); 1350 1342 } 1351 1343 ··· 1390 1334 1391 1335 mt7915_mac_update_mib_stats(phy); 1392 1336 } 1337 + 1338 + if (++phy->sta_work_count == 10) { 1339 + phy->sta_work_count = 0; 1340 + mt7915_mac_sta_stats_work(phy); 1341 + }; 1393 1342 1394 1343 mutex_unlock(&mdev->mutex); 1395 1344
+19 -20
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 137 137 goto out; 138 138 } 139 139 mvif->omac_idx = idx; 140 - mvif->dev = dev; 140 + mvif->phy = phy; 141 141 mvif->band_idx = ext_phy; 142 142 143 143 if (ext_phy) ··· 155 155 156 156 idx = MT7915_WTBL_RESERVED - mvif->idx; 157 157 158 + INIT_LIST_HEAD(&mvif->sta.rc_list); 159 + INIT_LIST_HEAD(&mvif->sta.stats_list); 158 160 INIT_LIST_HEAD(&mvif->sta.poll_list); 159 161 mvif->sta.wcid.idx = idx; 160 162 mvif->sta.wcid.ext_phy = mvif->band_idx; ··· 169 167 if (vif->txq) { 170 168 mtxq = (struct mt76_txq *)vif->txq->drv_priv; 171 169 mtxq->wcid = &mvif->sta.wcid; 172 - mt76_txq_init(&dev->mt76, vif->txq); 173 170 } 174 171 175 172 out: ··· 191 190 mt7915_mcu_add_dev_info(dev, vif, false); 192 191 193 192 rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 194 - if (vif->txq) 195 - mt76_txq_remove(&dev->mt76, vif->txq); 196 193 197 194 mutex_lock(&dev->mt76.mutex); 198 195 phy->mt76->vif_mask &= ~BIT(mvif->idx); ··· 492 493 if (idx < 0) 493 494 return -ENOSPC; 494 495 496 + INIT_LIST_HEAD(&msta->rc_list); 497 + INIT_LIST_HEAD(&msta->stats_list); 495 498 INIT_LIST_HEAD(&msta->poll_list); 496 - INIT_WORK(&msta->stats_work, mt7915_mac_sta_stats_work); 497 - spin_lock_init(&msta->ampdu_lock); 498 499 msta->vif = mvif; 499 500 msta->wcid.sta = 1; 500 501 msta->wcid.idx = idx; ··· 527 528 spin_lock_bh(&dev->sta_poll_lock); 528 529 if (!list_empty(&msta->poll_list)) 529 530 list_del_init(&msta->poll_list); 531 + if (!list_empty(&msta->stats_list)) 532 + list_del_init(&msta->stats_list); 533 + if (!list_empty(&msta->rc_list)) 534 + list_del_init(&msta->rc_list); 530 535 spin_unlock_bh(&dev->sta_poll_lock); 531 536 } 532 537 ··· 606 603 case IEEE80211_AMPDU_TX_OPERATIONAL: 607 604 mtxq->aggr = true; 608 605 mtxq->send_bar = false; 609 - mt7915_set_aggr_state(msta, tid, MT7915_AGGR_OPERATIONAL); 610 606 mt7915_mcu_add_tx_ba(dev, params, true); 611 607 break; 612 608 case IEEE80211_AMPDU_TX_STOP_FLUSH: 613 609 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 614 610 mtxq->aggr = 
false; 615 - mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP); 611 + clear_bit(tid, &msta->ampdu_state); 616 612 mt7915_mcu_add_tx_ba(dev, params, false); 617 613 break; 618 614 case IEEE80211_AMPDU_TX_START: 619 - mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); 620 - mt7915_set_aggr_state(msta, tid, MT7915_AGGR_START); 615 + set_bit(tid, &msta->ampdu_state); 621 616 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; 622 617 break; 623 618 case IEEE80211_AMPDU_TX_STOP_CONT: 624 619 mtxq->aggr = false; 625 - mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP); 620 + clear_bit(tid, &msta->ampdu_state); 626 621 mt7915_mcu_add_tx_ba(dev, params, false); 627 622 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 628 623 break; ··· 790 789 struct ieee80211_sta *sta, 791 790 u32 changed) 792 791 { 792 + struct mt7915_dev *dev = mt7915_hw_dev(hw); 793 793 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 794 794 795 - rcu_read_lock(); 796 - sta = ieee80211_find_sta(vif, sta->addr); 797 - if (!sta) { 798 - rcu_read_unlock(); 799 - return; 800 - } 801 - rcu_read_unlock(); 795 + spin_lock_bh(&dev->sta_poll_lock); 796 + msta->stats.changed |= changed; 797 + if (list_empty(&msta->rc_list)) 798 + list_add_tail(&msta->rc_list, &dev->sta_rc_list); 799 + spin_unlock_bh(&dev->sta_poll_lock); 802 800 803 - set_bit(changed, &msta->stats.changed); 804 - ieee80211_queue_work(hw, &msta->stats_work); 801 + ieee80211_queue_work(hw, &dev->rc_work); 805 802 } 806 803 807 804 const struct ieee80211_ops mt7915_ops = {
+115 -17
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
··· 522 522 return; 523 523 524 524 wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 525 + if (!wcid) 526 + return; 527 + 525 528 msta = container_of(wcid, struct mt7915_sta, wcid); 526 529 stats = &msta->stats; 527 530 ··· 717 714 ptlv = skb_put(skb, sub_len); 718 715 memcpy(ptlv, &tlv, sizeof(tlv)); 719 716 720 - *sub_ntlv = cpu_to_le16(le16_to_cpu(*sub_ntlv) + 1); 721 - *len = cpu_to_le16(le16_to_cpu(*len) + sub_len); 717 + le16_add_cpu(sub_ntlv, 1); 718 + le16_add_cpu(len, sub_len); 722 719 723 720 return ptlv; 724 721 } ··· 936 933 tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he)); 937 934 938 935 he = (struct bss_info_he *)tlv; 939 - he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext * 4; 936 + he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; 940 937 if (!he->he_pe_duration) 941 938 he->he_pe_duration = DEFAULT_HE_PE_DURATION; 942 939 943 - he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th * 32); 940 + he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); 944 941 if (!he->he_rts_thres) 945 942 he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); 946 943 947 944 he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80; 948 945 he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160; 949 946 he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; 947 + } 948 + 949 + static void 950 + mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb) 951 + { 952 + #define TXD_CMP_MAP1 GENMASK(15, 0) 953 + #define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23)) 954 + struct bss_info_hw_amsdu *amsdu; 955 + struct tlv *tlv; 956 + 957 + tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu)); 958 + 959 + amsdu = (struct bss_info_hw_amsdu *)tlv; 960 + amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1); 961 + amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2); 962 + amsdu->trig_thres = cpu_to_le16(2); 963 + amsdu->enable = true; 950 964 } 951 965 952 966 static void ··· 1040 1020 
mt7915_mcu_bss_rfch_tlv(skb, vif, phy); 1041 1021 mt7915_mcu_bss_bmc_tlv(skb, phy); 1042 1022 mt7915_mcu_bss_ra_tlv(skb, vif, phy); 1023 + mt7915_mcu_bss_hw_amsdu_tlv(skb); 1043 1024 1044 1025 if (vif->bss_conf.he_support) 1045 1026 mt7915_mcu_bss_he_tlv(skb, vif, phy); ··· 1198 1177 struct tlv *sta_wtbl; 1199 1178 struct sk_buff *skb; 1200 1179 int ret; 1180 + 1181 + if (enable && tx && !params->amsdu) 1182 + msta->wcid.amsdu = false; 1201 1183 1202 1184 skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, 1203 1185 MT7915_STA_UPDATE_MAX_SIZE); ··· 1431 1407 1432 1408 he->max_nss_mcs[CMD_HE_MCS_BW160] = 1433 1409 he_cap->he_mcs_nss_supp.rx_mcs_160; 1434 - /* fall through */ 1410 + fallthrough; 1435 1411 default: 1436 1412 he->max_nss_mcs[CMD_HE_MCS_BW80] = 1437 1413 he_cap->he_mcs_nss_supp.rx_mcs_80; ··· 1462 1438 HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]); 1463 1439 1464 1440 he->pkt_ext = 2; 1441 + } 1442 + 1443 + static void 1444 + mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, 1445 + struct ieee80211_vif *vif) 1446 + { 1447 + struct sta_rec_uapsd *uapsd; 1448 + struct tlv *tlv; 1449 + 1450 + if (vif->type != NL80211_IFTYPE_AP || !sta->wme) 1451 + return; 1452 + 1453 + tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd)); 1454 + uapsd = (struct sta_rec_uapsd *)tlv; 1455 + 1456 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) { 1457 + uapsd->dac_map |= BIT(3); 1458 + uapsd->tac_map |= BIT(3); 1459 + } 1460 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) { 1461 + uapsd->dac_map |= BIT(2); 1462 + uapsd->tac_map |= BIT(2); 1463 + } 1464 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) { 1465 + uapsd->dac_map |= BIT(1); 1466 + uapsd->tac_map |= BIT(1); 1467 + } 1468 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) { 1469 + uapsd->dac_map |= BIT(0); 1470 + uapsd->tac_map |= BIT(0); 1471 + } 1472 + uapsd->max_sp = sta->max_sp; 1465 1473 } 1466 1474 1467 1475 static void ··· 1568 
1512 } 1569 1513 1570 1514 static void 1515 + mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) 1516 + { 1517 + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1518 + struct sta_rec_amsdu *amsdu; 1519 + struct tlv *tlv; 1520 + 1521 + if (!sta->max_amsdu_len) 1522 + return; 1523 + 1524 + tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); 1525 + amsdu = (struct sta_rec_amsdu *)tlv; 1526 + amsdu->max_amsdu_num = 8; 1527 + amsdu->amsdu_en = true; 1528 + amsdu->max_mpdu_size = sta->max_amsdu_len >= 1529 + IEEE80211_MAX_MPDU_LEN_VHT_7991; 1530 + msta->wcid.amsdu = true; 1531 + } 1532 + 1533 + static bool 1534 + mt7915_hw_amsdu_supported(struct ieee80211_vif *vif) 1535 + { 1536 + switch (vif->type) { 1537 + case NL80211_IFTYPE_AP: 1538 + case NL80211_IFTYPE_STATION: 1539 + return true; 1540 + default: 1541 + return false; 1542 + } 1543 + } 1544 + 1545 + static void 1571 1546 mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1572 - struct ieee80211_sta *sta) 1547 + struct ieee80211_sta *sta, struct ieee80211_vif *vif) 1573 1548 { 1574 1549 struct tlv *tlv; 1575 1550 ··· 1611 1524 tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); 1612 1525 ht = (struct sta_rec_ht *)tlv; 1613 1526 ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); 1527 + 1528 + if (mt7915_hw_amsdu_supported(vif)) 1529 + mt7915_mcu_sta_amsdu_tlv(skb, sta); 1614 1530 } 1615 1531 1616 1532 /* starec vht */ ··· 1630 1540 /* starec he */ 1631 1541 if (sta->he_cap.has_he) 1632 1542 mt7915_mcu_sta_he_tlv(skb, sta); 1543 + 1544 + /* starec uapsd */ 1545 + mt7915_mcu_sta_uapsd_tlv(skb, sta, vif); 1633 1546 } 1634 1547 1635 1548 static void ··· 2269 2176 2270 2177 mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable); 2271 2178 if (enable && sta) 2272 - mt7915_mcu_sta_tlv(dev, skb, sta); 2179 + mt7915_mcu_sta_tlv(dev, skb, sta, vif); 2273 2180 2274 2181 sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); 2275 2182 ··· 2428 2335 struct 
bss_info_bcn *bcn; 2429 2336 int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE; 2430 2337 2431 - rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); 2432 - if (IS_ERR(rskb)) 2433 - return PTR_ERR(rskb); 2434 - 2435 - tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 2436 - bcn = (struct bss_info_bcn *)tlv; 2437 - bcn->enable = en; 2438 - 2439 2338 skb = ieee80211_beacon_get_template(hw, vif, &offs); 2440 2339 if (!skb) 2441 2340 return -EINVAL; ··· 2437 2352 dev_kfree_skb(skb); 2438 2353 return -EINVAL; 2439 2354 } 2355 + 2356 + rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len); 2357 + if (IS_ERR(rskb)) { 2358 + dev_kfree_skb(skb); 2359 + return PTR_ERR(rskb); 2360 + } 2361 + 2362 + tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 2363 + bcn = (struct bss_info_bcn *)tlv; 2364 + bcn->enable = en; 2440 2365 2441 2366 if (mvif->band_idx) { 2442 2367 info = IEEE80211_SKB_CB(skb); ··· 2996 2901 struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; 2997 2902 struct edca *e = &req.edca[ac]; 2998 2903 2904 + e->set = WMM_PARAM_SET; 2999 2905 e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS; 3000 2906 e->aifs = q->aifs; 3001 2907 e->txop = cpu_to_le16(q->txop); ··· 3148 3052 .channel_band = chandef->chan->band, 3149 3053 }; 3150 3054 3151 - if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && 3152 - chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) 3055 + if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 3056 + req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; 3057 + else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && 3058 + chandef->chan->dfs_state != NL80211_DFS_AVAILABLE) 3153 3059 req.switch_reason = CH_SWITCH_DFS; 3154 3060 else 3155 3061 req.switch_reason = CH_SWITCH_NORMAL;
+33
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
··· 402 402 __le32 fast_interval; 403 403 } __packed; 404 404 405 + struct bss_info_hw_amsdu { 406 + __le16 tag; 407 + __le16 len; 408 + __le32 cmp_bitmap_0; 409 + __le32 cmp_bitmap_1; 410 + __le16 trig_thres; 411 + u8 enable; 412 + u8 rsv; 413 + } __packed; 414 + 405 415 struct bss_info_he { 406 416 __le16 tag; 407 417 __le16 len; ··· 655 645 u8 rsv[3]; 656 646 } __packed; 657 647 648 + struct sta_rec_uapsd { 649 + __le16 tag; 650 + __le16 len; 651 + u8 dac_map; 652 + u8 tac_map; 653 + u8 max_sp; 654 + u8 rsv0; 655 + __le16 listen_interval; 656 + u8 rsv1[2]; 657 + } __packed; 658 + 658 659 struct sta_rec_muru { 659 660 __le16 tag; 660 661 __le16 len; ··· 744 723 u8 ba_en; 745 724 __le16 ssn; 746 725 __le16 winsize; 726 + } __packed; 727 + 728 + struct sta_rec_amsdu { 729 + __le16 tag; 730 + __le16 len; 731 + u8 max_amsdu_num; 732 + u8 max_mpdu_size; 733 + u8 amsdu_en; 734 + u8 rsv; 747 735 } __packed; 748 736 749 737 struct sec_key { ··· 981 951 sizeof(struct sta_rec_he) + \ 982 952 sizeof(struct sta_rec_ba) + \ 983 953 sizeof(struct sta_rec_vht) + \ 954 + sizeof(struct sta_rec_uapsd) + \ 955 + sizeof(struct sta_rec_amsdu) + \ 984 956 sizeof(struct tlv) + \ 985 957 MT7915_WTBL_UPDATE_MAX_SIZE) 986 958 ··· 994 962 sizeof(struct bss_info_basic) +\ 995 963 sizeof(struct bss_info_rf_ch) +\ 996 964 sizeof(struct bss_info_ra) + \ 965 + sizeof(struct bss_info_hw_amsdu) +\ 997 966 sizeof(struct bss_info_he) + \ 998 967 sizeof(struct bss_info_bmc_rate) +\ 999 968 sizeof(struct bss_info_ext_bss) +\
+13 -35
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
··· 62 62 MT7915_RXQ_MCU_WA, 63 63 }; 64 64 65 - enum mt7915_ampdu_state { 66 - MT7915_AGGR_STOP, 67 - MT7915_AGGR_PROGRESS, 68 - MT7915_AGGR_START, 69 - MT7915_AGGR_OPERATIONAL 70 - }; 71 - 72 65 struct mt7915_sta_stats { 73 66 struct rate_info prob_rate; 74 67 struct rate_info tx_rate; ··· 76 83 77 84 struct mt7915_vif *vif; 78 85 86 + struct list_head stats_list; 79 87 struct list_head poll_list; 88 + struct list_head rc_list; 80 89 u32 airtime_ac[8]; 81 90 82 91 struct mt7915_sta_stats stats; 83 - struct work_struct stats_work; 84 92 85 - spinlock_t ampdu_lock; 86 - enum mt7915_ampdu_state ampdu_state[IEEE80211_NUM_TIDS]; 93 + unsigned long ampdu_state; 87 94 }; 88 95 89 96 struct mt7915_vif { ··· 93 100 u8 wmm_idx; 94 101 95 102 struct mt7915_sta sta; 96 - struct mt7915_dev *dev; 103 + struct mt7915_phy *phy; 97 104 98 105 struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; 99 106 }; ··· 128 135 u32 ampdu_ref; 129 136 130 137 struct mib_stats mib; 138 + struct list_head stats_list; 131 139 132 140 struct delayed_work mac_work; 133 141 u8 mac_work_count; 142 + u8 sta_work_count; 134 143 }; 135 144 136 145 struct mt7915_dev { ··· 141 146 struct mt76_phy mphy; 142 147 }; 143 148 149 + const struct mt76_bus_ops *bus_ops; 144 150 struct mt7915_phy phy; 145 151 146 152 u16 chainmask; 147 153 148 154 struct work_struct init_work; 155 + struct work_struct rc_work; 149 156 struct work_struct reset_work; 150 157 wait_queue_head_t reset_wait; 151 158 u32 reset_state; 152 159 160 + struct list_head sta_rc_list; 153 161 struct list_head sta_poll_list; 154 162 spinlock_t sta_poll_lock; 155 163 ··· 258 260 259 261 static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac) 260 262 { 261 - static const u8 lmac_queue_map[] = { 262 - [IEEE80211_AC_BK] = MT_LMAC_AC00, 263 - [IEEE80211_AC_BE] = MT_LMAC_AC01, 264 - [IEEE80211_AC_VI] = MT_LMAC_AC02, 265 - [IEEE80211_AC_VO] = MT_LMAC_AC03, 266 - }; 267 - 268 - if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) 
269 - return MT_LMAC_AC01; /* BE */ 270 - 271 - return lmac_queue_map[ac]; 272 - } 273 - 274 - static inline void 275 - mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid, 276 - enum mt7915_ampdu_state state) 277 - { 278 - spin_lock_bh(&msta->ampdu_lock); 279 - msta->ampdu_state[tid] = state; 280 - spin_unlock_bh(&msta->ampdu_lock); 263 + /* LMAC uses the reverse order of mac80211 AC indexes */ 264 + return 3 - ac; 281 265 } 282 266 283 267 extern const struct ieee80211_ops mt7915_ops; ··· 428 448 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); 429 449 void mt7915_mac_reset_counters(struct mt7915_phy *phy); 430 450 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); 431 - void mt7915_mac_sta_poll(struct mt7915_dev *dev); 432 451 void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, 433 452 struct sk_buff *skb, struct mt76_wcid *wcid, 434 453 struct ieee80211_key_conf *key, bool beacon); ··· 440 461 struct ieee80211_sta *sta); 441 462 void mt7915_mac_work(struct work_struct *work); 442 463 void mt7915_mac_reset_work(struct work_struct *work); 443 - void mt7915_mac_sta_stats_work(struct work_struct *work); 464 + void mt7915_mac_sta_rc_work(struct work_struct *work); 444 465 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 445 466 enum mt76_txq_id qid, struct mt76_wcid *wcid, 446 467 struct ieee80211_sta *sta, 447 468 struct mt76_tx_info *tx_info); 448 - void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 449 - struct mt76_queue_entry *e); 469 + void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 450 470 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 451 471 struct sk_buff *skb); 452 472 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+15 -15
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
··· 29 29 static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance) 30 30 { 31 31 struct mt7915_dev *dev = dev_instance; 32 - u32 intr; 32 + u32 intr, mask; 33 33 34 34 intr = mt76_rr(dev, MT_INT_SOURCE_CSR); 35 + intr &= dev->mt76.mmio.irqmask; 35 36 mt76_wr(dev, MT_INT_SOURCE_CSR, intr); 36 37 37 38 if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) ··· 40 39 41 40 trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); 42 41 43 - intr &= dev->mt76.mmio.irqmask; 42 + mask = intr & MT_INT_RX_DONE_ALL; 43 + if (intr & MT_INT_TX_DONE_MCU) 44 + mask |= MT_INT_TX_DONE_MCU; 44 45 45 - if (intr & MT_INT_TX_DONE_ALL) { 46 - mt7915_irq_disable(dev, MT_INT_TX_DONE_ALL); 46 + mt7915_irq_disable(dev, mask); 47 + 48 + if (intr & MT_INT_TX_DONE_MCU) 47 49 napi_schedule(&dev->mt76.tx_napi); 48 - } 49 50 50 - if (intr & MT_INT_RX_DONE_DATA) { 51 - mt7915_irq_disable(dev, MT_INT_RX_DONE_DATA); 51 + if (intr & MT_INT_RX_DONE_DATA) 52 52 napi_schedule(&dev->mt76.napi[0]); 53 - } 54 53 55 - if (intr & MT_INT_RX_DONE_WM) { 56 - mt7915_irq_disable(dev, MT_INT_RX_DONE_WM); 54 + if (intr & MT_INT_RX_DONE_WM) 57 55 napi_schedule(&dev->mt76.napi[1]); 58 - } 59 56 60 - if (intr & MT_INT_RX_DONE_WA) { 61 - mt7915_irq_disable(dev, MT_INT_RX_DONE_WA); 57 + if (intr & MT_INT_RX_DONE_WA) 62 58 napi_schedule(&dev->mt76.napi[2]); 63 - } 64 59 65 60 if (intr & MT_INT_MCU_CMD) { 66 61 u32 val = mt76_rr(dev, MT_MCU_CMD); ··· 100 103 static const struct mt76_driver_ops drv_ops = { 101 104 /* txwi_size = txd size + txp size */ 102 105 .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp), 103 - .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, 106 + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ | 107 + MT_DRV_AMSDU_OFFLOAD, 104 108 .survey_flags = SURVEY_INFO_TIME_TX | 105 109 SURVEY_INFO_TIME_RX | 106 110 SURVEY_INFO_TIME_BSS_RX, ··· 146 148 mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | 147 149 (mt7915_l1_rr(dev, MT_HW_REV) & 0xff); 148 150 dev_dbg(mdev->dev, "ASIC 
revision: %04x\n", mdev->rev); 151 + 152 + mt76_wr(dev, MT_INT_MASK_CSR, 0); 149 153 150 154 /* master switch of PCIe tnterrupt enable */ 151 155 mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+16 -1
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
··· 313 313 #define MT_INT_RX_DONE_WA BIT(1) 314 314 #define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16)) 315 315 #define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16)) 316 - #define MT_INT_TX_DONE_ALL (BIT(15) | GENMASK(27, 26) | BIT(30)) 316 + #define MT_INT_TX_DONE_MCU_WA BIT(15) 317 + #define MT_INT_TX_DONE_FWDL BIT(26) 318 + #define MT_INT_TX_DONE_MCU_WM BIT(27) 319 + #define MT_INT_TX_DONE_BAND0 BIT(30) 320 + #define MT_INT_TX_DONE_BAND1 BIT(31) 317 321 #define MT_INT_MCU_CMD BIT(29) 322 + 323 + #define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \ 324 + MT_INT_TX_DONE_MCU_WM | \ 325 + MT_INT_TX_DONE_FWDL) 318 326 319 327 #define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44) 320 328 #define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0) ··· 359 351 #define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0) 360 352 #define MT_HIF_REMAP_L2_BASE GENMASK(31, 12) 361 353 #define MT_HIF_REMAP_BASE_L2 0x00000 354 + 355 + #define MT_SWDEF_BASE 0x41f200 356 + #define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs)) 357 + #define MT_SWDEF_MODE MT_SWDEF(0x3c) 358 + #define MT_SWDEF_NORMAL_MODE 0 359 + #define MT_SWDEF_ICAP_MODE 1 360 + #define MT_SWDEF_SPECTRUM_MODE 2 362 361 363 362 #define MT_TOP_BASE 0x18060000 364 363 #define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+78 -86
drivers/net/wireless/mediatek/mt76/sdio.c
··· 42 42 int i; 43 43 44 44 for (i = 0; i < MT_TXQ_MCU_WA; i++) { 45 - INIT_LIST_HEAD(&dev->q_tx[i].swq); 46 - 47 45 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); 48 46 if (!q) 49 47 return -ENOMEM; 50 48 51 49 spin_lock_init(&q->lock); 52 50 q->hw_idx = i; 53 - dev->q_tx[i].q = q; 51 + dev->q_tx[i] = q; 54 52 55 53 q->entry = devm_kcalloc(dev->dev, 56 54 MT_NUM_TX_ENTRIES, sizeof(*q->entry), ··· 66 68 { 67 69 struct mt76_sdio *sdio = &dev->sdio; 68 70 71 + cancel_work_sync(&sdio->tx.xmit_work); 72 + cancel_work_sync(&sdio->tx.status_work); 73 + cancel_work_sync(&sdio->rx.recv_work); 74 + cancel_work_sync(&sdio->rx.net_work); 69 75 cancel_work_sync(&sdio->stat_work); 70 76 clear_bit(MT76_READING_STATS, &dev->phy.state); 71 77 ··· 96 94 97 95 spin_lock_bh(&q->lock); 98 96 if (q->queued > 0) { 99 - e = &q->entry[q->head]; 100 - q->head = (q->head + 1) % q->ndesc; 97 + e = &q->entry[q->tail]; 98 + q->tail = (q->tail + 1) % q->ndesc; 101 99 q->queued--; 102 100 } 103 101 spin_unlock_bh(&q->lock); ··· 131 129 return nframes; 132 130 } 133 131 134 - static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) 132 + static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) 135 133 { 136 - struct mt76_sw_queue *sq = &dev->q_tx[qid]; 137 - u32 n_dequeued = 0, n_sw_dequeued = 0; 134 + struct mt76_queue *q = dev->q_tx[qid]; 138 135 struct mt76_queue_entry entry; 139 - struct mt76_queue *q = sq->q; 140 136 bool wake; 141 137 142 - while (q->queued > n_dequeued) { 143 - if (!q->entry[q->head].done) 138 + while (q->queued > 0) { 139 + if (!q->entry[q->tail].done) 144 140 break; 145 141 146 - if (q->entry[q->head].schedule) { 147 - q->entry[q->head].schedule = false; 148 - n_sw_dequeued++; 142 + entry = q->entry[q->tail]; 143 + q->entry[q->tail].done = false; 144 + 145 + if (qid == MT_TXQ_MCU) { 146 + dev_kfree_skb(entry.skb); 147 + entry.skb = NULL; 149 148 } 150 149 151 - entry = q->entry[q->head]; 152 - q->entry[q->head].done = 
false; 153 - q->head = (q->head + 1) % q->ndesc; 154 - n_dequeued++; 155 - 156 - if (qid == MT_TXQ_MCU) 157 - dev_kfree_skb(entry.skb); 158 - else 159 - dev->drv->tx_complete_skb(dev, qid, &entry); 150 + mt76_queue_tx_complete(dev, q, &entry); 160 151 } 161 - 162 - spin_lock_bh(&q->lock); 163 - 164 - sq->swq_queued -= n_sw_dequeued; 165 - q->queued -= n_dequeued; 166 152 167 153 wake = q->stopped && q->queued < q->ndesc - 8; 168 154 if (wake) ··· 159 169 if (!q->queued) 160 170 wake_up(&dev->tx_wait); 161 171 162 - spin_unlock_bh(&q->lock); 163 - 164 172 if (qid == MT_TXQ_MCU) 165 - goto out; 173 + return; 166 174 167 175 mt76_txq_schedule(&dev->phy, qid); 168 176 169 177 if (wake) 170 178 ieee80211_wake_queue(dev->hw, qid); 171 - 172 - wake_up_process(dev->sdio.tx_kthread); 173 - out: 174 - return n_dequeued; 175 179 } 176 180 177 181 static void mt76s_tx_status_data(struct work_struct *work) ··· 198 214 struct sk_buff *skb, struct mt76_wcid *wcid, 199 215 struct ieee80211_sta *sta) 200 216 { 201 - struct mt76_queue *q = dev->q_tx[qid].q; 217 + struct mt76_queue *q = dev->q_tx[qid]; 202 218 struct mt76_tx_info tx_info = { 203 219 .skb = skb, 204 220 }; 205 221 int err, len = skb->len; 206 - u16 idx = q->tail; 222 + u16 idx = q->head; 207 223 208 224 if (q->queued == q->ndesc) 209 225 return -ENOSPC; ··· 213 229 if (err < 0) 214 230 return err; 215 231 216 - q->entry[q->tail].skb = tx_info.skb; 217 - q->entry[q->tail].buf_sz = len; 218 - q->tail = (q->tail + 1) % q->ndesc; 232 + q->entry[q->head].skb = tx_info.skb; 233 + q->entry[q->head].buf_sz = len; 234 + q->head = (q->head + 1) % q->ndesc; 219 235 q->queued++; 220 236 221 237 return idx; ··· 225 241 mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, 226 242 struct sk_buff *skb, u32 tx_info) 227 243 { 228 - struct mt76_queue *q = dev->q_tx[qid].q; 229 - int ret = -ENOSPC, len = skb->len; 244 + struct mt76_queue *q = dev->q_tx[qid]; 245 + int ret = -ENOSPC, len = skb->len, pad; 246 + 247 + if 
(q->queued == q->ndesc) 248 + goto error; 249 + 250 + pad = round_up(skb->len, 4) - skb->len; 251 + ret = mt76_skb_adjust_pad(skb, pad); 252 + if (ret) 253 + goto error; 230 254 231 255 spin_lock_bh(&q->lock); 232 - if (q->queued == q->ndesc) 233 - goto out; 234 256 235 - ret = mt76_skb_adjust_pad(skb); 236 - if (ret) 237 - goto out; 238 - 239 - q->entry[q->tail].buf_sz = len; 240 - q->entry[q->tail].skb = skb; 241 - q->tail = (q->tail + 1) % q->ndesc; 257 + q->entry[q->head].buf_sz = len; 258 + q->entry[q->head].skb = skb; 259 + q->head = (q->head + 1) % q->ndesc; 242 260 q->queued++; 243 261 244 - out: 245 262 spin_unlock_bh(&q->lock); 263 + 264 + return 0; 265 + 266 + error: 267 + dev_kfree_skb(skb); 246 268 247 269 return ret; 248 270 } ··· 257 267 { 258 268 struct mt76_sdio *sdio = &dev->sdio; 259 269 260 - wake_up_process(sdio->tx_kthread); 270 + queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); 261 271 } 262 272 263 273 static const struct mt76_queue_ops sdio_queue_ops = { ··· 266 276 .tx_queue_skb_raw = mt76s_tx_queue_skb_raw, 267 277 }; 268 278 269 - static int mt76s_kthread_run(void *data) 279 + static void mt76s_tx_work(struct work_struct *work) 270 280 { 271 - struct mt76_dev *dev = data; 272 - struct mt76_phy *mphy = &dev->phy; 281 + struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, 282 + tx.status_work); 283 + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); 284 + int i; 273 285 274 - while (!kthread_should_stop()) { 275 - int i, nframes = 0; 286 + for (i = 0; i < MT_TXQ_MCU_WA; i++) 287 + mt76s_process_tx_queue(dev, i); 276 288 277 - cond_resched(); 289 + if (dev->drv->tx_status_data && 290 + !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) 291 + queue_work(dev->wq, &dev->sdio.stat_work); 292 + } 278 293 279 - /* rx processing */ 280 - local_bh_disable(); 281 - rcu_read_lock(); 294 + static void mt76s_rx_work(struct work_struct *work) 295 + { 296 + struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, 297 + 
rx.net_work); 298 + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); 299 + int i; 282 300 283 - mt76_for_each_q_rx(dev, i) 284 - nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]); 301 + /* rx processing */ 302 + local_bh_disable(); 303 + rcu_read_lock(); 285 304 286 - rcu_read_unlock(); 287 - local_bh_enable(); 305 + mt76_for_each_q_rx(dev, i) 306 + mt76s_process_rx_queue(dev, &dev->q_rx[i]); 288 307 289 - /* tx processing */ 290 - for (i = 0; i < MT_TXQ_MCU_WA; i++) 291 - nframes += mt76s_process_tx_queue(dev, i); 292 - 293 - if (dev->drv->tx_status_data && 294 - !test_and_set_bit(MT76_READING_STATS, &mphy->state)) 295 - queue_work(dev->wq, &dev->sdio.stat_work); 296 - 297 - if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { 298 - set_current_state(TASK_INTERRUPTIBLE); 299 - schedule(); 300 - } 301 - } 302 - 303 - return 0; 308 + rcu_read_unlock(); 309 + local_bh_enable(); 304 310 } 305 311 306 312 void mt76s_deinit(struct mt76_dev *dev) ··· 304 318 struct mt76_sdio *sdio = &dev->sdio; 305 319 int i; 306 320 307 - kthread_stop(sdio->kthread); 308 - kthread_stop(sdio->tx_kthread); 309 321 mt76s_stop_txrx(dev); 322 + if (sdio->txrx_wq) { 323 + destroy_workqueue(sdio->txrx_wq); 324 + sdio->txrx_wq = NULL; 325 + } 310 326 311 327 sdio_claim_host(sdio->func); 312 328 sdio_release_irq(sdio->func); ··· 336 348 { 337 349 struct mt76_sdio *sdio = &dev->sdio; 338 350 339 - sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s"); 340 - if (IS_ERR(sdio->kthread)) 341 - return PTR_ERR(sdio->kthread); 351 + sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq", 352 + WQ_UNBOUND | WQ_HIGHPRI, 353 + WQ_UNBOUND_MAX_ACTIVE); 354 + if (!sdio->txrx_wq) 355 + return -ENOMEM; 342 356 343 357 INIT_WORK(&sdio->stat_work, mt76s_tx_status_data); 358 + INIT_WORK(&sdio->tx.status_work, mt76s_tx_work); 359 + INIT_WORK(&sdio->rx.net_work, mt76s_rx_work); 344 360 345 361 mutex_init(&sdio->sched.lock); 346 362 dev->queue_ops = &sdio_queue_ops;
+12 -7
drivers/net/wireless/mediatek/mt76/testmode.c
··· 29 29 return; 30 30 31 31 qid = skb_get_queue_mapping(skb); 32 - q = dev->q_tx[qid].q; 32 + q = dev->q_tx[qid]; 33 33 34 34 spin_lock_bh(&q->lock); 35 35 36 - while (td->tx_pending > 0 && q->queued < q->ndesc / 2) { 36 + while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 && 37 + q->queued < q->ndesc / 2) { 37 38 int ret; 38 39 39 40 ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL); ··· 161 160 td->tx_queued = 0; 162 161 td->tx_done = 0; 163 162 td->tx_pending = td->tx_count; 164 - tasklet_schedule(&dev->tx_tasklet); 163 + mt76_worker_schedule(&dev->tx_worker); 165 164 } 166 165 167 166 static void ··· 169 168 { 170 169 struct mt76_testmode_data *td = &dev->test; 171 170 172 - tasklet_disable(&dev->tx_tasklet); 171 + mt76_worker_disable(&dev->tx_worker); 173 172 174 173 td->tx_pending = 0; 175 174 176 - tasklet_enable(&dev->tx_tasklet); 175 + mt76_worker_enable(&dev->tx_worker); 177 176 178 177 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ); 179 178 ··· 443 442 mutex_lock(&dev->mutex); 444 443 445 444 if (tb[MT76_TM_ATTR_STATS]) { 445 + err = -EINVAL; 446 + 446 447 a = nla_nest_start(msg, MT76_TM_ATTR_STATS); 447 - err = mt76_testmode_dump_stats(dev, msg); 448 - nla_nest_end(msg, a); 448 + if (a) { 449 + err = mt76_testmode_dump_stats(dev, msg); 450 + nla_nest_end(msg, a); 451 + } 449 452 450 453 goto out; 451 454 }
+127 -207
drivers/net/wireless/mediatek/mt76/tx.c
··· 5 5 6 6 #include "mt76.h" 7 7 8 - static struct mt76_txwi_cache * 9 - mt76_alloc_txwi(struct mt76_dev *dev) 10 - { 11 - struct mt76_txwi_cache *t; 12 - dma_addr_t addr; 13 - u8 *txwi; 14 - int size; 15 - 16 - size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t)); 17 - txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC); 18 - if (!txwi) 19 - return NULL; 20 - 21 - addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size, 22 - DMA_TO_DEVICE); 23 - t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size); 24 - t->dma_addr = addr; 25 - 26 - return t; 27 - } 28 - 29 - static struct mt76_txwi_cache * 30 - __mt76_get_txwi(struct mt76_dev *dev) 31 - { 32 - struct mt76_txwi_cache *t = NULL; 33 - 34 - spin_lock_bh(&dev->lock); 35 - if (!list_empty(&dev->txwi_cache)) { 36 - t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache, 37 - list); 38 - list_del(&t->list); 39 - } 40 - spin_unlock_bh(&dev->lock); 41 - 42 - return t; 43 - } 44 - 45 - struct mt76_txwi_cache * 46 - mt76_get_txwi(struct mt76_dev *dev) 47 - { 48 - struct mt76_txwi_cache *t = __mt76_get_txwi(dev); 49 - 50 - if (t) 51 - return t; 52 - 53 - return mt76_alloc_txwi(dev); 54 - } 55 - 56 - void 57 - mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t) 58 - { 59 - if (!t) 60 - return; 61 - 62 - spin_lock_bh(&dev->lock); 63 - list_add(&t->list, &dev->txwi_cache); 64 - spin_unlock_bh(&dev->lock); 65 - } 66 - EXPORT_SYMBOL_GPL(mt76_put_txwi); 67 - 68 - void mt76_tx_free(struct mt76_dev *dev) 69 - { 70 - struct mt76_txwi_cache *t; 71 - 72 - while ((t = __mt76_get_txwi(dev)) != NULL) 73 - dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, 74 - DMA_TO_DEVICE); 75 - } 76 - 77 8 static int 78 9 mt76_txq_get_qid(struct ieee80211_txq *txq) 79 10 { ··· 14 83 return txq->ac; 15 84 } 16 85 17 - static void 18 - mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) 86 + void 87 + mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb) 19 88 { 20 89 struct ieee80211_hdr *hdr 
= (struct ieee80211_hdr *)skb->data; 90 + struct ieee80211_txq *txq; 91 + struct mt76_txq *mtxq; 92 + u8 tid; 21 93 22 - if (!ieee80211_is_data_qos(hdr->frame_control) || 94 + if (!sta || !ieee80211_is_data_qos(hdr->frame_control) || 23 95 !ieee80211_is_data_present(hdr->frame_control)) 96 + return; 97 + 98 + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 99 + txq = sta->txq[tid]; 100 + mtxq = (struct mt76_txq *)txq->drv_priv; 101 + if (!mtxq->aggr) 24 102 return; 25 103 26 104 mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10; 27 105 } 106 + EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn); 28 107 29 108 void 30 109 mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) ··· 172 231 } 173 232 EXPORT_SYMBOL_GPL(mt76_tx_status_check); 174 233 175 - void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb) 234 + static void 235 + mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb) 236 + { 237 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 238 + struct mt76_wcid *wcid; 239 + int pending; 240 + 241 + if (info->tx_time_est) 242 + return; 243 + 244 + if (wcid_idx >= ARRAY_SIZE(dev->wcid)) 245 + return; 246 + 247 + rcu_read_lock(); 248 + 249 + wcid = rcu_dereference(dev->wcid[wcid_idx]); 250 + if (wcid) { 251 + pending = atomic_dec_return(&wcid->non_aql_packets); 252 + if (pending < 0) 253 + atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); 254 + } 255 + 256 + rcu_read_unlock(); 257 + } 258 + 259 + void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb) 176 260 { 177 261 struct ieee80211_hw *hw; 178 262 struct sk_buff_head list; ··· 210 244 } 211 245 #endif 212 246 247 + mt76_tx_check_non_aql(dev, wcid_idx, skb); 248 + 213 249 if (!skb->prev) { 214 250 hw = mt76_tx_status_get_hw(dev, skb); 215 251 ieee80211_free_txskb(hw, skb); ··· 223 255 mt76_tx_status_unlock(dev, &list); 224 256 } 225 257 EXPORT_SYMBOL_GPL(mt76_tx_complete_skb); 258 + 259 + static int 260 + 
__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb, 261 + struct mt76_wcid *wcid, struct ieee80211_sta *sta, 262 + bool *stop) 263 + { 264 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 265 + struct mt76_queue *q; 266 + bool non_aql; 267 + int pending; 268 + int idx; 269 + 270 + non_aql = !info->tx_time_est; 271 + idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta); 272 + if (idx < 0 || !sta || !non_aql) 273 + return idx; 274 + 275 + wcid = (struct mt76_wcid *)sta->drv_priv; 276 + q = dev->q_tx[qid]; 277 + q->entry[idx].wcid = wcid->idx; 278 + pending = atomic_inc_return(&wcid->non_aql_packets); 279 + if (stop && pending >= MT_MAX_NON_AQL_PKT) 280 + *stop = true; 281 + 282 + return idx; 283 + } 226 284 227 285 void 228 286 mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, ··· 282 288 ieee80211_get_tx_rates(info->control.vif, sta, skb, 283 289 info->control.rates, 1); 284 290 285 - if (sta && ieee80211_is_data_qos(hdr->frame_control)) { 286 - struct ieee80211_txq *txq; 287 - struct mt76_txq *mtxq; 288 - u8 tid; 289 - 290 - tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 291 - txq = sta->txq[tid]; 292 - mtxq = (struct mt76_txq *)txq->drv_priv; 293 - 294 - if (mtxq->aggr) 295 - mt76_check_agg_ssn(mtxq, skb); 296 - } 297 - 298 291 if (ext_phy) 299 292 info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; 300 293 301 - q = dev->q_tx[qid].q; 294 + q = dev->q_tx[qid]; 302 295 303 296 spin_lock_bh(&q->lock); 304 - dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta); 297 + __mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL); 305 298 dev->queue_ops->kick(dev, q); 306 299 307 300 if (q->queued > q->ndesc - 8 && !q->stopped) { ··· 301 320 EXPORT_SYMBOL_GPL(mt76_tx); 302 321 303 322 static struct sk_buff * 304 - mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps) 323 + mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq) 305 324 { 306 325 struct ieee80211_txq *txq = mtxq_to_txq(mtxq); 307 326 struct 
ieee80211_tx_info *info; 308 327 bool ext_phy = phy != &phy->dev->phy; 309 328 struct sk_buff *skb; 310 - 311 - skb = skb_dequeue(&mtxq->retry_q); 312 - if (skb) { 313 - u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 314 - 315 - if (ps && skb_queue_empty(&mtxq->retry_q)) 316 - ieee80211_sta_set_buffered(txq->sta, tid, false); 317 - 318 - return skb; 319 - } 320 329 321 330 skb = ieee80211_tx_dequeue(phy->hw, txq); 322 331 if (!skb) ··· 332 361 IEEE80211_TX_CTL_REQ_TX_STATUS; 333 362 334 363 mt76_skb_set_moredata(skb, !last); 335 - dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta); 364 + __mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL); 336 365 } 337 366 338 367 void ··· 344 373 struct mt76_phy *phy = hw->priv; 345 374 struct mt76_dev *dev = phy->dev; 346 375 struct sk_buff *last_skb = NULL; 347 - struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q; 376 + struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD]; 348 377 int i; 349 378 350 379 spin_lock_bh(&hwq->lock); ··· 357 386 continue; 358 387 359 388 do { 360 - skb = mt76_txq_dequeue(phy, mtxq, true); 389 + skb = mt76_txq_dequeue(phy, mtxq); 361 390 if (!skb) 362 391 break; 363 - 364 - if (mtxq->aggr) 365 - mt76_check_agg_ssn(mtxq, skb); 366 392 367 393 nframes--; 368 394 if (last_skb) ··· 381 413 EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); 382 414 383 415 static int 384 - mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq, 416 + mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, 385 417 struct mt76_txq *mtxq) 386 418 { 387 419 struct mt76_dev *dev = phy->dev; 388 420 struct ieee80211_txq *txq = mtxq_to_txq(mtxq); 389 421 enum mt76_txq_id qid = mt76_txq_get_qid(txq); 390 422 struct mt76_wcid *wcid = mtxq->wcid; 391 - struct mt76_queue *hwq = sq->q; 392 423 struct ieee80211_tx_info *info; 393 424 struct sk_buff *skb; 394 - int n_frames = 1, limit; 395 - struct ieee80211_tx_rate tx_rate; 396 - bool ampdu; 397 - bool probe; 425 + int n_frames = 1; 426 + bool stop = 
false; 398 427 int idx; 399 428 400 429 if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) 401 430 return 0; 402 431 403 - skb = mt76_txq_dequeue(phy, mtxq, false); 432 + if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT) 433 + return 0; 434 + 435 + skb = mt76_txq_dequeue(phy, mtxq); 404 436 if (!skb) 405 437 return 0; 406 438 ··· 408 440 if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) 409 441 ieee80211_get_tx_rates(txq->vif, txq->sta, skb, 410 442 info->control.rates, 1); 411 - tx_rate = info->control.rates[0]; 412 443 413 - probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); 414 - ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU; 415 - limit = ampdu ? 16 : 3; 416 - 417 - if (ampdu) 418 - mt76_check_agg_ssn(mtxq, skb); 419 - 420 - idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta); 421 - 444 + idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop); 422 445 if (idx < 0) 423 446 return idx; 424 447 425 448 do { 426 - bool cur_ampdu; 427 - 428 - if (probe) 429 - break; 430 - 431 - if (test_bit(MT76_RESET, &phy->state)) 449 + if (test_bit(MT76_STATE_PM, &phy->state) || 450 + test_bit(MT76_RESET, &phy->state)) 432 451 return -EBUSY; 433 452 434 - skb = mt76_txq_dequeue(phy, mtxq, false); 453 + if (stop) 454 + break; 455 + 456 + if (q->queued + MT_TXQ_FREE_THR >= q->ndesc) 457 + break; 458 + 459 + skb = mt76_txq_dequeue(phy, mtxq); 435 460 if (!skb) 436 461 break; 437 462 438 463 info = IEEE80211_SKB_CB(skb); 439 - cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; 464 + if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) 465 + ieee80211_get_tx_rates(txq->vif, txq->sta, skb, 466 + info->control.rates, 1); 440 467 441 - if (ampdu != cur_ampdu || 442 - (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { 443 - skb_queue_tail(&mtxq->retry_q, skb); 444 - break; 445 - } 446 - 447 - info->control.rates[0] = tx_rate; 448 - 449 - if (cur_ampdu) 450 - mt76_check_agg_ssn(mtxq, skb); 451 - 452 - idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, 
wcid, 453 - txq->sta); 468 + idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop); 454 469 if (idx < 0) 455 - return idx; 470 + break; 456 471 457 472 n_frames++; 458 - } while (n_frames < limit); 473 + } while (1); 459 474 460 - if (!probe) { 461 - hwq->entry[idx].qid = sq - dev->q_tx; 462 - hwq->entry[idx].schedule = true; 463 - sq->swq_queued++; 464 - } 465 - 466 - dev->queue_ops->kick(dev, hwq); 475 + dev->queue_ops->kick(dev, q); 467 476 468 477 return n_frames; 469 478 } ··· 449 504 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) 450 505 { 451 506 struct mt76_dev *dev = phy->dev; 452 - struct mt76_sw_queue *sq = &dev->q_tx[qid]; 453 - struct mt76_queue *hwq = sq->q; 507 + struct mt76_queue *q = dev->q_tx[qid]; 454 508 struct ieee80211_txq *txq; 455 509 struct mt76_txq *mtxq; 456 510 struct mt76_wcid *wcid; 457 511 int ret = 0; 458 512 459 - spin_lock_bh(&hwq->lock); 513 + spin_lock_bh(&q->lock); 460 514 while (1) { 461 - if (sq->swq_queued >= 4) 462 - break; 463 - 464 - if (test_bit(MT76_RESET, &phy->state)) { 515 + if (test_bit(MT76_STATE_PM, &phy->state) || 516 + test_bit(MT76_RESET, &phy->state)) { 465 517 ret = -EBUSY; 466 518 break; 467 519 } 520 + 521 + if (q->queued + MT_TXQ_FREE_THR >= q->ndesc) 522 + break; 468 523 469 524 txq = ieee80211_next_txq(phy->hw, qid); 470 525 if (!txq) ··· 483 538 u8 tid = txq->tid; 484 539 485 540 mtxq->send_bar = false; 486 - spin_unlock_bh(&hwq->lock); 541 + spin_unlock_bh(&q->lock); 487 542 ieee80211_send_bar(vif, sta->addr, tid, agg_ssn); 488 - spin_lock_bh(&hwq->lock); 543 + spin_lock_bh(&q->lock); 489 544 } 490 545 491 - ret += mt76_txq_send_burst(phy, sq, mtxq); 492 - ieee80211_return_txq(phy->hw, txq, 493 - !skb_queue_empty(&mtxq->retry_q)); 546 + ret += mt76_txq_send_burst(phy, q, mtxq); 547 + ieee80211_return_txq(phy->hw, txq, false); 494 548 } 495 - spin_unlock_bh(&hwq->lock); 549 + spin_unlock_bh(&q->lock); 496 550 497 551 return ret; 498 552 } 499 553 500 554 void 
mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid) 501 555 { 502 - struct mt76_dev *dev = phy->dev; 503 - struct mt76_sw_queue *sq = &dev->q_tx[qid]; 504 556 int len; 505 557 506 558 if (qid >= 4) 507 - return; 508 - 509 - if (sq->swq_queued >= 4) 510 559 return; 511 560 512 561 rcu_read_lock(); ··· 524 585 } 525 586 EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); 526 587 527 - void mt76_tx_tasklet(unsigned long data) 588 + void mt76_tx_worker(struct mt76_worker *w) 528 589 { 529 - struct mt76_dev *dev = (struct mt76_dev *)data; 590 + struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); 530 591 531 592 mt76_txq_schedule_all(&dev->phy); 532 593 if (dev->phy2) ··· 551 612 if (!txq) 552 613 continue; 553 614 615 + hwq = dev->q_tx[mt76_txq_get_qid(txq)]; 554 616 mtxq = (struct mt76_txq *)txq->drv_priv; 555 - hwq = mtxq->swq->q; 556 617 557 618 spin_lock_bh(&hwq->lock); 558 619 mtxq->send_bar = mtxq->aggr && send_bar; ··· 569 630 if (!test_bit(MT76_STATE_RUNNING, &phy->state)) 570 631 return; 571 632 572 - tasklet_schedule(&dev->tx_tasklet); 633 + mt76_worker_schedule(&dev->tx_worker); 573 634 } 574 635 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue); 575 - 576 - void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq) 577 - { 578 - struct ieee80211_hw *hw; 579 - struct mt76_txq *mtxq; 580 - struct sk_buff *skb; 581 - 582 - if (!txq) 583 - return; 584 - 585 - mtxq = (struct mt76_txq *)txq->drv_priv; 586 - 587 - while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) { 588 - hw = mt76_tx_status_get_hw(dev, skb); 589 - ieee80211_free_txskb(hw, skb); 590 - } 591 - } 592 - EXPORT_SYMBOL_GPL(mt76_txq_remove); 593 - 594 - void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) 595 - { 596 - struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv; 597 - 598 - skb_queue_head_init(&mtxq->retry_q); 599 - 600 - mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)]; 601 - } 602 - EXPORT_SYMBOL_GPL(mt76_txq_init); 603 636 604 637 u8 mt76_ac_to_hwq(u8 ac) 605 
638 { ··· 589 678 } 590 679 EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); 591 680 592 - int mt76_skb_adjust_pad(struct sk_buff *skb) 681 + int mt76_skb_adjust_pad(struct sk_buff *skb, int pad) 593 682 { 594 683 struct sk_buff *iter, *last = skb; 595 - u32 pad; 596 - 597 - /* Add zero pad of 4 - 7 bytes */ 598 - pad = round_up(skb->len, 4) + 4 - skb->len; 599 684 600 685 /* First packet of a A-MSDU burst keeps track of the whole burst 601 686 * length, need to update length of it and the last packet. ··· 613 706 return 0; 614 707 } 615 708 EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad); 709 + 710 + void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q, 711 + struct mt76_queue_entry *e) 712 + { 713 + if (e->skb) 714 + dev->drv->tx_complete_skb(dev, e); 715 + 716 + spin_lock_bh(&q->lock); 717 + q->tail = (q->tail + 1) % q->ndesc; 718 + q->queued--; 719 + spin_unlock_bh(&q->lock); 720 + } 721 + EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
+31 -55
drivers/net/wireless/mediatek/mt76/usb.c
··· 497 497 498 498 spin_lock_irqsave(&q->lock, flags); 499 499 if (q->queued > 0) { 500 - urb = q->entry[q->head].urb; 501 - q->head = (q->head + 1) % q->ndesc; 500 + urb = q->entry[q->tail].urb; 501 + q->tail = (q->tail + 1) % q->ndesc; 502 502 q->queued--; 503 503 } 504 504 spin_unlock_irqrestore(&q->lock, flags); ··· 616 616 default: 617 617 dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", 618 618 urb->status); 619 - /* fall through */ 619 + fallthrough; 620 620 case 0: 621 621 break; 622 622 } 623 623 624 624 spin_lock_irqsave(&q->lock, flags); 625 - if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch")) 625 + if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch")) 626 626 goto out; 627 627 628 - q->tail = (q->tail + 1) % q->ndesc; 628 + q->head = (q->head + 1) % q->ndesc; 629 629 q->queued++; 630 630 tasklet_schedule(&dev->usb.rx_tasklet); 631 631 out: ··· 792 792 } 793 793 EXPORT_SYMBOL_GPL(mt76u_resume_rx); 794 794 795 - static void mt76u_tx_tasklet(unsigned long data) 795 + static void mt76u_tx_worker(struct mt76_worker *w) 796 796 { 797 - struct mt76_dev *dev = (struct mt76_dev *)data; 797 + struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); 798 798 struct mt76_queue_entry entry; 799 - struct mt76_sw_queue *sq; 800 799 struct mt76_queue *q; 801 800 bool wake; 802 801 int i; 803 802 804 803 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 805 - u32 n_dequeued = 0, n_sw_dequeued = 0; 804 + q = dev->q_tx[i]; 806 805 807 - sq = &dev->q_tx[i]; 808 - q = sq->q; 809 - 810 - while (q->queued > n_dequeued) { 811 - if (!q->entry[q->head].done) 806 + while (q->queued > 0) { 807 + if (!q->entry[q->tail].done) 812 808 break; 813 809 814 - if (q->entry[q->head].schedule) { 815 - q->entry[q->head].schedule = false; 816 - n_sw_dequeued++; 817 - } 810 + entry = q->entry[q->tail]; 811 + q->entry[q->tail].done = false; 818 812 819 - entry = q->entry[q->head]; 820 - q->entry[q->head].done = false; 821 - q->head = (q->head + 1) % q->ndesc; 822 - 
n_dequeued++; 823 - 824 - dev->drv->tx_complete_skb(dev, i, &entry); 813 + mt76_queue_tx_complete(dev, q, &entry); 825 814 } 826 - 827 - spin_lock_bh(&q->lock); 828 - 829 - sq->swq_queued -= n_sw_dequeued; 830 - q->queued -= n_dequeued; 831 815 832 816 wake = q->stopped && q->queued < q->ndesc - 8; 833 817 if (wake) ··· 819 835 820 836 if (!q->queued) 821 837 wake_up(&dev->tx_wait); 822 - 823 - spin_unlock_bh(&q->lock); 824 838 825 839 mt76_txq_schedule(&dev->phy, i); 826 840 ··· 864 882 dev_err(dev->dev, "tx urb failed: %d\n", urb->status); 865 883 e->done = true; 866 884 867 - tasklet_schedule(&dev->tx_tasklet); 885 + mt76_worker_schedule(&dev->tx_worker); 868 886 } 869 887 870 888 static int ··· 891 909 struct sk_buff *skb, struct mt76_wcid *wcid, 892 910 struct ieee80211_sta *sta) 893 911 { 894 - struct mt76_queue *q = dev->q_tx[qid].q; 912 + struct mt76_queue *q = dev->q_tx[qid]; 895 913 struct mt76_tx_info tx_info = { 896 914 .skb = skb, 897 915 }; 898 - u16 idx = q->tail; 916 + u16 idx = q->head; 899 917 int err; 900 918 901 919 if (q->queued == q->ndesc) ··· 914 932 q->entry[idx].urb, mt76u_complete_tx, 915 933 &q->entry[idx]); 916 934 917 - q->tail = (q->tail + 1) % q->ndesc; 935 + q->head = (q->head + 1) % q->ndesc; 918 936 q->entry[idx].skb = tx_info.skb; 919 937 q->queued++; 920 938 ··· 926 944 struct urb *urb; 927 945 int err; 928 946 929 - while (q->first != q->tail) { 947 + while (q->first != q->head) { 930 948 urb = q->entry[q->first].urb; 931 949 932 950 trace_submit_urb(dev, urb); ··· 969 987 int i, j, err; 970 988 971 989 for (i = 0; i <= MT_TXQ_PSD; i++) { 972 - INIT_LIST_HEAD(&dev->q_tx[i].swq); 973 - 974 990 if (i >= IEEE80211_NUM_ACS) { 975 - dev->q_tx[i].q = dev->q_tx[0].q; 991 + dev->q_tx[i] = dev->q_tx[0]; 976 992 continue; 977 993 } 978 994 ··· 980 1000 981 1001 spin_lock_init(&q->lock); 982 1002 q->hw_idx = mt76u_ac_to_hwq(dev, i); 983 - dev->q_tx[i].q = q; 1003 + dev->q_tx[i] = q; 984 1004 985 1005 q->entry = devm_kcalloc(dev->dev, 986 
1006 MT_NUM_TX_ENTRIES, sizeof(*q->entry), ··· 1007 1027 struct mt76_queue *q; 1008 1028 int j; 1009 1029 1010 - q = dev->q_tx[i].q; 1030 + q = dev->q_tx[i]; 1011 1031 if (!q) 1012 1032 continue; 1013 1033 ··· 1020 1040 { 1021 1041 int ret; 1022 1042 1043 + mt76_worker_disable(&dev->tx_worker); 1044 + 1023 1045 ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), 1024 1046 HZ / 5); 1025 1047 if (!ret) { ··· 1032 1050 dev_err(dev->dev, "timed out waiting for pending tx\n"); 1033 1051 1034 1052 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1035 - q = dev->q_tx[i].q; 1053 + q = dev->q_tx[i]; 1036 1054 if (!q) 1037 1055 continue; 1038 1056 ··· 1040 1058 usb_kill_urb(q->entry[j].urb); 1041 1059 } 1042 1060 1043 - tasklet_kill(&dev->tx_tasklet); 1044 - 1045 1061 /* On device removal we maight queue skb's, but mt76u_tx_kick() 1046 1062 * will fail to submit urb, cleanup those skb's manually. 1047 1063 */ 1048 1064 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1049 - q = dev->q_tx[i].q; 1065 + q = dev->q_tx[i]; 1050 1066 if (!q) 1051 1067 continue; 1052 1068 1053 - /* Assure we are in sync with killed tasklet. */ 1054 - spin_lock_bh(&q->lock); 1055 - while (q->queued) { 1056 - entry = q->entry[q->head]; 1057 - q->head = (q->head + 1) % q->ndesc; 1058 - q->queued--; 1069 + entry = q->entry[q->tail]; 1070 + q->entry[q->tail].done = false; 1059 1071 1060 - dev->drv->tx_complete_skb(dev, i, &entry); 1061 - } 1062 - spin_unlock_bh(&q->lock); 1072 + mt76_queue_tx_complete(dev, q, &entry); 1063 1073 } 1064 1074 } 1065 1075 1066 1076 cancel_work_sync(&dev->usb.stat_work); 1067 1077 clear_bit(MT76_READING_STATS, &dev->phy.state); 1078 + 1079 + mt76_worker_enable(&dev->tx_worker); 1068 1080 1069 1081 mt76_tx_status_check(dev, NULL, true); 1070 1082 } ··· 1109 1133 mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw; 1110 1134 mt76u_ops.write_copy = ext ? 
mt76u_copy_ext : mt76u_copy; 1111 1135 1136 + dev->tx_worker.fn = mt76u_tx_worker; 1112 1137 tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); 1113 - tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev); 1114 1138 INIT_WORK(&usb->stat_work, mt76u_tx_status_data); 1115 1139 1116 1140 usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+28
drivers/net/wireless/mediatek/mt76/util.c
··· 110 110 } 111 111 EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi); 112 112 113 + int __mt76_worker_fn(void *ptr) 114 + { 115 + struct mt76_worker *w = ptr; 116 + 117 + while (!kthread_should_stop()) { 118 + set_current_state(TASK_INTERRUPTIBLE); 119 + 120 + if (kthread_should_park()) { 121 + kthread_parkme(); 122 + continue; 123 + } 124 + 125 + if (!test_and_clear_bit(MT76_WORKER_SCHEDULED, &w->state)) { 126 + schedule(); 127 + continue; 128 + } 129 + 130 + set_bit(MT76_WORKER_RUNNING, &w->state); 131 + set_current_state(TASK_RUNNING); 132 + w->fn(w); 133 + cond_resched(); 134 + clear_bit(MT76_WORKER_RUNNING, &w->state); 135 + } 136 + 137 + return 0; 138 + } 139 + EXPORT_SYMBOL_GPL(__mt76_worker_fn); 140 + 113 141 MODULE_LICENSE("Dual BSD/GPL");
+76
drivers/net/wireless/mediatek/mt76/util.h
··· 10 10 #include <linux/skbuff.h> 11 11 #include <linux/bitops.h> 12 12 #include <linux/bitfield.h> 13 + #include <net/mac80211.h> 14 + 15 + struct mt76_worker 16 + { 17 + struct task_struct *task; 18 + void (*fn)(struct mt76_worker *); 19 + unsigned long state; 20 + }; 21 + 22 + enum { 23 + MT76_WORKER_SCHEDULED, 24 + MT76_WORKER_RUNNING, 25 + }; 13 26 14 27 #define MT76_INCR(_var, _size) \ 15 28 (_var = (((_var) + 1) % (_size))) ··· 56 43 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 57 44 else 58 45 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA); 46 + } 47 + 48 + int __mt76_worker_fn(void *ptr); 49 + 50 + static inline int 51 + mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w, 52 + void (*fn)(struct mt76_worker *), 53 + const char *name) 54 + { 55 + const char *dev_name = wiphy_name(hw->wiphy); 56 + int ret; 57 + 58 + if (fn) 59 + w->fn = fn; 60 + w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s", 61 + name, dev_name); 62 + 63 + ret = PTR_ERR_OR_ZERO(w->task); 64 + if (ret) { 65 + w->task = NULL; 66 + return ret; 67 + } 68 + 69 + wake_up_process(w->task); 70 + 71 + return 0; 72 + } 73 + 74 + static inline void mt76_worker_schedule(struct mt76_worker *w) 75 + { 76 + if (!w->task) 77 + return; 78 + 79 + if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) && 80 + !test_bit(MT76_WORKER_RUNNING, &w->state)) 81 + wake_up_process(w->task); 82 + } 83 + 84 + static inline void mt76_worker_disable(struct mt76_worker *w) 85 + { 86 + if (!w->task) 87 + return; 88 + 89 + kthread_park(w->task); 90 + WRITE_ONCE(w->state, 0); 91 + } 92 + 93 + static inline void mt76_worker_enable(struct mt76_worker *w) 94 + { 95 + if (!w->task) 96 + return; 97 + 98 + kthread_unpark(w->task); 99 + mt76_worker_schedule(w); 100 + } 101 + 102 + static inline void mt76_worker_teardown(struct mt76_worker *w) 103 + { 104 + if (!w->task) 105 + return; 106 + 107 + kthread_stop(w->task); 108 + w->task = NULL; 59 109 } 60 110 61 111 #endif
+6 -28
drivers/net/wireless/mediatek/mt7601u/debugfs.c
··· 30 30 DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); 31 31 32 32 static int 33 - mt7601u_ampdu_stat_read(struct seq_file *file, void *data) 33 + mt7601u_ampdu_stat_show(struct seq_file *file, void *data) 34 34 { 35 35 struct mt7601u_dev *dev = file->private; 36 36 int i, j; ··· 73 73 return 0; 74 74 } 75 75 76 - static int 77 - mt7601u_ampdu_stat_open(struct inode *inode, struct file *f) 78 - { 79 - return single_open(f, mt7601u_ampdu_stat_read, inode->i_private); 80 - } 81 - 82 - static const struct file_operations fops_ampdu_stat = { 83 - .open = mt7601u_ampdu_stat_open, 84 - .read = seq_read, 85 - .llseek = seq_lseek, 86 - .release = single_release, 87 - }; 76 + DEFINE_SHOW_ATTRIBUTE(mt7601u_ampdu_stat); 88 77 89 78 static int 90 - mt7601u_eeprom_param_read(struct seq_file *file, void *data) 79 + mt7601u_eeprom_param_show(struct seq_file *file, void *data) 91 80 { 92 81 struct mt7601u_dev *dev = file->private; 93 82 struct mt7601u_rate_power *rp = &dev->ee->power_rate_table; ··· 120 131 return 0; 121 132 } 122 133 123 - static int 124 - mt7601u_eeprom_param_open(struct inode *inode, struct file *f) 125 - { 126 - return single_open(f, mt7601u_eeprom_param_read, inode->i_private); 127 - } 128 - 129 - static const struct file_operations fops_eeprom_param = { 130 - .open = mt7601u_eeprom_param_open, 131 - .read = seq_read, 132 - .llseek = seq_lseek, 133 - .release = single_release, 134 - }; 134 + DEFINE_SHOW_ATTRIBUTE(mt7601u_eeprom_param); 135 135 136 136 void mt7601u_init_debugfs(struct mt7601u_dev *dev) 137 137 { ··· 135 157 136 158 debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); 137 159 debugfs_create_file("regval", 0600, dir, dev, &fops_regval); 138 - debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 139 - debugfs_create_file("eeprom_param", 0400, dir, dev, &fops_eeprom_param); 160 + debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7601u_ampdu_stat_fops); 161 + 
debugfs_create_file("eeprom_param", 0400, dir, dev, &mt7601u_eeprom_param_fops); 140 162 }
+1 -2
drivers/net/wireless/microchip/wilc1000/mon.c
··· 235 235 236 236 if (register_netdevice(wl->monitor_dev)) { 237 237 netdev_err(real_dev, "register_netdevice failed\n"); 238 + free_netdev(wl->monitor_dev); 238 239 return NULL; 239 240 } 240 241 priv = netdev_priv(wl->monitor_dev); 241 - if (!priv) 242 - return NULL; 243 242 244 243 priv->real_ndev = real_dev; 245 244
-1
drivers/net/wireless/quantenna/qtnfmac/core.c
··· 15 15 #include "util.h" 16 16 #include "switchdev.h" 17 17 18 - #define QTNF_DMP_MAX_LEN 48 19 18 #define QTNF_PRIMARY_VIF_IDX 0 20 19 21 20 static bool slave_radar = true;
+6 -6
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
··· 774 774 775 775 for (i = 0; i < (txpktbuf_bndy - 1); i++) { 776 776 status = _rtl88ee_llt_write(hw, i, i + 1); 777 - if (true != status) 777 + if (!status) 778 778 return status; 779 779 } 780 780 781 781 status = _rtl88ee_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); 782 - if (true != status) 782 + if (!status) 783 783 return status; 784 784 785 785 for (i = txpktbuf_bndy; i < maxpage; i++) { 786 786 status = _rtl88ee_llt_write(hw, i, (i + 1)); 787 - if (true != status) 787 + if (!status) 788 788 return status; 789 789 } 790 790 791 791 status = _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy); 792 - if (true != status) 792 + if (!status) 793 793 return status; 794 794 795 795 return true; ··· 868 868 rtl_write_byte(rtlpriv, MSR, 0x00); 869 869 870 870 if (!rtlhal->mac_func_enable) { 871 - if (_rtl88ee_llt_table_init(hw) == false) { 871 + if (!_rtl88ee_llt_table_init(hw)) { 872 872 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 873 873 "LLT table init fail\n"); 874 874 return false; ··· 1067 1067 } 1068 1068 1069 1069 rtstatus = _rtl88ee_init_mac(hw); 1070 - if (rtstatus != true) { 1070 + if (!rtstatus) { 1071 1071 pr_info("Init MAC failed\n"); 1072 1072 err = 1; 1073 1073 goto exit;
+7 -13
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
··· 16 16 static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, 17 17 enum radio_path rfpath, u32 offset, 18 18 u32 data); 19 - static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask); 19 + static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask) 20 + { 21 + u32 i = ffs(bitmask); 22 + 23 + return i ? i - 1 : 32; 24 + } 20 25 static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw); 21 26 static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); 22 27 static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw, ··· 211 206 rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, 212 207 "RFW-%d Addr[0x%x]=0x%x\n", 213 208 rfpath, pphyreg->rf3wire_offset, data_and_addr); 214 - } 215 - 216 - static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask) 217 - { 218 - u32 i; 219 - 220 - for (i = 0; i <= 31; i++) { 221 - if (((bitmask >> i) & 0x1) == 1) 222 - break; 223 - } 224 - return i; 225 209 } 226 210 227 211 bool rtl88e_phy_mac_config(struct ieee80211_hw *hw) ··· 1575 1581 u32 i; 1576 1582 1577 1583 pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; 1578 - if (false == is2t) { 1584 + if (!is2t) { 1579 1585 pathon = 0x0bdb25a0; 1580 1586 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); 1581 1587 } else {
+2 -2
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
··· 732 732 { 733 733 __le32 *pdesc = (__le32 *)pdesc8; 734 734 735 - if (istx == true) { 735 + if (istx) { 736 736 switch (desc_name) { 737 737 case HW_DESC_OWN: 738 738 set_tx_desc_own(pdesc, 1); ··· 773 773 u32 ret = 0; 774 774 __le32 *pdesc = (__le32 *)pdesc8; 775 775 776 - if (istx == true) { 776 + if (istx) { 777 777 switch (desc_name) { 778 778 case HW_DESC_OWN: 779 779 ret = get_tx_desc_own(pdesc);
+3 -7
drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
··· 145 145 146 146 u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) 147 147 { 148 - u32 i; 148 + u32 i = ffs(bitmask); 149 149 150 - for (i = 0; i <= 31; i++) { 151 - if (((bitmask >> i) & 0x1) == 1) 152 - break; 153 - } 154 - return i; 150 + return i ? i - 1 : 32; 155 151 } 156 152 EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift); 157 153 ··· 1099 1103 u32 i; 1100 1104 1101 1105 pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; 1102 - if (false == is2t) { 1106 + if (!is2t) { 1103 1107 pathon = 0x0bdb25a0; 1104 1108 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); 1105 1109 } else {
+4 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
··· 613 613 614 614 for (i = 0; i < (txpktbuf_bndy - 1); i++) { 615 615 status = _rtl92ce_llt_write(hw, i, i + 1); 616 - if (true != status) 616 + if (!status) 617 617 return status; 618 618 } 619 619 620 620 status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); 621 - if (true != status) 621 + if (!status) 622 622 return status; 623 623 624 624 for (i = txpktbuf_bndy; i < maxpage; i++) { 625 625 status = _rtl92ce_llt_write(hw, i, (i + 1)); 626 - if (true != status) 626 + if (!status) 627 627 return status; 628 628 } 629 629 630 630 status = _rtl92ce_llt_write(hw, maxpage, txpktbuf_bndy); 631 - if (true != status) 631 + if (!status) 632 632 return status; 633 633 634 634 return true;
+1 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
··· 828 828 ? WMM_CHIP_B_TX_PAGE_BOUNDARY 829 829 : WMM_CHIP_A_TX_PAGE_BOUNDARY; 830 830 } 831 - if (false == rtl92c_init_llt_table(hw, boundary)) { 831 + if (!rtl92c_init_llt_table(hw, boundary)) { 832 832 pr_err("Failed to init LLT Table!\n"); 833 833 return -EINVAL; 834 834 }
+4 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
··· 158 158 159 159 for (i = 0; i < (boundary - 1); i++) { 160 160 rst = rtl92c_llt_write(hw, i , i + 1); 161 - if (true != rst) { 161 + if (!rst) { 162 162 pr_err("===> %s #1 fail\n", __func__); 163 163 return rst; 164 164 } 165 165 } 166 166 /* end of list */ 167 167 rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF); 168 - if (true != rst) { 168 + if (!rst) { 169 169 pr_err("===> %s #2 fail\n", __func__); 170 170 return rst; 171 171 } ··· 176 176 */ 177 177 for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) { 178 178 rst = rtl92c_llt_write(hw, i, (i + 1)); 179 - if (true != rst) { 179 + if (!rst) { 180 180 pr_err("===> %s #3 fail\n", __func__); 181 181 return rst; 182 182 } 183 183 } 184 184 /* Let last entry point to the start entry of ring buffer */ 185 185 rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary); 186 - if (true != rst) { 186 + if (!rst) { 187 187 pr_err("===> %s #4 fail\n", __func__); 188 188 return rst; 189 189 }
+4 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
··· 563 563 /* 18. LLT_table_init(Adapter); */ 564 564 for (i = 0; i < (txpktbuf_bndy - 1); i++) { 565 565 status = _rtl92de_llt_write(hw, i, i + 1); 566 - if (true != status) 566 + if (!status) 567 567 return status; 568 568 } 569 569 570 570 /* end of list */ 571 571 status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); 572 - if (true != status) 572 + if (!status) 573 573 return status; 574 574 575 575 /* Make the other pages as ring buffer */ ··· 578 578 /* Otherwise used as local loopback buffer. */ 579 579 for (i = txpktbuf_bndy; i < maxpage; i++) { 580 580 status = _rtl92de_llt_write(hw, i, (i + 1)); 581 - if (true != status) 581 + if (!status) 582 582 return status; 583 583 } 584 584 585 585 /* Let last entry point to the start entry of ring buffer */ 586 586 status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy); 587 - if (true != status) 587 + if (!status) 588 588 return status; 589 589 590 590 return true;
+2 -7
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
··· 162 162 163 163 static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask) 164 164 { 165 - u32 i; 165 + u32 i = ffs(bitmask); 166 166 167 - for (i = 0; i <= 31; i++) { 168 - if (((bitmask >> i) & 0x1) == 1) 169 - break; 170 - } 171 - 172 - return i; 167 + return i ? i - 1 : 32; 173 168 } 174 169 175 170 u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+3 -3
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
··· 718 718 (rtldm->cfo_ave_pre - cfo_ave) : 719 719 (cfo_ave - rtldm->cfo_ave_pre); 720 720 721 - if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { 722 - rtldm->large_cfo_hit = 1; 721 + if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) { 722 + rtldm->large_cfo_hit = true; 723 723 return; 724 724 } 725 - rtldm->large_cfo_hit = 0; 725 + rtldm->large_cfo_hit = false; 726 726 727 727 rtldm->cfo_ave_pre = cfo_ave; 728 728
+1 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
··· 794 794 rtl_write_word(rtlpriv, REG_CR, 0x2ff); 795 795 796 796 if (!rtlhal->mac_func_enable) { 797 - if (_rtl92ee_llt_table_init(hw) == false) { 797 + if (!_rtl92ee_llt_table_init(hw)) { 798 798 rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 799 799 "LLT table init fail\n"); 800 800 return false;
+2 -6
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
··· 203 203 204 204 static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask) 205 205 { 206 - u32 i; 206 + u32 i = ffs(bitmask); 207 207 208 - for (i = 0; i <= 31; i++) { 209 - if (((bitmask >> i) & 0x1) == 1) 210 - break; 211 - } 212 - return i; 208 + return i ? i - 1 : 32; 213 209 } 214 210 215 211 bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
+2 -7
drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
··· 16 16 17 17 static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask) 18 18 { 19 - u32 i; 19 + u32 i = ffs(bitmask); 20 20 21 - for (i = 0; i <= 31; i++) { 22 - if (((bitmask >> i) & 0x1) == 1) 23 - break; 24 - } 25 - 26 - return i; 21 + return i ? i - 1 : 32; 27 22 } 28 23 29 24 u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+4 -4
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
··· 188 188 rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); 189 189 rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, 190 190 BASEBAND_CONFIG_PHY_REG); 191 - if (rtstatus != true) { 191 + if (!rtstatus) { 192 192 pr_err("Write BB Reg Fail!!\n"); 193 193 return false; 194 194 } ··· 202 202 rtstatus = _rtl8723e_phy_config_bb_with_pgheaderfile(hw, 203 203 BASEBAND_CONFIG_PHY_REG); 204 204 } 205 - if (rtstatus != true) { 205 + if (!rtstatus) { 206 206 pr_err("BB_PG Reg Fail!!\n"); 207 207 return false; 208 208 } 209 209 rtstatus = 210 210 _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB); 211 - if (rtstatus != true) { 211 + if (!rtstatus) { 212 212 pr_err("AGC Table Fail\n"); 213 213 return false; 214 214 } ··· 622 622 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 623 623 u8 cckpowerlevel[2], ofdmpowerlevel[2]; 624 624 625 - if (rtlefuse->txpwr_fromeprom == false) 625 + if (!rtlefuse->txpwr_fromeprom) 626 626 return; 627 627 _rtl8723e_get_txpower_index(hw, channel, 628 628 &cckpowerlevel[0], &ofdmpowerlevel[0]);
+2 -2
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
··· 49 49 if (rtlefuse->eeprom_regulatory != 0) 50 50 turbo_scanoff = true; 51 51 52 - if (mac->act_scanning == true) { 52 + if (mac->act_scanning) { 53 53 tx_agc[RF90_PATH_A] = 0x3f3f3f3f; 54 54 tx_agc[RF90_PATH_B] = 0x3f3f3f3f; 55 55 ··· 479 479 break; 480 480 } 481 481 482 - if (rtstatus != true) { 482 + if (!rtstatus) { 483 483 rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, 484 484 "Radio[%d] Fail!!\n", rfpath); 485 485 return false;
+2 -2
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
··· 589 589 { 590 590 __le32 *pdesc = (__le32 *)pdesc8; 591 591 592 - if (istx == true) { 592 + if (istx) { 593 593 switch (desc_name) { 594 594 case HW_DESC_OWN: 595 595 set_tx_desc_own(pdesc, 1); ··· 630 630 u32 ret = 0; 631 631 __le32 *pdesc = (__le32 *)pdesc8; 632 632 633 - if (istx == true) { 633 + if (istx) { 634 634 switch (desc_name) { 635 635 case HW_DESC_OWN: 636 636 ret = get_tx_desc_own(pdesc);
+3 -3
drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
··· 1152 1152 (rtldm->cfo_ave_pre - cfo_ave) : 1153 1153 (cfo_ave - rtldm->cfo_ave_pre); 1154 1154 1155 - if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { 1156 - rtldm->large_cfo_hit = 1; 1155 + if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) { 1156 + rtldm->large_cfo_hit = true; 1157 1157 return; 1158 1158 } else 1159 - rtldm->large_cfo_hit = 0; 1159 + rtldm->large_cfo_hit = false; 1160 1160 1161 1161 rtldm->cfo_ave_pre = cfo_ave; 1162 1162
+1 -1
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
··· 858 858 rtl_write_word(rtlpriv, REG_CR, 0x2ff); 859 859 860 860 if (!rtlhal->mac_func_enable) { 861 - if (_rtl8723be_llt_table_init(hw) == false) 861 + if (!_rtl8723be_llt_table_init(hw)) 862 862 return false; 863 863 } 864 864
+2 -6
drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
··· 53 53 54 54 u32 rtl8723_phy_calculate_bit_shift(u32 bitmask) 55 55 { 56 - u32 i; 56 + u32 i = ffs(bitmask); 57 57 58 - for (i = 0; i <= 31; i++) { 59 - if (((bitmask >> i) & 0x1) == 1) 60 - break; 61 - } 62 - return i; 58 + return i ? i - 1 : 32; 63 59 } 64 60 EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift); 65 61
+3 -3
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
··· 2677 2677 (rtldm->cfo_ave_pre - cfo_ave) : 2678 2678 (cfo_ave - rtldm->cfo_ave_pre); 2679 2679 2680 - if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { 2680 + if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) { 2681 2681 rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, 2682 2682 "first large CFO hit\n"); 2683 - rtldm->large_cfo_hit = 1; 2683 + rtldm->large_cfo_hit = true; 2684 2684 return; 2685 2685 } else 2686 - rtldm->large_cfo_hit = 0; 2686 + rtldm->large_cfo_hit = false; 2687 2687 2688 2688 rtldm->cfo_ave_pre = cfo_ave; 2689 2689
+1 -1
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
··· 1894 1894 } 1895 1895 1896 1896 rtstatus = _rtl8821ae_init_mac(hw); 1897 - if (rtstatus != true) { 1897 + if (!rtstatus) { 1898 1898 pr_err("Init MAC failed\n"); 1899 1899 err = 1; 1900 1900 return err;
+9 -15
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
··· 27 27 static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, 28 28 enum radio_path rfpath, u32 offset, 29 29 u32 data); 30 - static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask); 30 + static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask) 31 + { 32 + u32 i = ffs(bitmask); 33 + 34 + return i ? i - 1 : 32; 35 + } 31 36 static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw); 32 37 /*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/ 33 38 static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); ··· 275 270 rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, 276 271 "RFW-%d Addr[0x%x]=0x%x\n", 277 272 rfpath, pphyreg->rf3wire_offset, data_and_addr); 278 - } 279 - 280 - static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask) 281 - { 282 - u32 i; 283 - 284 - for (i = 0; i <= 31; i++) { 285 - if (((bitmask >> i) & 0x1) == 1) 286 - break; 287 - } 288 - return i; 289 273 } 290 274 291 275 bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw) ··· 1807 1813 1808 1814 rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, 1809 1815 BASEBAND_CONFIG_PHY_REG); 1810 - if (rtstatus != true) { 1816 + if (!rtstatus) { 1811 1817 pr_err("Write BB Reg Fail!!\n"); 1812 1818 return false; 1813 1819 } ··· 1816 1822 rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw, 1817 1823 BASEBAND_CONFIG_PHY_REG); 1818 1824 } 1819 - if (rtstatus != true) { 1825 + if (!rtstatus) { 1820 1826 pr_err("BB_PG Reg Fail!!\n"); 1821 1827 return false; 1822 1828 } ··· 1830 1836 rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, 1831 1837 BASEBAND_CONFIG_AGC_TAB); 1832 1838 1833 - if (rtstatus != true) { 1839 + if (!rtstatus) { 1834 1840 pr_err("AGC Table Fail\n"); 1835 1841 return false; 1836 1842 }
+5
drivers/net/wireless/realtek/rtw88/main.c
··· 1472 1472 ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW); 1473 1473 if (ret) { 1474 1474 rtw_warn(rtwdev, "no wow firmware loaded\n"); 1475 + wait_for_completion(&rtwdev->fw.completion); 1476 + if (rtwdev->fw.firmware) 1477 + release_firmware(rtwdev->fw.firmware); 1475 1478 return ret; 1476 1479 } 1477 1480 } ··· 1488 1485 struct rtw_fw_state *wow_fw = &rtwdev->wow_fw; 1489 1486 struct rtw_rsvd_page *rsvd_pkt, *tmp; 1490 1487 unsigned long flags; 1488 + 1489 + rtw_wait_firmware_completion(rtwdev); 1491 1490 1492 1491 if (fw->firmware) 1493 1492 release_firmware(fw->firmware);
+3 -12
drivers/net/wireless/realtek/rtw88/rtw8822c.c
··· 154 154 } 155 155 } 156 156 157 - static void swap_u32(u32 *v1, u32 *v2) 158 - { 159 - u32 tmp; 160 - 161 - tmp = *v1; 162 - *v1 = *v2; 163 - *v2 = tmp; 164 - } 165 - 166 157 static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2) 167 158 { 168 159 if (*v1 >= 0x200 && *v2 >= 0x200) { 169 160 if (*v1 > *v2) 170 - swap_u32(v1, v2); 161 + swap(*v1, *v2); 171 162 } else if (*v1 < 0x200 && *v2 < 0x200) { 172 163 if (*v1 > *v2) 173 - swap_u32(v1, v2); 164 + swap(*v1, *v2); 174 165 } else if (*v1 < 0x200 && *v2 >= 0x200) { 175 - swap_u32(v1, v2); 166 + swap(*v1, *v2); 176 167 } 177 168 } 178 169
-7
drivers/net/wireless/ti/wlcore/debugfs.c
··· 122 122 pm_runtime_put_autosuspend(wl->dev); 123 123 } 124 124 125 - 126 - static inline void no_write_handler(struct wl1271 *wl, 127 - unsigned long value, 128 - unsigned long param) 129 - { 130 - } 131 - 132 125 #define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \ 133 126 min_val, max_val, write_handler_locked, \ 134 127 write_handler_arg) \
-1
drivers/net/wireless/ti/wlcore/main.c
··· 30 30 #include "sysfs.h" 31 31 32 32 #define WL1271_BOOT_RETRIES 3 33 - #define WL1271_SUSPEND_SLEEP 100 34 33 #define WL1271_WAKEUP_TIMEOUT 500 35 34 36 35 static char *fwlog_param;
+1 -5
drivers/net/wireless/zydas/zd1201.c
··· 1652 1652 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1653 1653 { 1654 1654 struct zd1201 *zd = netdev_priv(dev); 1655 - int err; 1656 1655 1657 1656 if (!zd->ap) 1658 1657 return -EOPNOTSUPP; 1659 1658 1660 - err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value); 1661 - if (err) 1662 - return err; 1663 - return 0; 1659 + return zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value); 1664 1660 } 1665 1661 1666 1662 static int zd1201_get_maxassoc(struct net_device *dev,
+1 -6
drivers/ssb/pci.c
··· 1164 1164 int ssb_pci_init(struct ssb_bus *bus) 1165 1165 { 1166 1166 struct pci_dev *pdev; 1167 - int err; 1168 1167 1169 1168 if (bus->bustype != SSB_BUSTYPE_PCI) 1170 1169 return 0; 1171 1170 1172 1171 pdev = bus->host_pci; 1173 1172 mutex_init(&bus->sprom_mutex); 1174 - err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom); 1175 - if (err) 1176 - goto out; 1177 1173 1178 - out: 1179 - return err; 1174 + return device_create_file(&pdev->dev, &dev_attr_ssb_sprom); 1180 1175 }