Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-next-2024-12-19' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Kalle Valo says:

====================
wireless-next patches for v6.14

Multi-Link Operation implementation continues, both in the stack and in
drivers. Otherwise it has been relatively quiet.

Major changes:

cfg80211/mac80211
- define wiphy guard
- get TX power per link
- EHT 320 MHz channel support for mesh

ath11k
- QCA6698AQ support

ath9k
- RX inactivity detection

rtl8xxxu
- add more USB device IDs

rtw88
- add more USB device IDs
- enable USB RX aggregation and USB 3 to improve performance

rtw89
- PowerSave flow for Multi-Link Operation

* tag 'wireless-next-2024-12-19' of git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (121 commits)
wifi: wlcore: sysfs: constify 'struct bin_attribute'
wifi: brcmfmac: clarify unmodifiable headroom log message
wifi: brcmfmac: add missing header include for brcmf_dbg
wifi: brcmsmac: add gain range check to wlc_phy_iqcal_gainparams_nphy()
wifi: qtnfmac: fix spelling error in core.h
wifi: rtw89: phy: add dummy C2H event handler for report of TAS power
wifi: rtw89: 8851b: rfk: remove unnecessary assignment of return value of _dpk_dgain_read()
wifi: rtw89: 8852c: rfk: refine target channel calculation in _rx_dck_channel_calc()
wifi: rtlwifi: pci: wait for firmware loading before releasing memory
wifi: rtlwifi: fix memory leaks and invalid access at probe error path
wifi: rtlwifi: destroy workqueue at rtl_deinit_core
wifi: rtlwifi: remove unused check_buddy_priv
wifi: rtw89: 8922a: update format of RFK pre-notify H2C command v2
wifi: rtw89: regd: update regulatory map to R68-R51
wifi: rtw89: 8852c: disable ER SU when 4x HE-LTF and 0.8 GI capability differ
wifi: rtw89: disable firmware training HE GI and LTF
wifi: rtw89: ps: update data for firmware and settings for hardware before/after PS
wifi: rtw89: ps: refactor channel info to firmware before entering PS
wifi: rtw89: ps: refactor PS flow to support MLO
wifi: mwifiex: decrease timeout waiting for host sleep from 10s to 5s
...
====================

Link: https://patch.msgid.link/20241219185709.774EDC4CECE@smtp.kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+6347 -1645
+132
drivers/net/wireless/ath/ath11k/core.c
··· 123 123 .tx_ring_size = DP_TCL_DATA_RING_SIZE, 124 124 .smp2p_wow_exit = false, 125 125 .support_dual_stations = false, 126 + .pdev_suspend = false, 126 127 }, 127 128 { 128 129 .hw_rev = ATH11K_HW_IPQ6018_HW10, ··· 208 207 .smp2p_wow_exit = false, 209 208 .support_fw_mac_sequence = false, 210 209 .support_dual_stations = false, 210 + .pdev_suspend = false, 211 211 }, 212 212 { 213 213 .name = "qca6390 hw2.0", ··· 298 296 .smp2p_wow_exit = false, 299 297 .support_fw_mac_sequence = true, 300 298 .support_dual_stations = true, 299 + .pdev_suspend = false, 301 300 }, 302 301 { 303 302 .name = "qcn9074 hw1.0", ··· 382 379 .smp2p_wow_exit = false, 383 380 .support_fw_mac_sequence = false, 384 381 .support_dual_stations = false, 382 + .pdev_suspend = false, 385 383 }, 386 384 { 387 385 .name = "wcn6855 hw2.0", ··· 472 468 .smp2p_wow_exit = false, 473 469 .support_fw_mac_sequence = true, 474 470 .support_dual_stations = true, 471 + .pdev_suspend = false, 475 472 }, 476 473 { 477 474 .name = "wcn6855 hw2.1", ··· 560 555 .smp2p_wow_exit = false, 561 556 .support_fw_mac_sequence = true, 562 557 .support_dual_stations = true, 558 + .pdev_suspend = false, 563 559 }, 564 560 { 565 561 .name = "wcn6750 hw1.0", ··· 643 637 .smp2p_wow_exit = true, 644 638 .support_fw_mac_sequence = true, 645 639 .support_dual_stations = false, 640 + .pdev_suspend = true, 646 641 }, 647 642 { 648 643 .hw_rev = ATH11K_HW_IPQ5018_HW10, ··· 726 719 .smp2p_wow_exit = false, 727 720 .support_fw_mac_sequence = false, 728 721 .support_dual_stations = false, 722 + .pdev_suspend = false, 729 723 }, 730 724 { 731 725 .name = "qca2066 hw2.1", ··· 816 808 .smp2p_wow_exit = false, 817 809 .support_fw_mac_sequence = true, 818 810 .support_dual_stations = true, 811 + }, 812 + { 813 + .name = "qca6698aq hw2.1", 814 + .hw_rev = ATH11K_HW_QCA6698AQ_HW21, 815 + .fw = { 816 + .dir = "QCA6698AQ/hw2.1", 817 + .board_size = 256 * 1024, 818 + .cal_offset = 128 * 1024, 819 + }, 820 + .max_radios = 3, 821 + .bdf_addr = 
0x4B0C0000, 822 + .hw_ops = &wcn6855_ops, 823 + .ring_mask = &ath11k_hw_ring_mask_qca6390, 824 + .internal_sleep_clock = true, 825 + .regs = &wcn6855_regs, 826 + .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390, 827 + .host_ce_config = ath11k_host_ce_config_qca6390, 828 + .ce_count = 9, 829 + .target_ce_config = ath11k_target_ce_config_wlan_qca6390, 830 + .target_ce_count = 9, 831 + .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, 832 + .svc_to_ce_map_len = 14, 833 + .single_pdev_only = true, 834 + .rxdma1_enable = false, 835 + .num_rxdma_per_pdev = 2, 836 + .rx_mac_buf_ring = true, 837 + .vdev_start_delay = true, 838 + .htt_peer_map_v2 = false, 839 + 840 + .spectral = { 841 + .fft_sz = 0, 842 + .fft_pad_sz = 0, 843 + .summary_pad_sz = 0, 844 + .fft_hdr_len = 0, 845 + .max_fft_bins = 0, 846 + .fragment_160mhz = false, 847 + }, 848 + 849 + .interface_modes = BIT(NL80211_IFTYPE_STATION) | 850 + BIT(NL80211_IFTYPE_AP) | 851 + BIT(NL80211_IFTYPE_P2P_DEVICE) | 852 + BIT(NL80211_IFTYPE_P2P_CLIENT) | 853 + BIT(NL80211_IFTYPE_P2P_GO), 854 + .supports_monitor = false, 855 + .supports_shadow_regs = true, 856 + .idle_ps = true, 857 + .supports_sta_ps = true, 858 + .coldboot_cal_mm = false, 859 + .coldboot_cal_ftm = false, 860 + .cbcal_restart_fw = false, 861 + .fw_mem_mode = 0, 862 + .num_vdevs = 2 + 1, 863 + .num_peers = 512, 864 + .supports_suspend = true, 865 + .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855), 866 + .supports_regdb = true, 867 + .fix_l1ss = false, 868 + .credit_flow = true, 869 + .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, 870 + .hal_params = &ath11k_hw_hal_params_qca6390, 871 + .supports_dynamic_smps_6ghz = false, 872 + .alloc_cacheable_memory = false, 873 + .supports_rssi_stats = true, 874 + .fw_wmi_diag_event = true, 875 + .current_cc_support = true, 876 + .dbr_debug_support = false, 877 + .global_reset = true, 878 + .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855, 879 + .m3_fw_support = true, 880 + .fixed_bdf_addr = false, 
881 + .fixed_mem_region = false, 882 + .static_window_map = false, 883 + .hybrid_bus_type = false, 884 + .fixed_fw_mem = false, 885 + .support_off_channel_tx = true, 886 + .supports_multi_bssid = true, 887 + 888 + .sram_dump = { 889 + .start = 0x01400000, 890 + .end = 0x0177ffff, 891 + }, 892 + 893 + .tcl_ring_retry = true, 894 + .tx_ring_size = DP_TCL_DATA_RING_SIZE, 895 + .smp2p_wow_exit = false, 896 + .support_fw_mac_sequence = true, 897 + .support_dual_stations = true, 898 + .pdev_suspend = false, 819 899 }, 820 900 }; 821 901 ··· 1765 1669 return ret; 1766 1670 } 1767 1671 1672 + static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab) 1673 + { 1674 + struct ath11k *ar; 1675 + struct ath11k_pdev *pdev; 1676 + unsigned long time_left; 1677 + int ret; 1678 + int i; 1679 + 1680 + if (!ab->hw_params.pdev_suspend) 1681 + return; 1682 + 1683 + for (i = 0; i < ab->num_radios; i++) { 1684 + pdev = &ab->pdevs[i]; 1685 + ar = pdev->ar; 1686 + 1687 + reinit_completion(&ab->htc_suspend); 1688 + 1689 + ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR, 1690 + pdev->pdev_id); 1691 + if (ret) { 1692 + ath11k_warn(ab, "could not suspend target :%d\n", ret); 1693 + /* pointless to try other pdevs */ 1694 + return; 1695 + } 1696 + 1697 + time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); 1698 + 1699 + if (!time_left) { 1700 + ath11k_warn(ab, "suspend timed out - target pause event never came\n"); 1701 + /* pointless to try other pdevs */ 1702 + return; 1703 + } 1704 + } 1705 + } 1706 + 1768 1707 static void ath11k_core_pdev_destroy(struct ath11k_base *ab) 1769 1708 { 1770 1709 ath11k_spectral_deinit(ab); 1771 1710 ath11k_thermal_unregister(ab); 1772 1711 ath11k_mac_unregister(ab); 1712 + ath11k_core_pdev_suspend_target(ab); 1773 1713 ath11k_hif_irq_disable(ab); 1774 1714 ath11k_dp_pdev_free(ab); 1775 1715 ath11k_debugfs_pdev_destroy(ab);
+1 -3
drivers/net/wireless/ath/ath11k/core.h
··· 148 148 ATH11K_HW_WCN6750_HW10, 149 149 ATH11K_HW_IPQ5018_HW10, 150 150 ATH11K_HW_QCA2066_HW21, 151 + ATH11K_HW_QCA6698AQ_HW21, 151 152 }; 152 153 153 154 enum ath11k_firmware_mode { ··· 341 340 * @ap_power_type: type of power (SP/LPI/VLP) 342 341 * @num_pwr_levels: number of power levels 343 342 * @reg_max: Array of maximum TX power (dBm) per PSD value 344 - * @ap_constraint_power: AP constraint power (dBm) 345 343 * @tpe: TPE values processed from TPE IE 346 344 * @chan_power_info: power info to send to firmware 347 345 */ ··· 350 350 enum wmi_reg_6ghz_ap_type ap_power_type; 351 351 u8 num_pwr_levels; 352 352 u8 reg_max[ATH11K_NUM_PWR_LEVELS]; 353 - u8 ap_constraint_power; 354 353 s8 tpe[ATH11K_NUM_PWR_LEVELS]; 355 354 struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS]; 356 355 }; ··· 369 370 struct ath11k *ar; 370 371 struct ieee80211_vif *vif; 371 372 372 - u16 tx_seq_no; 373 373 struct wmi_wmm_params_all_arg wmm_params; 374 374 struct list_head list; 375 375 union {
-1
drivers/net/wireless/ath/ath11k/dp.h
··· 165 165 struct ath11k_pdev_mon_stats rx_mon_stats; 166 166 /* lock for monitor data */ 167 167 spinlock_t mon_lock; 168 - struct sk_buff_head rx_status_q; 169 168 }; 170 169 171 170 struct ath11k_pdev_dp {
+7 -7
drivers/net/wireless/ath/ath11k/dp_rx.c
··· 3872 3872 ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3873 3873 &rbm); 3874 3874 if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && 3875 + rbm != HAL_RX_BUF_RBM_SW1_BM && 3875 3876 rbm != HAL_RX_BUF_RBM_SW3_BM) { 3876 3877 ab->soc_stats.invalid_rbm++; 3877 3878 ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); ··· 4691 4690 } 4692 4691 } 4693 4692 4694 - static u32 4695 - ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4696 - void *ring_entry, struct sk_buff **head_msdu, 4697 - struct sk_buff **tail_msdu, u32 *npackets, 4698 - u32 *ppdu_id) 4693 + /* clang stack usage explodes if this is inlined */ 4694 + static noinline_for_stack 4695 + u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, 4696 + void *ring_entry, struct sk_buff **head_msdu, 4697 + struct sk_buff **tail_msdu, u32 *npackets, 4698 + u32 *ppdu_id) 4699 4699 { 4700 4700 struct ath11k_pdev_dp *dp = &ar->dp; 4701 4701 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; ··· 5706 5704 { 5707 5705 struct ath11k_pdev_dp *dp = &ar->dp; 5708 5706 struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; 5709 - 5710 - skb_queue_head_init(&pmon->rx_status_q); 5711 5707 5712 5708 pmon->mon_ppdu_status = DP_PPDU_STATUS_START; 5713 5709
+3 -3
drivers/net/wireless/ath/ath11k/hal.h
··· 700 700 #define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7) 701 701 #define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8) 702 702 703 - /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */ 703 + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */ 704 704 #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8) 705 705 #define HAL_REO_CMD_UPD0_VLD BIT(9) 706 706 #define HAL_REO_CMD_UPD0_ALDC BIT(10) ··· 725 725 #define HAL_REO_CMD_UPD0_PN_VALID BIT(29) 726 726 #define HAL_REO_CMD_UPD0_PN BIT(30) 727 727 728 - /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */ 728 + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */ 729 729 #define HAL_REO_CMD_UPD1_VLD BIT(16) 730 730 #define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17) 731 731 #define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19) ··· 741 741 #define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30) 742 742 #define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31) 743 743 744 - /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */ 744 + /* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */ 745 745 #define HAL_REO_CMD_UPD2_SVLD BIT(10) 746 746 #define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11) 747 747 #define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+2 -1
drivers/net/wireless/ath/ath11k/hal_rx.c
··· 372 372 373 373 ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, 374 374 wbm_desc->buf_addr_info.info1); 375 - if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) { 375 + if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM && 376 + ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) { 376 377 ab->soc_stats.invalid_rbm++; 377 378 return -EINVAL; 378 379 }
+1
drivers/net/wireless/ath/ath11k/hw.h
··· 227 227 bool smp2p_wow_exit; 228 228 bool support_fw_mac_sequence; 229 229 bool support_dual_stations; 230 + bool pdev_suspend; 230 231 }; 231 232 232 233 struct ath11k_hw_ops {
+3 -4
drivers/net/wireless/ath/ath11k/mac.c
··· 1697 1697 return; 1698 1698 } 1699 1699 1700 - arvif->tx_seq_no = 0x1000; 1701 - 1702 1700 arvif->aid = 0; 1703 1701 1704 1702 ether_addr_copy(arvif->bssid, info->bssid); ··· 2228 2230 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2229 2231 2230 2232 /* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default. 2231 - * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard. 2233 + * VHT mcs rate 10 and 11 is not supported in 11ac standard. 2232 2234 * so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode. 2233 2235 */ 2234 2236 arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK; ··· 6950 6952 /* Recalc txpower for remaining vdev */ 6951 6953 ath11k_mac_txpower_recalc(ar); 6952 6954 6953 - /* TODO: recal traffic pause state based on the available vdevs */ 6955 + /* TODO: recalc traffic pause state based on the available vdevs */ 6954 6956 6955 6957 mutex_unlock(&ar->conf_mutex); 6956 6958 } ··· 9354 9356 9355 9357 static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw, 9356 9358 struct ieee80211_vif *vif, 9359 + unsigned int link_id, 9357 9360 int *dbm) 9358 9361 { 9359 9362 struct ath11k *ar = hw->priv;
+1
drivers/net/wireless/ath/ath11k/mhi.c
··· 398 398 case ATH11K_HW_WCN6855_HW20: 399 399 case ATH11K_HW_WCN6855_HW21: 400 400 case ATH11K_HW_QCA2066_HW21: 401 + case ATH11K_HW_QCA6698AQ_HW21: 401 402 ath11k_mhi_config = &ath11k_mhi_config_qca6390; 402 403 break; 403 404 default:
+3
drivers/net/wireless/ath/ath11k/pci.c
··· 846 846 case 0x1019D0E1: 847 847 ab->hw_rev = ATH11K_HW_QCA2066_HW21; 848 848 break; 849 + case 0x001e60e1: 850 + ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21; 851 + break; 849 852 default: 850 853 ab->hw_rev = ATH11K_HW_WCN6855_HW21; 851 854 }
+12 -1
drivers/net/wireless/ath/ath11k/pcic.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" ··· 125 125 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 126 126 }, 127 127 .hw_rev = ATH11K_HW_QCA2066_HW21, 128 + }, 129 + { 130 + .total_vectors = 32, 131 + .total_users = 4, 132 + .users = (struct ath11k_msi_user[]) { 133 + { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 134 + { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 135 + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 136 + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 137 + }, 138 + .hw_rev = ATH11K_HW_QCA6698AQ_HW21, 128 139 }, 129 140 }; 130 141
+6 -2
drivers/net/wireless/ath/ath11k/qmi.c
··· 1704 1704 }, 1705 1705 }; 1706 1706 1707 - static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) 1707 + /* clang stack usage explodes if this is inlined */ 1708 + static noinline_for_stack 1709 + int ath11k_qmi_host_cap_send(struct ath11k_base *ab) 1708 1710 { 1709 1711 struct qmi_wlanfw_host_cap_req_msg_v01 req; 1710 1712 struct qmi_wlanfw_host_cap_resp_msg_v01 resp; ··· 2572 2570 m3_mem->size = 0; 2573 2571 } 2574 2572 2575 - static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) 2573 + /* clang stack usage explodes if this is inlined */ 2574 + static noinline_for_stack 2575 + int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) 2576 2576 { 2577 2577 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; 2578 2578 struct qmi_wlanfw_m3_info_req_msg_v01 req;
+4 -2
drivers/net/wireless/ath/ath11k/wow.c
··· 148 148 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... | 149 149 * +--+------------+----+-----------+---------------+-----------+ 150 150 */ 151 - static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, 152 - const struct cfg80211_pkt_pattern *old) 151 + /* clang stack usage explodes if this is inlined */ 152 + static noinline_for_stack 153 + void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, 154 + const struct cfg80211_pkt_pattern *old) 153 155 { 154 156 u8 hdr_8023_pattern[ETH_HLEN] = {}; 155 157 u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+414 -72
drivers/net/wireless/ath/ath12k/core.c
··· 22 22 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); 23 23 MODULE_PARM_DESC(debug_mask, "Debugging mask"); 24 24 25 + /* protected with ath12k_hw_group_mutex */ 26 + static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list); 27 + 28 + static DEFINE_MUTEX(ath12k_hw_group_mutex); 29 + 25 30 static int ath12k_core_rfkill_config(struct ath12k_base *ab) 26 31 { 27 32 struct ath12k *ar; ··· 84 79 ar = ab->pdevs[i].ar; 85 80 if (!ar) 86 81 continue; 82 + 83 + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); 84 + 87 85 ret = ath12k_mac_wait_tx_complete(ar); 88 86 if (ret) { 87 + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); 89 88 ath12k_warn(ab, "failed to wait tx complete: %d\n", ret); 90 89 return ret; 91 90 } 91 + 92 + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); 92 93 } 93 94 94 95 /* PM framework skips suspend_late/resume_early callbacks ··· 604 593 605 594 static void ath12k_core_stop(struct ath12k_base *ab) 606 595 { 596 + ath12k_core_stopped(ab); 597 + 607 598 if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)) 608 599 ath12k_qmi_firmware_stop(ab); 609 600 610 601 ath12k_acpi_stop(ab); 611 602 603 + ath12k_dp_rx_pdev_reo_cleanup(ab); 612 604 ath12k_hif_stop(ab); 613 605 ath12k_wmi_detach(ab); 614 - ath12k_dp_rx_pdev_reo_cleanup(ab); 606 + ath12k_dp_free(ab); 615 607 616 608 /* De-Init of components as needed */ 617 609 } ··· 716 702 717 703 static void ath12k_core_soc_destroy(struct ath12k_base *ab) 718 704 { 719 - ath12k_dp_free(ab); 705 + ath12k_hif_power_down(ab, false); 720 706 ath12k_reg_free(ab); 721 707 ath12k_debugfs_soc_destroy(ab); 722 708 ath12k_qmi_deinit_service(ab); ··· 726 712 { 727 713 int ret; 728 714 729 - ret = ath12k_mac_register(ab); 730 - if (ret) { 731 - ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret); 732 - return ret; 733 - } 734 - 735 715 ret = ath12k_dp_pdev_alloc(ab); 736 716 if (ret) { 737 717 ath12k_err(ab, "failed to attach DP pdev: %d\n", ret); 738 - goto err_mac_unregister; 718 
+ return ret; 739 719 } 740 720 741 721 return 0; 742 - 743 - err_mac_unregister: 744 - ath12k_mac_unregister(ab); 745 - 746 - return ret; 747 722 } 748 723 749 724 static void ath12k_core_pdev_destroy(struct ath12k_base *ab) 750 725 { 751 - ath12k_mac_unregister(ab); 752 - ath12k_hif_irq_disable(ab); 753 726 ath12k_dp_pdev_free(ab); 754 727 } 755 728 ··· 744 743 enum ath12k_firmware_mode mode) 745 744 { 746 745 int ret; 746 + 747 + lockdep_assert_held(&ab->core_lock); 747 748 748 749 ret = ath12k_wmi_attach(ab); 749 750 if (ret) { ··· 796 793 goto err_hif_stop; 797 794 } 798 795 799 - ret = ath12k_mac_allocate(ab); 800 - if (ret) { 801 - ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n", 802 - ret); 803 - goto err_hif_stop; 804 - } 805 - 806 796 ath12k_dp_cc_config(ab); 807 797 808 798 ret = ath12k_dp_rx_pdev_reo_setup(ab); 809 799 if (ret) { 810 800 ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret); 811 - goto err_mac_destroy; 801 + goto err_hif_stop; 812 802 } 813 803 814 804 ath12k_dp_hal_rx_desc_init(ab); ··· 840 844 /* ACPI is optional so continue in case of an error */ 841 845 ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret); 842 846 847 + if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) 848 + /* Indicate the core start in the appropriate group */ 849 + ath12k_core_started(ab); 850 + 843 851 return 0; 844 852 845 853 err_reo_cleanup: 846 854 ath12k_dp_rx_pdev_reo_cleanup(ab); 847 - err_mac_destroy: 848 - ath12k_mac_destroy(ab); 849 855 err_hif_stop: 850 856 ath12k_hif_stop(ab); 851 857 err_wmi_detach: 852 858 ath12k_wmi_detach(ab); 859 + return ret; 860 + } 861 + 862 + static void ath12k_core_device_cleanup(struct ath12k_base *ab) 863 + { 864 + mutex_lock(&ab->core_lock); 865 + 866 + ath12k_hif_irq_disable(ab); 867 + ath12k_core_pdev_destroy(ab); 868 + 869 + mutex_unlock(&ab->core_lock); 870 + } 871 + 872 + static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag) 873 + { 874 + struct ath12k_base 
*ab; 875 + int i; 876 + 877 + lockdep_assert_held(&ag->mutex); 878 + 879 + clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags); 880 + 881 + ath12k_mac_unregister(ag); 882 + 883 + for (i = ag->num_devices - 1; i >= 0; i--) { 884 + ab = ag->ab[i]; 885 + if (!ab) 886 + continue; 887 + ath12k_core_device_cleanup(ab); 888 + } 889 + 890 + ath12k_mac_destroy(ag); 891 + } 892 + 893 + static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag) 894 + { 895 + struct ath12k_base *ab; 896 + int ret, i; 897 + 898 + lockdep_assert_held(&ag->mutex); 899 + 900 + if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags)) 901 + goto core_pdev_create; 902 + 903 + ret = ath12k_mac_allocate(ag); 904 + if (WARN_ON(ret)) 905 + return ret; 906 + 907 + ret = ath12k_mac_register(ag); 908 + if (WARN_ON(ret)) 909 + goto err_mac_destroy; 910 + 911 + set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags); 912 + 913 + core_pdev_create: 914 + for (i = 0; i < ag->num_devices; i++) { 915 + ab = ag->ab[i]; 916 + if (!ab) 917 + continue; 918 + 919 + mutex_lock(&ab->core_lock); 920 + 921 + ret = ath12k_core_pdev_create(ab); 922 + if (ret) { 923 + ath12k_err(ab, "failed to create pdev core %d\n", ret); 924 + mutex_unlock(&ab->core_lock); 925 + goto err; 926 + } 927 + 928 + ath12k_hif_irq_enable(ab); 929 + 930 + ret = ath12k_core_rfkill_config(ab); 931 + if (ret && ret != -EOPNOTSUPP) { 932 + mutex_unlock(&ab->core_lock); 933 + goto err; 934 + } 935 + 936 + mutex_unlock(&ab->core_lock); 937 + } 938 + 939 + return 0; 940 + 941 + err: 942 + ath12k_core_hw_group_stop(ag); 943 + return ret; 944 + 945 + err_mac_destroy: 946 + ath12k_mac_destroy(ag); 947 + 853 948 return ret; 854 949 } 855 950 ··· 961 874 return ret; 962 875 } 963 876 877 + static inline 878 + bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag) 879 + { 880 + lockdep_assert_held(&ag->mutex); 881 + 882 + return (ag->num_started == ag->num_devices); 883 + } 884 + 885 + static void ath12k_core_trigger_partner(struct ath12k_base *ab) 
886 + { 887 + struct ath12k_hw_group *ag = ab->ag; 888 + struct ath12k_base *partner_ab; 889 + bool found = false; 890 + int i; 891 + 892 + for (i = 0; i < ag->num_devices; i++) { 893 + partner_ab = ag->ab[i]; 894 + if (!partner_ab) 895 + continue; 896 + 897 + if (found) 898 + ath12k_qmi_trigger_host_cap(partner_ab); 899 + 900 + found = (partner_ab == ab); 901 + } 902 + } 903 + 964 904 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) 965 905 { 966 - int ret; 906 + struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab); 907 + int ret, i; 967 908 968 909 ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL); 969 910 if (ret) { ··· 1011 896 goto err_firmware_stop; 1012 897 } 1013 898 899 + mutex_lock(&ag->mutex); 1014 900 mutex_lock(&ab->core_lock); 901 + 1015 902 ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL); 1016 903 if (ret) { 1017 904 ath12k_err(ab, "failed to start core: %d\n", ret); 1018 905 goto err_dp_free; 1019 906 } 1020 907 1021 - ret = ath12k_core_pdev_create(ab); 1022 - if (ret) { 1023 - ath12k_err(ab, "failed to create pdev core: %d\n", ret); 1024 - goto err_core_stop; 1025 - } 1026 - ath12k_hif_irq_enable(ab); 1027 - 1028 - ret = ath12k_core_rfkill_config(ab); 1029 - if (ret && ret != -EOPNOTSUPP) { 1030 - ath12k_err(ab, "failed to config rfkill: %d\n", ret); 1031 - goto err_core_pdev_destroy; 1032 - } 1033 - 1034 908 mutex_unlock(&ab->core_lock); 909 + 910 + if (ath12k_core_hw_group_start_ready(ag)) { 911 + ret = ath12k_core_hw_group_start(ag); 912 + if (ret) { 913 + ath12k_warn(ab, "unable to start hw group\n"); 914 + goto err_core_stop; 915 + } 916 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id); 917 + } else { 918 + ath12k_core_trigger_partner(ab); 919 + } 920 + 921 + mutex_unlock(&ag->mutex); 1035 922 1036 923 return 0; 1037 924 1038 - err_core_pdev_destroy: 1039 - ath12k_core_pdev_destroy(ab); 1040 925 err_core_stop: 1041 - ath12k_core_stop(ab); 1042 - ath12k_mac_destroy(ab); 926 + for (i = 
ag->num_devices - 1; i >= 0; i--) { 927 + ab = ag->ab[i]; 928 + if (!ab) 929 + continue; 930 + 931 + mutex_lock(&ab->core_lock); 932 + ath12k_core_stop(ab); 933 + mutex_unlock(&ab->core_lock); 934 + } 935 + goto exit; 936 + 1043 937 err_dp_free: 1044 938 ath12k_dp_free(ab); 1045 939 mutex_unlock(&ab->core_lock); 1046 940 err_firmware_stop: 1047 941 ath12k_qmi_firmware_stop(ab); 1048 942 943 + exit: 944 + mutex_unlock(&ag->mutex); 1049 945 return ret; 1050 946 } 1051 947 ··· 1108 982 rfkill_radio_on = ab->rfkill_radio_on; 1109 983 spin_unlock_bh(&ab->base_lock); 1110 984 1111 - for (i = 0; i < ab->num_hw; i++) { 1112 - ah = ab->ah[i]; 985 + for (i = 0; i < ath12k_get_num_hw(ab); i++) { 986 + ah = ath12k_ab_to_ah(ab, i); 1113 987 if (!ah) 1114 988 continue; 1115 989 ··· 1160 1034 if (ab->is_reset) 1161 1035 set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); 1162 1036 1163 - for (i = 0; i < ab->num_hw; i++) { 1164 - ah = ab->ah[i]; 1037 + for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1038 + ah = ath12k_ab_to_ah(ab, i); 1165 1039 if (!ah || ah->state == ATH12K_HW_STATE_OFF) 1166 1040 continue; 1167 1041 ··· 1199 1073 struct ath12k *ar; 1200 1074 int i, j; 1201 1075 1202 - for (i = 0; i < ab->num_hw; i++) { 1203 - ah = ab->ah[i]; 1076 + for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1077 + ah = ath12k_ab_to_ah(ab, i); 1204 1078 if (!ah || ah->state == ATH12K_HW_STATE_OFF) 1205 1079 continue; 1206 1080 ··· 1253 1127 } 1254 1128 1255 1129 if (ab->is_reset) { 1256 - for (i = 0; i < ab->num_hw; i++) { 1257 - ah = ab->ah[i]; 1130 + if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { 1131 + atomic_dec(&ab->reset_count); 1132 + complete(&ab->reset_complete); 1133 + ab->is_reset = false; 1134 + atomic_set(&ab->fail_cont_count, 0); 1135 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); 1136 + } 1137 + 1138 + for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1139 + ah = ath12k_ab_to_ah(ab, i); 1258 1140 ieee80211_restart_hw(ah->hw); 1259 1141 } 1260 1142 } ··· 1276 1142 int 
reset_count, fail_cont_count; 1277 1143 long time_left; 1278 1144 1279 - if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) { 1145 + if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) { 1280 1146 ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags); 1281 1147 return; 1282 1148 } ··· 1375 1241 &ab->panic_nb); 1376 1242 } 1377 1243 1244 + static inline 1245 + bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag) 1246 + { 1247 + lockdep_assert_held(&ag->mutex); 1248 + 1249 + return (ag->num_probed == ag->num_devices); 1250 + } 1251 + 1252 + static struct ath12k_hw_group *ath12k_core_hw_group_alloc(u8 id, u8 max_devices) 1253 + { 1254 + struct ath12k_hw_group *ag; 1255 + 1256 + lockdep_assert_held(&ath12k_hw_group_mutex); 1257 + 1258 + ag = kzalloc(sizeof(*ag), GFP_KERNEL); 1259 + if (!ag) 1260 + return NULL; 1261 + 1262 + ag->id = id; 1263 + ag->num_devices = max_devices; 1264 + list_add(&ag->list, &ath12k_hw_group_list); 1265 + mutex_init(&ag->mutex); 1266 + 1267 + return ag; 1268 + } 1269 + 1270 + static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag) 1271 + { 1272 + mutex_lock(&ath12k_hw_group_mutex); 1273 + 1274 + list_del(&ag->list); 1275 + kfree(ag); 1276 + 1277 + mutex_unlock(&ath12k_hw_group_mutex); 1278 + } 1279 + 1280 + static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab) 1281 + { 1282 + u32 group_id = ATH12K_INVALID_GROUP_ID; 1283 + struct ath12k_hw_group *ag; 1284 + 1285 + lockdep_assert_held(&ath12k_hw_group_mutex); 1286 + 1287 + /* The grouping of multiple devices will be done based on device tree file. 1288 + * TODO: device tree file parsing to know about the devices involved in group. 1289 + * 1290 + * The platforms that do not have any valid group information would have each 1291 + * device to be part of its own invalid group. 
1292 + * 1293 + * Currently, we are not parsing any device tree information and hence, grouping 1294 + * of multiple devices is not involved. Thus, single device is added to device 1295 + * group. 1296 + */ 1297 + ag = ath12k_core_hw_group_alloc(group_id, 1); 1298 + if (!ag) { 1299 + ath12k_warn(ab, "unable to create new hw group\n"); 1300 + return NULL; 1301 + } 1302 + 1303 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n"); 1304 + 1305 + ab->device_id = ag->num_probed++; 1306 + ag->ab[ab->device_id] = ab; 1307 + ab->ag = ag; 1308 + ag->mlo_capable = false; 1309 + 1310 + return ag; 1311 + } 1312 + 1313 + void ath12k_core_hw_group_unassign(struct ath12k_base *ab) 1314 + { 1315 + struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab); 1316 + u8 device_id = ab->device_id; 1317 + int num_probed; 1318 + 1319 + if (!ag) 1320 + return; 1321 + 1322 + mutex_lock(&ag->mutex); 1323 + 1324 + if (WARN_ON(device_id >= ag->num_devices)) { 1325 + mutex_unlock(&ag->mutex); 1326 + return; 1327 + } 1328 + 1329 + if (WARN_ON(ag->ab[device_id] != ab)) { 1330 + mutex_unlock(&ag->mutex); 1331 + return; 1332 + } 1333 + 1334 + ag->ab[device_id] = NULL; 1335 + ab->ag = NULL; 1336 + ab->device_id = ATH12K_INVALID_DEVICE_ID; 1337 + 1338 + if (ag->num_probed) 1339 + ag->num_probed--; 1340 + 1341 + num_probed = ag->num_probed; 1342 + 1343 + mutex_unlock(&ag->mutex); 1344 + 1345 + if (!num_probed) 1346 + ath12k_core_hw_group_free(ag); 1347 + } 1348 + 1349 + static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag) 1350 + { 1351 + struct ath12k_base *ab; 1352 + int i; 1353 + 1354 + if (WARN_ON(!ag)) 1355 + return; 1356 + 1357 + for (i = 0; i < ag->num_devices; i++) { 1358 + ab = ag->ab[i]; 1359 + if (!ab) 1360 + continue; 1361 + 1362 + ath12k_core_soc_destroy(ab); 1363 + } 1364 + } 1365 + 1366 + static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag) 1367 + { 1368 + struct ath12k_base *ab; 1369 + int i; 1370 + 1371 + if (!ag) 1372 + return; 1373 + 1374 
+ mutex_lock(&ag->mutex); 1375 + 1376 + ath12k_core_hw_group_stop(ag); 1377 + 1378 + for (i = 0; i < ag->num_devices; i++) { 1379 + ab = ag->ab[i]; 1380 + if (!ab) 1381 + continue; 1382 + 1383 + mutex_lock(&ab->core_lock); 1384 + ath12k_core_stop(ab); 1385 + mutex_unlock(&ab->core_lock); 1386 + } 1387 + 1388 + mutex_unlock(&ag->mutex); 1389 + } 1390 + 1391 + static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag) 1392 + { 1393 + struct ath12k_base *ab; 1394 + int i, ret; 1395 + 1396 + lockdep_assert_held(&ag->mutex); 1397 + 1398 + for (i = 0; i < ag->num_devices; i++) { 1399 + ab = ag->ab[i]; 1400 + if (!ab) 1401 + continue; 1402 + 1403 + mutex_lock(&ab->core_lock); 1404 + 1405 + ret = ath12k_core_soc_create(ab); 1406 + if (ret) { 1407 + mutex_unlock(&ab->core_lock); 1408 + ath12k_err(ab, "failed to create soc core: %d\n", ret); 1409 + return ret; 1410 + } 1411 + 1412 + mutex_unlock(&ab->core_lock); 1413 + } 1414 + 1415 + return 0; 1416 + } 1417 + 1418 + void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag) 1419 + { 1420 + lockdep_assert_held(&ag->mutex); 1421 + 1422 + /* If more than one devices are grouped, then inter MLO 1423 + * functionality can work still independent of whether internally 1424 + * each device supports single_chip_mlo or not. 
1425 + * Only when there is one device, then it depends whether the 1426 + * device can support intra chip MLO or not 1427 + */ 1428 + if (ag->num_devices > 1) 1429 + ag->mlo_capable = true; 1430 + else 1431 + ag->mlo_capable = ag->ab[0]->single_chip_mlo_supp; 1432 + } 1433 + 1378 1434 int ath12k_core_init(struct ath12k_base *ab) 1379 1435 { 1436 + struct ath12k_hw_group *ag; 1380 1437 int ret; 1381 - 1382 - ret = ath12k_core_soc_create(ab); 1383 - if (ret) { 1384 - ath12k_err(ab, "failed to create soc core: %d\n", ret); 1385 - return ret; 1386 - } 1387 1438 1388 1439 ret = ath12k_core_panic_notifier_register(ab); 1389 1440 if (ret) 1390 1441 ath12k_warn(ab, "failed to register panic handler: %d\n", ret); 1391 1442 1443 + mutex_lock(&ath12k_hw_group_mutex); 1444 + 1445 + ag = ath12k_core_hw_group_assign(ab); 1446 + if (!ag) { 1447 + mutex_unlock(&ath12k_hw_group_mutex); 1448 + ath12k_warn(ab, "unable to get hw group\n"); 1449 + return -ENODEV; 1450 + } 1451 + 1452 + mutex_unlock(&ath12k_hw_group_mutex); 1453 + 1454 + mutex_lock(&ag->mutex); 1455 + 1456 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n", 1457 + ag->num_devices, ag->num_probed); 1458 + 1459 + if (ath12k_core_hw_group_create_ready(ag)) { 1460 + ret = ath12k_core_hw_group_create(ag); 1461 + if (ret) { 1462 + mutex_unlock(&ag->mutex); 1463 + ath12k_warn(ab, "unable to create hw group\n"); 1464 + goto err; 1465 + } 1466 + } 1467 + 1468 + mutex_unlock(&ag->mutex); 1469 + 1392 1470 return 0; 1471 + 1472 + err: 1473 + ath12k_core_hw_group_destroy(ab->ag); 1474 + ath12k_core_hw_group_unassign(ab); 1475 + return ret; 1393 1476 } 1394 1477 1395 1478 void ath12k_core_deinit(struct ath12k_base *ab) 1396 1479 { 1397 1480 ath12k_core_panic_notifier_unregister(ab); 1398 - 1399 - mutex_lock(&ab->core_lock); 1400 - 1401 - ath12k_core_pdev_destroy(ab); 1402 - ath12k_core_stop(ab); 1403 - 1404 - mutex_unlock(&ab->core_lock); 1405 - 1406 - ath12k_hif_power_down(ab, false); 1407 - ath12k_mac_destroy(ab); 
1408 - ath12k_core_soc_destroy(ab); 1409 - ath12k_fw_unmap(ab); 1481 + ath12k_core_hw_group_cleanup(ab->ag); 1482 + ath12k_core_hw_group_destroy(ab->ag); 1483 + ath12k_core_hw_group_unassign(ab); 1410 1484 } 1411 1485 1412 1486 void ath12k_core_free(struct ath12k_base *ab) ··· 1664 1322 ab->dev = dev; 1665 1323 ab->hif.bus = bus; 1666 1324 ab->qmi.num_radios = U8_MAX; 1667 - ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT; 1325 + ab->single_chip_mlo_supp = false; 1668 1326 1669 1327 /* Device index used to identify the devices in a group. 1670 1328 *
+105 -29
drivers/net/wireless/ath/ath12k/core.h
··· 63 63 #define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) 64 64 #define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ) 65 65 66 + #define ATH12K_MAX_SOCS 3 67 + #define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_SOCS * MAX_RADIOS) 68 + #define ATH12K_INVALID_GROUP_ID 0xFF 69 + #define ATH12K_INVALID_DEVICE_ID 0xFF 70 + 71 + #define ATH12K_MAX_MLO_PEERS 256 72 + #define ATH12K_MLO_PEER_ID_INVALID 0xFFFF 73 + 66 74 enum ath12k_bdf_search { 67 75 ATH12K_BDF_SEARCH_DEFAULT, 68 76 ATH12K_BDF_SEARCH_BUS_AND_BOARD, ··· 123 115 dma_addr_t paddr_ext_desc; 124 116 u32 cipher; 125 117 u8 flags; 118 + u8 link_id; 126 119 }; 127 120 128 121 struct ath12k_skb_rxcb { ··· 217 208 ATH12K_SCAN_ABORTING, 218 209 }; 219 210 211 + enum ath12k_hw_group_flags { 212 + ATH12K_GROUP_FLAG_REGISTERED, 213 + }; 214 + 220 215 enum ath12k_dev_flags { 221 216 ATH12K_CAC_RUNNING, 222 217 ATH12K_FLAG_CRASH_FLUSH, ··· 233 220 ATH12K_FLAG_HTC_SUSPEND_COMPLETE, 234 221 ATH12K_FLAG_CE_IRQ_ENABLED, 235 222 ATH12K_FLAG_EXT_IRQ_ENABLED, 223 + ATH12K_FLAG_QMI_FW_READY_COMPLETE, 236 224 }; 237 225 238 226 struct ath12k_tx_conf { ··· 328 314 bool ps; 329 315 330 316 struct ath12k_link_vif deflink; 331 - struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; 317 + struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS]; 332 318 struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS]; 333 319 /* indicates bitmap of link vif created in FW */ 334 320 u16 links_map; 321 + u8 last_scan_link; 335 322 336 323 /* Must be last - ends in a flexible-array member. 
337 324 * ··· 484 469 struct ath12k_link_vif *arvif; 485 470 struct ath12k_sta *ahsta; 486 471 472 + /* link address similar to ieee80211_link_sta */ 473 + u8 addr[ETH_ALEN]; 474 + 487 475 /* the following are protected by ar->data_lock */ 488 476 u32 changed; /* IEEE80211_RC_* */ 489 477 u32 bw; ··· 503 485 struct ath12k_rx_peer_stats *rx_stats; 504 486 struct ath12k_wbm_tx_stats *wbm_tx_stats; 505 487 u32 bw_prev; 488 + 489 + /* For now the assoc link will be considered primary */ 490 + bool is_assoc_link; 491 + 492 + /* for firmware use only */ 493 + u8 link_idx; 506 494 }; 507 495 508 496 struct ath12k_sta { 497 + struct ath12k_vif *ahvif; 509 498 enum hal_pn_type pn_type; 510 499 struct ath12k_link_sta deflink; 511 500 struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; 512 501 /* indicates bitmap of link sta created in FW */ 513 502 u16 links_map; 503 + u8 assoc_link_id; 504 + u16 ml_peer_id; 505 + u8 num_peer; 506 + 507 + enum ieee80211_sta_state state; 514 508 }; 515 509 516 510 #define ATH12K_MIN_5G_FREQ 4150 ··· 687 657 688 658 struct work_struct regd_update_work; 689 659 690 - struct work_struct wmi_mgmt_tx_work; 660 + struct wiphy_work wmi_mgmt_tx_work; 691 661 struct sk_buff_head wmi_mgmt_tx_queue; 692 662 693 663 struct ath12k_wow wow; ··· 718 688 719 689 struct ath12k_hw { 720 690 struct ieee80211_hw *hw; 691 + struct device *dev; 692 + 721 693 /* Protect the write operation of the hardware state ath12k_hw::state 722 694 * between hardware start<=>reconfigure<=>stop transitions. 
723 695 */ ··· 729 697 bool use_6ghz_regd; 730 698 731 699 u8 num_radio; 700 + 701 + DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS); 702 + 703 + /* protected by wiphy_lock() */ 704 + struct list_head ml_peers; 732 705 733 706 /* Keep last */ 734 707 struct ath12k radio[] __aligned(sizeof(void *)); ··· 821 784 struct ath12k_soc_dp_tx_err_stats tx_err; 822 785 }; 823 786 824 - /** 825 - * enum ath12k_link_capable_flags - link capable flags 826 - * 827 - * Single/Multi link capability information 828 - * 829 - * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all 830 - * the links (radios) present within a device. 831 - * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all 832 - * the links (radios) present across the devices. 787 + /* Holds info on the group of devices that are registered as a single 788 + * wiphy, protected with struct ath12k_hw_group::mutex. 833 789 */ 834 - enum ath12k_link_capable_flags { 835 - ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0), 836 - ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1), 790 + struct ath12k_hw_group { 791 + struct list_head list; 792 + u8 id; 793 + u8 num_devices; 794 + u8 num_probed; 795 + u8 num_started; 796 + unsigned long flags; 797 + struct ath12k_base *ab[ATH12K_MAX_SOCS]; 798 + 799 + /* protects access to this struct */ 800 + struct mutex mutex; 801 + 802 + /* Holds information of wiphy (hw) registration. 803 + * 804 + * In Multi/Single Link Operation case, all pdevs are registered as 805 + * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is 806 + * registered as separate wiphys. 807 + */ 808 + struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO]; 809 + u8 num_hw; 810 + bool mlo_capable; 837 811 }; 838 812 839 813 /* Master structure to hold the hw data which may be used in core module */ ··· 909 861 u8 fw_pdev_count; 910 862 911 863 struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS]; 912 - 913 - /* Holds information of wiphy (hw) registration. 
914 - * 915 - * In Multi/Single Link Operation case, all pdevs are registered as 916 - * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is 917 - * registered as separate wiphys. 918 - */ 919 - struct ath12k_hw *ah[MAX_RADIOS]; 920 - u8 num_hw; 921 864 922 865 struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS]; 923 866 unsigned long long free_vdev_map; ··· 1003 964 1004 965 const struct hal_rx_ops *hal_rx_ops; 1005 966 1006 - /* mlo_capable_flags denotes the single/multi link operation 1007 - * capabilities of the Device. 1008 - * 1009 - * See enum ath12k_link_capable_flags 1010 - */ 1011 - u8 mlo_capable_flags; 967 + /* Denotes the whether MLO is possible within the chip */ 968 + bool single_chip_mlo_supp; 1012 969 1013 970 struct completion restart_completed; 1014 971 ··· 1026 991 #endif /* CONFIG_ACPI */ 1027 992 1028 993 struct notifier_block panic_nb; 994 + 995 + struct ath12k_hw_group *ag; 1029 996 1030 997 /* must be last */ 1031 998 u8 drv_priv[] __aligned(sizeof(void *)); ··· 1059 1022 int ath12k_core_resume(struct ath12k_base *ab); 1060 1023 int ath12k_core_suspend(struct ath12k_base *ab); 1061 1024 int ath12k_core_suspend_late(struct ath12k_base *ab); 1025 + void ath12k_core_hw_group_unassign(struct ath12k_base *ab); 1062 1026 1063 1027 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, 1064 1028 const char *filename); 1065 1029 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab); 1066 1030 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab); 1067 1031 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab); 1032 + 1033 + void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag); 1068 1034 1069 1035 static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state) 1070 1036 { ··· 1169 1129 #define for_each_ar(ah, ar, index) \ 1170 1130 for ((index) = 0; ((index) < (ah)->num_radio && \ 1171 1131 ((ar) = &(ah)->radio[(index)])); (index)++) 1132 + 1133 
+ static inline struct ath12k_hw *ath12k_ab_to_ah(struct ath12k_base *ab, int idx) 1134 + { 1135 + return ab->ag->ah[idx]; 1136 + } 1137 + 1138 + static inline void ath12k_ab_set_ah(struct ath12k_base *ab, int idx, 1139 + struct ath12k_hw *ah) 1140 + { 1141 + ab->ag->ah[idx] = ah; 1142 + } 1143 + 1144 + static inline int ath12k_get_num_hw(struct ath12k_base *ab) 1145 + { 1146 + return ab->ag->num_hw; 1147 + } 1148 + 1149 + static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab) 1150 + { 1151 + return ab->ag; 1152 + } 1153 + 1154 + static inline void ath12k_core_started(struct ath12k_base *ab) 1155 + { 1156 + lockdep_assert_held(&ab->ag->mutex); 1157 + 1158 + ab->ag->num_started++; 1159 + } 1160 + 1161 + static inline void ath12k_core_stopped(struct ath12k_base *ab) 1162 + { 1163 + lockdep_assert_held(&ab->ag->mutex); 1164 + 1165 + ab->ag->num_started--; 1166 + } 1167 + 1172 1168 #endif /* _CORE_H_ */
+3 -3
drivers/net/wireless/ath/ath12k/debug.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/vmalloc.h> ··· 36 36 va_end(args); 37 37 } 38 38 39 - void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...) 39 + void __ath12k_warn(struct device *dev, const char *fmt, ...) 40 40 { 41 41 struct va_format vaf = { 42 42 .fmt = fmt, ··· 45 45 46 46 va_start(args, fmt); 47 47 vaf.va = &args; 48 - dev_warn_ratelimited(ab->dev, "%pV", &vaf); 48 + dev_warn_ratelimited(dev, "%pV", &vaf); 49 49 /* TODO: Trace the log */ 50 50 va_end(args); 51 51 }
+4 -1
drivers/net/wireless/ath/ath12k/debug.h
··· 31 31 32 32 __printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...); 33 33 __printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...); 34 - __printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...); 34 + __printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...); 35 + 36 + #define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__) 37 + #define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__) 35 38 36 39 extern unsigned int ath12k_debug_mask; 37 40
+674 -7
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
··· 48 48 footer); 49 49 } 50 50 51 + static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size) 52 + { 53 + switch (ru_size) { 54 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26: 55 + return "26"; 56 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52: 57 + return "52"; 58 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106: 59 + return "106"; 60 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242: 61 + return "242"; 62 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484: 63 + return "484"; 64 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996: 65 + return "996"; 66 + case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2: 67 + return "996x2"; 68 + default: 69 + return "unknown"; 70 + } 71 + } 72 + 51 73 static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size) 52 74 { 53 75 switch (ru_size) { ··· 108 86 default: 109 87 return "unknown"; 110 88 } 89 + } 90 + 91 + static const char* 92 + ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size) 93 + { 94 + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) 95 + return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size); 96 + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) 97 + return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size); 98 + else 99 + return "unknown"; 111 100 } 112 101 113 102 static void ··· 1595 1562 le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp)); 1596 1563 len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1, 1597 1564 htt_stats_buf->ac_mu_mimo_brpoll, 1598 - ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS, "\n\n"); 1565 + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1, 1566 + "\n\n"); 1599 1567 1600 1568 stats_req->buf_len = len; 1601 1569 } ··· 1624 1590 le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp)); 1625 1591 len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1, 1626 1592 htt_stats_buf->ax_mu_mimo_brpoll, 1627 - ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n"); 1593 + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n"); 1628 1594 len += 
scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n", 1629 1595 le32_to_cpu(htt_stats_buf->ax_basic_trigger)); 1630 1596 len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n", ··· 2310 2276 len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size", 2311 2277 htt_stats_buf->ul_mumimo_grp_best_grp_size, 2312 2278 ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); 2313 - len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1, 2314 - htt_stats_buf->ul_mumimo_grp_best_usrs, 2315 - ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n"); 2279 + len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ", 2280 + htt_stats_buf->ul_mumimo_grp_best_usrs, 2281 + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n"); 2316 2282 len += print_array_to_buf(buf, len, 2317 2283 "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)", 2318 2284 htt_stats_buf->ul_mumimo_grp_tputs, ··· 2576 2542 stats_req->buf_len = len; 2577 2543 } 2578 2544 2545 + static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info, 2546 + int idx, char *str_buf) 2547 + { 2548 + u64 page_timestamp; 2549 + u16 index = 0; 2550 + 2551 + page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb); 2552 + 2553 + index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, 2554 + "Index - %u ; Page Number - %u ; ", 2555 + idx, le32_to_cpu(pg_info->page_num)); 2556 + index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, 2557 + "Num of pages - %u ; Timestamp - %lluus\n", 2558 + le32_to_cpu(pg_info->num_pgs), page_timestamp); 2559 + } 2560 + 2561 + static void 2562 + ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len, 2563 + struct debug_htt_stats_req *stats_req) 2564 + { 2565 + const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf; 2566 + u32 len = stats_req->buf_len; 2567 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2568 + u32 dword_lock, dword_unlock; 2569 + int i; 2570 + u8 *buf = 
stats_req->buf; 2571 + u8 pg_locked; 2572 + u8 pg_unlock; 2573 + char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0}; 2574 + 2575 + if (tag_len < sizeof(*stat_buf)) 2576 + return; 2577 + 2578 + dword_lock = le32_get_bits(stat_buf->info2, 2579 + ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2); 2580 + dword_unlock = le32_get_bits(stat_buf->info2, 2581 + ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2); 2582 + 2583 + pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED; 2584 + pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED; 2585 + 2586 + len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n"); 2587 + len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n", 2588 + le32_get_bits(stat_buf->info0, 2589 + ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0)); 2590 + len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n", 2591 + le32_get_bits(stat_buf->info0, 2592 + ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0)); 2593 + len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n", 2594 + le32_get_bits(stat_buf->info1, 2595 + ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1)); 2596 + len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n", 2597 + le32_get_bits(stat_buf->info1, 2598 + ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1)); 2599 + 2600 + len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n"); 2601 + len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n", 2602 + dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1)); 2603 + 2604 + for (i = 0; i < ATH12K_PAGER_MAX; i++) { 2605 + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); 2606 + ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i], 2607 + i, str_buf); 2608 + len += scnprintf(buf + len, buf_len - len, "%s", str_buf); 2609 + } 2610 + 2611 + len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n"); 2612 + len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n", 2613 + dword_unlock ? 
dword_unlock - 1 : ATH12K_PAGER_MAX - 1); 2614 + 2615 + for (i = 0; i < ATH12K_PAGER_MAX; i++) { 2616 + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); 2617 + ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i], 2618 + i, str_buf); 2619 + len += scnprintf(buf + len, buf_len - len, "%s", str_buf); 2620 + } 2621 + 2622 + len += scnprintf(buf + len, buf_len - len, "\n"); 2623 + 2624 + stats_req->buf_len = len; 2625 + } 2626 + 2627 + static void 2628 + ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len, 2629 + struct debug_htt_stats_req *stats_req) 2630 + { 2631 + const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf; 2632 + u32 len = stats_req->buf_len; 2633 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2634 + u8 *buf = stats_req->buf, i; 2635 + 2636 + if (tag_len < sizeof(*htt_stats_buf)) 2637 + return; 2638 + 2639 + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n"); 2640 + for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++) 2641 + len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n", 2642 + i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i])); 2643 + for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++) 2644 + len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n", 2645 + i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i])); 2646 + len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n", 2647 + le32_to_cpu(htt_stats_buf->false_radar_cnt), 2648 + le32_to_cpu(htt_stats_buf->fw_run_time)); 2649 + len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n", 2650 + le32_to_cpu(htt_stats_buf->radar_cs_cnt)); 2651 + len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n", 2652 + a_sle32_to_cpu(htt_stats_buf->ani_level)); 2653 + 2654 + stats_req->buf_len = len; 2655 + } 2656 + 2657 + static void 2658 + ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len, 2659 + struct debug_htt_stats_req *stats_req) 2660 + { 2661 + const struct 
ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf; 2662 + u32 len = stats_req->buf_len; 2663 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2664 + u8 *buf = stats_req->buf; 2665 + 2666 + if (tag_len < sizeof(*htt_stats_buf)) 2667 + return; 2668 + 2669 + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n"); 2670 + len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n", 2671 + le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt)); 2672 + len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n", 2673 + le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt)); 2674 + len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n", 2675 + le32_to_cpu(htt_stats_buf->mactx_abort_cnt)); 2676 + len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n", 2677 + le32_to_cpu(htt_stats_buf->macrx_abort_cnt)); 2678 + len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n", 2679 + le32_to_cpu(htt_stats_buf->phytx_abort_cnt)); 2680 + len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n", 2681 + le32_to_cpu(htt_stats_buf->phyrx_abort_cnt)); 2682 + len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n", 2683 + le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt)); 2684 + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n", 2685 + le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt)); 2686 + len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n", 2687 + le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt)); 2688 + len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt, 2689 + ATH12K_HTT_MAX_RX_PKT_CNT, "\n"); 2690 + len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt", 2691 + htt_stats_buf->rx_pkt_crc_pass_cnt, 2692 + ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n"); 2693 + len += print_array_to_buf(buf, len, "per_blk_err_cnt", 2694 + htt_stats_buf->per_blk_err_cnt, 2695 + ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n"); 2696 + len 
+= print_array_to_buf(buf, len, "rx_ota_err_cnt", 2697 + htt_stats_buf->rx_ota_err_cnt, 2698 + ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n"); 2699 + 2700 + stats_req->buf_len = len; 2701 + } 2702 + 2703 + static void 2704 + ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len, 2705 + struct debug_htt_stats_req *stats_req) 2706 + { 2707 + const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf; 2708 + u32 len = stats_req->buf_len; 2709 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2710 + u8 *buf = stats_req->buf; 2711 + 2712 + if (tag_len < sizeof(*htt_stats_buf)) 2713 + return; 2714 + 2715 + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n"); 2716 + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", 2717 + le32_to_cpu(htt_stats_buf->pdev_id)); 2718 + len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n", 2719 + le32_to_cpu(htt_stats_buf->chan_mhz)); 2720 + len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n", 2721 + le32_to_cpu(htt_stats_buf->chan_band_center_freq1)); 2722 + len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n", 2723 + le32_to_cpu(htt_stats_buf->chan_band_center_freq2)); 2724 + len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n", 2725 + le32_to_cpu(htt_stats_buf->chan_phy_mode)); 2726 + len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n", 2727 + le32_to_cpu(htt_stats_buf->chan_flags)); 2728 + len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", 2729 + le32_to_cpu(htt_stats_buf->chan_num)); 2730 + len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n", 2731 + le32_to_cpu(htt_stats_buf->reset_cause)); 2732 + len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n", 2733 + le32_to_cpu(htt_stats_buf->prev_reset_cause)); 2734 + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n", 2735 + le32_to_cpu(htt_stats_buf->phy_warm_reset_src)); 2736 + len += scnprintf(buf + len, 
buf_len - len, "rx_gain_tbl_mode = %d\n", 2737 + le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode)); 2738 + len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n", 2739 + le32_to_cpu(htt_stats_buf->xbar_val)); 2740 + len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n", 2741 + le32_to_cpu(htt_stats_buf->force_calibration)); 2742 + len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n", 2743 + le32_to_cpu(htt_stats_buf->phyrf_mode)); 2744 + len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n", 2745 + le32_to_cpu(htt_stats_buf->phy_homechan)); 2746 + len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n", 2747 + le32_to_cpu(htt_stats_buf->phy_tx_ch_mask)); 2748 + len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n", 2749 + le32_to_cpu(htt_stats_buf->phy_rx_ch_mask)); 2750 + len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n", 2751 + le32_to_cpu(htt_stats_buf->phybb_ini_mask)); 2752 + len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n", 2753 + le32_to_cpu(htt_stats_buf->phyrf_ini_mask)); 2754 + len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n", 2755 + le32_to_cpu(htt_stats_buf->phy_dfs_en_mask)); 2756 + len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n", 2757 + le32_to_cpu(htt_stats_buf->phy_sscan_en_mask)); 2758 + len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n", 2759 + le32_to_cpu(htt_stats_buf->phy_synth_sel_mask)); 2760 + len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n", 2761 + le32_to_cpu(htt_stats_buf->phy_adfs_freq)); 2762 + len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n", 2763 + le32_to_cpu(htt_stats_buf->cck_fir_settings)); 2764 + len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n", 2765 + le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan)); 2766 + len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n", 2767 + 
le32_to_cpu(htt_stats_buf->cca_thresh)); 2768 + len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n", 2769 + le32_to_cpu(htt_stats_buf->dyn_cca_status)); 2770 + len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n", 2771 + le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw)); 2772 + len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n", 2773 + le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw)); 2774 + 2775 + stats_req->buf_len = len; 2776 + } 2777 + 2778 + static void 2779 + ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len, 2780 + struct debug_htt_stats_req *stats_req) 2781 + { 2782 + const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf; 2783 + u32 len = stats_req->buf_len; 2784 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2785 + u8 *buf = stats_req->buf; 2786 + 2787 + if (tag_len < sizeof(*htt_stats_buf)) 2788 + return; 2789 + 2790 + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n"); 2791 + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", 2792 + le32_to_cpu(htt_stats_buf->pdev_id)); 2793 + len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n", 2794 + le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt)); 2795 + len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n", 2796 + le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt)); 2797 + len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n", 2798 + le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt)); 2799 + len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n", 2800 + le32_to_cpu(htt_stats_buf->force_calibration_cnt)); 2801 + len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n", 2802 + le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt)); 2803 + len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n", 2804 + le32_to_cpu(htt_stats_buf->temperature_recal_cnt)); 
2805 + 2806 + stats_req->buf_len = len; 2807 + } 2808 + 2809 + static void 2810 + ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len, 2811 + struct debug_htt_stats_req *stats_req) 2812 + { 2813 + const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf; 2814 + u32 len = stats_req->buf_len; 2815 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2816 + u8 *buf = stats_req->buf; 2817 + 2818 + if (tag_len < sizeof(*htt_stats_buf)) 2819 + return; 2820 + 2821 + len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n"); 2822 + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", 2823 + le32_to_cpu(htt_stats_buf->pdev_id)); 2824 + len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n", 2825 + le32_to_cpu(htt_stats_buf->tx_power_scale)); 2826 + len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n", 2827 + le32_to_cpu(htt_stats_buf->tx_power_scale_db)); 2828 + len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n", 2829 + le32_to_cpu(htt_stats_buf->min_negative_tx_power)); 2830 + len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n", 2831 + le32_to_cpu(htt_stats_buf->reg_ctl_domain)); 2832 + len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n", 2833 + le32_to_cpu(htt_stats_buf->twice_max_rd_power)); 2834 + len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n", 2835 + le32_to_cpu(htt_stats_buf->max_tx_power)); 2836 + len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n", 2837 + le32_to_cpu(htt_stats_buf->home_max_tx_power)); 2838 + len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n", 2839 + le32_to_cpu(htt_stats_buf->psd_power)); 2840 + len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n", 2841 + le32_to_cpu(htt_stats_buf->eirp_power)); 2842 + len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n", 2843 + le32_to_cpu(htt_stats_buf->power_type_6ghz)); 2844 + len += print_array_to_buf(buf, len, 
"max_reg_allowed_power", 2845 + htt_stats_buf->max_reg_allowed_power, 2846 + ATH12K_HTT_STATS_MAX_CHAINS, "\n"); 2847 + len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz", 2848 + htt_stats_buf->max_reg_allowed_power_6ghz, 2849 + ATH12K_HTT_STATS_MAX_CHAINS, "\n"); 2850 + len += print_array_to_buf(buf, len, "sub_band_cfreq", 2851 + htt_stats_buf->sub_band_cfreq, 2852 + ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n"); 2853 + len += print_array_to_buf(buf, len, "sub_band_txpower", 2854 + htt_stats_buf->sub_band_txpower, 2855 + ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n"); 2856 + 2857 + stats_req->buf_len = len; 2858 + } 2859 + 2860 + static void 2861 + ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len, 2862 + struct debug_htt_stats_req *stats_req) 2863 + { 2864 + const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf; 2865 + u64 drop_count; 2866 + u32 len = stats_req->buf_len; 2867 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2868 + u8 *buf = stats_req->buf; 2869 + 2870 + if (tag_len < sizeof(*htt_stats_buf)) 2871 + return; 2872 + 2873 + drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi, 2874 + htt_stats_buf->inv_peers_msdu_drop_count_lo); 2875 + 2876 + len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n"); 2877 + len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n", 2878 + drop_count); 2879 + 2880 + stats_req->buf_len = len; 2881 + } 2882 + 2883 + static void 2884 + ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len, 2885 + struct debug_htt_stats_req *stats_req) 2886 + { 2887 + const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf; 2888 + u32 len = stats_req->buf_len; 2889 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2890 + u32 ru_size_cnt = 0; 2891 + u32 rc_mode, ru_type; 2892 + u8 *buf = stats_req->buf, i; 2893 + const char *mode_prefix; 2894 + 2895 + if (tag_len < sizeof(*stats_buf)) 2896 + return; 2897 + 2898 + 
rc_mode = le32_to_cpu(stats_buf->rc_mode); 2899 + ru_type = le32_to_cpu(stats_buf->ru_type); 2900 + 2901 + switch (rc_mode) { 2902 + case ATH12K_HTT_STATS_RC_MODE_DLSU: 2903 + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n"); 2904 + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n"); 2905 + mode_prefix = "su"; 2906 + break; 2907 + case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO: 2908 + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n"); 2909 + mode_prefix = "mu"; 2910 + break; 2911 + case ATH12K_HTT_STATS_RC_MODE_DLOFDMA: 2912 + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n"); 2913 + mode_prefix = "ofdma"; 2914 + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) 2915 + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS; 2916 + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) 2917 + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; 2918 + break; 2919 + case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO: 2920 + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n"); 2921 + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n"); 2922 + mode_prefix = "ulmu"; 2923 + break; 2924 + case ATH12K_HTT_STATS_RC_MODE_ULOFDMA: 2925 + len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n"); 2926 + mode_prefix = "ulofdma"; 2927 + if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY) 2928 + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS; 2929 + else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU) 2930 + ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; 2931 + break; 2932 + default: 2933 + return; 2934 + } 2935 + 2936 + len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n"); 2937 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 2938 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 2939 + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", 2940 + mode_prefix); 2941 + else 2942 + len += 
scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", 2943 + mode_prefix); 2944 + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) 2945 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 2946 + le32_to_cpu(stats_buf->per_bw[i].ppdus_tried)); 2947 + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, 2948 + le32_to_cpu(stats_buf->per_bw320.ppdus_tried)); 2949 + 2950 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 2951 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 2952 + len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ", 2953 + mode_prefix); 2954 + else 2955 + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", 2956 + mode_prefix); 2957 + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) 2958 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 2959 + le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed)); 2960 + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, 2961 + le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed)); 2962 + 2963 + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); 2964 + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) 2965 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 2966 + le32_to_cpu(stats_buf->per_bw[i].mpdus_tried)); 2967 + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, 2968 + le32_to_cpu(stats_buf->per_bw320.mpdus_tried)); 2969 + 2970 + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); 2971 + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++) 2972 + len += scnprintf(buf + len, buf_len - len, " %u:%u", i, 2973 + le32_to_cpu(stats_buf->per_bw[i].mpdus_failed)); 2974 + len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i, 2975 + le32_to_cpu(stats_buf->per_bw320.mpdus_failed)); 2976 + 2977 + len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n"); 2978 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 2979 + rc_mode == 
ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 2980 + len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ", 2981 + mode_prefix); 2982 + else 2983 + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", 2984 + mode_prefix); 2985 + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) 2986 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, 2987 + le32_to_cpu(stats_buf->per_nss[i].ppdus_tried)); 2988 + len += scnprintf(buf + len, buf_len - len, "\n"); 2989 + 2990 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 2991 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 2992 + len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ", 2993 + mode_prefix); 2994 + else 2995 + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", 2996 + mode_prefix); 2997 + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) 2998 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, 2999 + le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed)); 3000 + len += scnprintf(buf + len, buf_len - len, "\n"); 3001 + 3002 + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); 3003 + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) 3004 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, 3005 + le32_to_cpu(stats_buf->per_nss[i].mpdus_tried)); 3006 + len += scnprintf(buf + len, buf_len - len, "\n"); 3007 + 3008 + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); 3009 + for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++) 3010 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1, 3011 + le32_to_cpu(stats_buf->per_nss[i].mpdus_failed)); 3012 + len += scnprintf(buf + len, buf_len - len, "\n"); 3013 + 3014 + len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n"); 3015 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 3016 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 3017 + len += scnprintf(buf + len, buf_len - len, 
"data_ppdus_%s = ", 3018 + mode_prefix); 3019 + else 3020 + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", 3021 + mode_prefix); 3022 + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) 3023 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 3024 + le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried)); 3025 + len += scnprintf(buf + len, buf_len - len, "\n"); 3026 + 3027 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA || 3028 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO) 3029 + len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ", 3030 + mode_prefix); 3031 + else 3032 + len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ", 3033 + mode_prefix); 3034 + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) 3035 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 3036 + le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed)); 3037 + len += scnprintf(buf + len, buf_len - len, "\n"); 3038 + 3039 + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix); 3040 + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) 3041 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 3042 + le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried)); 3043 + len += scnprintf(buf + len, buf_len - len, "\n"); 3044 + 3045 + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix); 3046 + for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++) 3047 + len += scnprintf(buf + len, buf_len - len, " %u:%u ", i, 3048 + le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed)); 3049 + len += scnprintf(buf + len, buf_len - len, "\n"); 3050 + 3051 + if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA || 3052 + rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) && 3053 + ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) { 3054 + len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n"); 3055 + 3056 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) 3057 + len += scnprintf(buf + len, buf_len - len, 
"data_ppdus_%s = ", 3058 + mode_prefix); 3059 + else 3060 + len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ", 3061 + mode_prefix); 3062 + for (i = 0; i < ru_size_cnt; i++) 3063 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 3064 + ath12k_tx_ru_size_to_str(ru_type, i), 3065 + le32_to_cpu(stats_buf->ru[i].ppdus_tried)); 3066 + len += scnprintf(buf + len, buf_len - len, "\n"); 3067 + 3068 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) 3069 + len += scnprintf(buf + len, buf_len - len, 3070 + "non_data_ppdus_%s = ", mode_prefix); 3071 + else 3072 + len += scnprintf(buf + len, buf_len - len, 3073 + "ppdus_ack_failed_%s = ", mode_prefix); 3074 + for (i = 0; i < ru_size_cnt; i++) 3075 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 3076 + ath12k_tx_ru_size_to_str(ru_type, i), 3077 + le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed)); 3078 + len += scnprintf(buf + len, buf_len - len, "\n"); 3079 + 3080 + len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", 3081 + mode_prefix); 3082 + for (i = 0; i < ru_size_cnt; i++) 3083 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 3084 + ath12k_tx_ru_size_to_str(ru_type, i), 3085 + le32_to_cpu(stats_buf->ru[i].mpdus_tried)); 3086 + len += scnprintf(buf + len, buf_len - len, "\n"); 3087 + 3088 + len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", 3089 + mode_prefix); 3090 + for (i = 0; i < ru_size_cnt; i++) 3091 + len += scnprintf(buf + len, buf_len - len, " %s:%u ", 3092 + ath12k_tx_ru_size_to_str(ru_type, i), 3093 + le32_to_cpu(stats_buf->ru[i].mpdus_failed)); 3094 + len += scnprintf(buf + len, buf_len - len, "\n\n"); 3095 + } 3096 + 3097 + if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) { 3098 + len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw = %u\n", 3099 + le32_to_cpu(stats_buf->last_probed_bw)); 3100 + len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n", 3101 + le32_to_cpu(stats_buf->last_probed_nss)); 3102 + len += scnprintf(buf + len, 
buf_len - len, "last_probed_mcs = %u\n", 3103 + le32_to_cpu(stats_buf->last_probed_mcs)); 3104 + len += print_array_to_buf(buf, len, "MU Probe count per RC MODE", 3105 + stats_buf->probe_cnt, 3106 + ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n"); 3107 + } 3108 + 3109 + stats_req->buf_len = len; 3110 + } 3111 + 2579 3112 static void 2580 3113 ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len, 2581 3114 struct debug_htt_stats_req *stats_req) ··· 3162 2561 time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms, 3163 2562 htt_stats_buf->reset_time_lo_ms); 3164 2563 len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time); 3165 - 3166 2564 time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms, 3167 2565 htt_stats_buf->disengage_time_lo_ms); 3168 2566 len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time); ··· 3280 2680 len += scnprintf(buf + len, buf_len - len, "\n"); 3281 2681 len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1, 3282 2682 htt_stats_buf->be_ofdma_tx_nss, 3283 - ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, 2683 + ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS, 3284 2684 "\n"); 3285 2685 len += print_array_to_buf(buf, len, "be_ofdma_tx_bw", 3286 2686 htt_stats_buf->be_ofdma_tx_bw, ··· 3292 2692 ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n"); 3293 2693 } 3294 2694 len += scnprintf(buf + len, buf_len - len, "\n"); 2695 + 2696 + stats_req->buf_len = len; 2697 + } 2698 + 2699 + static void 2700 + ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len, 2701 + struct debug_htt_stats_req *stats_req) 2702 + { 2703 + const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf; 2704 + u8 *buf = stats_req->buf; 2705 + u32 len = stats_req->buf_len; 2706 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2707 + u32 mac_id_word; 2708 + 2709 + if (tag_len < sizeof(*htt_stats_buf)) 2710 + return; 2711 + 2712 + mac_id_word = 
le32_to_cpu(htt_stats_buf->mac_id__word); 2713 + 2714 + len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n"); 2715 + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", 2716 + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); 2717 + len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n", 2718 + le32_to_cpu(htt_stats_buf->basic_trigger_across_bss)); 2719 + len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n", 2720 + le32_to_cpu(htt_stats_buf->basic_trigger_within_bss)); 2721 + len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n", 2722 + le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss)); 2723 + len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n", 2724 + le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss)); 2725 + len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n", 2726 + le32_to_cpu(htt_stats_buf->mu_rts_across_bss)); 2727 + len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n", 2728 + le32_to_cpu(htt_stats_buf->mu_rts_within_bss)); 2729 + len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n", 2730 + le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss)); 2731 + len += scnprintf(buf + len, buf_len - len, 2732 + "ul_mumimo_trigger_within_bss = %u\n\n", 2733 + le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss)); 3295 2734 3296 2735 stats_req->buf_len = len; 3297 2736 } ··· 3508 2869 case HTT_STATS_PDEV_OBSS_PD_TAG: 3509 2870 ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); 3510 2871 break; 2872 + case HTT_STATS_DLPAGER_STATS_TAG: 2873 + ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req); 2874 + break; 2875 + case HTT_STATS_PHY_STATS_TAG: 2876 + ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req); 2877 + break; 2878 + case HTT_STATS_PHY_COUNTERS_TAG: 2879 + ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req); 2880 + break; 2881 + case 
HTT_STATS_PHY_RESET_STATS_TAG: 2882 + ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req); 2883 + break; 2884 + case HTT_STATS_PHY_RESET_COUNTERS_TAG: 2885 + ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req); 2886 + break; 2887 + case HTT_STATS_PHY_TPC_STATS_TAG: 2888 + ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req); 2889 + break; 2890 + case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG: 2891 + ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req); 2892 + break; 2893 + case HTT_STATS_PER_RATE_STATS_TAG: 2894 + ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req); 2895 + break; 3511 2896 case HTT_STATS_DMAC_RESET_STATS_TAG: 3512 2897 ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req); 3513 2898 break; ··· 3540 2877 break; 3541 2878 case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG: 3542 2879 ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req); 2880 + break; 2881 + case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG: 2882 + ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len, 2883 + stats_req); 3543 2884 break; 3544 2885 default: 3545 2886 break;
+216 -2
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
··· 135 135 ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, 136 136 ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, 137 137 ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, 138 + ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, 139 + ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, 140 + ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, 141 + ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, 138 142 ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, 139 143 ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, 140 144 ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, 145 + ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, 141 146 142 147 /* keep this last */ 143 148 ATH12K_DBG_HTT_NUM_EXT_STATS, ··· 199 194 HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, 200 195 HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, 201 196 HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, 197 + HTT_STATS_DLPAGER_STATS_TAG = 120, 198 + HTT_STATS_PHY_COUNTERS_TAG = 121, 199 + HTT_STATS_PHY_STATS_TAG = 122, 200 + HTT_STATS_PHY_RESET_COUNTERS_TAG = 123, 201 + HTT_STATS_PHY_RESET_STATS_TAG = 124, 202 + HTT_STATS_SOC_TXRX_STATS_COMMON_TAG = 125, 203 + HTT_STATS_PER_RATE_STATS_TAG = 128, 202 204 HTT_STATS_MU_PPDU_DIST_TAG = 129, 203 205 HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130, 204 206 HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135, ··· 213 201 HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138, 214 202 HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139, 215 203 HTT_STATS_DMAC_RESET_STATS_TAG = 155, 204 + HTT_STATS_PHY_TPC_STATS_TAG = 157, 216 205 HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165, 206 + HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176, 217 207 218 208 HTT_STATS_MAX_TAG, 219 209 }; ··· 1068 1054 __le32 num_sr_ppdu_abort_flush_cnt; 1069 1055 } __packed; 1070 1056 1057 + enum ath12k_htt_stats_page_lock_state { 1058 + ATH12K_HTT_STATS_PAGE_LOCKED = 0, 1059 + ATH12K_HTT_STATS_PAGE_UNLOCKED = 1, 1060 + ATH12K_NUM_PG_LOCK_STATE 1061 + }; 1062 + 1063 + #define ATH12K_PAGER_MAX 10 1064 + 1065 + #define 
ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0 GENMASK(7, 0) 1066 + #define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0 GENMASK(15, 8) 1067 + #define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1 GENMASK(15, 0) 1068 + #define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1 GENMASK(31, 16) 1069 + #define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2 GENMASK(15, 0) 1070 + #define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2 GENMASK(31, 16) 1071 + 1072 + struct ath12k_htt_pgs_info { 1073 + __le32 page_num; 1074 + __le32 num_pgs; 1075 + __le32 ts_lsb; 1076 + __le32 ts_msb; 1077 + } __packed; 1078 + 1079 + struct ath12k_htt_dl_pager_stats_tlv { 1080 + __le32 info0; 1081 + __le32 info1; 1082 + __le32 info2; 1083 + struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX]; 1084 + } __packed; 1085 + 1086 + #define ATH12K_HTT_STATS_MAX_CHAINS 8 1087 + #define ATH12K_HTT_MAX_RX_PKT_CNT 8 1088 + #define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT 8 1089 + #define ATH12K_HTT_MAX_PER_BLK_ERR_CNT 20 1090 + #define ATH12K_HTT_MAX_RX_OTA_ERR_CNT 14 1091 + #define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE 16 1092 + 1093 + struct ath12k_htt_phy_stats_tlv { 1094 + a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS]; 1095 + __le32 false_radar_cnt; 1096 + __le32 radar_cs_cnt; 1097 + a_sle32 ani_level; 1098 + __le32 fw_run_time; 1099 + a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS]; 1100 + } __packed; 1101 + 1102 + struct ath12k_htt_phy_counters_tlv { 1103 + __le32 rx_ofdma_timing_err_cnt; 1104 + __le32 rx_cck_fail_cnt; 1105 + __le32 mactx_abort_cnt; 1106 + __le32 macrx_abort_cnt; 1107 + __le32 phytx_abort_cnt; 1108 + __le32 phyrx_abort_cnt; 1109 + __le32 phyrx_defer_abort_cnt; 1110 + __le32 rx_gain_adj_lstf_event_cnt; 1111 + __le32 rx_gain_adj_non_legacy_cnt; 1112 + __le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT]; 1113 + __le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT]; 1114 + __le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT]; 1115 + __le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT]; 
1116 + } __packed; 1117 + 1118 + struct ath12k_htt_phy_reset_stats_tlv { 1119 + __le32 pdev_id; 1120 + __le32 chan_mhz; 1121 + __le32 chan_band_center_freq1; 1122 + __le32 chan_band_center_freq2; 1123 + __le32 chan_phy_mode; 1124 + __le32 chan_flags; 1125 + __le32 chan_num; 1126 + __le32 reset_cause; 1127 + __le32 prev_reset_cause; 1128 + __le32 phy_warm_reset_src; 1129 + __le32 rx_gain_tbl_mode; 1130 + __le32 xbar_val; 1131 + __le32 force_calibration; 1132 + __le32 phyrf_mode; 1133 + __le32 phy_homechan; 1134 + __le32 phy_tx_ch_mask; 1135 + __le32 phy_rx_ch_mask; 1136 + __le32 phybb_ini_mask; 1137 + __le32 phyrf_ini_mask; 1138 + __le32 phy_dfs_en_mask; 1139 + __le32 phy_sscan_en_mask; 1140 + __le32 phy_synth_sel_mask; 1141 + __le32 phy_adfs_freq; 1142 + __le32 cck_fir_settings; 1143 + __le32 phy_dyn_pri_chan; 1144 + __le32 cca_thresh; 1145 + __le32 dyn_cca_status; 1146 + __le32 rxdesense_thresh_hw; 1147 + __le32 rxdesense_thresh_sw; 1148 + } __packed; 1149 + 1150 + struct ath12k_htt_phy_reset_counters_tlv { 1151 + __le32 pdev_id; 1152 + __le32 cf_active_low_fail_cnt; 1153 + __le32 cf_active_low_pass_cnt; 1154 + __le32 phy_off_through_vreg_cnt; 1155 + __le32 force_calibration_cnt; 1156 + __le32 rf_mode_switch_phy_off_cnt; 1157 + __le32 temperature_recal_cnt; 1158 + } __packed; 1159 + 1160 + struct ath12k_htt_phy_tpc_stats_tlv { 1161 + __le32 pdev_id; 1162 + __le32 tx_power_scale; 1163 + __le32 tx_power_scale_db; 1164 + __le32 min_negative_tx_power; 1165 + __le32 reg_ctl_domain; 1166 + __le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS]; 1167 + __le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS]; 1168 + __le32 twice_max_rd_power; 1169 + __le32 max_tx_power; 1170 + __le32 home_max_tx_power; 1171 + __le32 psd_power; 1172 + __le32 eirp_power; 1173 + __le32 power_type_6ghz; 1174 + __le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE]; 1175 + __le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE]; 1176 + } __packed; 1177 + 1178 + struct 
ath12k_htt_t2h_soc_txrx_stats_common_tlv { 1179 + __le32 inv_peers_msdu_drop_count_hi; 1180 + __le32 inv_peers_msdu_drop_count_lo; 1181 + } __packed; 1182 + 1071 1183 struct ath12k_htt_dmac_reset_stats_tlv { 1072 1184 __le32 reset_count; 1073 1185 __le32 reset_time_lo_ms; ··· 1225 1085 __le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM]; 1226 1086 } __packed; 1227 1087 1088 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS 4 1089 + #define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS 8 1090 + #define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS 14 1091 + 1228 1092 enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE { 1229 1093 ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26, 1230 1094 ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52, ··· 1249 1105 ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS, 1250 1106 }; 1251 1107 1252 - #define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 1108 + enum ATH12K_HTT_RC_MODE { 1109 + ATH12K_HTT_RC_MODE_SU_OL, 1110 + ATH12K_HTT_RC_MODE_SU_BF, 1111 + ATH12K_HTT_RC_MODE_MU1_INTF, 1112 + ATH12K_HTT_RC_MODE_MU2_INTF, 1113 + ATH12K_HTT_RC_MODE_MU3_INTF, 1114 + ATH12K_HTT_RC_MODE_MU4_INTF, 1115 + ATH12K_HTT_RC_MODE_MU5_INTF, 1116 + ATH12K_HTT_RC_MODE_MU6_INTF, 1117 + ATH12K_HTT_RC_MODE_MU7_INTF, 1118 + ATH12K_HTT_RC_MODE_2D_COUNT 1119 + }; 1120 + 1121 + enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE { 1122 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26, 1123 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52, 1124 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106, 1125 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242, 1126 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484, 1127 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996, 1128 + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2, 1129 + ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS 1130 + }; 1131 + 1132 + enum ath12k_htt_stats_rc_mode { 1133 + ATH12K_HTT_STATS_RC_MODE_DLSU = 0, 1134 + ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1, 1135 + ATH12K_HTT_STATS_RC_MODE_DLOFDMA = 2, 1136 + ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3, 1137 + 
ATH12K_HTT_STATS_RC_MODE_ULOFDMA = 4, 1138 + }; 1139 + 1140 + enum ath12k_htt_stats_ru_type { 1141 + ATH12K_HTT_STATS_RU_TYPE_INVALID, 1142 + ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY, 1143 + ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU, 1144 + }; 1145 + 1146 + struct ath12k_htt_tx_rate_stats { 1147 + __le32 ppdus_tried; 1148 + __le32 ppdus_ack_failed; 1149 + __le32 mpdus_tried; 1150 + __le32 mpdus_failed; 1151 + } __packed; 1152 + 1153 + struct ath12k_htt_tx_per_rate_stats_tlv { 1154 + __le32 rc_mode; 1155 + __le32 last_probed_mcs; 1156 + __le32 last_probed_nss; 1157 + __le32 last_probed_bw; 1158 + struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS]; 1159 + struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS]; 1160 + struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS]; 1161 + struct ath12k_htt_tx_rate_stats per_bw320; 1162 + __le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT]; 1163 + __le32 ru_type; 1164 + struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS]; 1165 + } __packed; 1166 + 1253 1167 #define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16 1254 1168 #define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5 1255 1169 #define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4 ··· 1317 1115 __le32 mac_id__word; 1318 1116 __le32 be_ofdma_tx_ldpc; 1319 1117 __le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS]; 1320 - __le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 1118 + __le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS]; 1321 1119 __le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS]; 1322 1120 __le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS]; 1323 1121 __le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS]; 1324 1122 __le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS]; 1123 + } __packed; 1124 + 1125 + struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv { 1126 + __le32 mac_id__word; 
1127 + __le32 basic_trigger_across_bss; 1128 + __le32 basic_trigger_within_bss; 1129 + __le32 bsr_trigger_across_bss; 1130 + __le32 bsr_trigger_within_bss; 1131 + __le32 mu_rts_across_bss; 1132 + __le32 mu_rts_within_bss; 1133 + __le32 ul_mumimo_trigger_across_bss; 1134 + __le32 ul_mumimo_trigger_within_bss; 1325 1135 } __packed; 1326 1136 1327 1137 #endif
+49 -21
drivers/net/wireless/ath/ath12k/dp.c
··· 41 41 return; 42 42 } 43 43 44 + if (!peer->primary_link) { 45 + spin_unlock_bh(&ab->base_lock); 46 + return; 47 + } 48 + 44 49 ath12k_dp_rx_peer_tid_cleanup(ar, peer); 45 50 crypto_free_shash(peer->tfm_mmic); 46 51 peer->dp_setup_done = false; ··· 982 977 { 983 978 int i; 984 979 980 + if (!ab->mon_reap_timer.function) 981 + return; 982 + 985 983 del_timer_sync(&ab->mon_reap_timer); 986 984 987 985 for (i = 0; i < ab->num_radios; i++) 988 986 ath12k_dp_rx_pdev_free(ab, i); 989 987 } 990 988 991 - void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab) 989 + void ath12k_dp_pdev_pre_alloc(struct ath12k *ar) 992 990 { 993 - struct ath12k *ar; 994 - struct ath12k_pdev_dp *dp; 995 - int i; 991 + struct ath12k_pdev_dp *dp = &ar->dp; 996 992 997 - for (i = 0; i < ab->num_radios; i++) { 998 - ar = ab->pdevs[i].ar; 999 - dp = &ar->dp; 1000 - dp->mac_id = i; 1001 - atomic_set(&dp->num_tx_pending, 0); 1002 - init_waitqueue_head(&dp->tx_empty_waitq); 1003 - 1004 - /* TODO: Add any RXDMA setup required per pdev */ 1005 - } 993 + dp->mac_id = ar->pdev_idx; 994 + atomic_set(&dp->num_tx_pending, 0); 995 + init_waitqueue_head(&dp->tx_empty_waitq); 996 + /* TODO: Add any RXDMA setup required per pdev */ 1006 997 } 1007 998 1008 999 bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab) ··· 1261 1260 if (!ab->hw_params->reoq_lut_support) 1262 1261 return; 1263 1262 1264 - if (!dp->reoq_lut.vaddr) 1265 - return; 1263 + if (dp->reoq_lut.vaddr) { 1264 + ath12k_hif_write32(ab, 1265 + HAL_SEQ_WCSS_UMAC_REO_REG + 1266 + HAL_REO1_QDESC_LUT_BASE0(ab), 0); 1267 + dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE, 1268 + dp->reoq_lut.vaddr, dp->reoq_lut.paddr); 1269 + dp->reoq_lut.vaddr = NULL; 1270 + } 1266 1271 1267 - dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE, 1268 - dp->reoq_lut.vaddr, dp->reoq_lut.paddr); 1269 - dp->reoq_lut.vaddr = NULL; 1270 - 1271 - ath12k_hif_write32(ab, 1272 - HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0); 1272 + if 
(dp->ml_reoq_lut.vaddr) { 1273 + ath12k_hif_write32(ab, 1274 + HAL_SEQ_WCSS_UMAC_REO_REG + 1275 + HAL_REO1_QDESC_LUT_BASE1(ab), 0); 1276 + dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE, 1277 + dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr); 1278 + dp->ml_reoq_lut.vaddr = NULL; 1279 + } 1273 1280 } 1274 1281 1275 1282 void ath12k_dp_free(struct ath12k_base *ab) 1276 1283 { 1277 1284 struct ath12k_dp *dp = &ab->dp; 1278 1285 int i; 1286 + 1287 + if (!dp->ab) 1288 + return; 1279 1289 1280 1290 ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks, 1281 1291 HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring); ··· 1305 1293 1306 1294 ath12k_dp_rx_free(ab); 1307 1295 /* Deinit any SOC level resource */ 1296 + dp->ab = NULL; 1308 1297 } 1309 1298 1310 1299 void ath12k_dp_cc_config(struct ath12k_base *ab) ··· 1607 1594 return -ENOMEM; 1608 1595 } 1609 1596 1597 + dp->ml_reoq_lut.vaddr = dma_alloc_coherent(ab->dev, 1598 + DP_REOQ_LUT_SIZE, 1599 + &dp->ml_reoq_lut.paddr, 1600 + GFP_KERNEL | __GFP_ZERO); 1601 + if (!dp->ml_reoq_lut.vaddr) { 1602 + ath12k_warn(ab, "failed to allocate memory for ML reoq table"); 1603 + dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE, 1604 + dp->reoq_lut.vaddr, dp->reoq_lut.paddr); 1605 + dp->reoq_lut.vaddr = NULL; 1606 + return -ENOMEM; 1607 + } 1608 + 1610 1609 ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 1611 1610 dp->reoq_lut.paddr); 1611 + ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab), 1612 + dp->ml_reoq_lut.paddr >> 8); 1613 + 1612 1614 return 0; 1613 1615 } 1614 1616
+2 -1
drivers/net/wireless/ath/ath12k/dp.h
··· 368 368 struct dp_rxdma_mon_ring rxdma_mon_buf_ring; 369 369 struct dp_rxdma_mon_ring tx_mon_buf_ring; 370 370 struct ath12k_reo_q_addr_lut reoq_lut; 371 + struct ath12k_reo_q_addr_lut ml_reoq_lut; 371 372 }; 372 373 373 374 /* HTT definitions */ ··· 1806 1805 int ath12k_dp_alloc(struct ath12k_base *ab); 1807 1806 void ath12k_dp_cc_config(struct ath12k_base *ab); 1808 1807 int ath12k_dp_pdev_alloc(struct ath12k_base *ab); 1809 - void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab); 1808 + void ath12k_dp_pdev_pre_alloc(struct ath12k *ar); 1810 1809 void ath12k_dp_pdev_free(struct ath12k_base *ab); 1811 1810 int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id, 1812 1811 int mac_id, enum hal_ring_type ring_type);
+74 -22
drivers/net/wireless/ath/ath12k/dp_rx.c
··· 740 740 { 741 741 struct ath12k_reo_queue_ref *qref; 742 742 struct ath12k_dp *dp = &ab->dp; 743 + bool ml_peer = false; 743 744 744 745 if (!ab->hw_params->reoq_lut_support) 745 746 return; 746 747 747 - /* TODO: based on ML peer or not, select the LUT. below assumes non 748 - * ML peer 749 - */ 750 - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 751 - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 748 + if (peer_id & ATH12K_PEER_ML_ID_VALID) { 749 + peer_id &= ~ATH12K_PEER_ML_ID_VALID; 750 + ml_peer = true; 751 + } 752 + 753 + if (ml_peer) 754 + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 755 + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 756 + else 757 + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 758 + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 752 759 753 760 qref->info0 = u32_encode_bits(lower_32_bits(paddr), 754 761 BUFFER_ADDR_INFO0_ADDR); ··· 768 761 { 769 762 struct ath12k_reo_queue_ref *qref; 770 763 struct ath12k_dp *dp = &ab->dp; 764 + bool ml_peer = false; 771 765 772 766 if (!ab->hw_params->reoq_lut_support) 773 767 return; 774 768 775 - /* TODO: based on ML peer or not, select the LUT. 
below assumes non 776 - * ML peer 777 - */ 778 - qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 779 - (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 769 + if (peer_id & ATH12K_PEER_ML_ID_VALID) { 770 + peer_id &= ~ATH12K_PEER_ML_ID_VALID; 771 + ml_peer = true; 772 + } 773 + 774 + if (ml_peer) 775 + qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr + 776 + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 777 + else 778 + qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr + 779 + (peer_id * (IEEE80211_NUM_TIDS + 1) + tid); 780 780 781 781 qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR); 782 782 qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) | ··· 816 802 rx_tid->vaddr = NULL; 817 803 } 818 804 819 - ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); 805 + if (peer->mlo) 806 + ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid); 807 + else 808 + ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid); 820 809 821 810 rx_tid->active = false; 822 811 } ··· 957 940 return -ENOENT; 958 941 } 959 942 960 - if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) { 943 + if (!peer->primary_link) { 944 + spin_unlock_bh(&ab->base_lock); 945 + return 0; 946 + } 947 + 948 + if (ab->hw_params->reoq_lut_support && 949 + (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) { 961 950 spin_unlock_bh(&ab->base_lock); 962 951 ath12k_warn(ab, "reo qref table is not setup\n"); 963 952 return -EINVAL; ··· 1044 1021 /* Update the REO queue LUT at the corresponding peer id 1045 1022 * and tid with qaddr. 
1046 1023 */ 1047 - ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr); 1024 + if (peer->mlo) 1025 + ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr); 1026 + else 1027 + ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr); 1028 + 1048 1029 spin_unlock_bh(&ab->base_lock); 1049 1030 } else { 1050 1031 spin_unlock_bh(&ab->base_lock); ··· 1065 1038 } 1066 1039 1067 1040 int ath12k_dp_rx_ampdu_start(struct ath12k *ar, 1068 - struct ieee80211_ampdu_params *params) 1041 + struct ieee80211_ampdu_params *params, 1042 + u8 link_id) 1069 1043 { 1070 1044 struct ath12k_base *ab = ar->ab; 1071 1045 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1072 - struct ath12k_link_sta *arsta = &ahsta->deflink; 1073 - int vdev_id = arsta->arvif->vdev_id; 1046 + struct ath12k_link_sta *arsta; 1047 + int vdev_id; 1074 1048 int ret; 1075 1049 1076 - ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id, 1050 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1051 + 1052 + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1053 + ahsta->link[link_id]); 1054 + if (!arsta) 1055 + return -ENOLINK; 1056 + 1057 + vdev_id = arsta->arvif->vdev_id; 1058 + 1059 + ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id, 1077 1060 params->tid, params->buf_size, 1078 1061 params->ssn, arsta->ahsta->pn_type); 1079 1062 if (ret) ··· 1093 1056 } 1094 1057 1095 1058 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, 1096 - struct ieee80211_ampdu_params *params) 1059 + struct ieee80211_ampdu_params *params, 1060 + u8 link_id) 1097 1061 { 1098 1062 struct ath12k_base *ab = ar->ab; 1099 1063 struct ath12k_peer *peer; 1100 1064 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); 1101 - struct ath12k_link_sta *arsta = &ahsta->deflink; 1102 - int vdev_id = arsta->arvif->vdev_id; 1065 + struct ath12k_link_sta *arsta; 1066 + int vdev_id; 1103 1067 bool active; 1104 1068 int ret; 1105 1069 1070 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 1071 + 
1072 + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 1073 + ahsta->link[link_id]); 1074 + if (!arsta) 1075 + return -ENOLINK; 1076 + 1077 + vdev_id = arsta->arvif->vdev_id; 1078 + 1106 1079 spin_lock_bh(&ab->base_lock); 1107 1080 1108 - peer = ath12k_peer_find(ab, vdev_id, params->sta->addr); 1081 + peer = ath12k_peer_find(ab, vdev_id, arsta->addr); 1109 1082 if (!peer) { 1110 1083 spin_unlock_bh(&ab->base_lock); 1111 1084 ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n"); ··· 2828 2781 return -ENOENT; 2829 2782 } 2830 2783 2784 + if (!peer->primary_link) { 2785 + spin_unlock_bh(&ab->base_lock); 2786 + return 0; 2787 + } 2788 + 2831 2789 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) { 2832 2790 rx_tid = &peer->rx_tid[i]; 2833 2791 rx_tid->ab = ab; ··· 3964 3912 ath12k_hal_srng_access_begin(ab, srng); 3965 3913 3966 3914 while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { 3967 - tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); 3915 + tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG); 3968 3916 3969 3917 switch (tag) { 3970 3918 case HAL_REO_GET_QUEUE_STATS_STATUS:
+4 -2
drivers/net/wireless/ath/ath12k/dp_rx.h
··· 85 85 } 86 86 87 87 int ath12k_dp_rx_ampdu_start(struct ath12k *ar, 88 - struct ieee80211_ampdu_params *params); 88 + struct ieee80211_ampdu_params *params, 89 + u8 link_id); 89 90 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, 90 - struct ieee80211_ampdu_params *params); 91 + struct ieee80211_ampdu_params *params, 92 + u8 link_id); 91 93 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, 92 94 const u8 *peer_addr, 93 95 enum set_key_cmd key_cmd,
+1 -1
drivers/net/wireless/ath/ath12k/hal_desc.h
··· 581 581 #define HAL_TLV_64_HDR_LEN GENMASK(21, 10) 582 582 583 583 struct hal_tlv_64_hdr { 584 - u64 tl; 584 + __le64 tl; 585 585 u8 value[]; 586 586 } __packed; 587 587
+6 -6
drivers/net/wireless/ath/ath12k/hal_rx.c
··· 26 26 { 27 27 struct hal_reo_get_queue_stats *desc; 28 28 29 - tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) | 30 - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 29 + tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) | 30 + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 31 31 32 32 desc = (struct hal_reo_get_queue_stats *)tlv->value; 33 33 memset_startat(desc, 0, queue_addr_lo); ··· 59 59 hal->current_blk_index = avail_slot; 60 60 } 61 61 62 - tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) | 63 - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 62 + tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) | 63 + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 64 64 65 65 desc = (struct hal_reo_flush_cache *)tlv->value; 66 66 memset_startat(desc, 0, cache_addr_lo); ··· 97 97 { 98 98 struct hal_reo_update_rx_queue *desc; 99 99 100 - tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) | 101 - u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 100 + tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) | 101 + le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN); 102 102 103 103 desc = (struct hal_reo_update_rx_queue *)tlv->value; 104 104 memset_startat(desc, 0, queue_addr_lo);
+1524 -475
drivers/net/wireless/ath/ath12k/mac.c
··· 501 501 return 0; 502 502 } 503 503 504 + static struct ieee80211_bss_conf * 505 + ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif) 506 + { 507 + struct ieee80211_vif *vif = arvif->ahvif->vif; 508 + struct ieee80211_bss_conf *link_conf; 509 + struct ath12k *ar = arvif->ar; 510 + 511 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 512 + 513 + if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 514 + return NULL; 515 + 516 + link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 517 + vif->link_conf[arvif->link_id]); 518 + 519 + return link_conf; 520 + } 521 + 522 + static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta) 523 + { 524 + struct ath12k_sta *ahsta = arsta->ahsta; 525 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); 526 + struct ieee80211_link_sta *link_sta; 527 + 528 + lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy); 529 + 530 + if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 531 + return NULL; 532 + 533 + link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy, 534 + sta->link[arsta->link_id]); 535 + 536 + return link_sta; 537 + } 538 + 504 539 static bool ath12k_mac_bitrate_is_cck(int bitrate) 505 540 { 506 541 switch (bitrate) { ··· 683 648 return NULL; 684 649 } 685 650 651 + static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif) 652 + { 653 + struct ath12k_vif *ahvif = arvif->ahvif; 654 + 655 + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); 656 + 657 + if (ahvif->vif->valid_links & BIT(arvif->link_id)) 658 + return true; 659 + 660 + return false; 661 + } 662 + 686 663 static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw, 687 664 struct ieee80211_channel *channel) 688 665 { ··· 725 678 } 726 679 727 680 static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, 728 - struct ieee80211_vif *vif) 681 + struct ieee80211_vif *vif, 682 + u8 link_id) 729 683 { 730 684 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 731 - struct ath12k_link_vif *arvif 
= &ahvif->deflink; 732 685 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 686 + struct ath12k_link_vif *arvif; 687 + 688 + lockdep_assert_wiphy(hw->wiphy); 733 689 734 690 /* If there is one pdev within ah, then we return 735 691 * ar directly. ··· 740 690 if (ah->num_radio == 1) 741 691 return ah->radio; 742 692 743 - if (arvif->is_created) 693 + if (!(ahvif->links_map & BIT(link_id))) 694 + return NULL; 695 + 696 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 697 + if (arvif && arvif->is_created) 744 698 return arvif->ar; 745 699 746 700 return NULL; ··· 1293 1239 return ret; 1294 1240 } 1295 1241 1296 - static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif) 1242 + int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif) 1297 1243 { 1298 1244 struct ath12k_vif *ahvif = arvif->ahvif; 1299 1245 struct ath12k *ar = arvif->ar; ··· 1540 1486 static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) 1541 1487 { 1542 1488 struct ath12k_vif *ahvif = arvif->ahvif; 1543 - struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf; 1489 + struct ieee80211_bss_conf *bss_conf; 1544 1490 struct ath12k_wmi_bcn_tmpl_ema_arg ema_args; 1545 1491 struct ieee80211_ema_beacons *beacons; 1546 1492 struct ath12k_link_vif *tx_arvif; ··· 1549 1495 int ret = 0; 1550 1496 u8 i; 1551 1497 1498 + bss_conf = ath12k_mac_get_link_bss_conf(arvif); 1499 + if (!bss_conf) { 1500 + ath12k_warn(arvif->ar->ab, 1501 + "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n", 1502 + ahvif->vif->addr, arvif->link_id); 1503 + return -ENOLINK; 1504 + } 1505 + 1552 1506 tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); 1553 1507 tx_arvif = &tx_ahvif->deflink; 1554 1508 beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar), 1555 - tx_ahvif->vif, 0); 1509 + tx_ahvif->vif, 1510 + tx_arvif->link_id); 1556 1511 if (!beacons || !beacons->cnt) { 1557 1512 ath12k_warn(arvif->ar->ab, 1558 1513 "failed to get ema beacon templates from 
mac80211\n"); ··· 1603 1540 { 1604 1541 struct ath12k_vif *ahvif = arvif->ahvif; 1605 1542 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 1543 + struct ieee80211_bss_conf *link_conf; 1606 1544 struct ath12k_link_vif *tx_arvif = arvif; 1607 1545 struct ath12k *ar = arvif->ar; 1608 1546 struct ath12k_base *ab = ar->ab; ··· 1616 1552 if (ahvif->vdev_type != WMI_VDEV_TYPE_AP) 1617 1553 return 0; 1618 1554 1555 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 1556 + if (!link_conf) { 1557 + ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n", 1558 + vif->addr, arvif->link_id); 1559 + return -ENOLINK; 1560 + } 1561 + 1619 1562 if (vif->mbssid_tx_vif) { 1620 1563 tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif); 1621 1564 tx_arvif = &tx_ahvif->deflink; 1622 1565 if (tx_arvif != arvif && arvif->is_up) 1623 1566 return 0; 1624 1567 1625 - if (vif->bss_conf.ema_ap) 1568 + if (link_conf->ema_ap) 1626 1569 return ath12k_mac_setup_bcn_tmpl_ema(arvif); 1627 1570 } 1628 1571 1629 1572 bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif, 1630 - &offs, 0); 1573 + &offs, tx_arvif->link_id); 1631 1574 if (!bcn) { 1632 1575 ath12k_warn(ab, "failed to get beacon template from mac80211\n"); 1633 1576 return -EPERM; ··· 1644 1573 ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL); 1645 1574 } else { 1646 1575 ath12k_mac_set_arvif_ies(arvif, bcn, 1647 - ahvif->vif->bss_conf.bssid_index, 1576 + link_conf->bssid_index, 1648 1577 &nontx_profile_found); 1649 1578 if (!nontx_profile_found) 1650 1579 ath12k_warn(ab, ··· 1715 1644 1716 1645 ahvif->aid = 0; 1717 1646 1718 - ether_addr_copy(arvif->bssid, info->bssid); 1647 + ether_addr_copy(arvif->bssid, info->addr); 1719 1648 1720 1649 params.vdev_id = arvif->vdev_id; 1721 1650 params.aid = ahvif->aid; ··· 1820 1749 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 1821 1750 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 1822 1751 struct 
ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1752 + struct ieee80211_bss_conf *bss_conf; 1823 1753 u32 aid; 1824 1754 1825 1755 lockdep_assert_wiphy(hw->wiphy); ··· 1830 1758 else 1831 1759 aid = sta->aid; 1832 1760 1833 - ether_addr_copy(arg->peer_mac, sta->addr); 1761 + ether_addr_copy(arg->peer_mac, arsta->addr); 1834 1762 arg->vdev_id = arvif->vdev_id; 1835 1763 arg->peer_associd = aid; 1836 1764 arg->auth_flag = true; 1837 1765 /* TODO: STA WAR in ath10k for listen interval required? */ 1838 1766 arg->peer_listen_intval = hw->conf.listen_interval; 1839 1767 arg->peer_nss = 1; 1840 - arg->peer_caps = vif->bss_conf.assoc_capability; 1768 + 1769 + bss_conf = ath12k_mac_get_link_bss_conf(arvif); 1770 + if (!bss_conf) { 1771 + ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n", 1772 + vif->addr, arvif->link_id); 1773 + return; 1774 + } 1775 + 1776 + arg->peer_caps = bss_conf->assoc_capability; 1841 1777 } 1842 1778 1843 1779 static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, ··· 1855 1775 { 1856 1776 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 1857 1777 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 1858 - struct ieee80211_bss_conf *info = &vif->bss_conf; 1778 + struct ieee80211_bss_conf *info; 1859 1779 struct cfg80211_chan_def def; 1860 1780 struct cfg80211_bss *bss; 1861 1781 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); ··· 1863 1783 const u8 *wpaie = NULL; 1864 1784 1865 1785 lockdep_assert_wiphy(hw->wiphy); 1786 + 1787 + info = ath12k_mac_get_link_bss_conf(arvif); 1788 + if (!info) { 1789 + ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n", 1790 + vif->addr, arvif->link_id); 1791 + return; 1792 + } 1866 1793 1867 1794 if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) 1868 1795 return; ··· 1926 1839 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 1927 1840 struct ieee80211_sta *sta = 
ath12k_ahsta_to_sta(arsta->ahsta); 1928 1841 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 1842 + struct ieee80211_link_sta *link_sta; 1929 1843 struct cfg80211_chan_def def; 1930 1844 const struct ieee80211_supported_band *sband; 1931 1845 const struct ieee80211_rate *rates; ··· 1941 1853 if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) 1942 1854 return; 1943 1855 1856 + link_sta = ath12k_mac_get_link_sta(arsta); 1857 + if (!link_sta) { 1858 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n", 1859 + sta->addr, arsta->link_id); 1860 + return; 1861 + } 1862 + 1944 1863 band = def.chan->band; 1945 1864 sband = hw->wiphy->bands[band]; 1946 - ratemask = sta->deflink.supp_rates[band]; 1865 + ratemask = link_sta->supp_rates[band]; 1947 1866 ratemask &= arvif->bitrate_mask.control[band].legacy; 1948 1867 rates = sband->bitrates; 1949 1868 ··· 1997 1902 { 1998 1903 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 1999 1904 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2000 - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; 1905 + const struct ieee80211_sta_ht_cap *ht_cap; 1906 + struct ieee80211_link_sta *link_sta; 2001 1907 struct cfg80211_chan_def def; 2002 1908 enum nl80211_band band; 2003 1909 const u8 *ht_mcs_mask; ··· 2011 1915 if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) 2012 1916 return; 2013 1917 1918 + link_sta = ath12k_mac_get_link_sta(arsta); 1919 + if (!link_sta) { 1920 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n", 1921 + sta->addr, arsta->link_id); 1922 + return; 1923 + } 1924 + 1925 + ht_cap = &link_sta->ht_cap; 2014 1926 if (!ht_cap->ht_supported) 2015 1927 return; 2016 1928 ··· 2042 1938 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2043 1939 arg->ldpc_flag = true; 2044 1940 2045 - if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { 1941 + if (link_sta->bandwidth >= 
IEEE80211_STA_RX_BW_40) { 2046 1942 arg->bw_40 = true; 2047 1943 arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; 2048 1944 } ··· 2092 1988 arg->peer_ht_rates.rates[i] = i; 2093 1989 } else { 2094 1990 arg->peer_ht_rates.num_rates = n; 2095 - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); 1991 + arg->peer_nss = min(link_sta->rx_nss, max_nss); 2096 1992 } 2097 1993 2098 1994 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", ··· 2168 2064 { 2169 2065 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 2170 2066 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2171 - const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; 2067 + const struct ieee80211_sta_vht_cap *vht_cap; 2068 + struct ieee80211_link_sta *link_sta; 2172 2069 struct cfg80211_chan_def def; 2173 2070 enum nl80211_band band; 2174 2071 const u16 *vht_mcs_mask; ··· 2183 2078 if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) 2184 2079 return; 2185 2080 2081 + link_sta = ath12k_mac_get_link_sta(arsta); 2082 + if (!link_sta) { 2083 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n", 2084 + sta->addr, arsta->link_id); 2085 + return; 2086 + } 2087 + 2088 + vht_cap = &link_sta->vht_cap; 2186 2089 if (!vht_cap->vht_supported) 2187 2090 return; 2188 2091 ··· 2223 2110 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2224 2111 ampdu_factor)) - 1); 2225 2112 2226 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2113 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2227 2114 arg->bw_80 = true; 2228 2115 2229 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 2116 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) 2230 2117 arg->bw_160 = true; 2231 2118 2232 2119 /* Calculate peer NSS capability from VHT capabilities if STA ··· 2240 2127 vht_mcs_mask[i]) 2241 2128 max_nss = i + 1; 2242 2129 } 2243 - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); 2130 + arg->peer_nss = 
min(link_sta->rx_nss, max_nss); 2244 2131 arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2245 2132 arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2246 2133 arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); ··· 2263 2150 arg->tx_max_mcs_nss = 0xFF; 2264 2151 2265 2152 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2266 - sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2153 + arsta->addr, arg->peer_max_mpdu, arg->peer_flags); 2267 2154 2268 2155 /* TODO: rxnss_override */ 2269 2156 } ··· 2275 2162 { 2276 2163 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 2277 2164 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2278 - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; 2165 + const struct ieee80211_sta_he_cap *he_cap; 2166 + struct ieee80211_bss_conf *link_conf; 2167 + struct ieee80211_link_sta *link_sta; 2279 2168 int i; 2280 2169 u8 ampdu_factor, max_nss; 2281 2170 u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; ··· 2286 2171 bool support_160; 2287 2172 u16 v; 2288 2173 2174 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 2175 + if (!link_conf) { 2176 + ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u", 2177 + vif->addr, arvif->link_id); 2178 + return; 2179 + } 2180 + 2181 + link_sta = ath12k_mac_get_link_sta(arsta); 2182 + if (!link_sta) { 2183 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", 2184 + sta->addr, arsta->link_id); 2185 + return; 2186 + } 2187 + 2188 + he_cap = &link_sta->he_cap; 2289 2189 if (!he_cap->has_he) 2290 2190 return; 2291 2191 ··· 2338 2208 else 2339 2209 max_nss = rx_mcs_80; 2340 2210 2341 - arg->peer_nss = min(sta->deflink.rx_nss, max_nss); 2211 + arg->peer_nss = min(link_sta->rx_nss, max_nss); 2342 2212 2343 2213 memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info, 2344 2214 sizeof(he_cap->he_cap_elem.mac_cap_info)); 
2345 2215 memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, 2346 2216 sizeof(he_cap->he_cap_elem.phy_cap_info)); 2347 - arg->peer_he_ops = vif->bss_conf.he_oper.params; 2217 + arg->peer_he_ops = link_conf->he_oper.params; 2348 2218 2349 2219 /* the top most byte is used to indicate BSS color info */ 2350 2220 arg->peer_he_ops &= 0xffffff; ··· 2365 2235 IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); 2366 2236 2367 2237 if (ampdu_factor) { 2368 - if (sta->deflink.vht_cap.vht_supported) 2238 + if (link_sta->vht_cap.vht_supported) 2369 2239 arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR + 2370 2240 ampdu_factor)) - 1; 2371 - else if (sta->deflink.ht_cap.ht_supported) 2241 + else if (link_sta->ht_cap.ht_supported) 2372 2242 arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR + 2373 2243 ampdu_factor)) - 1; 2374 2244 } ··· 2409 2279 if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ) 2410 2280 arg->twt_requester = true; 2411 2281 2412 - switch (sta->deflink.bandwidth) { 2282 + switch (link_sta->bandwidth) { 2413 2283 case IEEE80211_STA_RX_BW_160: 2414 2284 if (he_cap->he_cap_elem.phy_cap_info[0] & 2415 2285 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { ··· 2449 2319 { 2450 2320 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 2451 2321 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2452 - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; 2322 + const struct ieee80211_sta_he_cap *he_cap; 2323 + struct ieee80211_link_sta *link_sta; 2453 2324 struct cfg80211_chan_def def; 2454 2325 enum nl80211_band band; 2455 2326 u8 ampdu_factor, mpdu_density; ··· 2460 2329 2461 2330 band = def.chan->band; 2462 2331 2463 - if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa) 2332 + link_sta = ath12k_mac_get_link_sta(arsta); 2333 + if (!link_sta) { 2334 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM 
link %u\n", 2335 + sta->addr, arsta->link_id); 2336 + return; 2337 + } 2338 + 2339 + he_cap = &link_sta->he_cap; 2340 + 2341 + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa) 2464 2342 return; 2465 2343 2466 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2344 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2467 2345 arg->bw_40 = true; 2468 2346 2469 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2347 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2470 2348 arg->bw_80 = true; 2471 2349 2472 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 2350 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) 2473 2351 arg->bw_160 = true; 2474 2352 2475 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) 2353 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) 2476 2354 arg->bw_320 = true; 2477 2355 2478 - arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa); 2356 + arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa); 2479 2357 2480 2358 mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz, 2481 2359 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); ··· 2528 2388 struct ath12k_wmi_peer_assoc_arg *arg) 2529 2389 { 2530 2390 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2531 - const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa; 2532 - const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; 2391 + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; 2392 + struct ath12k_link_vif *arvif = arsta->arvif; 2393 + const struct ieee80211_sta_ht_cap *ht_cap; 2394 + struct ieee80211_link_sta *link_sta; 2395 + struct ath12k *ar = arvif->ar; 2533 2396 int smps; 2397 + 2398 + link_sta = ath12k_mac_get_link_sta(arsta); 2399 + if (!link_sta) { 2400 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", 2401 + sta->addr, arsta->link_id); 2402 + return; 2403 + } 2404 + 2405 + he_6ghz_capa = &link_sta->he_6ghz_capa; 2406 
+ ht_cap = &link_sta->ht_cap; 2534 2407 2535 2408 if (!ht_cap->ht_supported && !he_6ghz_capa->capa) 2536 2409 return; ··· 2599 2446 } 2600 2447 2601 2448 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n", 2602 - sta->addr, arg->qos_flag); 2449 + arsta->addr, arg->qos_flag); 2603 2450 } 2604 2451 2605 2452 static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, ··· 2639 2486 2640 2487 arg.param = WMI_AP_PS_PEER_PARAM_UAPSD; 2641 2488 arg.value = uapsd; 2642 - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2489 + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); 2643 2490 if (ret) 2644 2491 goto err; 2645 2492 2646 2493 arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP; 2647 2494 arg.value = max_sp; 2648 - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2495 + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); 2649 2496 if (ret) 2650 2497 goto err; 2651 2498 2652 2499 /* TODO: revisit during testing */ 2653 2500 arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; 2654 2501 arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; 2655 - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2502 + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); 2656 2503 if (ret) 2657 2504 goto err; 2658 2505 2659 2506 arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; 2660 2507 arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; 2661 - ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2508 + ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg); 2662 2509 if (ret) 2663 2510 goto err; 2664 2511 ··· 2670 2517 return ret; 2671 2518 } 2672 2519 2673 - static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2520 + static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta) 2674 2521 { 2675 - return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> 2522 + return sta->supp_rates[NL80211_BAND_2GHZ] >> 2676 2523 ATH12K_MAC_FIRST_OFDM_RATE_IDX; 2677 2524 } 2678 2525 2679 2526 static 
enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar, 2680 - struct ieee80211_sta *sta) 2527 + struct ieee80211_link_sta *link_sta) 2681 2528 { 2682 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2683 - switch (sta->deflink.vht_cap.cap & 2529 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2530 + switch (link_sta->vht_cap.cap & 2684 2531 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2685 2532 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2686 2533 return MODE_11AC_VHT160; ··· 2692 2539 } 2693 2540 } 2694 2541 2695 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2542 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2696 2543 return MODE_11AC_VHT80; 2697 2544 2698 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2545 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2699 2546 return MODE_11AC_VHT40; 2700 2547 2701 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2548 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) 2702 2549 return MODE_11AC_VHT20; 2703 2550 2704 2551 return MODE_UNKNOWN; 2705 2552 } 2706 2553 2707 2554 static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar, 2708 - struct ieee80211_sta *sta) 2555 + struct ieee80211_link_sta *link_sta) 2709 2556 { 2710 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2711 - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2557 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2558 + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & 2712 2559 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 2713 2560 return MODE_11AX_HE160; 2714 - else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2561 + else if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & 2715 2562 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 2716 2563 return MODE_11AX_HE80_80; 2717 2564 /* not sure if this is a valid case? 
*/ 2718 2565 return MODE_11AX_HE160; 2719 2566 } 2720 2567 2721 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2568 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2722 2569 return MODE_11AX_HE80; 2723 2570 2724 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2571 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2725 2572 return MODE_11AX_HE40; 2726 2573 2727 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2574 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) 2728 2575 return MODE_11AX_HE20; 2729 2576 2730 2577 return MODE_UNKNOWN; 2731 2578 } 2732 2579 2733 2580 static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, 2734 - struct ieee80211_sta *sta) 2581 + struct ieee80211_link_sta *link_sta) 2735 2582 { 2736 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) 2737 - if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] & 2583 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) 2584 + if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] & 2738 2585 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) 2739 2586 return MODE_11BE_EHT320; 2740 2587 2741 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2742 - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2588 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2589 + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & 2743 2590 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 2744 2591 return MODE_11BE_EHT160; 2745 2592 2746 - if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2593 + if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & 2747 2594 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 2748 2595 return MODE_11BE_EHT80_80; 2749 2596 2750 2597 ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n", 2751 - sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]); 2598 + link_sta->he_cap.he_cap_elem.phy_cap_info[0]); 2752 2599 2753 2600 return MODE_11BE_EHT160; 2754 2601 } 2755 2602 2756 - if (sta->deflink.bandwidth == 
IEEE80211_STA_RX_BW_80) 2603 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2757 2604 return MODE_11BE_EHT80; 2758 2605 2759 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2606 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2760 2607 return MODE_11BE_EHT40; 2761 2608 2762 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2609 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) 2763 2610 return MODE_11BE_EHT20; 2764 2611 2765 2612 return MODE_UNKNOWN; ··· 2770 2617 struct ath12k_link_sta *arsta, 2771 2618 struct ath12k_wmi_peer_assoc_arg *arg) 2772 2619 { 2620 + struct ieee80211_link_sta *link_sta; 2773 2621 struct cfg80211_chan_def def; 2774 2622 enum nl80211_band band; 2775 2623 const u8 *ht_mcs_mask; ··· 2789 2635 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2790 2636 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2791 2637 2638 + link_sta = ath12k_mac_get_link_sta(arsta); 2639 + if (!link_sta) { 2640 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", 2641 + sta->addr, arsta->link_id); 2642 + return; 2643 + } 2644 + 2792 2645 switch (band) { 2793 2646 case NL80211_BAND_2GHZ: 2794 - if (sta->deflink.eht_cap.has_eht) { 2795 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2647 + if (link_sta->eht_cap.has_eht) { 2648 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2796 2649 phymode = MODE_11BE_EHT40_2G; 2797 2650 else 2798 2651 phymode = MODE_11BE_EHT20_2G; 2799 - } else if (sta->deflink.he_cap.has_he) { 2800 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2652 + } else if (link_sta->he_cap.has_he) { 2653 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80) 2801 2654 phymode = MODE_11AX_HE80_2G; 2802 - else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2655 + else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2803 2656 phymode = MODE_11AX_HE40_2G; 2804 2657 else 2805 2658 phymode = MODE_11AX_HE20_2G; 2806 - } else if 
(sta->deflink.vht_cap.vht_supported && 2659 + } else if (link_sta->vht_cap.vht_supported && 2807 2660 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2808 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2661 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2809 2662 phymode = MODE_11AC_VHT40; 2810 2663 else 2811 2664 phymode = MODE_11AC_VHT20; 2812 - } else if (sta->deflink.ht_cap.ht_supported && 2665 + } else if (link_sta->ht_cap.ht_supported && 2813 2666 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2814 - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2667 + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40) 2815 2668 phymode = MODE_11NG_HT40; 2816 2669 else 2817 2670 phymode = MODE_11NG_HT20; 2818 - } else if (ath12k_mac_sta_has_ofdm_only(sta)) { 2671 + } else if (ath12k_mac_sta_has_ofdm_only(link_sta)) { 2819 2672 phymode = MODE_11G; 2820 2673 } else { 2821 2674 phymode = MODE_11B; ··· 2831 2670 case NL80211_BAND_5GHZ: 2832 2671 case NL80211_BAND_6GHZ: 2833 2672 /* Check EHT first */ 2834 - if (sta->deflink.eht_cap.has_eht) { 2835 - phymode = ath12k_mac_get_phymode_eht(ar, sta); 2836 - } else if (sta->deflink.he_cap.has_he) { 2837 - phymode = ath12k_mac_get_phymode_he(ar, sta); 2838 - } else if (sta->deflink.vht_cap.vht_supported && 2673 + if (link_sta->eht_cap.has_eht) { 2674 + phymode = ath12k_mac_get_phymode_eht(ar, link_sta); 2675 + } else if (link_sta->he_cap.has_he) { 2676 + phymode = ath12k_mac_get_phymode_he(ar, link_sta); 2677 + } else if (link_sta->vht_cap.vht_supported && 2839 2678 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2840 - phymode = ath12k_mac_get_phymode_vht(ar, sta); 2841 - } else if (sta->deflink.ht_cap.ht_supported && 2679 + phymode = ath12k_mac_get_phymode_vht(ar, link_sta); 2680 + } else if (link_sta->ht_cap.ht_supported && 2842 2681 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2843 - if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) 2682 + if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2844 2683 
phymode = MODE_11NA_HT40; 2845 2684 else 2846 2685 phymode = MODE_11NA_HT20; ··· 2853 2692 } 2854 2693 2855 2694 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n", 2856 - sta->addr, ath12k_mac_phymode_str(phymode)); 2695 + arsta->addr, ath12k_mac_phymode_str(phymode)); 2857 2696 2858 2697 arg->peer_phymode = phymode; 2859 2698 WARN_ON(phymode == MODE_UNKNOWN); ··· 2928 2767 struct ath12k_wmi_peer_assoc_arg *arg) 2929 2768 { 2930 2769 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2931 - const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; 2932 - const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; 2933 2770 const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; 2934 2771 const struct ieee80211_eht_mcs_nss_supp_bw *bw; 2772 + const struct ieee80211_sta_eht_cap *eht_cap; 2773 + const struct ieee80211_sta_he_cap *he_cap; 2774 + struct ieee80211_link_sta *link_sta; 2935 2775 u32 *rx_mcs, *tx_mcs; 2936 2776 2937 2777 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 2938 2778 2939 - if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) 2779 + link_sta = ath12k_mac_get_link_sta(arsta); 2780 + if (!link_sta) { 2781 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n", 2782 + sta->addr, arsta->link_id); 2783 + return; 2784 + } 2785 + 2786 + eht_cap = &link_sta->eht_cap; 2787 + he_cap = &link_sta->he_cap; 2788 + if (!he_cap->has_he || !eht_cap->has_eht) 2940 2789 return; 2941 2790 2942 2791 arg->eht_flag = true; ··· 2965 2794 rx_mcs = arg->peer_eht_rx_mcs_set; 2966 2795 tx_mcs = arg->peer_eht_tx_mcs_set; 2967 2796 2968 - switch (sta->deflink.bandwidth) { 2797 + switch (link_sta->bandwidth) { 2969 2798 case IEEE80211_STA_RX_BW_320: 2970 2799 bw = &eht_cap->eht_mcs_nss_supp.bw._320; 2971 2800 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, ··· 3017 2846 arg->punct_bitmap = ~arvif->punct_bitmap; 3018 2847 } 3019 2848 2849 + static void ath12k_peer_assoc_h_mlo(struct 
ath12k_link_sta *arsta, 2850 + struct ath12k_wmi_peer_assoc_arg *arg) 2851 + { 2852 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 2853 + struct peer_assoc_mlo_params *ml = &arg->ml; 2854 + struct ath12k_sta *ahsta = arsta->ahsta; 2855 + struct ath12k_link_sta *arsta_p; 2856 + struct ath12k_link_vif *arvif; 2857 + unsigned long links; 2858 + u8 link_id; 2859 + int i; 2860 + 2861 + if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) 2862 + return; 2863 + 2864 + ml->enabled = true; 2865 + ml->assoc_link = arsta->is_assoc_link; 2866 + 2867 + /* For now considering the primary umac based on assoc link */ 2868 + ml->primary_umac = arsta->is_assoc_link; 2869 + ml->peer_id_valid = true; 2870 + ml->logical_link_idx_valid = true; 2871 + 2872 + ether_addr_copy(ml->mld_addr, sta->addr); 2873 + ml->logical_link_idx = arsta->link_idx; 2874 + ml->ml_peer_id = ahsta->ml_peer_id; 2875 + ml->ieee_link_id = arsta->link_id; 2876 + ml->num_partner_links = 0; 2877 + links = ahsta->links_map; 2878 + 2879 + rcu_read_lock(); 2880 + 2881 + i = 0; 2882 + 2883 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 2884 + if (i >= ATH12K_WMI_MLO_MAX_LINKS) 2885 + break; 2886 + 2887 + arsta_p = rcu_dereference(ahsta->link[link_id]); 2888 + arvif = rcu_dereference(ahsta->ahvif->link[link_id]); 2889 + 2890 + if (arsta_p == arsta) 2891 + continue; 2892 + 2893 + if (!arvif->is_started) 2894 + continue; 2895 + 2896 + ml->partner_info[i].vdev_id = arvif->vdev_id; 2897 + ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id; 2898 + ml->partner_info[i].assoc_link = arsta_p->is_assoc_link; 2899 + ml->partner_info[i].primary_umac = arsta_p->is_assoc_link; 2900 + ml->partner_info[i].logical_link_idx_valid = true; 2901 + ml->partner_info[i].logical_link_idx = arsta_p->link_idx; 2902 + ml->num_partner_links++; 2903 + 2904 + i++; 2905 + } 2906 + 2907 + rcu_read_unlock(); 2908 + } 2909 + 3020 2910 static void ath12k_peer_assoc_prepare(struct ath12k *ar, 3021 
2911 struct ath12k_link_vif *arvif, 3022 2912 struct ath12k_link_sta *arsta, ··· 3102 2870 ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg); 3103 2871 ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg); 3104 2872 ath12k_peer_assoc_h_smps(arsta, arg); 2873 + ath12k_peer_assoc_h_mlo(arsta, arg); 3105 2874 3106 2875 /* TODO: amsdu_disable req? */ 3107 2876 } ··· 3133 2900 struct ath12k_vif *ahvif = arvif->ahvif; 3134 2901 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 3135 2902 struct ath12k_wmi_vdev_up_params params = {}; 3136 - struct ath12k_wmi_peer_assoc_arg peer_arg; 2903 + struct ath12k_wmi_peer_assoc_arg peer_arg = {}; 2904 + struct ieee80211_link_sta *link_sta; 2905 + u8 link_id = bss_conf->link_id; 3137 2906 struct ath12k_link_sta *arsta; 3138 2907 struct ieee80211_sta *ap_sta; 3139 2908 struct ath12k_sta *ahsta; ··· 3145 2910 3146 2911 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 3147 2912 3148 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 3149 - arvif->vdev_id, arvif->bssid, ahvif->aid); 2913 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2914 + "mac vdev %i link id %u assoc bssid %pM aid %d\n", 2915 + arvif->vdev_id, link_id, arvif->bssid, ahvif->aid); 3150 2916 3151 2917 rcu_read_lock(); 3152 2918 3153 - ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2919 + /* During ML connection, cfg.ap_addr has the MLD address. For 2920 + * non-ML connection, it has the BSSID. 
2921 + */ 2922 + ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); 3154 2923 if (!ap_sta) { 3155 2924 ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", 3156 - bss_conf->bssid, arvif->vdev_id); 2925 + vif->cfg.ap_addr, arvif->vdev_id); 3157 2926 rcu_read_unlock(); 3158 2927 return; 3159 2928 } 3160 2929 3161 2930 ahsta = ath12k_sta_to_ahsta(ap_sta); 3162 - arsta = &ahsta->deflink; 3163 2931 2932 + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 2933 + ahsta->link[link_id]); 3164 2934 if (WARN_ON(!arsta)) { 2935 + rcu_read_unlock(); 2936 + return; 2937 + } 2938 + 2939 + link_sta = ath12k_mac_get_link_sta(arsta); 2940 + if (WARN_ON(!link_sta)) { 3165 2941 rcu_read_unlock(); 3166 2942 return; 3167 2943 } ··· 3195 2949 } 3196 2950 3197 2951 ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, 3198 - &ap_sta->deflink.ht_cap, 3199 - &ap_sta->deflink.he_6ghz_capa); 2952 + &link_sta->ht_cap, &link_sta->he_6ghz_capa); 3200 2953 if (ret) { 3201 2954 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 3202 2955 arvif->vdev_id, ret); ··· 3303 3058 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 3304 3059 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 3305 3060 const struct ieee80211_supported_band *sband; 3061 + struct ieee80211_bss_conf *bss_conf; 3306 3062 u8 basic_rate_idx; 3307 3063 int hw_rate_code; 3308 3064 u32 vdev_param; ··· 3312 3066 3313 3067 lockdep_assert_wiphy(hw->wiphy); 3314 3068 3069 + bss_conf = ath12k_mac_get_link_bss_conf(arvif); 3070 + if (!bss_conf) { 3071 + ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n", 3072 + vif->addr, arvif->link_id); 3073 + return; 3074 + } 3075 + 3315 3076 sband = hw->wiphy->bands[def->chan->band]; 3316 - basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 3077 + basic_rate_idx = ffs(bss_conf->basic_rates) - 1; 3317 3078 bitrate = sband->bitrates[basic_rate_idx].bitrate; 3318 3079 3319 3080 hw_rate_code = 
ath12k_mac_get_rate_hw_value(bitrate); ··· 3404 3151 { 3405 3152 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 3406 3153 unsigned long links = ahvif->links_map; 3154 + struct ieee80211_bss_conf *info; 3407 3155 struct ath12k_link_vif *arvif; 3408 3156 struct ath12k *ar; 3409 3157 u8 link_id; ··· 3425 3171 3426 3172 ar = arvif->ar; 3427 3173 3428 - if (vif->cfg.assoc) 3429 - ath12k_bss_assoc(ar, arvif, &vif->bss_conf); 3430 - else 3174 + if (vif->cfg.assoc) { 3175 + info = ath12k_mac_get_link_bss_conf(arvif); 3176 + if (!info) 3177 + continue; 3178 + 3179 + ath12k_bss_assoc(ar, arvif, info); 3180 + } else { 3431 3181 ath12k_bss_disassoc(ar, arvif); 3182 + } 3432 3183 } 3433 3184 } 3434 3185 } ··· 3444 3185 struct ieee80211_vif *vif = arvif->ahvif->vif; 3445 3186 struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; 3446 3187 enum wmi_sta_powersave_param param; 3188 + struct ieee80211_bss_conf *info; 3447 3189 enum wmi_sta_ps_mode psmode; 3448 3190 int ret; 3449 3191 int timeout; ··· 3462 3202 3463 3203 timeout = conf->dynamic_ps_timeout; 3464 3204 if (timeout == 0) { 3205 + info = ath12k_mac_get_link_bss_conf(arvif); 3206 + if (!info) { 3207 + ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n", 3208 + vif->addr, arvif->link_id); 3209 + return; 3210 + } 3211 + 3465 3212 /* firmware doesn't like 0 */ 3466 - timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; 3213 + timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000; 3467 3214 } 3468 3215 3469 3216 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, ··· 3581 3314 if (changed & BSS_CHANGED_BEACON_ENABLED) { 3582 3315 ath12k_control_beaconing(arvif, info); 3583 3316 3584 - if (arvif->is_up && vif->bss_conf.he_support && 3585 - vif->bss_conf.he_oper.params) { 3317 + if (arvif->is_up && info->he_support && 3318 + info->he_oper.params) { 3586 3319 /* TODO: Extend to support 1024 BA Bitmap size */ 3587 3320 ret = ath12k_wmi_vdev_set_param_cmd(ar, 
arvif->vdev_id, 3588 3321 WMI_VDEV_PARAM_BA_MODE, ··· 3593 3326 arvif->vdev_id); 3594 3327 3595 3328 param_id = WMI_VDEV_PARAM_HEOPS_0_31; 3596 - param_value = vif->bss_conf.he_oper.params; 3329 + param_value = info->he_oper.params; 3597 3330 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 3598 3331 param_id, param_value); 3599 3332 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, ··· 3685 3418 if (changed & BSS_CHANGED_MCAST_RATE && 3686 3419 !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) { 3687 3420 band = def.chan->band; 3688 - mcast_rate = vif->bss_conf.mcast_rate[band]; 3421 + mcast_rate = info->mcast_rate[band]; 3689 3422 3690 3423 if (mcast_rate > 0) 3691 3424 rateidx = mcast_rate - 1; 3692 3425 else 3693 - rateidx = ffs(vif->bss_conf.basic_rates) - 1; 3426 + rateidx = ffs(info->basic_rates) - 1; 3694 3427 3695 3428 if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) 3696 3429 rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; ··· 3804 3537 3805 3538 static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id) 3806 3539 { 3540 + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 3541 + return; 3542 + 3807 3543 ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]); 3808 3544 kfree(ahvif->cache[link_id]); 3809 3545 ahvif->cache[link_id] = NULL; ··· 3867 3597 arvif = &ahvif->deflink; 3868 3598 } else { 3869 3599 /* If this is the first link arvif being created for an ML VIF 3870 - * use the preallocated deflink memory 3600 + * use the preallocated deflink memory except for scan arvifs 3871 3601 */ 3872 - if (!ahvif->links_map) { 3602 + if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { 3873 3603 arvif = &ahvif->deflink; 3874 3604 } else { 3875 3605 arvif = (struct ath12k_link_vif *) ··· 4169 3899 return link_id; 4170 3900 } 4171 3901 4172 - /* input ar is not assigned to any of the links, use link id 4173 - * 0 for scan vdev creation. 
3902 + /* input ar is not assigned to any of the links of ML VIF, use scan 3903 + * link (15) for scan vdev creation. 4174 3904 */ 4175 - return 0; 3905 + return ATH12K_DEFAULT_SCAN_LINK; 4176 3906 } 4177 3907 4178 3908 static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, ··· 4203 3933 4204 3934 /* check if any of the links of ML VIF is already started on 4205 3935 * radio(ar) correpsondig to given scan frequency and use it, 4206 - * if not use deflink(link 0) for scan purpose. 3936 + * if not use scan link (link 15) for scan purpose. 4207 3937 */ 4208 3938 link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); 4209 3939 arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); ··· 4313 4043 spin_unlock_bh(&ar->data_lock); 4314 4044 } 4315 4045 4046 + /* As per cfg80211/mac80211 scan design, it allows only one 4047 + * scan at a time. Hence last_scan link id is used for 4048 + * tracking the link id on which the scan is been done on 4049 + * this vif. 4050 + */ 4051 + ahvif->last_scan_link = arvif->link_id; 4052 + 4316 4053 /* Add a margin to account for event/command processing */ 4317 4054 ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, 4318 4055 msecs_to_jiffies(arg->max_scan_time + ··· 4339 4062 struct ieee80211_vif *vif) 4340 4063 { 4341 4064 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 4065 + u16 link_id = ahvif->last_scan_link; 4342 4066 struct ath12k_link_vif *arvif; 4343 4067 struct ath12k *ar; 4344 4068 4345 4069 lockdep_assert_wiphy(hw->wiphy); 4346 4070 4347 - arvif = &ahvif->deflink; 4348 - 4349 - if (!arvif->is_created) 4071 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4072 + if (!arvif || !arvif->is_created) 4350 4073 return; 4351 4074 4352 4075 ar = arvif->ar; ··· 4480 4203 { 4481 4204 struct ath12k_vif *ahvif = arvif->ahvif; 4482 4205 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 4206 + struct ieee80211_bss_conf *link_conf; 4483 4207 struct ieee80211_sta *sta = NULL; 4484 4208 struct ath12k_base 
*ab = ar->ab; 4485 4209 struct ath12k_peer *peer; ··· 4497 4219 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) 4498 4220 return 1; 4499 4221 4222 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 4223 + if (!link_conf) { 4224 + ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n", 4225 + vif->addr, arvif->link_id); 4226 + return -ENOLINK; 4227 + } 4228 + 4500 4229 if (sta) 4501 - peer_addr = sta->addr; 4230 + peer_addr = arsta->addr; 4502 4231 else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) 4503 - peer_addr = vif->bss_conf.bssid; 4232 + peer_addr = link_conf->bssid; 4504 4233 else 4505 - peer_addr = vif->addr; 4234 + peer_addr = link_conf->addr; 4506 4235 4507 4236 key->hw_key_idx = key->keyidx; 4508 4237 ··· 4657 4372 4658 4373 if (sta) { 4659 4374 ahsta = ath12k_sta_to_ahsta(sta); 4375 + 4660 4376 /* For an ML STA Pairwise key is same for all associated link Stations, 4661 4377 * hence do set key for all link STAs which are active. 4662 4378 */ ··· 4680 4394 if (ret) 4681 4395 break; 4682 4396 } 4683 - } else { 4684 - arsta = &ahsta->deflink; 4685 - arvif = arsta->arvif; 4686 - if (WARN_ON(!arvif)) { 4687 - ret = -EINVAL; 4688 - goto out; 4689 - } 4690 4397 4691 - ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key); 4692 - } 4693 - } else { 4694 - if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { 4695 - link_id = key->link_id; 4696 - arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4697 - } else { 4698 - link_id = 0; 4699 - arvif = &ahvif->deflink; 4398 + return 0; 4700 4399 } 4701 4400 4702 - if (!arvif || !arvif->is_created) { 4703 - cache = ath12k_ahvif_get_link_cache(ahvif, link_id); 4704 - if (!cache) 4705 - return -ENOSPC; 4401 + arsta = &ahsta->deflink; 4402 + arvif = arsta->arvif; 4403 + if (WARN_ON(!arvif)) 4404 + return -EINVAL; 4706 4405 4707 - ret = ath12k_mac_update_key_cache(cache, cmd, sta, key); 4708 - 4406 + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, 
key); 4407 + if (ret) 4709 4408 return ret; 4710 - } 4711 4409 4712 - ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key); 4410 + return 0; 4713 4411 } 4714 4412 4715 - out: 4413 + if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { 4414 + link_id = key->link_id; 4415 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4416 + } else { 4417 + link_id = 0; 4418 + arvif = &ahvif->deflink; 4419 + } 4716 4420 4717 - return ret; 4421 + if (!arvif || !arvif->is_created) { 4422 + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); 4423 + if (!cache) 4424 + return -ENOSPC; 4425 + 4426 + ret = ath12k_mac_update_key_cache(cache, cmd, sta, key); 4427 + if (ret) 4428 + return ret; 4429 + 4430 + return 0; 4431 + } 4432 + 4433 + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key); 4434 + if (ret) 4435 + return ret; 4436 + 4437 + return 0; 4718 4438 } 4719 4439 4720 4440 static int ··· 4743 4451 const struct cfg80211_bitrate_mask *mask, 4744 4452 enum nl80211_band band) 4745 4453 { 4746 - struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4747 4454 struct ath12k *ar = arvif->ar; 4748 4455 u8 vht_rate, nss; 4749 4456 u32 rate_code; ··· 4761 4470 4762 4471 if (!nss) { 4763 4472 ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", 4764 - sta->addr); 4473 + arsta->addr); 4765 4474 return -EINVAL; 4766 4475 } 4767 4476 4768 4477 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 4769 4478 "Setting Fixed VHT Rate for peer %pM. 
Device will not switch to any other selected rates", 4770 - sta->addr); 4479 + arsta->addr); 4771 4480 4772 4481 rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1, 4773 4482 WMI_RATE_PREAMBLE_VHT); 4774 - ret = ath12k_wmi_set_peer_param(ar, sta->addr, 4483 + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, 4775 4484 arvif->vdev_id, 4776 4485 WMI_PEER_PARAM_FIXED_RATE, 4777 4486 rate_code); 4778 4487 if (ret) 4779 4488 ath12k_warn(ar->ab, 4780 4489 "failed to update STA %pM Fixed Rate %d: %d\n", 4781 - sta->addr, rate_code, ret); 4490 + arsta->addr, rate_code, ret); 4782 4491 4783 4492 return ret; 4784 4493 } 4785 4494 4786 - static int ath12k_station_assoc(struct ath12k *ar, 4787 - struct ath12k_link_vif *arvif, 4788 - struct ath12k_link_sta *arsta, 4789 - bool reassoc) 4495 + static int ath12k_mac_station_assoc(struct ath12k *ar, 4496 + struct ath12k_link_vif *arvif, 4497 + struct ath12k_link_sta *arsta, 4498 + bool reassoc) 4790 4499 { 4791 4500 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 4792 4501 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4793 4502 struct ath12k_wmi_peer_assoc_arg peer_arg; 4503 + struct ieee80211_link_sta *link_sta; 4794 4504 int ret; 4795 4505 struct cfg80211_chan_def def; 4796 4506 enum nl80211_band band; 4797 4507 struct cfg80211_bitrate_mask *mask; 4798 4508 u8 num_vht_rates; 4509 + u8 link_id = arvif->link_id; 4799 4510 4800 4511 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4801 4512 4802 4513 if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) 4803 4514 return -EPERM; 4515 + 4516 + if (WARN_ON(!rcu_access_pointer(sta->link[link_id]))) 4517 + return -EINVAL; 4804 4518 4805 4519 band = def.chan->band; 4806 4520 mask = &arvif->bitrate_mask; ··· 4820 4524 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 4821 4525 if (ret) { 4822 4526 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 4823 - sta->addr, arvif->vdev_id, ret); 4527 + arsta->addr, arvif->vdev_id, 
ret); 4824 4528 return ret; 4825 4529 } 4826 4530 4827 4531 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { 4828 4532 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 4829 - sta->addr, arvif->vdev_id); 4533 + arsta->addr, arvif->vdev_id); 4830 4534 return -ETIMEDOUT; 4831 4535 } 4832 4536 ··· 4837 4541 * fixed param. 4838 4542 * Note that all other rates and NSS will be disabled for this peer. 4839 4543 */ 4840 - if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { 4544 + link_sta = ath12k_mac_get_link_sta(arsta); 4545 + if (!link_sta) { 4546 + ath12k_warn(ar->ab, "unable to access link sta in station assoc\n"); 4547 + return -EINVAL; 4548 + } 4549 + 4550 + if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) { 4841 4551 ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, 4842 4552 band); 4843 4553 if (ret) ··· 4856 4554 if (reassoc) 4857 4555 return 0; 4858 4556 4859 - ret = ath12k_setup_peer_smps(ar, arvif, sta->addr, 4860 - &sta->deflink.ht_cap, 4861 - &sta->deflink.he_6ghz_capa); 4557 + ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr, 4558 + &link_sta->ht_cap, &link_sta->he_6ghz_capa); 4862 4559 if (ret) { 4863 4560 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 4864 4561 arvif->vdev_id, ret); ··· 4875 4574 ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta); 4876 4575 if (ret) { 4877 4576 ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", 4878 - sta->addr, arvif->vdev_id, ret); 4577 + arsta->addr, arvif->vdev_id, ret); 4879 4578 return ret; 4880 4579 } 4881 4580 } ··· 4883 4582 return 0; 4884 4583 } 4885 4584 4886 - static int ath12k_station_disassoc(struct ath12k *ar, 4887 - struct ath12k_link_vif *arvif, 4888 - struct ath12k_link_sta *arsta) 4585 + static int ath12k_mac_station_disassoc(struct ath12k *ar, 4586 + struct ath12k_link_vif *arvif, 4587 + struct ath12k_link_sta *arsta) 4889 4588 { 4890 4589 struct ieee80211_sta *sta = 
ath12k_ahsta_to_sta(arsta->ahsta); 4891 - int ret; 4892 4590 4893 4591 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4894 4592 4895 4593 if (!sta->wme) { 4896 4594 arvif->num_legacy_stations--; 4897 - ret = ath12k_recalc_rtscts_prot(arvif); 4898 - if (ret) 4899 - return ret; 4595 + return ath12k_recalc_rtscts_prot(arvif); 4900 4596 } 4901 4597 4902 - ret = ath12k_clear_peer_keys(arvif, sta->addr); 4903 - if (ret) { 4904 - ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", 4905 - arvif->vdev_id, ret); 4906 - return ret; 4907 - } 4908 4598 return 0; 4909 4599 } 4910 4600 4911 4601 static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) 4912 4602 { 4603 + struct ieee80211_link_sta *link_sta; 4913 4604 struct ath12k *ar; 4914 4605 struct ath12k_link_vif *arvif; 4915 4606 struct ieee80211_sta *sta; ··· 4959 4666 * WMI_PEER_CHWIDTH 4960 4667 */ 4961 4668 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n", 4962 - sta->addr, bw, bw_prev); 4963 - err = ath12k_wmi_set_peer_param(ar, sta->addr, 4669 + arsta->addr, bw, bw_prev); 4670 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, 4964 4671 arvif->vdev_id, WMI_PEER_PHYMODE, 4965 4672 peer_phymode); 4966 4673 if (err) { 4967 4674 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", 4968 - sta->addr, peer_phymode, err); 4675 + arsta->addr, peer_phymode, err); 4969 4676 return; 4970 4677 } 4971 - err = ath12k_wmi_set_peer_param(ar, sta->addr, 4678 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, 4972 4679 arvif->vdev_id, WMI_PEER_CHWIDTH, 4973 4680 bw); 4974 4681 if (err) 4975 4682 ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n", 4976 - sta->addr, bw, err); 4683 + arsta->addr, bw, err); 4977 4684 } else { 4978 4685 /* When we downgrade bandwidth this will conflict with phymode 4979 4686 * and cause to trigger firmware crash. 
In this case we send 4980 4687 * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE 4981 4688 */ 4982 4689 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n", 4983 - sta->addr, bw, bw_prev); 4984 - err = ath12k_wmi_set_peer_param(ar, sta->addr, 4690 + arsta->addr, bw, bw_prev); 4691 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, 4985 4692 arvif->vdev_id, WMI_PEER_CHWIDTH, 4986 4693 bw); 4987 4694 if (err) { 4988 4695 ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n", 4989 - sta->addr, bw, err); 4696 + arsta->addr, bw, err); 4990 4697 return; 4991 4698 } 4992 - err = ath12k_wmi_set_peer_param(ar, sta->addr, 4699 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, 4993 4700 arvif->vdev_id, WMI_PEER_PHYMODE, 4994 4701 peer_phymode); 4995 4702 if (err) 4996 4703 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", 4997 - sta->addr, peer_phymode, err); 4704 + arsta->addr, peer_phymode, err); 4998 4705 } 4999 4706 } 5000 4707 5001 4708 if (changed & IEEE80211_RC_NSS_CHANGED) { 5002 4709 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n", 5003 - sta->addr, nss); 4710 + arsta->addr, nss); 5004 4711 5005 - err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 4712 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, 5006 4713 WMI_PEER_NSS, nss); 5007 4714 if (err) 5008 4715 ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n", 5009 - sta->addr, nss, err); 4716 + arsta->addr, nss, err); 5010 4717 } 5011 4718 5012 4719 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5013 4720 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n", 5014 - sta->addr, smps); 4721 + arsta->addr, smps); 5015 4722 5016 - err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 4723 + err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, 5017 4724 WMI_PEER_MIMO_PS_STATE, smps); 5018 4725 if (err) 5019 4726 ath12k_warn(ar->ab, "failed to update STA %pM 
smps %d: %d\n", 5020 - sta->addr, smps, err); 4727 + arsta->addr, smps, err); 5021 4728 } 5022 4729 5023 4730 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { ··· 5036 4743 * TODO: Check RATEMASK_CMDID to support auto rates selection 5037 4744 * across HT/VHT and for multiple VHT MCS support. 5038 4745 */ 5039 - if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { 4746 + link_sta = ath12k_mac_get_link_sta(arsta); 4747 + if (!link_sta) { 4748 + ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n", 4749 + sta->addr, arsta->link_id); 4750 + return; 4751 + } 4752 + 4753 + if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) { 5040 4754 ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, 5041 4755 band); 5042 4756 } else { ··· 5057 4757 err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 5058 4758 if (err) 5059 4759 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 5060 - sta->addr, arvif->vdev_id, err); 4760 + arsta->addr, arvif->vdev_id, err); 5061 4761 5062 4762 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) 5063 4763 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 5064 - sta->addr, arvif->vdev_id); 4764 + arsta->addr, arvif->vdev_id); 5065 4765 } 5066 4766 } 4767 + } 4768 + 4769 + static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah, 4770 + struct ath12k_sta *ahsta, 4771 + u8 link_id) 4772 + { 4773 + struct ath12k_link_sta *arsta; 4774 + 4775 + lockdep_assert_wiphy(ah->hw->wiphy); 4776 + 4777 + if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS)) 4778 + return; 4779 + 4780 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 4781 + if (WARN_ON(!arsta)) 4782 + return; 4783 + 4784 + ahsta->links_map &= ~BIT(link_id); 4785 + rcu_assign_pointer(ahsta->link[link_id], NULL); 4786 + synchronize_rcu(); 4787 + 4788 + if (arsta == &ahsta->deflink) { 4789 + arsta->link_id = ATH12K_INVALID_LINK_ID; 4790 + arsta->ahsta = NULL; 
4791 + arsta->arvif = NULL; 4792 + return; 4793 + } 4794 + 4795 + kfree(arsta); 5067 4796 } 5068 4797 5069 4798 static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif, ··· 5128 4799 ar->num_stations--; 5129 4800 } 5130 4801 4802 + static void ath12k_mac_station_post_remove(struct ath12k *ar, 4803 + struct ath12k_link_vif *arvif, 4804 + struct ath12k_link_sta *arsta) 4805 + { 4806 + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 4807 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4808 + struct ath12k_peer *peer; 4809 + 4810 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4811 + 4812 + ath12k_mac_dec_num_stations(arvif, arsta); 4813 + 4814 + spin_lock_bh(&ar->ab->base_lock); 4815 + 4816 + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); 4817 + if (peer && peer->sta == sta) { 4818 + ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", 4819 + vif->addr, arvif->vdev_id); 4820 + peer->sta = NULL; 4821 + list_del(&peer->list); 4822 + kfree(peer); 4823 + ar->num_peers--; 4824 + } 4825 + 4826 + spin_unlock_bh(&ar->ab->base_lock); 4827 + 4828 + kfree(arsta->rx_stats); 4829 + arsta->rx_stats = NULL; 4830 + } 4831 + 4832 + static int ath12k_mac_station_unauthorize(struct ath12k *ar, 4833 + struct ath12k_link_vif *arvif, 4834 + struct ath12k_link_sta *arsta) 4835 + { 4836 + struct ath12k_peer *peer; 4837 + int ret; 4838 + 4839 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4840 + 4841 + spin_lock_bh(&ar->ab->base_lock); 4842 + 4843 + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); 4844 + if (peer) 4845 + peer->is_authorized = false; 4846 + 4847 + spin_unlock_bh(&ar->ab->base_lock); 4848 + 4849 + /* Driver must clear the keys during the state change from 4850 + * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after 4851 + * returning from here, mac80211 is going to delete the keys 4852 + * in __sta_info_destroy_part2(). 
This will ensure that the driver does 4853 + * not retain stale key references after mac80211 deletes the keys. 4854 + */ 4855 + ret = ath12k_clear_peer_keys(arvif, arsta->addr); 4856 + if (ret) { 4857 + ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", 4858 + arvif->vdev_id, ret); 4859 + return ret; 4860 + } 4861 + 4862 + return 0; 4863 + } 4864 + 4865 + static int ath12k_mac_station_authorize(struct ath12k *ar, 4866 + struct ath12k_link_vif *arvif, 4867 + struct ath12k_link_sta *arsta) 4868 + { 4869 + struct ath12k_peer *peer; 4870 + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 4871 + int ret; 4872 + 4873 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4874 + 4875 + spin_lock_bh(&ar->ab->base_lock); 4876 + 4877 + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); 4878 + if (peer) 4879 + peer->is_authorized = true; 4880 + 4881 + spin_unlock_bh(&ar->ab->base_lock); 4882 + 4883 + if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 4884 + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, 4885 + arvif->vdev_id, 4886 + WMI_PEER_AUTHORIZE, 4887 + 1); 4888 + if (ret) { 4889 + ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 4890 + arsta->addr, arvif->vdev_id, ret); 4891 + return ret; 4892 + } 4893 + } 4894 + 4895 + return 0; 4896 + } 4897 + 4898 + static int ath12k_mac_station_remove(struct ath12k *ar, 4899 + struct ath12k_link_vif *arvif, 4900 + struct ath12k_link_sta *arsta) 4901 + { 4902 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4903 + struct ath12k_vif *ahvif = arvif->ahvif; 4904 + int ret = 0; 4905 + 4906 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 4907 + 4908 + wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk); 4909 + 4910 + if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) { 4911 + ath12k_bss_disassoc(ar, arvif); 4912 + ret = ath12k_mac_vdev_stop(arvif); 4913 + if (ret) 4914 + ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n", 4915 + arvif->vdev_id, ret); 
4916 + } 4917 + 4918 + if (sta->mlo) 4919 + return ret; 4920 + 4921 + ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr); 4922 + 4923 + ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr); 4924 + if (ret) 4925 + ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", 4926 + arsta->addr, arvif->vdev_id); 4927 + else 4928 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", 4929 + arsta->addr, arvif->vdev_id); 4930 + 4931 + ath12k_mac_station_post_remove(ar, arvif, arsta); 4932 + 4933 + if (sta->valid_links) 4934 + ath12k_mac_free_unassign_link_sta(ahvif->ah, 4935 + arsta->ahsta, arsta->link_id); 4936 + 4937 + return ret; 4938 + } 4939 + 5131 4940 static int ath12k_mac_station_add(struct ath12k *ar, 5132 4941 struct ath12k_link_vif *arvif, 5133 4942 struct ath12k_link_sta *arsta) ··· 5273 4806 struct ath12k_base *ab = ar->ab; 5274 4807 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 5275 4808 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 5276 - struct ath12k_wmi_peer_create_arg peer_param; 4809 + struct ath12k_wmi_peer_create_arg peer_param = {0}; 5277 4810 int ret; 5278 4811 5279 4812 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ··· 5291 4824 } 5292 4825 5293 4826 peer_param.vdev_id = arvif->vdev_id; 5294 - peer_param.peer_addr = sta->addr; 4827 + peer_param.peer_addr = arsta->addr; 5295 4828 peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 4829 + peer_param.ml_enabled = sta->mlo; 5296 4830 5297 4831 ret = ath12k_peer_create(ar, arvif, sta, &peer_param); 5298 4832 if (ret) { 5299 4833 ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", 5300 - sta->addr, arvif->vdev_id); 4834 + arsta->addr, arvif->vdev_id); 5301 4835 goto free_peer; 5302 4836 } 5303 4837 5304 4838 ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 5305 - sta->addr, arvif->vdev_id); 4839 + arsta->addr, arvif->vdev_id); 5306 4840 5307 4841 if (ieee80211_vif_is_mesh(vif)) { 5308 - ret = 
ath12k_wmi_set_peer_param(ar, sta->addr, 4842 + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, 5309 4843 arvif->vdev_id, 5310 4844 WMI_PEER_USE_4ADDR, 1); 5311 4845 if (ret) { 5312 4846 ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n", 5313 - sta->addr, ret); 4847 + arsta->addr, ret); 5314 4848 goto free_peer; 5315 4849 } 5316 4850 } 5317 4851 5318 - ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); 4852 + ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr); 5319 4853 if (ret) { 5320 4854 ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", 5321 - sta->addr, arvif->vdev_id, ret); 4855 + arsta->addr, arvif->vdev_id, ret); 5322 4856 goto free_peer; 5323 4857 } 5324 4858 ··· 5336 4868 return 0; 5337 4869 5338 4870 free_peer: 5339 - ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); 4871 + ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr); 4872 + kfree(arsta->rx_stats); 4873 + arsta->rx_stats = NULL; 5340 4874 dec_num_station: 5341 4875 ath12k_mac_dec_num_stations(arvif, arsta); 5342 4876 exit: ··· 5376 4906 return bw; 5377 4907 } 5378 4908 4909 + static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah, 4910 + struct ath12k_sta *ahsta, 4911 + struct ath12k_link_sta *arsta, 4912 + struct ath12k_vif *ahvif, 4913 + u8 link_id) 4914 + { 4915 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); 4916 + struct ieee80211_link_sta *link_sta; 4917 + struct ath12k_link_vif *arvif; 4918 + 4919 + lockdep_assert_wiphy(ah->hw->wiphy); 4920 + 4921 + if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 4922 + return -EINVAL; 4923 + 4924 + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 4925 + if (!arvif) 4926 + return -EINVAL; 4927 + 4928 + memset(arsta, 0, sizeof(*arsta)); 4929 + 4930 + link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]); 4931 + if (!link_sta) 4932 + return -EINVAL; 4933 + 4934 + ether_addr_copy(arsta->addr, link_sta->addr); 4935 + 4936 + /* logical index of the link sta in order of 
creation */ 4937 + arsta->link_idx = ahsta->num_peer++; 4938 + 4939 + arsta->link_id = link_id; 4940 + ahsta->links_map |= BIT(arsta->link_id); 4941 + arsta->arvif = arvif; 4942 + arsta->ahsta = ahsta; 4943 + ahsta->ahvif = ahvif; 4944 + 4945 + wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk); 4946 + 4947 + rcu_assign_pointer(ahsta->link[link_id], arsta); 4948 + 4949 + return 0; 4950 + } 4951 + 4952 + static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif, 4953 + struct ath12k_sta *ahsta) 4954 + { 4955 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); 4956 + struct ath12k_hw *ah = ahvif->ah; 4957 + struct ath12k_link_vif *arvif; 4958 + struct ath12k_link_sta *arsta; 4959 + unsigned long links; 4960 + struct ath12k *ar; 4961 + u8 link_id; 4962 + 4963 + lockdep_assert_wiphy(ah->hw->wiphy); 4964 + 4965 + ath12k_peer_mlo_link_peers_delete(ahvif, ahsta); 4966 + 4967 + /* validate link station removal and clear arsta links */ 4968 + links = ahsta->links_map; 4969 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 4970 + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 4971 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 4972 + if (!arvif || !arsta) 4973 + continue; 4974 + 4975 + ar = arvif->ar; 4976 + 4977 + ath12k_mac_station_post_remove(ar, arvif, arsta); 4978 + 4979 + ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id); 4980 + } 4981 + 4982 + ath12k_peer_ml_delete(ah, sta); 4983 + } 4984 + 4985 + static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, 4986 + struct ath12k_link_vif *arvif, 4987 + struct ath12k_link_sta *arsta, 4988 + enum ieee80211_sta_state old_state, 4989 + enum ieee80211_sta_state new_state) 4990 + { 4991 + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 4992 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4993 + struct ath12k *ar = arvif->ar; 4994 + int ret = 0; 4995 + 4996 + lockdep_assert_wiphy(hw->wiphy); 4997 + 4998 + 
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n", 4999 + arsta->link_id, arsta->addr, old_state, new_state); 5000 + 5001 + /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station 5002 + * from driver 5003 + */ 5004 + if ((old_state == IEEE80211_STA_NONE && 5005 + new_state == IEEE80211_STA_NOTEXIST)) { 5006 + ret = ath12k_mac_station_remove(ar, arvif, arsta); 5007 + if (ret) { 5008 + ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", 5009 + arsta->addr, arvif->vdev_id); 5010 + goto exit; 5011 + } 5012 + } 5013 + 5014 + /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */ 5015 + if (old_state == IEEE80211_STA_NOTEXIST && 5016 + new_state == IEEE80211_STA_NONE) { 5017 + ret = ath12k_mac_station_add(ar, arvif, arsta); 5018 + if (ret) 5019 + ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 5020 + arsta->addr, arvif->vdev_id); 5021 + 5022 + /* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for 5023 + * peer associated to AP/Mesh/ADHOC vif type. 
5024 + */ 5025 + } else if (old_state == IEEE80211_STA_AUTH && 5026 + new_state == IEEE80211_STA_ASSOC && 5027 + (vif->type == NL80211_IFTYPE_AP || 5028 + vif->type == NL80211_IFTYPE_MESH_POINT || 5029 + vif->type == NL80211_IFTYPE_ADHOC)) { 5030 + ret = ath12k_mac_station_assoc(ar, arvif, arsta, false); 5031 + if (ret) 5032 + ath12k_warn(ar->ab, "Failed to associate station: %pM\n", 5033 + arsta->addr); 5034 + 5035 + spin_lock_bh(&ar->data_lock); 5036 + 5037 + arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 5038 + arsta->bw_prev = sta->deflink.bandwidth; 5039 + 5040 + spin_unlock_bh(&ar->data_lock); 5041 + 5042 + /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as 5043 + * authorized 5044 + */ 5045 + } else if (old_state == IEEE80211_STA_ASSOC && 5046 + new_state == IEEE80211_STA_AUTHORIZED) { 5047 + ret = ath12k_mac_station_authorize(ar, arvif, arsta); 5048 + if (ret) 5049 + ath12k_warn(ar->ab, "Failed to authorize station: %pM\n", 5050 + arsta->addr); 5051 + 5052 + /* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal, 5053 + * deauthorize it. 5054 + */ 5055 + } else if (old_state == IEEE80211_STA_AUTHORIZED && 5056 + new_state == IEEE80211_STA_ASSOC) { 5057 + ath12k_mac_station_unauthorize(ar, arvif, arsta); 5058 + 5059 + /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to 5060 + * AP/mesh/ADHOC vif type. 
5061 + */ 5062 + } else if (old_state == IEEE80211_STA_ASSOC && 5063 + new_state == IEEE80211_STA_AUTH && 5064 + (vif->type == NL80211_IFTYPE_AP || 5065 + vif->type == NL80211_IFTYPE_MESH_POINT || 5066 + vif->type == NL80211_IFTYPE_ADHOC)) { 5067 + ret = ath12k_mac_station_disassoc(ar, arvif, arsta); 5068 + if (ret) 5069 + ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", 5070 + arsta->addr); 5071 + } 5072 + 5073 + exit: 5074 + return ret; 5075 + } 5076 + 5379 5077 static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, 5380 5078 struct ieee80211_vif *vif, 5381 5079 struct ieee80211_sta *sta, ··· 5552 4914 { 5553 4915 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 5554 4916 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 5555 - struct ath12k *ar; 4917 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5556 4918 struct ath12k_link_vif *arvif; 5557 4919 struct ath12k_link_sta *arsta; 5558 - struct ath12k_peer *peer; 5559 - int ret = 0; 4920 + unsigned long valid_links; 4921 + u8 link_id = 0; 4922 + int ret; 5560 4923 5561 4924 lockdep_assert_wiphy(hw->wiphy); 5562 4925 5563 - arvif = &ahvif->deflink; 5564 - arsta = &ahsta->deflink; 5565 - 5566 - ar = ath12k_get_ar_by_vif(hw, vif); 5567 - if (!ar) { 5568 - WARN_ON_ONCE(1); 5569 - return -EINVAL; 4926 + if (ieee80211_vif_is_mld(vif) && sta->valid_links) { 4927 + WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1); 4928 + link_id = ffs(sta->valid_links) - 1; 5570 4929 } 5571 4930 4931 + /* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: 4932 + * New station add received. If this is a ML station then 4933 + * ahsta->links_map will be zero and sta->valid_links will be 1. 4934 + * Assign default link to the first link sta. 
4935 + */ 5572 4936 if (old_state == IEEE80211_STA_NOTEXIST && 5573 4937 new_state == IEEE80211_STA_NONE) { 5574 - memset(arsta, 0, sizeof(*arsta)); 5575 - rcu_assign_pointer(ahsta->link[0], arsta); 5576 - /* TODO use appropriate link id once MLO support is added */ 5577 - arsta->link_id = ATH12K_DEFAULT_LINK_ID; 5578 - ahsta->links_map = BIT(arsta->link_id); 5579 - arsta->ahsta = ahsta; 5580 - arsta->arvif = arvif; 5581 - wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk); 4938 + memset(ahsta, 0, sizeof(*ahsta)); 5582 4939 5583 - synchronize_rcu(); 4940 + arsta = &ahsta->deflink; 5584 4941 5585 - ret = ath12k_mac_station_add(ar, arvif, arsta); 5586 - if (ret) 5587 - ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 5588 - sta->addr, arvif->vdev_id); 5589 - } else if ((old_state == IEEE80211_STA_NONE && 5590 - new_state == IEEE80211_STA_NOTEXIST)) { 5591 - wiphy_work_cancel(hw->wiphy, &arsta->update_wk); 5592 - 5593 - if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) { 5594 - ath12k_bss_disassoc(ar, arvif); 5595 - ret = ath12k_mac_vdev_stop(arvif); 5596 - if (ret) 5597 - ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n", 5598 - arvif->vdev_id, ret); 4942 + /* ML sta */ 4943 + if (sta->mlo && !ahsta->links_map && 4944 + (hweight16(sta->valid_links) == 1)) { 4945 + ret = ath12k_peer_ml_create(ah, sta); 4946 + if (ret) { 4947 + ath12k_hw_warn(ah, "unable to create ML peer for sta %pM", 4948 + sta->addr); 4949 + goto exit; 4950 + } 5599 4951 } 5600 - ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); 5601 4952 5602 - ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); 5603 - if (ret) 5604 - ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", 5605 - sta->addr, arvif->vdev_id); 5606 - else 5607 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", 5608 - sta->addr, arvif->vdev_id); 5609 - 5610 - ath12k_mac_dec_num_stations(arvif, arsta); 5611 - spin_lock_bh(&ar->ab->base_lock); 5612 - peer = 
ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5613 - if (peer && peer->sta == sta) { 5614 - ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", 5615 - vif->addr, arvif->vdev_id); 5616 - peer->sta = NULL; 5617 - list_del(&peer->list); 5618 - kfree(peer); 5619 - ar->num_peers--; 4953 + ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, 4954 + link_id); 4955 + if (ret) { 4956 + ath12k_hw_warn(ah, "unable assign link %d for sta %pM", 4957 + link_id, sta->addr); 4958 + goto exit; 5620 4959 } 5621 - spin_unlock_bh(&ar->ab->base_lock); 5622 4960 5623 - kfree(arsta->rx_stats); 5624 - arsta->rx_stats = NULL; 5625 - 5626 - if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { 5627 - rcu_assign_pointer(ahsta->link[arsta->link_id], NULL); 5628 - synchronize_rcu(); 5629 - ahsta->links_map &= ~(BIT(arsta->link_id)); 5630 - arsta->link_id = ATH12K_INVALID_LINK_ID; 5631 - arsta->ahsta = NULL; 4961 + /* above arsta will get memset, hence do this after assign 4962 + * link sta 4963 + */ 4964 + if (sta->mlo) { 4965 + arsta->is_assoc_link = true; 4966 + ahsta->assoc_link_id = link_id; 5632 4967 } 5633 - } else if (old_state == IEEE80211_STA_AUTH && 5634 - new_state == IEEE80211_STA_ASSOC && 5635 - (vif->type == NL80211_IFTYPE_AP || 5636 - vif->type == NL80211_IFTYPE_MESH_POINT || 5637 - vif->type == NL80211_IFTYPE_ADHOC)) { 5638 - ret = ath12k_station_assoc(ar, arvif, arsta, false); 5639 - if (ret) 5640 - ath12k_warn(ar->ab, "Failed to associate station: %pM\n", 5641 - sta->addr); 5642 - 5643 - spin_lock_bh(&ar->data_lock); 5644 - 5645 - arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 5646 - arsta->bw_prev = sta->deflink.bandwidth; 5647 - 5648 - spin_unlock_bh(&ar->data_lock); 5649 - } else if (old_state == IEEE80211_STA_ASSOC && 5650 - new_state == IEEE80211_STA_AUTHORIZED) { 5651 - spin_lock_bh(&ar->ab->base_lock); 5652 - 5653 - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5654 - if (peer) 5655 - 
peer->is_authorized = true; 5656 - 5657 - spin_unlock_bh(&ar->ab->base_lock); 5658 - 5659 - if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 5660 - ret = ath12k_wmi_set_peer_param(ar, sta->addr, 5661 - arvif->vdev_id, 5662 - WMI_PEER_AUTHORIZE, 5663 - 1); 5664 - if (ret) 5665 - ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 5666 - sta->addr, arvif->vdev_id, ret); 5667 - } 5668 - } else if (old_state == IEEE80211_STA_AUTHORIZED && 5669 - new_state == IEEE80211_STA_ASSOC) { 5670 - spin_lock_bh(&ar->ab->base_lock); 5671 - 5672 - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5673 - if (peer) 5674 - peer->is_authorized = false; 5675 - 5676 - spin_unlock_bh(&ar->ab->base_lock); 5677 - } else if (old_state == IEEE80211_STA_ASSOC && 5678 - new_state == IEEE80211_STA_AUTH && 5679 - (vif->type == NL80211_IFTYPE_AP || 5680 - vif->type == NL80211_IFTYPE_MESH_POINT || 5681 - vif->type == NL80211_IFTYPE_ADHOC)) { 5682 - ret = ath12k_station_disassoc(ar, arvif, arsta); 5683 - if (ret) 5684 - ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", 5685 - sta->addr); 5686 4968 } 4969 + 4970 + /* Handle all the other state transitions in generic way */ 4971 + valid_links = ahsta->links_map; 4972 + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { 4973 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4974 + arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]); 4975 + /* some assumptions went wrong! 
*/ 4976 + if (WARN_ON(!arvif || !arsta)) 4977 + continue; 4978 + 4979 + /* vdev might be in deleted */ 4980 + if (WARN_ON(!arvif->ar)) 4981 + continue; 4982 + 4983 + ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta, 4984 + old_state, new_state); 4985 + if (ret) { 4986 + ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d", 4987 + link_id, arsta->addr, old_state, new_state); 4988 + goto exit; 4989 + } 4990 + } 4991 + 4992 + /* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: 4993 + * Remove the station from driver (handle ML sta here since that 4994 + * needs special handling. Normal sta will be handled in generic 4995 + * handler below 4996 + */ 4997 + if (old_state == IEEE80211_STA_NONE && 4998 + new_state == IEEE80211_STA_NOTEXIST && sta->mlo) 4999 + ath12k_mac_ml_station_remove(ahvif, ahsta); 5000 + 5001 + ret = 0; 5002 + 5003 + exit: 5004 + /* update the state if everything went well */ 5005 + if (!ret) 5006 + ahsta->state = new_state; 5687 5007 5688 5008 return ret; 5689 5009 } ··· 5650 5054 struct ieee80211_vif *vif, 5651 5055 struct ieee80211_sta *sta) 5652 5056 { 5653 - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5057 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 5654 5058 struct ath12k *ar; 5655 5059 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 5656 5060 struct ath12k_link_vif *arvif; 5061 + struct ath12k_link_sta *arsta; 5062 + u8 link_id; 5657 5063 int ret; 5658 5064 s16 txpwr; 5659 5065 5660 5066 lockdep_assert_wiphy(hw->wiphy); 5661 5067 5662 - arvif = &ahvif->deflink; 5068 + /* TODO: use link id from mac80211 once that's implemented */ 5069 + link_id = 0; 5070 + 5071 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 5072 + arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]); 5663 5073 5664 5074 if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { 5665 5075 txpwr = 0; ··· 5682 5080 goto out; 5683 5081 } 5684 5082 5685 - ar = ath12k_ah_to_ar(ah, 0); 5083 + ar = arvif->ar; 5686 
5084 5687 - ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 5085 + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id, 5688 5086 WMI_PEER_USE_FIXED_PWR, txpwr); 5689 5087 if (ret) { 5690 5088 ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n", ··· 5696 5094 return ret; 5697 5095 } 5698 5096 5699 - static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, 5700 - struct ieee80211_vif *vif, 5701 - struct ieee80211_link_sta *link_sta, 5702 - u32 changed) 5097 + static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw, 5098 + struct ieee80211_vif *vif, 5099 + struct ieee80211_link_sta *link_sta, 5100 + u32 changed) 5703 5101 { 5704 5102 struct ieee80211_sta *sta = link_sta->sta; 5705 5103 struct ath12k *ar; 5706 5104 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 5707 5105 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 5106 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5708 5107 struct ath12k_link_sta *arsta; 5709 5108 struct ath12k_link_vif *arvif; 5710 5109 struct ath12k_peer *peer; 5711 5110 u32 bw, smps; 5712 - /* TODO: use proper link id once link sta specific rc update support is 5713 - * available in mac80211. 
5714 - */ 5715 - u8 link_id = ATH12K_DEFAULT_LINK_ID; 5716 - 5717 - ar = ath12k_get_ar_by_vif(hw, vif); 5718 - if (!ar) { 5719 - WARN_ON_ONCE(1); 5720 - return; 5721 - } 5722 5111 5723 5112 rcu_read_lock(); 5724 - arvif = rcu_dereference(ahvif->link[link_id]); 5113 + arvif = rcu_dereference(ahvif->link[link_sta->link_id]); 5725 5114 if (!arvif) { 5726 - ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n", 5727 - link_id, sta->addr); 5115 + ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n", 5116 + link_sta->link_id, sta->addr); 5728 5117 rcu_read_unlock(); 5729 5118 return; 5730 5119 } 5731 - arsta = rcu_dereference(ahsta->link[link_id]); 5120 + 5121 + ar = arvif->ar; 5122 + 5123 + arsta = rcu_dereference(ahsta->link[link_sta->link_id]); 5732 5124 if (!arsta) { 5733 5125 rcu_read_unlock(); 5734 5126 ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n", 5735 - link_id, sta->addr); 5127 + link_sta->link_id, sta->addr); 5736 5128 return; 5737 5129 } 5738 5130 spin_lock_bh(&ar->ab->base_lock); 5739 5131 5740 - peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5132 + peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr); 5741 5133 if (!peer) { 5742 5134 spin_unlock_bh(&ar->ab->base_lock); 5743 5135 rcu_read_unlock(); 5744 5136 ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", 5745 - sta->addr, arvif->vdev_id); 5137 + arsta->addr, arvif->vdev_id); 5746 5138 return; 5747 5139 } 5748 5140 5749 5141 spin_unlock_bh(&ar->ab->base_lock); 5750 5142 5143 + if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { 5144 + rcu_read_unlock(); 5145 + return; 5146 + } 5147 + 5148 + link_sta = rcu_dereference(sta->link[arsta->link_id]); 5149 + if (!link_sta) { 5150 + rcu_read_unlock(); 5151 + ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n", 5152 + sta->addr, arsta->link_id); 5153 + return; 
5154 + } 5155 + 5751 5156 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 5752 5157 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 5753 - sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, 5754 - sta->deflink.smps_mode); 5158 + arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss, 5159 + link_sta->smps_mode); 5755 5160 5756 5161 spin_lock_bh(&ar->data_lock); 5757 5162 ··· 5769 5160 } 5770 5161 5771 5162 if (changed & IEEE80211_RC_NSS_CHANGED) 5772 - arsta->nss = sta->deflink.rx_nss; 5163 + arsta->nss = link_sta->rx_nss; 5773 5164 5774 5165 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5775 5166 smps = WMI_PEER_SMPS_PS_NONE; 5776 5167 5777 - switch (sta->deflink.smps_mode) { 5168 + switch (link_sta->smps_mode) { 5778 5169 case IEEE80211_SMPS_AUTOMATIC: 5779 5170 case IEEE80211_SMPS_OFF: 5780 5171 smps = WMI_PEER_SMPS_PS_NONE; ··· 5786 5177 smps = WMI_PEER_SMPS_DYNAMIC; 5787 5178 break; 5788 5179 default: 5789 - ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", 5790 - sta->deflink.smps_mode, sta->addr); 5180 + ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n", 5181 + link_sta->smps_mode, arsta->addr, link_sta->link_id); 5791 5182 smps = WMI_PEER_SMPS_PS_NONE; 5792 5183 break; 5793 5184 } ··· 5802 5193 wiphy_work_queue(hw->wiphy, &arsta->update_wk); 5803 5194 5804 5195 rcu_read_unlock(); 5196 + } 5197 + 5198 + static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah, 5199 + struct ath12k_sta *ahsta, 5200 + struct ath12k_vif *ahvif, 5201 + u8 link_id) 5202 + { 5203 + struct ath12k_link_sta *arsta; 5204 + int ret; 5205 + 5206 + lockdep_assert_wiphy(ah->hw->wiphy); 5207 + 5208 + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) 5209 + return NULL; 5210 + 5211 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 5212 + if (arsta) 5213 + return NULL; 5214 + 5215 + arsta = kmalloc(sizeof(*arsta), GFP_KERNEL); 5216 + if (!arsta) 5217 + return NULL; 5218 + 5219 + ret = 
ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id); 5220 + if (ret) { 5221 + kfree(arsta); 5222 + return NULL; 5223 + } 5224 + 5225 + return arsta; 5226 + } 5227 + 5228 + static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw, 5229 + struct ieee80211_vif *vif, 5230 + struct ieee80211_sta *sta, 5231 + u16 old_links, u16 new_links) 5232 + { 5233 + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 5234 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 5235 + struct ath12k_hw *ah = hw->priv; 5236 + struct ath12k_link_vif *arvif; 5237 + struct ath12k_link_sta *arsta; 5238 + unsigned long valid_links; 5239 + struct ath12k *ar; 5240 + u8 link_id; 5241 + int ret; 5242 + 5243 + lockdep_assert_wiphy(hw->wiphy); 5244 + 5245 + if (!sta->valid_links) 5246 + return -EINVAL; 5247 + 5248 + /* Firmware does not support removal of one of link stas. All sta 5249 + * would be removed during ML STA delete in sta_state(), hence link 5250 + * sta removal is not handled here. 5251 + */ 5252 + if (new_links < old_links) 5253 + return 0; 5254 + 5255 + if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) { 5256 + ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr); 5257 + return -EINVAL; 5258 + } 5259 + 5260 + /* this op is expected only after initial sta insertion with default link */ 5261 + if (WARN_ON(ahsta->links_map == 0)) 5262 + return -EINVAL; 5263 + 5264 + valid_links = new_links; 5265 + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { 5266 + if (ahsta->links_map & BIT(link_id)) 5267 + continue; 5268 + 5269 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 5270 + arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id); 5271 + 5272 + if (!arvif || !arsta) { 5273 + ath12k_hw_warn(ah, "Failed to alloc/assign link sta"); 5274 + continue; 5275 + } 5276 + 5277 + ar = arvif->ar; 5278 + if (!ar) 5279 + continue; 5280 + 5281 + ret = ath12k_mac_station_add(ar, arvif, arsta); 5282 + if (ret) { 5283 + 
ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 5284 + arsta->addr, arvif->vdev_id); 5285 + ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id); 5286 + return ret; 5287 + } 5288 + } 5289 + 5290 + return 0; 5805 5291 } 5806 5292 5807 5293 static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif, ··· 6758 6054 { 6759 6055 int num_mgmt; 6760 6056 6057 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 6058 + 6761 6059 ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb); 6762 6060 6763 6061 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); ··· 6821 6115 int buf_id; 6822 6116 int ret; 6823 6117 6118 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 6119 + 6824 6120 ATH12K_SKB_CB(skb)->ar = ar; 6825 6121 spin_lock_bh(&ar->txmgmt_idr_lock); 6826 6122 buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ··· 6877 6169 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 6878 6170 } 6879 6171 6880 - static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) 6172 + static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work) 6881 6173 { 6882 6174 struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); 6175 + struct ath12k_hw *ah = ar->ah; 6883 6176 struct ath12k_skb_cb *skb_cb; 6884 6177 struct ath12k_vif *ahvif; 6885 6178 struct ath12k_link_vif *arvif; 6886 6179 struct sk_buff *skb; 6887 6180 int ret; 6181 + 6182 + lockdep_assert_wiphy(wiphy); 6888 6183 6889 6184 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { 6890 6185 skb_cb = ATH12K_SKB_CB(skb); ··· 6898 6187 } 6899 6188 6900 6189 ahvif = ath12k_vif_to_ahvif(skb_cb->vif); 6901 - arvif = &ahvif->deflink; 6190 + if (!(ahvif->links_map & BIT(skb_cb->link_id))) { 6191 + ath12k_warn(ar->ab, 6192 + "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n", 6193 + skb_cb->link_id, ahvif->links_map); 6194 + ath12k_mgmt_over_wmi_tx_drop(ar, skb); 6195 + continue; 6196 + } 6197 + 6198 + arvif = wiphy_dereference(ah->hw->wiphy, 
ahvif->link[skb_cb->link_id]); 6902 6199 if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { 6903 6200 ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); 6904 6201 if (ret) { ··· 6916 6197 } 6917 6198 } else { 6918 6199 ath12k_warn(ar->ab, 6919 - "dropping mgmt frame for vdev %d, is_started %d\n", 6200 + "dropping mgmt frame for vdev %d link %u is_started %d\n", 6920 6201 arvif->vdev_id, 6202 + skb_cb->link_id, 6921 6203 arvif->is_started); 6922 6204 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 6923 6205 } ··· 6952 6232 6953 6233 skb_queue_tail(q, skb); 6954 6234 atomic_inc(&ar->num_pending_mgmt_tx); 6955 - ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work); 6235 + wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work); 6956 6236 6957 6237 return 0; 6958 6238 } ··· 6978 6258 spin_unlock_bh(&ar->data_lock); 6979 6259 } 6980 6260 6261 + /* Note: called under rcu_read_lock() */ 6262 + static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif, 6263 + u8 link, struct sk_buff *skb, u32 info_flags) 6264 + { 6265 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 6266 + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 6267 + struct ieee80211_link_sta *link_sta; 6268 + struct ieee80211_bss_conf *bss_conf; 6269 + struct ath12k_sta *ahsta; 6270 + 6271 + /* Use the link id passed or the default vif link */ 6272 + if (!sta) { 6273 + if (link != IEEE80211_LINK_UNSPECIFIED) 6274 + return link; 6275 + 6276 + return ahvif->deflink.link_id; 6277 + } 6278 + 6279 + ahsta = ath12k_sta_to_ahsta(sta); 6280 + 6281 + /* Below translation ensures we pass proper A2 & A3 for non ML clients. 
6282 + * Also it assumes for now support only for MLO AP in this path 6283 + */ 6284 + if (!sta->mlo) { 6285 + link = ahsta->deflink.link_id; 6286 + 6287 + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) 6288 + return link; 6289 + 6290 + bss_conf = rcu_dereference(vif->link_conf[link]); 6291 + if (bss_conf) { 6292 + ether_addr_copy(hdr->addr2, bss_conf->addr); 6293 + if (!ieee80211_has_tods(hdr->frame_control) && 6294 + !ieee80211_has_fromds(hdr->frame_control)) 6295 + ether_addr_copy(hdr->addr3, bss_conf->addr); 6296 + } 6297 + 6298 + return link; 6299 + } 6300 + 6301 + /* enqueue eth enacap & data frames on primary link, FW does link 6302 + * selection and address translation. 6303 + */ 6304 + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP || 6305 + ieee80211_is_data(hdr->frame_control)) 6306 + return ahsta->assoc_link_id; 6307 + 6308 + /* 802.11 frame cases */ 6309 + if (link == IEEE80211_LINK_UNSPECIFIED) 6310 + link = ahsta->deflink.link_id; 6311 + 6312 + if (!ieee80211_is_mgmt(hdr->frame_control)) 6313 + return link; 6314 + 6315 + /* Perform address conversion for ML STA Tx */ 6316 + bss_conf = rcu_dereference(vif->link_conf[link]); 6317 + link_sta = rcu_dereference(sta->link[link]); 6318 + 6319 + if (bss_conf && link_sta) { 6320 + ether_addr_copy(hdr->addr1, link_sta->addr); 6321 + ether_addr_copy(hdr->addr2, bss_conf->addr); 6322 + 6323 + if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid) 6324 + ether_addr_copy(hdr->addr3, bss_conf->bssid); 6325 + else if (vif->type == NL80211_IFTYPE_AP) 6326 + ether_addr_copy(hdr->addr3, bss_conf->addr); 6327 + 6328 + return link; 6329 + } 6330 + 6331 + if (bss_conf) { 6332 + /* In certain cases where a ML sta associated and added subset of 6333 + * links on which the ML AP is active, but now sends some frame 6334 + * (ex. 
Probe request) on a different link which is active in our 6335 + * MLD but was not added during previous association, we can 6336 + * still honor the Tx to that ML STA via the requested link. 6337 + * The control would reach here in such case only when that link 6338 + * address is same as the MLD address or in worst case clients 6339 + * used MLD address at TA wrongly which would have helped 6340 + * identify the ML sta object and pass it here. 6341 + * If the link address of that STA is different from MLD address, 6342 + * then the sta object would be NULL and control won't reach 6343 + * here but return at the start of the function itself with !sta 6344 + * check. Also this would not need any translation at hdr->addr1 6345 + * from MLD to link address since the RA is the MLD address 6346 + * (same as that link address ideally) already. 6347 + */ 6348 + ether_addr_copy(hdr->addr2, bss_conf->addr); 6349 + 6350 + if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid) 6351 + ether_addr_copy(hdr->addr3, bss_conf->bssid); 6352 + else if (vif->type == NL80211_IFTYPE_AP) 6353 + ether_addr_copy(hdr->addr3, bss_conf->addr); 6354 + } 6355 + 6356 + return link; 6357 + } 6358 + 6359 + /* Note: called under rcu_read_lock() */ 6981 6360 static void ath12k_mac_op_tx(struct ieee80211_hw *hw, 6982 6361 struct ieee80211_tx_control *control, 6983 6362 struct sk_buff *skb) ··· 7086 6267 struct ieee80211_vif *vif = info->control.vif; 7087 6268 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 7088 6269 struct ath12k_link_vif *arvif = &ahvif->deflink; 7089 - struct ath12k *ar = arvif->ar; 7090 6270 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 7091 6271 struct ieee80211_key_conf *key = info->control.hw_key; 6272 + struct ieee80211_sta *sta = control->sta; 7092 6273 u32 info_flags = info->flags; 6274 + struct ath12k *ar; 7093 6275 bool is_prb_rsp; 6276 + u8 link_id; 7094 6277 int ret; 7095 6278 6279 + link_id = u32_get_bits(info->control.flags, 
IEEE80211_TX_CTRL_MLO_LINK); 7096 6280 memset(skb_cb, 0, sizeof(*skb_cb)); 7097 6281 skb_cb->vif = vif; 7098 6282 ··· 7104 6282 skb_cb->flags |= ATH12K_SKB_CIPHER_SET; 7105 6283 } 7106 6284 6285 + /* handle only for MLO case, use deflink for non MLO case */ 6286 + if (ieee80211_vif_is_mld(vif)) { 6287 + link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags); 6288 + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { 6289 + ieee80211_free_txskb(hw, skb); 6290 + return; 6291 + } 6292 + } else { 6293 + link_id = 0; 6294 + } 6295 + 6296 + arvif = rcu_dereference(ahvif->link[link_id]); 6297 + if (!arvif || !arvif->ar) { 6298 + ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission", 6299 + link_id); 6300 + ieee80211_free_txskb(hw, skb); 6301 + return; 6302 + } 6303 + 6304 + ar = arvif->ar; 6305 + skb_cb->link_id = link_id; 7107 6306 is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); 7108 6307 7109 6308 if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { ··· 7152 6309 7153 6310 void ath12k_mac_drain_tx(struct ath12k *ar) 7154 6311 { 6312 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 6313 + 7155 6314 /* make sure rcu-protected mac80211 tx path itself is drained */ 7156 6315 synchronize_net(); 7157 6316 7158 - cancel_work_sync(&ar->wmi_mgmt_tx_work); 6317 + wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work); 7159 6318 ath12k_mgmt_over_wmi_tx_purge(ar); 7160 6319 } 7161 6320 ··· 7273 6428 { 7274 6429 struct ath12k *ar; 7275 6430 int i; 6431 + 6432 + lockdep_assert_wiphy(ah->hw->wiphy); 7276 6433 7277 6434 for_each_ar(ah, ar, i) 7278 6435 ath12k_mac_drain_tx(ar); ··· 7469 6622 { 7470 6623 struct ath12k_vif *ahvif = arvif->ahvif; 7471 6624 struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif; 6625 + struct ieee80211_bss_conf *link_conf; 7472 6626 struct ath12k *ar = arvif->ar; 7473 6627 struct ath12k_link_vif *tx_arvif; 7474 6628 struct ath12k_vif *tx_ahvif; ··· 7477 6629 if (!tx_vif) 7478 6630 return 0; 7479 
6631 6632 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 6633 + if (!link_conf) { 6634 + ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n", 6635 + ahvif->vif->addr, arvif->link_id); 6636 + return -ENOLINK; 6637 + } 6638 + 7480 6639 tx_ahvif = ath12k_vif_to_ahvif(tx_vif); 7481 6640 tx_arvif = &tx_ahvif->deflink; 7482 6641 7483 - if (ahvif->vif->bss_conf.nontransmitted) { 6642 + if (link_conf->nontransmitted) { 7484 6643 if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) 7485 6644 return -EINVAL; 7486 6645 ··· 7499 6644 return -EINVAL; 7500 6645 } 7501 6646 7502 - if (ahvif->vif->bss_conf.ema_ap) 6647 + if (link_conf->ema_ap) 7503 6648 *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE; 7504 6649 7505 6650 return 0; ··· 7512 6657 struct ath12k_pdev *pdev = ar->pdev; 7513 6658 struct ath12k_vif *ahvif = arvif->ahvif; 7514 6659 int ret; 6660 + 6661 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 7515 6662 7516 6663 arg->if_id = arvif->vdev_id; 7517 6664 arg->type = ahvif->vdev_type; ··· 7546 6689 } 7547 6690 7548 6691 arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif); 6692 + 6693 + if (ath12k_mac_is_ml_arvif(arvif)) { 6694 + if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) { 6695 + ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d", 6696 + ahvif->vif->valid_links); 6697 + return -EINVAL; 6698 + } 6699 + 6700 + ether_addr_copy(arg->mld_addr, ahvif->vif->addr); 6701 + } 6702 + 7549 6703 return 0; 7550 6704 } 7551 6705 ··· 7707 6839 struct ath12k_vif *ahvif = arvif->ahvif; 7708 6840 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 7709 6841 struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; 7710 - struct ath12k_wmi_peer_create_arg peer_param; 6842 + struct ath12k_wmi_peer_create_arg peer_param = {0}; 7711 6843 struct ieee80211_bss_conf *link_conf; 7712 6844 u32 param_id, param_value; 7713 6845 u16 nss; 7714 6846 int i; 7715 6847 int ret, vdev_id; 6848 + u8 link_id; 7716 
6849 7717 6850 lockdep_assert_wiphy(hw->wiphy); 7718 6851 7719 - link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]); 6852 + /* If no link is active and scan vdev is requested 6853 + * use a default link conf for scan address purpose. 6854 + */ 6855 + if (arvif->link_id == ATH12K_DEFAULT_SCAN_LINK && vif->valid_links) 6856 + link_id = ffs(vif->valid_links) - 1; 6857 + else 6858 + link_id = arvif->link_id; 6859 + 6860 + link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); 7720 6861 if (!link_conf) { 7721 6862 ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n", 7722 6863 vif->addr, arvif->link_id); ··· 7876 6999 break; 7877 7000 } 7878 7001 7879 - arvif->txpower = vif->bss_conf.txpower; 7002 + arvif->txpower = link_conf->txpower; 7880 7003 ret = ath12k_mac_txpower_recalc(ar); 7881 7004 if (ret) 7882 7005 goto err_peer_del; ··· 7911 7034 ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, 7912 7035 arvif->bssid); 7913 7036 if (ret) 7914 - /* KVALO: why not goto err? */ 7915 - return ret; 7037 + goto err_vdev_del; 7916 7038 7917 7039 ar->num_peers--; 7918 7040 } ··· 8005 7129 struct ath12k_link_vif *arvif, 8006 7130 struct ieee80211_chanctx_conf *ctx) 8007 7131 { 8008 - struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 7132 + struct ath12k_vif *ahvif = arvif->ahvif; 7133 + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 7134 + struct ath12k_link_vif *scan_arvif; 8009 7135 struct ath12k_hw *ah = hw->priv; 8010 7136 struct ath12k *ar; 8011 7137 struct ath12k_base *ab; ··· 8025 7147 8026 7148 if (!ar) 8027 7149 return NULL; 7150 + 7151 + /* cleanup the scan vdev if we are done scan on that ar 7152 + * and now we want to create for actual usage. 
7153 + */ 7154 + if (ieee80211_vif_is_mld(vif)) { 7155 + scan_arvif = wiphy_dereference(hw->wiphy, 7156 + ahvif->link[ATH12K_DEFAULT_SCAN_LINK]); 7157 + if (scan_arvif && scan_arvif->ar == ar) { 7158 + ar->scan.vdev_id = -1; 7159 + ath12k_mac_remove_link_interface(hw, scan_arvif); 7160 + ath12k_mac_unassign_link_vif(scan_arvif); 7161 + } 7162 + } 8028 7163 8029 7164 if (arvif->ar) { 8030 7165 /* This is not expected really */ ··· 8134 7243 vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; 8135 7244 8136 7245 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 8137 - /* For non-ml vifs, vif->addr is the actual vdev address but for 8138 - * ML vif link(link BSSID) address is the vdev address and it can be a 8139 - * different one from vif->addr (i.e ML address). 8140 - * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver 7246 + /* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver 8141 7247 * will not know if this interface is an ML vif at this point. 8142 7248 */ 8143 - ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL); 8144 - 8145 7249 return 0; 8146 7250 } 8147 7251 ··· 8238 7352 8239 7353 lockdep_assert_wiphy(hw->wiphy); 8240 7354 8241 - for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { 7355 + for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) { 8242 7356 /* if we cached some config but never received assign chanctx, 8243 7357 * free the allocated cache. 
8244 7358 */ ··· 8339 7453 return ret; 8340 7454 } 8341 7455 8342 - static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif, 8343 - struct ieee80211_ampdu_params *params) 7456 + static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw, 7457 + struct ieee80211_vif *vif, 7458 + struct ieee80211_ampdu_params *params, 7459 + u8 link_id) 8344 7460 { 8345 - struct ath12k *ar = arvif->ar; 7461 + struct ath12k *ar; 8346 7462 int ret = -EINVAL; 8347 7463 8348 - lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 7464 + lockdep_assert_wiphy(hw->wiphy); 7465 + 7466 + ar = ath12k_get_ar_by_vif(hw, vif, link_id); 7467 + if (!ar) 7468 + return -EINVAL; 8349 7469 8350 7470 switch (params->action) { 8351 7471 case IEEE80211_AMPDU_RX_START: 8352 - ret = ath12k_dp_rx_ampdu_start(ar, params); 7472 + ret = ath12k_dp_rx_ampdu_start(ar, params, link_id); 8353 7473 break; 8354 7474 case IEEE80211_AMPDU_RX_STOP: 8355 - ret = ath12k_dp_rx_ampdu_stop(ar, params); 7475 + ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id); 8356 7476 break; 8357 7477 case IEEE80211_AMPDU_TX_START: 8358 7478 case IEEE80211_AMPDU_TX_STOP_CONT: ··· 8372 7480 break; 8373 7481 } 8374 7482 7483 + if (ret) 7484 + ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n", 7485 + params->action, vif->addr, link_id, ret); 7486 + 8375 7487 return ret; 8376 7488 } 8377 7489 ··· 8383 7487 struct ieee80211_vif *vif, 8384 7488 struct ieee80211_ampdu_params *params) 8385 7489 { 8386 - struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 8387 - struct ath12k *ar; 8388 - struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 8389 - struct ath12k_link_vif *arvif; 7490 + struct ieee80211_sta *sta = params->sta; 7491 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 7492 + unsigned long links_map = ahsta->links_map; 8390 7493 int ret = -EINVAL; 7494 + u8 link_id; 8391 7495 8392 7496 lockdep_assert_wiphy(hw->wiphy); 8393 7497 8394 - ar = ath12k_get_ar_by_vif(hw, vif); 8395 - if (!ar) 8396 - return 
-EINVAL; 7498 + if (WARN_ON(!links_map)) 7499 + return ret; 8397 7500 8398 - ar = ath12k_ah_to_ar(ah, 0); 8399 - arvif = &ahvif->deflink; 7501 + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { 7502 + ret = ath12k_mac_ampdu_action(hw, vif, params, link_id); 7503 + if (ret) 7504 + return ret; 7505 + } 8400 7506 8401 - ret = ath12k_mac_ampdu_action(arvif, params); 8402 - if (ret) 8403 - ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n", 8404 - ar->pdev_idx, params->action, ret); 8405 - 8406 - return ret; 7507 + return 0; 8407 7508 } 8408 7509 8409 7510 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, ··· 8520 7627 return down_mode; 8521 7628 } 8522 7629 7630 + static void 7631 + ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif, 7632 + struct wmi_ml_arg *ml_arg) 7633 + { 7634 + struct ath12k_vif *ahvif = arvif->ahvif; 7635 + struct wmi_ml_partner_info *partner_info; 7636 + struct ieee80211_bss_conf *link_conf; 7637 + struct ath12k_link_vif *arvif_p; 7638 + unsigned long links; 7639 + u8 link_id; 7640 + 7641 + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); 7642 + 7643 + if (!ath12k_mac_is_ml_arvif(arvif)) 7644 + return; 7645 + 7646 + if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) 7647 + return; 7648 + 7649 + ml_arg->enabled = true; 7650 + 7651 + /* Driver always add a new link via VDEV START, FW takes 7652 + * care of internally adding this link to existing 7653 + * link vdevs which are advertised as partners below 7654 + */ 7655 + ml_arg->link_add = true; 7656 + partner_info = ml_arg->partner_info; 7657 + 7658 + links = ahvif->links_map; 7659 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 7660 + arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); 7661 + 7662 + if (WARN_ON(!arvif_p)) 7663 + continue; 7664 + 7665 + if (arvif == arvif_p) 7666 + continue; 7667 + 7668 + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, 7669 + 
ahvif->vif->link_conf[arvif_p->link_id]); 7670 + 7671 + if (!link_conf) 7672 + continue; 7673 + 7674 + partner_info->vdev_id = arvif_p->vdev_id; 7675 + partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id; 7676 + ether_addr_copy(partner_info->addr, link_conf->addr); 7677 + ml_arg->num_partner_links++; 7678 + partner_info++; 7679 + } 7680 + } 7681 + 8523 7682 static int 8524 7683 ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif, 8525 7684 struct ieee80211_chanctx_conf *ctx, ··· 8582 7637 struct wmi_vdev_start_req_arg arg = {}; 8583 7638 const struct cfg80211_chan_def *chandef = &ctx->def; 8584 7639 struct ath12k_vif *ahvif = arvif->ahvif; 8585 - int he_support = ahvif->vif->bss_conf.he_support; 7640 + struct ieee80211_bss_conf *link_conf; 8586 7641 int ret; 8587 7642 8588 7643 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 7644 + 7645 + link_conf = ath12k_mac_get_link_bss_conf(arvif); 7646 + if (!link_conf) { 7647 + ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n", 7648 + ahvif->vif->addr, arvif->link_id); 7649 + return -ENOLINK; 7650 + } 8589 7651 8590 7652 reinit_completion(&ar->vdev_setup_done); 8591 7653 ··· 8645 7693 spin_unlock_bh(&ab->base_lock); 8646 7694 8647 7695 /* TODO: Notify if secondary 80Mhz also needs radar detection */ 8648 - if (he_support) { 7696 + if (link_conf->he_support) { 8649 7697 ret = ath12k_set_he_mu_sounding_mode(ar, arvif); 8650 7698 if (ret) { 8651 7699 ath12k_warn(ar->ab, "failed to set he mode vdev %i\n", ··· 8656 7704 } 8657 7705 8658 7706 arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); 7707 + 7708 + if (!restart) 7709 + ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml); 8659 7710 8660 7711 ath12k_dbg(ab, ATH12K_DBG_MAC, 8661 7712 "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n", ··· 8733 7778 { 8734 7779 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 8735 7780 struct ath12k_mac_change_chanctx_arg *arg = data; 7781 + struct 
ieee80211_bss_conf *link_conf; 8736 7782 struct ath12k_link_vif *arvif; 7783 + unsigned long links_map; 7784 + u8 link_id; 8737 7785 8738 7786 lockdep_assert_wiphy(ahvif->ah->hw->wiphy); 8739 7787 8740 - arvif = &ahvif->deflink; 7788 + links_map = ahvif->links_map; 7789 + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { 7790 + arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); 7791 + if (WARN_ON(!arvif)) 7792 + continue; 8741 7793 8742 - if (arvif->ar != arg->ar) 8743 - return; 7794 + if (arvif->ar != arg->ar) 7795 + continue; 8744 7796 8745 - if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) 8746 - return; 7797 + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, 7798 + vif->link_conf[link_id]); 7799 + if (WARN_ON(!link_conf)) 7800 + continue; 8747 7801 8748 - arg->n_vifs++; 7802 + if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx) 7803 + continue; 7804 + 7805 + arg->n_vifs++; 7806 + } 8749 7807 } 8750 7808 8751 7809 static void ··· 8767 7799 { 8768 7800 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 8769 7801 struct ath12k_mac_change_chanctx_arg *arg = data; 7802 + struct ieee80211_bss_conf *link_conf; 8770 7803 struct ieee80211_chanctx_conf *ctx; 8771 7804 struct ath12k_link_vif *arvif; 7805 + unsigned long links_map; 7806 + u8 link_id; 8772 7807 8773 7808 lockdep_assert_wiphy(ahvif->ah->hw->wiphy); 8774 7809 8775 - arvif = &ahvif->deflink; 7810 + links_map = ahvif->links_map; 7811 + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { 7812 + arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]); 7813 + if (WARN_ON(!arvif)) 7814 + continue; 8776 7815 8777 - if (arvif->ar != arg->ar) 8778 - return; 7816 + if (arvif->ar != arg->ar) 7817 + continue; 8779 7818 8780 - ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); 8781 - if (ctx != arg->ctx) 8782 - return; 7819 + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, 7820 + vif->link_conf[arvif->link_id]); 7821 
+ if (WARN_ON(!link_conf)) 7822 + continue; 8783 7823 8784 - if (WARN_ON(arg->next_vif == arg->n_vifs)) 8785 - return; 7824 + ctx = rcu_access_pointer(link_conf->chanctx_conf); 7825 + if (ctx != arg->ctx) 7826 + continue; 8786 7827 8787 - arg->vifs[arg->next_vif].vif = vif; 8788 - arg->vifs[arg->next_vif].old_ctx = ctx; 8789 - arg->vifs[arg->next_vif].new_ctx = ctx; 8790 - arg->next_vif++; 7828 + if (WARN_ON(arg->next_vif == arg->n_vifs)) 7829 + return; 7830 + 7831 + arg->vifs[arg->next_vif].vif = vif; 7832 + arg->vifs[arg->next_vif].old_ctx = ctx; 7833 + arg->vifs[arg->next_vif].new_ctx = ctx; 7834 + arg->vifs[arg->next_vif].link_conf = link_conf; 7835 + arg->next_vif++; 7836 + } 8791 7837 } 8792 7838 8793 7839 static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width) ··· 8861 7879 int n_vifs) 8862 7880 { 8863 7881 struct ath12k_wmi_vdev_up_params params = {}; 7882 + struct ieee80211_bss_conf *link_conf; 8864 7883 struct ath12k_base *ab = ar->ab; 8865 7884 struct ath12k_link_vif *arvif; 8866 7885 struct ieee80211_vif *vif; 8867 7886 struct ath12k_vif *ahvif; 7887 + u8 link_id; 8868 7888 int ret; 8869 7889 int i; 8870 7890 bool monitor_vif = false; ··· 8876 7892 for (i = 0; i < n_vifs; i++) { 8877 7893 vif = vifs[i].vif; 8878 7894 ahvif = ath12k_vif_to_ahvif(vif); 8879 - arvif = &ahvif->deflink; 7895 + link_conf = vifs[i].link_conf; 7896 + link_id = link_conf->link_id; 7897 + arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 7898 + ahvif->link[link_id]); 8880 7899 8881 7900 if (vif->type == NL80211_IFTYPE_MONITOR) 8882 7901 monitor_vif = true; ··· 8932 7945 params.aid = ahvif->aid; 8933 7946 params.bssid = arvif->bssid; 8934 7947 if (vif->mbssid_tx_vif) { 8935 - struct ath12k_vif *ahvif = 7948 + struct ath12k_vif *tx_ahvif = 8936 7949 ath12k_vif_to_ahvif(vif->mbssid_tx_vif); 8937 - struct ath12k_link_vif *arvif = &ahvif->deflink; 7950 + struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; 8938 7951 8939 - params.tx_bssid = arvif->bssid; 8940 - 
params.nontx_profile_idx = vif->bss_conf.bssid_index; 8941 - params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator; 7952 + params.tx_bssid = tx_arvif->bssid; 7953 + params.nontx_profile_idx = link_conf->bssid_index; 7954 + params.nontx_profile_cnt = 1 << link_conf->bssid_indicator; 8942 7955 } 8943 7956 ret = ath12k_wmi_vdev_up(arvif->ar, &params); 8944 7957 if (ret) { ··· 9086 8099 return -ENOMEM; 9087 8100 } 9088 8101 9089 - if (!arvif->is_started) { 9090 - ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx); 9091 - if (!ar) 9092 - return -EINVAL; 9093 - } else { 8102 + ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx); 8103 + if (!ar) { 9094 8104 ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started", 9095 8105 vif->addr, link_id); 9096 8106 return -EINVAL; ··· 9332 8348 9333 8349 int ath12k_mac_wait_tx_complete(struct ath12k *ar) 9334 8350 { 8351 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 8352 + 9335 8353 ath12k_mac_drain_tx(ar); 9336 8354 return ath12k_mac_flush(ar); 9337 8355 } ··· 9342 8356 u32 queues, bool drop) 9343 8357 { 9344 8358 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 8359 + struct ath12k_link_vif *arvif; 8360 + struct ath12k_vif *ahvif; 8361 + unsigned long links; 9345 8362 struct ath12k *ar; 8363 + u8 link_id; 9346 8364 int i; 9347 8365 9348 8366 lockdep_assert_wiphy(hw->wiphy); ··· 9361 8371 return; 9362 8372 } 9363 8373 9364 - ar = ath12k_get_ar_by_vif(hw, vif); 8374 + for_each_ar(ah, ar, i) 8375 + wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work); 9365 8376 9366 - if (!ar) 9367 - return; 8377 + ahvif = ath12k_vif_to_ahvif(vif); 8378 + links = ahvif->links_map; 8379 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 8380 + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 8381 + if (!(arvif && arvif->ar)) 8382 + continue; 9368 8383 9369 - ath12k_mac_flush(ar); 8384 + ath12k_mac_flush(arvif->ar); 8385 + } 9370 8386 } 9371 8387 9372 8388 static int ··· 
9571 8575 { 9572 8576 struct ath12k_link_vif *arvif = data; 9573 8577 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 9574 - struct ath12k_link_sta *arsta = &ahsta->deflink; 8578 + struct ath12k_link_sta *arsta; 9575 8579 struct ath12k *ar = arvif->ar; 9576 8580 9577 - if (arsta->arvif != arvif) 8581 + arsta = rcu_dereference(ahsta->link[arvif->link_id]); 8582 + if (!arsta || arsta->arvif != arvif) 9578 8583 return; 9579 8584 9580 8585 spin_lock_bh(&ar->data_lock); ··· 9590 8593 { 9591 8594 struct ath12k_link_vif *arvif = data; 9592 8595 struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 9593 - struct ath12k_link_sta *arsta = &ahsta->deflink; 8596 + struct ath12k_link_sta *arsta; 9594 8597 struct ath12k *ar = arvif->ar; 9595 8598 int ret; 9596 8599 9597 - if (arsta->arvif != arvif) 8600 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 8601 + 8602 + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 8603 + ahsta->link[arvif->link_id]); 8604 + 8605 + if (!arsta || arsta->arvif != arvif) 9598 8606 return; 9599 8607 9600 - ret = ath12k_wmi_set_peer_param(ar, sta->addr, 8608 + ret = ath12k_wmi_set_peer_param(ar, arsta->addr, 9601 8609 arvif->vdev_id, 9602 8610 WMI_PEER_PARAM_FIXED_RATE, 9603 8611 WMI_FIXED_RATE_NONE); 9604 8612 if (ret) 9605 8613 ath12k_warn(ar->ab, 9606 8614 "failed to disable peer fixed rate for STA %pM ret %d\n", 9607 - sta->addr, ret); 8615 + arsta->addr, ret); 9608 8616 } 9609 8617 9610 8618 static int ··· 10161 9159 .set_rekey_data = ath12k_mac_op_set_rekey_data, 10162 9160 .sta_state = ath12k_mac_op_sta_state, 10163 9161 .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, 10164 - .link_sta_rc_update = ath12k_mac_op_sta_rc_update, 9162 + .link_sta_rc_update = ath12k_mac_op_link_sta_rc_update, 10165 9163 .conf_tx = ath12k_mac_op_conf_tx, 10166 9164 .set_antenna = ath12k_mac_op_set_antenna, 10167 9165 .get_antenna = ath12k_mac_op_get_antenna, ··· 10180 9178 .sta_statistics = ath12k_mac_op_sta_statistics, 10181 9179 .remain_on_channel = 
ath12k_mac_op_remain_on_channel, 10182 9180 .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, 10183 - 9181 + .change_sta_links = ath12k_mac_op_change_sta_links, 10184 9182 #ifdef CONFIG_PM 10185 9183 .suspend = ath12k_wow_op_suspend, 10186 9184 .resume = ath12k_wow_op_resume, ··· 10814 9812 INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); 10815 9813 INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); 10816 9814 10817 - INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 9815 + wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 10818 9816 skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 10819 9817 } 10820 9818 10821 - int ath12k_mac_register(struct ath12k_base *ab) 9819 + int ath12k_mac_register(struct ath12k_hw_group *ag) 10822 9820 { 9821 + struct ath12k_base *ab = ag->ab[0]; 10823 9822 struct ath12k_hw *ah; 10824 9823 int i; 10825 9824 int ret; 10826 9825 10827 - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 10828 - return 0; 10829 - 10830 - /* Initialize channel counters frequency value in hertz */ 10831 - ab->cc_freq_hz = 320000; 10832 - ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; 10833 - 10834 - for (i = 0; i < ab->num_hw; i++) { 10835 - ah = ab->ah[i]; 9826 + for (i = 0; i < ath12k_get_num_hw(ab); i++) { 9827 + ah = ath12k_ab_to_ah(ab, i); 10836 9828 10837 9829 ret = ath12k_mac_hw_register(ah); 10838 9830 if (ret) 10839 9831 goto err; 10840 9832 } 10841 9833 9834 + set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); 9835 + 10842 9836 return 0; 10843 9837 10844 9838 err: 10845 9839 for (i = i - 1; i >= 0; i--) { 10846 - ah = ab->ah[i]; 9840 + ah = ath12k_ab_to_ah(ab, i); 10847 9841 if (!ah) 10848 9842 continue; 10849 9843 ··· 10849 9851 return ret; 10850 9852 } 10851 9853 10852 - void ath12k_mac_unregister(struct ath12k_base *ab) 9854 + void ath12k_mac_unregister(struct ath12k_hw_group *ag) 10853 9855 { 9856 + struct ath12k_base *ab = ag->ab[0]; 10854 9857 struct 
ath12k_hw *ah; 10855 9858 int i; 10856 9859 10857 - for (i = ab->num_hw - 1; i >= 0; i--) { 10858 - ah = ab->ah[i]; 9860 + clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); 9861 + 9862 + for (i = ath12k_get_num_hw(ab) - 1; i >= 0; i--) { 9863 + ah = ath12k_ab_to_ah(ab, i); 10859 9864 if (!ah) 10860 9865 continue; 10861 9866 ··· 10871 9870 ieee80211_free_hw(ah->hw); 10872 9871 } 10873 9872 10874 - static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, 9873 + static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag, 10875 9874 struct ath12k_pdev_map *pdev_map, 10876 9875 u8 num_pdev_map) 10877 9876 { 10878 9877 struct ieee80211_hw *hw; 10879 9878 struct ath12k *ar; 9879 + struct ath12k_base *ab; 10880 9880 struct ath12k_pdev *pdev; 10881 9881 struct ath12k_hw *ah; 10882 9882 int i; ··· 10893 9891 ah->num_radio = num_pdev_map; 10894 9892 10895 9893 mutex_init(&ah->hw_mutex); 9894 + INIT_LIST_HEAD(&ah->ml_peers); 10896 9895 10897 9896 for (i = 0; i < num_pdev_map; i++) { 10898 9897 ab = pdev_map[i].ab; ··· 10909 9906 pdev->ar = ar; 10910 9907 10911 9908 ath12k_mac_setup(ar); 9909 + ath12k_dp_pdev_pre_alloc(ar); 10912 9910 } 10913 9911 10914 9912 return ah; 10915 9913 } 10916 9914 10917 - void ath12k_mac_destroy(struct ath12k_base *ab) 9915 + void ath12k_mac_destroy(struct ath12k_hw_group *ag) 10918 9916 { 10919 9917 struct ath12k_pdev *pdev; 10920 - int i; 9918 + struct ath12k_base *ab = ag->ab[0]; 9919 + int i, j; 9920 + struct ath12k_hw *ah; 10921 9921 10922 - for (i = 0; i < ab->num_radios; i++) { 10923 - pdev = &ab->pdevs[i]; 10924 - if (!pdev->ar) 9922 + for (i = 0; i < ag->num_devices; i++) { 9923 + ab = ag->ab[i]; 9924 + if (!ab) 10925 9925 continue; 10926 9926 10927 - pdev->ar = NULL; 9927 + for (j = 0; j < ab->num_radios; j++) { 9928 + pdev = &ab->pdevs[j]; 9929 + if (!pdev->ar) 9930 + continue; 9931 + pdev->ar = NULL; 9932 + } 10928 9933 } 10929 9934 10930 - for (i = 0; i < ab->num_hw; i++) { 10931 - if (!ab->ah[i]) 9935 + 
for (i = 0; i < ath12k_get_num_hw(ab); i++) { 9936 + ah = ath12k_ab_to_ah(ab, i); 9937 + if (!ah) 10932 9938 continue; 10933 9939 10934 - ath12k_mac_hw_destroy(ab->ah[i]); 10935 - ab->ah[i] = NULL; 9940 + ath12k_mac_hw_destroy(ah); 9941 + ath12k_ab_set_ah(ab, i, NULL); 10936 9942 } 10937 9943 } 10938 9944 10939 - int ath12k_mac_allocate(struct ath12k_base *ab) 9945 + static void ath12k_mac_set_device_defaults(struct ath12k_base *ab) 10940 9946 { 9947 + /* Initialize channel counters frequency value in hertz */ 9948 + ab->cc_freq_hz = 320000; 9949 + ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; 9950 + } 9951 + 9952 + int ath12k_mac_allocate(struct ath12k_hw_group *ag) 9953 + { 9954 + struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO]; 9955 + int mac_id, device_id, total_radio, num_hw; 9956 + struct ath12k_base *ab; 10941 9957 struct ath12k_hw *ah; 10942 - struct ath12k_pdev_map pdev_map[MAX_RADIOS]; 10943 9958 int ret, i, j; 10944 9959 u8 radio_per_hw; 10945 9960 10946 - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 10947 - return 0; 9961 + total_radio = 0; 9962 + for (i = 0; i < ag->num_devices; i++) 9963 + total_radio += ag->ab[i]->num_radios; 10948 9964 10949 - ab->num_hw = ab->num_radios; 10950 - radio_per_hw = 1; 9965 + /* All pdev get combined and register as single wiphy based on 9966 + * hardware group which participate in multi-link operation else 9967 + * each pdev get register separately. 
9968 + */ 9969 + if (ag->mlo_capable) 9970 + radio_per_hw = total_radio; 9971 + else 9972 + radio_per_hw = 1; 10951 9973 10952 - for (i = 0; i < ab->num_hw; i++) { 9974 + num_hw = total_radio / radio_per_hw; 9975 + 9976 + if (WARN_ON(num_hw >= ATH12K_GROUP_MAX_RADIO)) 9977 + return -ENOSPC; 9978 + 9979 + ag->num_hw = 0; 9980 + device_id = 0; 9981 + mac_id = 0; 9982 + for (i = 0; i < num_hw; i++) { 10953 9983 for (j = 0; j < radio_per_hw; j++) { 9984 + ab = ag->ab[device_id]; 10954 9985 pdev_map[j].ab = ab; 10955 - pdev_map[j].pdev_idx = (i * radio_per_hw) + j; 9986 + pdev_map[j].pdev_idx = mac_id; 9987 + mac_id++; 9988 + 9989 + /* If mac_id falls beyond the current device MACs then 9990 + * move to next device 9991 + */ 9992 + if (mac_id >= ab->num_radios) { 9993 + mac_id = 0; 9994 + device_id++; 9995 + ath12k_mac_set_device_defaults(ab); 9996 + } 10956 9997 } 10957 9998 10958 - ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw); 9999 + ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw); 10959 10000 if (!ah) { 10960 10001 ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n", 10961 10002 i); ··· 11007 9960 goto err; 11008 9961 } 11009 9962 11010 - ab->ah[i] = ah; 11011 - } 9963 + ah->dev = ab->dev; 11012 9964 11013 - ath12k_dp_pdev_pre_alloc(ab); 9965 + ag->ah[i] = ah; 9966 + ag->num_hw++; 9967 + } 11014 9968 11015 9969 return 0; 11016 9970 11017 9971 err: 11018 9972 for (i = i - 1; i >= 0; i--) { 11019 - if (!ab->ah[i]) 9973 + ah = ath12k_ab_to_ah(ab, i); 9974 + if (!ah) 11020 9975 continue; 11021 9976 11022 - ath12k_mac_hw_destroy(ab->ah[i]); 11023 - ab->ah[i] = NULL; 9977 + ath12k_mac_hw_destroy(ah); 9978 + ath12k_ab_set_ah(ab, i, NULL); 11024 9979 } 11025 9980 11026 9981 return ret;
+12 -4
drivers/net/wireless/ath/ath12k/mac.h
··· 14 14 struct ath12k; 15 15 struct ath12k_base; 16 16 struct ath12k_hw; 17 + struct ath12k_hw_group; 17 18 struct ath12k_pdev_map; 18 19 19 20 struct ath12k_generic_iter { ··· 45 44 #define ATH12K_DEFAULT_LINK_ID 0 46 45 #define ATH12K_INVALID_LINK_ID 255 47 46 47 + /* Default link after the IEEE802.11 defined Max link id limit 48 + * for driver usage purpose. 49 + */ 50 + #define ATH12K_DEFAULT_SCAN_LINK IEEE80211_MLD_MAX_NUM_LINKS 51 + #define ATH12K_NUM_MAX_LINKS (IEEE80211_MLD_MAX_NUM_LINKS + 1) 52 + 48 53 enum ath12k_supported_bw { 49 54 ATH12K_BW_20 = 0, 50 55 ATH12K_BW_40 = 1, ··· 61 54 62 55 extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default; 63 56 64 - void ath12k_mac_destroy(struct ath12k_base *ab); 65 - void ath12k_mac_unregister(struct ath12k_base *ab); 66 - int ath12k_mac_register(struct ath12k_base *ab); 67 - int ath12k_mac_allocate(struct ath12k_base *ab); 57 + void ath12k_mac_destroy(struct ath12k_hw_group *ag); 58 + void ath12k_mac_unregister(struct ath12k_hw_group *ag); 59 + int ath12k_mac_register(struct ath12k_hw_group *ag); 60 + int ath12k_mac_allocate(struct ath12k_hw_group *ag); 68 61 int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, 69 62 u16 *rate); 70 63 u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, ··· 96 89 enum wmi_sta_keepalive_method method, 97 90 u32 interval); 98 91 u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); 92 + int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif); 99 93 100 94 #endif
+10
drivers/net/wireless/ath/ath12k/pci.c
··· 1123 1123 1124 1124 void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 1125 1125 { 1126 + if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) 1127 + return; 1128 + 1126 1129 __ath12k_pci_ext_irq_disable(ab); 1127 1130 ath12k_pci_sync_ext_irqs(ab); 1128 1131 } ··· 1150 1147 1151 1148 void ath12k_pci_stop(struct ath12k_base *ab) 1152 1149 { 1150 + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1151 + 1152 + if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags)) 1153 + return; 1154 + 1153 1155 ath12k_pci_ce_irq_disable_sync(ab); 1154 1156 ath12k_ce_cleanup_pipes(ab); 1155 1157 } ··· 1725 1717 if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { 1726 1718 ath12k_pci_power_down(ab, false); 1727 1719 ath12k_qmi_deinit_service(ab); 1720 + ath12k_core_hw_group_unassign(ab); 1728 1721 goto qmi_fail; 1729 1722 } 1730 1723 ··· 1734 1725 cancel_work_sync(&ab->reset_work); 1735 1726 cancel_work_sync(&ab->dump_work); 1736 1727 ath12k_core_deinit(ab); 1728 + ath12k_fw_unmap(ab); 1737 1729 1738 1730 qmi_fail: 1739 1731 ath12k_mhi_unregister(ab_pci);
+221 -2
drivers/net/wireless/ath/ath12k/peer.c
··· 8 8 #include "peer.h" 9 9 #include "debug.h" 10 10 11 + static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr) 12 + { 13 + struct ath12k_ml_peer *ml_peer; 14 + 15 + lockdep_assert_wiphy(ah->hw->wiphy); 16 + 17 + list_for_each_entry(ml_peer, &ah->ml_peers, list) { 18 + if (!ether_addr_equal(ml_peer->addr, addr)) 19 + continue; 20 + 21 + return ml_peer; 22 + } 23 + 24 + return NULL; 25 + } 26 + 11 27 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id, 12 28 const u8 *addr) 13 29 { ··· 79 63 return NULL; 80 64 } 81 65 66 + static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab, 67 + int ml_peer_id) 68 + { 69 + struct ath12k_peer *peer; 70 + 71 + lockdep_assert_held(&ab->base_lock); 72 + 73 + list_for_each_entry(peer, &ab->peers, list) 74 + if (ml_peer_id == peer->ml_id) 75 + return peer; 76 + 77 + return NULL; 78 + } 79 + 82 80 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, 83 81 int peer_id) 84 82 { 85 83 struct ath12k_peer *peer; 86 84 87 85 lockdep_assert_held(&ab->base_lock); 86 + 87 + if (peer_id & ATH12K_PEER_ML_ID_VALID) 88 + return ath12k_peer_find_by_ml_id(ab, peer_id); 88 89 89 90 list_for_each_entry(peer, &ab->peers, list) 90 91 if (peer_id == peer->peer_id) ··· 264 231 return 0; 265 232 } 266 233 267 - int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) 234 + static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr) 268 235 { 236 + struct ath12k_base *ab = ar->ab; 269 237 int ret; 270 238 271 239 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ··· 275 241 276 242 ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); 277 243 if (ret) { 278 - ath12k_warn(ar->ab, 244 + ath12k_warn(ab, 279 245 "failed to delete peer vdev_id %d addr %pM ret %d\n", 280 246 vdev_id, addr, ret); 281 247 return ret; 282 248 } 249 + 250 + return 0; 251 + } 252 + 253 + int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) 254 + { 
255 + int ret; 256 + 257 + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 258 + 259 + ret = ath12k_peer_delete_send(ar, vdev_id, addr); 260 + if (ret) 261 + return ret; 283 262 284 263 ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr); 285 264 if (ret) ··· 313 266 struct ath12k_wmi_peer_create_arg *arg) 314 267 { 315 268 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 269 + struct ath12k_link_sta *arsta; 270 + u8 link_id = arvif->link_id; 316 271 struct ath12k_peer *peer; 272 + struct ath12k_sta *ahsta; 273 + u16 ml_peer_id; 317 274 int ret; 318 275 319 276 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ··· 383 332 arvif->ast_idx = peer->hw_peer_id; 384 333 } 385 334 335 + if (sta) { 336 + ahsta = ath12k_sta_to_ahsta(sta); 337 + arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 338 + ahsta->link[link_id]); 339 + 340 + /* Fill ML info into created peer */ 341 + if (sta->mlo) { 342 + ml_peer_id = ahsta->ml_peer_id; 343 + peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID; 344 + ether_addr_copy(peer->ml_addr, sta->addr); 345 + 346 + /* the assoc link is considered primary for now */ 347 + peer->primary_link = arsta->is_assoc_link; 348 + peer->mlo = true; 349 + } else { 350 + peer->ml_id = ATH12K_MLO_PEER_ID_INVALID; 351 + peer->primary_link = true; 352 + peer->mlo = false; 353 + } 354 + } 355 + 386 356 peer->sec_type = HAL_ENCRYPT_TYPE_OPEN; 387 357 peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN; 388 358 ··· 412 340 spin_unlock_bh(&ar->ab->base_lock); 413 341 414 342 return 0; 343 + } 344 + 345 + static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah) 346 + { 347 + u16 ml_peer_id; 348 + 349 + lockdep_assert_wiphy(ah->hw->wiphy); 350 + 351 + for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) { 352 + if (test_bit(ml_peer_id, ah->free_ml_peer_id_map)) 353 + continue; 354 + 355 + set_bit(ml_peer_id, ah->free_ml_peer_id_map); 356 + break; 357 + } 358 + 359 + if (ml_peer_id == ATH12K_MAX_MLO_PEERS) 360 + ml_peer_id = 
ATH12K_MLO_PEER_ID_INVALID; 361 + 362 + return ml_peer_id; 363 + } 364 + 365 + int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta) 366 + { 367 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 368 + struct ath12k_ml_peer *ml_peer; 369 + 370 + lockdep_assert_wiphy(ah->hw->wiphy); 371 + 372 + if (!sta->mlo) 373 + return -EINVAL; 374 + 375 + ml_peer = ath12k_peer_ml_find(ah, sta->addr); 376 + if (ml_peer) { 377 + ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM", 378 + ml_peer->id, sta->addr); 379 + return -EEXIST; 380 + } 381 + 382 + ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC); 383 + if (!ml_peer) 384 + return -ENOMEM; 385 + 386 + ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah); 387 + 388 + if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) { 389 + ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM", 390 + sta->addr); 391 + kfree(ml_peer); 392 + return -ENOMEM; 393 + } 394 + 395 + ether_addr_copy(ml_peer->addr, sta->addr); 396 + ml_peer->id = ahsta->ml_peer_id; 397 + list_add(&ml_peer->list, &ah->ml_peers); 398 + 399 + return 0; 400 + } 401 + 402 + int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta) 403 + { 404 + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); 405 + struct ath12k_ml_peer *ml_peer; 406 + 407 + lockdep_assert_wiphy(ah->hw->wiphy); 408 + 409 + if (!sta->mlo) 410 + return -EINVAL; 411 + 412 + clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map); 413 + ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID; 414 + 415 + ml_peer = ath12k_peer_ml_find(ah, sta->addr); 416 + if (!ml_peer) { 417 + ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr); 418 + return -EINVAL; 419 + } 420 + 421 + list_del(&ml_peer->list); 422 + kfree(ml_peer); 423 + 424 + return 0; 425 + } 426 + 427 + int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta) 428 + { 429 + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta); 430 + struct ath12k_hw *ah 
= ahvif->ah; 431 + struct ath12k_link_vif *arvif; 432 + struct ath12k_link_sta *arsta; 433 + unsigned long links; 434 + struct ath12k *ar; 435 + int ret, err_ret = 0; 436 + u8 link_id; 437 + 438 + lockdep_assert_wiphy(ah->hw->wiphy); 439 + 440 + if (!sta->mlo) 441 + return -EINVAL; 442 + 443 + /* FW expects delete of all link peers at once before waiting for reception 444 + * of peer unmap or delete responses 445 + */ 446 + links = ahsta->links_map; 447 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 448 + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 449 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 450 + if (!arvif || !arsta) 451 + continue; 452 + 453 + ar = arvif->ar; 454 + if (!ar) 455 + continue; 456 + 457 + ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr); 458 + 459 + ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr); 460 + if (ret) { 461 + ath12k_warn(ar->ab, 462 + "failed to delete peer vdev_id %d addr %pM ret %d\n", 463 + arvif->vdev_id, arsta->addr, ret); 464 + err_ret = ret; 465 + continue; 466 + } 467 + } 468 + 469 + /* Ensure all link peers are deleted and unmapped */ 470 + links = ahsta->links_map; 471 + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { 472 + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); 473 + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); 474 + if (!arvif || !arsta) 475 + continue; 476 + 477 + ar = arvif->ar; 478 + if (!ar) 479 + continue; 480 + 481 + ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr); 482 + if (ret) { 483 + err_ret = ret; 484 + continue; 485 + } 486 + ar->num_peers--; 487 + } 488 + 489 + return err_ret; 415 490 }
+22 -1
drivers/net/wireless/ath/ath12k/peer.h
··· 19 19 u32 resp_rate_flags; 20 20 }; 21 21 22 + #define ATH12K_PEER_ML_ID_VALID BIT(13) 23 + 22 24 struct ath12k_peer { 23 25 struct list_head list; 24 26 struct ieee80211_sta *sta; ··· 46 44 struct ppdu_user_delayba ppdu_stats_delayba; 47 45 bool delayba_flag; 48 46 bool is_authorized; 49 - 47 + bool mlo; 50 48 /* protected by ab->data_lock */ 51 49 bool dp_setup_done; 50 + 51 + u16 ml_id; 52 + 53 + /* any other ML info common for all partners can be added 54 + * here and would be same for all partner peers. 55 + */ 56 + u8 ml_addr[ETH_ALEN]; 57 + 58 + /* To ensure only certain work related to dp is done once */ 59 + bool primary_link; 60 + }; 61 + 62 + struct ath12k_ml_peer { 63 + struct list_head list; 64 + u8 addr[ETH_ALEN]; 65 + u16 id; 52 66 }; 53 67 54 68 void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id); ··· 84 66 const u8 *addr); 85 67 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id); 86 68 struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash); 69 + int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta); 70 + int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta); 71 + int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta); 87 72 88 73 #endif /* _PEER_H_ */
+139 -28
drivers/net/wireless/ath/ath12k/qmi.c
··· 2023 2023 u8 hw_link_id = 0; 2024 2024 int i; 2025 2025 2026 - if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) { 2026 + if (!ab->ag->mlo_capable) { 2027 2027 ath12k_dbg(ab, ATH12K_DBG_QMI, 2028 - "intra device MLO is disabled hence skip QMI MLO cap"); 2028 + "MLO is disabled hence skip QMI MLO cap"); 2029 2029 return; 2030 2030 } 2031 2031 2032 2032 if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { 2033 - ab->mlo_capable_flags = 0; 2033 + ab->single_chip_mlo_supp = false; 2034 2034 2035 2035 ath12k_dbg(ab, ATH12K_DBG_QMI, 2036 2036 "skip QMI MLO cap due to invalid num_radio %d\n", ··· 2066 2066 req->mlo_chip_info_valid = 1; 2067 2067 } 2068 2068 2069 - static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) 2069 + /* clang stack usage explodes if this is inlined */ 2070 + static noinline_for_stack 2071 + int ath12k_qmi_host_cap_send(struct ath12k_base *ab) 2070 2072 { 2071 2073 struct qmi_wlanfw_host_cap_req_msg_v01 req = {}; 2072 2074 struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {}; ··· 2176 2174 goto out; 2177 2175 } 2178 2176 2179 - if (resp.single_chip_mlo_support_valid) { 2180 - if (resp.single_chip_mlo_support) 2181 - ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT; 2182 - else 2183 - ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT; 2184 - } 2177 + if (resp.single_chip_mlo_support_valid && 2178 + resp.single_chip_mlo_support) 2179 + ab->single_chip_mlo_supp = true; 2185 2180 2186 2181 if (!resp.num_phy_valid) { 2187 2182 ret = -ENODATA; ··· 2274 2275 return ret; 2275 2276 } 2276 2277 2277 - static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) 2278 + /* clang stack usage explodes if this is inlined */ 2279 + static noinline_for_stack 2280 + int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) 2278 2281 { 2279 2282 struct qmi_wlanfw_respond_mem_req_msg_v01 *req; 2280 2283 struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {}; ··· 2434 2433 return 0; 2435 2434 } 2436 2435 2437 - static 
int ath12k_qmi_request_target_cap(struct ath12k_base *ab) 2436 + /* clang stack usage explodes if this is inlined */ 2437 + static noinline_for_stack 2438 + int ath12k_qmi_request_target_cap(struct ath12k_base *ab) 2438 2439 { 2439 2440 struct qmi_wlanfw_cap_req_msg_v01 req = {}; 2440 2441 struct qmi_wlanfw_cap_resp_msg_v01 resp = {}; ··· 2622 2619 return ret; 2623 2620 } 2624 2621 2625 - static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, 2626 - enum ath12k_qmi_bdf_type type) 2622 + /* clang stack usage explodes if this is inlined */ 2623 + static noinline_for_stack 2624 + int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab, 2625 + enum ath12k_qmi_bdf_type type) 2627 2626 { 2628 2627 struct device *dev = ab->dev; 2629 2628 char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE]; ··· 2796 2791 return ret; 2797 2792 } 2798 2793 2799 - static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) 2794 + /* clang stack usage explodes if this is inlined */ 2795 + static noinline_for_stack 2796 + int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) 2800 2797 { 2801 2798 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; 2802 2799 struct qmi_wlanfw_m3_info_req_msg_v01 req = {}; ··· 3030 3023 { 3031 3024 int ret; 3032 3025 3026 + clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags); 3027 + 3033 3028 ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF); 3034 3029 if (ret < 0) { 3035 3030 ath12k_warn(ab, "qmi failed to send wlan mode off\n"); ··· 3088 3079 return 0; 3089 3080 } 3090 3081 3091 - static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi) 3082 + void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab) 3092 3083 { 3093 - struct ath12k_base *ab = qmi->ab; 3084 + struct ath12k_qmi *qmi = &ab->qmi; 3085 + 3086 + spin_lock(&qmi->event_lock); 3087 + 3088 + if (ath12k_qmi_get_event_block(qmi)) 3089 + ath12k_qmi_set_event_block(qmi, false); 3090 + 3091 + spin_unlock(&qmi->event_lock); 3092 + 3093 + ath12k_dbg(ab, ATH12K_DBG_QMI, 
"trigger host cap for device id %d\n", 3094 + ab->device_id); 3095 + 3096 + ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL); 3097 + } 3098 + 3099 + static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag) 3100 + { 3101 + struct ath12k_base *ab; 3102 + int i; 3103 + 3104 + for (i = 0; i < ag->num_devices; i++) { 3105 + ab = ag->ab[i]; 3106 + 3107 + if (!(ab && ab->qmi.num_radios != U8_MAX)) 3108 + return false; 3109 + } 3110 + 3111 + return true; 3112 + } 3113 + 3114 + static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag) 3115 + { 3116 + struct ath12k_base *ab; 3117 + int i; 3118 + 3119 + lockdep_assert_held(&ag->mutex); 3120 + 3121 + for (i = 0; i < ag->num_devices; i++) { 3122 + ab = ag->ab[i]; 3123 + if (!ab) 3124 + continue; 3125 + 3126 + spin_lock(&ab->qmi.event_lock); 3127 + 3128 + if (ath12k_qmi_get_event_block(&ab->qmi)) { 3129 + spin_unlock(&ab->qmi.event_lock); 3130 + return ab; 3131 + } 3132 + 3133 + spin_unlock(&ab->qmi.event_lock); 3134 + } 3135 + 3136 + return NULL; 3137 + } 3138 + 3139 + /* clang stack usage explodes if this is inlined */ 3140 + static noinline_for_stack 3141 + int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi) 3142 + { 3143 + struct ath12k_base *ab = qmi->ab, *block_ab; 3144 + struct ath12k_hw_group *ag = ab->ag; 3094 3145 int ret; 3095 3146 3096 3147 ath12k_qmi_phy_cap_send(ab); ··· 3161 3092 return ret; 3162 3093 } 3163 3094 3164 - ret = ath12k_qmi_host_cap_send(ab); 3165 - if (ret < 0) { 3166 - ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); 3167 - return ret; 3095 + spin_lock(&qmi->event_lock); 3096 + 3097 + ath12k_qmi_set_event_block(qmi, true); 3098 + 3099 + spin_unlock(&qmi->event_lock); 3100 + 3101 + mutex_lock(&ag->mutex); 3102 + 3103 + if (ath12k_qmi_hw_group_host_cap_ready(ag)) { 3104 + ath12k_core_hw_group_set_mlo_capable(ag); 3105 + 3106 + block_ab = ath12k_qmi_hw_group_find_blocked(ag); 3107 + if (block_ab) 3108 + 
ath12k_qmi_trigger_host_cap(block_ab); 3168 3109 } 3110 + 3111 + mutex_unlock(&ag->mutex); 3169 3112 3170 3113 return ret; 3171 3114 } 3172 3115 3173 - static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) 3116 + /* clang stack usage explodes if this is inlined */ 3117 + static noinline_for_stack 3118 + int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi) 3174 3119 { 3175 3120 struct ath12k_base *ab = qmi->ab; 3176 3121 int ret; ··· 3198 3115 return ret; 3199 3116 } 3200 3117 3201 - static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi) 3118 + /* clang stack usage explodes if this is inlined */ 3119 + static noinline_for_stack 3120 + int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi) 3202 3121 { 3203 3122 struct ath12k_base *ab = qmi->ab; 3204 3123 int ret; ··· 3365 3280 .del_server = ath12k_qmi_ops_del_server, 3366 3281 }; 3367 3282 3283 + static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi) 3284 + { 3285 + struct ath12k_base *ab = qmi->ab; 3286 + int ret; 3287 + 3288 + ret = ath12k_qmi_host_cap_send(ab); 3289 + if (ret < 0) { 3290 + ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n", 3291 + ab->device_id, ret); 3292 + return ret; 3293 + } 3294 + 3295 + return ret; 3296 + } 3297 + 3368 3298 static void ath12k_qmi_driver_event_work(struct work_struct *work) 3369 3299 { 3370 3300 struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi, ··· 3406 3306 break; 3407 3307 case ATH12K_QMI_EVENT_SERVER_EXIT: 3408 3308 set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); 3409 - set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); 3410 3309 break; 3411 3310 case ATH12K_QMI_EVENT_REQUEST_MEM: 3412 3311 ret = ath12k_qmi_event_mem_request(qmi); ··· 3419 3320 break; 3420 3321 case ATH12K_QMI_EVENT_FW_READY: 3421 3322 clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); 3422 - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) { 3323 + if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) { 3423 3324 if (ab->is_reset) 3424 
3325 ath12k_hal_dump_srng_stats(ab); 3326 + 3327 + set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); 3425 3328 queue_work(ab->workqueue, &ab->restart_work); 3426 3329 break; 3427 3330 } 3428 3331 3429 3332 clear_bit(ATH12K_FLAG_CRASH_FLUSH, 3430 3333 &ab->dev_flags); 3431 - clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags); 3432 - ath12k_core_qmi_firmware_ready(ab); 3433 - set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); 3334 + ret = ath12k_core_qmi_firmware_ready(ab); 3335 + if (!ret) 3336 + set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, 3337 + &ab->dev_flags); 3434 3338 3339 + break; 3340 + case ATH12K_QMI_EVENT_HOST_CAP: 3341 + ret = ath12k_qmi_event_host_cap(qmi); 3342 + if (ret < 0) 3343 + set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags); 3435 3344 break; 3436 3345 default: 3437 3346 ath12k_warn(ab, "invalid event type: %d", event->type); ··· 3493 3386 3494 3387 void ath12k_qmi_deinit_service(struct ath12k_base *ab) 3495 3388 { 3389 + if (!ab->qmi.ab) 3390 + return; 3391 + 3496 3392 qmi_handle_release(&ab->qmi.handle); 3497 3393 cancel_work_sync(&ab->qmi.event_work); 3498 3394 destroy_workqueue(ab->qmi.event_wq); 3499 3395 ath12k_qmi_m3_free(ab); 3500 3396 ath12k_qmi_free_target_mem_chunk(ab); 3397 + ab->qmi.ab = NULL; 3501 3398 } 3502 3399 3503 3400 void ath12k_qmi_free_resource(struct ath12k_base *ab)
+20
drivers/net/wireless/ath/ath12k/qmi.h
··· 68 68 ATH12K_QMI_EVENT_FORCE_FW_ASSERT, 69 69 ATH12K_QMI_EVENT_POWER_UP, 70 70 ATH12K_QMI_EVENT_POWER_DOWN, 71 + ATH12K_QMI_EVENT_HOST_CAP, 71 72 ATH12K_QMI_EVENT_MAX, 72 73 }; 73 74 ··· 143 142 u32 target_mem_mode; 144 143 bool target_mem_delayed; 145 144 u8 cal_done; 145 + 146 + /* protected with struct ath12k_qmi::event_lock */ 147 + bool block_event; 148 + 146 149 u8 num_radios; 147 150 struct target_info target; 148 151 struct m3_mem_region m3_mem; ··· 599 594 struct qmi_response_type_v01 resp; 600 595 }; 601 596 597 + static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block) 598 + { 599 + lockdep_assert_held(&qmi->event_lock); 600 + 601 + qmi->block_event = block; 602 + } 603 + 604 + static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi) 605 + { 606 + lockdep_assert_held(&qmi->event_lock); 607 + 608 + return qmi->block_event; 609 + } 610 + 602 611 int ath12k_qmi_firmware_start(struct ath12k_base *ab, 603 612 u32 mode); 604 613 void ath12k_qmi_firmware_stop(struct ath12k_base *ab); 605 614 void ath12k_qmi_deinit_service(struct ath12k_base *ab); 606 615 int ath12k_qmi_init_service(struct ath12k_base *ab); 607 616 void ath12k_qmi_free_resource(struct ath12k_base *ab); 617 + void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab); 608 618 609 619 #endif
+194 -13
drivers/net/wireless/ath/ath12k/wmi.c
··· 821 821 struct wmi_vdev_create_cmd *cmd; 822 822 struct sk_buff *skb; 823 823 struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams; 824 + bool is_ml_vdev = is_valid_ether_addr(args->mld_addr); 825 + struct wmi_vdev_create_mlo_params *ml_params; 824 826 struct wmi_tlv *tlv; 825 827 int ret, len; 826 828 void *ptr; ··· 832 830 * both the bands. 833 831 */ 834 832 len = sizeof(*cmd) + TLV_HDR_SIZE + 835 - (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)); 833 + (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) + 834 + (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0); 836 835 837 836 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 838 837 if (!skb) ··· 881 878 cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx); 882 879 txrx_streams->supported_rx_streams = 883 880 cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx); 881 + 882 + ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams); 883 + 884 + if (is_ml_vdev) { 885 + tlv = ptr; 886 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 887 + sizeof(*ml_params)); 888 + ptr += TLV_HDR_SIZE; 889 + ml_params = ptr; 890 + 891 + ml_params->tlv_header = 892 + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS, 893 + sizeof(*ml_params)); 894 + ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr); 895 + } 884 896 885 897 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 886 898 "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n", ··· 1038 1020 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, 1039 1021 bool restart) 1040 1022 { 1023 + struct wmi_vdev_start_mlo_params *ml_params; 1024 + struct wmi_partner_link_info *partner_info; 1041 1025 struct ath12k_wmi_pdev *wmi = ar->wmi; 1042 1026 struct wmi_vdev_start_request_cmd *cmd; 1043 1027 struct sk_buff *skb; 1044 1028 struct ath12k_wmi_channel_params *chan; 1045 1029 struct wmi_tlv *tlv; 1046 1030 void *ptr; 1047 - int ret, len; 1031 + int ret, len, i, ml_arg_size = 0; 1048 1032 1049 1033 if (WARN_ON(arg->ssid_len > 
sizeof(cmd->ssid.ssid))) 1050 1034 return -EINVAL; 1051 1035 1052 1036 len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; 1053 1037 1038 + if (!restart && arg->ml.enabled) { 1039 + ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) + 1040 + TLV_HDR_SIZE + (arg->ml.num_partner_links * 1041 + sizeof(*partner_info)); 1042 + len += ml_arg_size; 1043 + } 1054 1044 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1055 1045 if (!skb) 1056 1046 return -ENOMEM; ··· 1110 1084 */ 1111 1085 1112 1086 ptr += sizeof(*tlv); 1087 + 1088 + if (ml_arg_size) { 1089 + tlv = ptr; 1090 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1091 + sizeof(*ml_params)); 1092 + ptr += TLV_HDR_SIZE; 1093 + 1094 + ml_params = ptr; 1095 + 1096 + ml_params->tlv_header = 1097 + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS, 1098 + sizeof(*ml_params)); 1099 + 1100 + ml_params->flags = le32_encode_bits(arg->ml.enabled, 1101 + ATH12K_WMI_FLAG_MLO_ENABLED) | 1102 + le32_encode_bits(arg->ml.assoc_link, 1103 + ATH12K_WMI_FLAG_MLO_ASSOC_LINK) | 1104 + le32_encode_bits(arg->ml.mcast_link, 1105 + ATH12K_WMI_FLAG_MLO_MCAST_VDEV) | 1106 + le32_encode_bits(arg->ml.link_add, 1107 + ATH12K_WMI_FLAG_MLO_LINK_ADD); 1108 + 1109 + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n", 1110 + arg->vdev_id, ml_params->flags); 1111 + 1112 + ptr += sizeof(*ml_params); 1113 + 1114 + tlv = ptr; 1115 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1116 + arg->ml.num_partner_links * 1117 + sizeof(*partner_info)); 1118 + ptr += TLV_HDR_SIZE; 1119 + 1120 + partner_info = ptr; 1121 + 1122 + for (i = 0; i < arg->ml.num_partner_links; i++) { 1123 + partner_info->tlv_header = 1124 + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS, 1125 + sizeof(*partner_info)); 1126 + partner_info->vdev_id = 1127 + cpu_to_le32(arg->ml.partner_info[i].vdev_id); 1128 + partner_info->hw_link_id = 1129 + cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 1130 + ether_addr_copy(partner_info->vdev_addr.addr, 1131 + 
arg->ml.partner_info[i].addr); 1132 + 1133 + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n", 1134 + partner_info->vdev_id, partner_info->hw_link_id, 1135 + partner_info->vdev_addr.addr); 1136 + 1137 + partner_info++; 1138 + } 1139 + 1140 + ptr = partner_info; 1141 + } 1113 1142 1114 1143 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", 1115 1144 restart ? "restart" : "start", arg->vdev_id, ··· 1230 1149 struct ath12k_wmi_pdev *wmi = ar->wmi; 1231 1150 struct wmi_peer_create_cmd *cmd; 1232 1151 struct sk_buff *skb; 1233 - int ret; 1152 + int ret, len; 1153 + struct wmi_peer_create_mlo_params *ml_param; 1154 + void *ptr; 1155 + struct wmi_tlv *tlv; 1234 1156 1235 - skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1157 + len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param); 1158 + 1159 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1236 1160 if (!skb) 1237 1161 return -ENOMEM; 1238 1162 ··· 1249 1163 cmd->peer_type = cpu_to_le32(arg->peer_type); 1250 1164 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1251 1165 1166 + ptr = skb->data + sizeof(*cmd); 1167 + tlv = ptr; 1168 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1169 + sizeof(*ml_param)); 1170 + ptr += TLV_HDR_SIZE; 1171 + ml_param = ptr; 1172 + ml_param->tlv_header = 1173 + ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS, 1174 + sizeof(*ml_param)); 1175 + if (arg->ml_enabled) 1176 + ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 1177 + 1178 + ptr += sizeof(*ml_param); 1179 + 1252 1180 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1253 - "WMI peer create vdev_id %d peer_addr %pM\n", 1254 - arg->vdev_id, arg->peer_addr); 1181 + "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n", 1182 + arg->vdev_id, arg->peer_addr, ml_param->flags); 1255 1183 1256 1184 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1257 1185 if (ret) { ··· 2101 2001 struct ath12k_wmi_vht_rate_set_params *mcs; 2102 2002 struct 
ath12k_wmi_he_rate_set_params *he_mcs; 2103 2003 struct ath12k_wmi_eht_rate_set_params *eht_mcs; 2004 + struct wmi_peer_assoc_mlo_params *ml_params; 2005 + struct wmi_peer_assoc_mlo_partner_info_params *partner_info; 2104 2006 struct sk_buff *skb; 2105 2007 struct wmi_tlv *tlv; 2106 2008 void *ptr; 2107 2009 u32 peer_legacy_rates_align; 2108 2010 u32 peer_ht_rates_align; 2109 2011 int i, ret, len; 2012 + __le32 v; 2110 2013 2111 2014 peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, 2112 2015 sizeof(u32)); ··· 2121 2018 TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + 2122 2019 sizeof(*mcs) + TLV_HDR_SIZE + 2123 2020 (sizeof(*he_mcs) * arg->peer_he_mcs_count) + 2124 - TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) + 2125 - TLV_HDR_SIZE + TLV_HDR_SIZE; 2021 + TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count); 2022 + 2023 + if (arg->ml.enabled) 2024 + len += TLV_HDR_SIZE + sizeof(*ml_params) + 2025 + TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info)); 2026 + else 2027 + len += (2 * TLV_HDR_SIZE); 2126 2028 2127 2029 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2128 2030 if (!skb) ··· 2251 2143 ptr += sizeof(*he_mcs); 2252 2144 } 2253 2145 2254 - /* MLO header tag with 0 length */ 2255 - len = 0; 2256 2146 tlv = ptr; 2147 + len = arg->ml.enabled ? 
sizeof(*ml_params) : 0; 2257 2148 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2258 2149 ptr += TLV_HDR_SIZE; 2150 + if (!len) 2151 + goto skip_ml_params; 2259 2152 2153 + ml_params = ptr; 2154 + ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS, 2155 + len); 2156 + ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2157 + 2158 + if (arg->ml.assoc_link) 2159 + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2160 + 2161 + if (arg->ml.primary_umac) 2162 + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2163 + 2164 + if (arg->ml.logical_link_idx_valid) 2165 + ml_params->flags |= 2166 + cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID); 2167 + 2168 + if (arg->ml.peer_id_valid) 2169 + ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID); 2170 + 2171 + ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr); 2172 + ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx); 2173 + ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id); 2174 + ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id); 2175 + ptr += sizeof(*ml_params); 2176 + 2177 + skip_ml_params: 2260 2178 /* Loop through the EHT rate set */ 2261 2179 len = arg->peer_eht_mcs_count * sizeof(*eht_mcs); 2262 2180 tlv = ptr; ··· 2299 2165 ptr += sizeof(*eht_mcs); 2300 2166 } 2301 2167 2302 - /* ML partner links tag with 0 length */ 2303 - len = 0; 2304 2168 tlv = ptr; 2169 + len = arg->ml.enabled ? 
arg->ml.num_partner_links * sizeof(*partner_info) : 0; 2170 + /* fill ML Partner links */ 2305 2171 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2306 2172 ptr += TLV_HDR_SIZE; 2307 2173 2174 + if (len == 0) 2175 + goto send; 2176 + 2177 + for (i = 0; i < arg->ml.num_partner_links; i++) { 2178 + u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC; 2179 + 2180 + partner_info = ptr; 2181 + partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd, 2182 + sizeof(*partner_info)); 2183 + partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id); 2184 + partner_info->hw_link_id = 2185 + cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 2186 + partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2187 + 2188 + if (arg->ml.partner_info[i].assoc_link) 2189 + partner_info->flags |= 2190 + cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2191 + 2192 + if (arg->ml.partner_info[i].primary_umac) 2193 + partner_info->flags |= 2194 + cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2195 + 2196 + if (arg->ml.partner_info[i].logical_link_idx_valid) { 2197 + v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID); 2198 + partner_info->flags |= v; 2199 + } 2200 + 2201 + partner_info->logical_link_idx = 2202 + cpu_to_le32(arg->ml.partner_info[i].logical_link_idx); 2203 + ptr += sizeof(*partner_info); 2204 + } 2205 + 2206 + send: 2308 2207 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2309 2208 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n", 2310 2209 cmd->vdev_id, cmd->peer_associd, arg->peer_mac, ··· 6854 6687 const u32 *vdev_ids) 6855 6688 { 6856 6689 int i; 6690 + struct ieee80211_bss_conf *conf; 6857 6691 struct ath12k_link_vif *arvif; 6858 6692 struct ath12k_vif *ahvif; 6859 6693 ··· 6873 6705 
} 6874 6706 ahvif = arvif->ahvif; 6875 6707 6876 - if (arvif->is_up && ahvif->vif->bss_conf.csa_active) 6708 + if (arvif->link_id > IEEE80211_MLD_MAX_NUM_LINKS) { 6709 + ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n", 6710 + arvif->link_id); 6711 + continue; 6712 + } 6713 + 6714 + conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]); 6715 + if (!conf) { 6716 + ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n", 6717 + ahvif->vif->addr, arvif->link_id); 6718 + continue; 6719 + } 6720 + 6721 + if (arvif->is_up && conf->csa_active) 6877 6722 ieee80211_csa_finish(ahvif->vif, 0); 6878 6723 } 6879 6724 rcu_read_unlock();
+115
drivers/net/wireless/ath/ath12k/wmi.h
··· 1929 1929 WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, 1930 1930 WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, 1931 1931 WMI_TAG_EHT_RATE_SET = 0x3C4, 1932 + WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, 1933 + WMI_TAG_MLO_TX_SEND_PARAMS, 1934 + WMI_TAG_MLO_PARTNER_LINK_PARAMS, 1935 + WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC, 1936 + WMI_TAG_MLO_SETUP_CMD = 0x3C9, 1937 + WMI_TAG_MLO_SETUP_COMPLETE_EVENT, 1938 + WMI_TAG_MLO_READY_CMD, 1939 + WMI_TAG_MLO_TEARDOWN_CMD, 1940 + WMI_TAG_MLO_TEARDOWN_COMPLETE, 1941 + WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0, 1942 + WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5, 1943 + WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6, 1944 + WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7, 1932 1945 WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, 1933 1946 WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9, 1934 1947 WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB, ··· 2753 2740 u8 if_stats_id; 2754 2741 u32 mbssid_flags; 2755 2742 u32 mbssid_tx_vdev_id; 2743 + u8 mld_addr[ETH_ALEN]; 2756 2744 }; 2757 2745 2758 2746 #define ATH12K_MAX_VDEV_STATS_ID 0x30 ··· 2778 2764 __le32 band; 2779 2765 __le32 supported_tx_streams; 2780 2766 __le32 supported_rx_streams; 2767 + } __packed; 2768 + 2769 + struct wmi_vdev_create_mlo_params { 2770 + __le32 tlv_header; 2771 + struct ath12k_wmi_mac_addr_params mld_macaddr; 2772 + } __packed; 2773 + 2774 + #define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0) 2775 + #define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1) 2776 + #define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2) 2777 + #define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID BIT(3) 2778 + #define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4) 2779 + #define ATH12K_WMI_FLAG_MLO_MCAST_VDEV BIT(5) 2780 + #define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT BIT(6) 2781 + #define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE BIT(7) 2782 + #define ATH12K_WMI_FLAG_MLO_LINK_ADD BIT(8) 2783 + 2784 + struct wmi_vdev_start_mlo_params { 2785 + __le32 tlv_header; 2786 + __le32 flags; 2787 + } __packed; 2788 + 2789 + struct wmi_partner_link_info { 2790 + __le32 tlv_header; 
2791 + __le32 vdev_id; 2792 + __le32 hw_link_id; 2793 + struct ath12k_wmi_mac_addr_params vdev_addr; 2781 2794 } __packed; 2782 2795 2783 2796 struct wmi_vdev_delete_cmd { ··· 2950 2909 MODE_MAX = 33, 2951 2910 }; 2952 2911 2912 + #define ATH12K_WMI_MLO_MAX_LINKS 4 2913 + 2914 + struct wmi_ml_partner_info { 2915 + u32 vdev_id; 2916 + u32 hw_link_id; 2917 + u8 addr[ETH_ALEN]; 2918 + bool assoc_link; 2919 + bool primary_umac; 2920 + bool logical_link_idx_valid; 2921 + u32 logical_link_idx; 2922 + }; 2923 + 2924 + struct wmi_ml_arg { 2925 + bool enabled; 2926 + bool assoc_link; 2927 + bool mcast_link; 2928 + bool link_add; 2929 + u8 num_partner_links; 2930 + struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS]; 2931 + }; 2932 + 2953 2933 struct wmi_vdev_start_req_arg { 2954 2934 u32 vdev_id; 2955 2935 u32 freq; ··· 3008 2946 u32 mbssid_flags; 3009 2947 u32 mbssid_tx_vdev_id; 3010 2948 u32 punct_bitmap; 2949 + struct wmi_ml_arg ml; 3011 2950 }; 3012 2951 3013 2952 struct ath12k_wmi_peer_create_arg { 3014 2953 const u8 *peer_addr; 3015 2954 u32 peer_type; 3016 2955 u32 vdev_id; 2956 + bool ml_enabled; 2957 + }; 2958 + 2959 + struct wmi_peer_create_mlo_params { 2960 + __le32 tlv_header; 2961 + __le32 flags; 3017 2962 }; 3018 2963 3019 2964 struct ath12k_wmi_pdev_set_regdomain_arg { ··· 3687 3618 #define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1 3688 3619 #define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2 3689 3620 3621 + #define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \ 3622 + (ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1) 3623 + 3624 + struct peer_assoc_mlo_params { 3625 + bool enabled; 3626 + bool assoc_link; 3627 + bool primary_umac; 3628 + bool peer_id_valid; 3629 + bool logical_link_idx_valid; 3630 + bool bridge_peer; 3631 + u8 mld_addr[ETH_ALEN]; 3632 + u32 logical_link_idx; 3633 + u32 ml_peer_id; 3634 + u32 ieee_link_id; 3635 + u8 num_partner_links; 3636 + struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS]; 3637 + }; 3638 + 3690 3639 struct 
wmi_rate_set_arg { 3691 3640 u32 num_rates; 3692 3641 u8 rates[WMI_MAX_SUPPORTED_RATES]; ··· 3779 3692 u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET]; 3780 3693 struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet; 3781 3694 u32 punct_bitmap; 3695 + bool is_assoc; 3696 + struct peer_assoc_mlo_params ml; 3782 3697 }; 3698 + 3699 + #define ATH12K_WMI_FLAG_MLO_ENABLED BIT(0) 3700 + #define ATH12K_WMI_FLAG_MLO_ASSOC_LINK BIT(1) 3701 + #define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC BIT(2) 3702 + #define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID BIT(3) 3703 + #define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID BIT(4) 3704 + 3705 + struct wmi_peer_assoc_mlo_partner_info_params { 3706 + __le32 tlv_header; 3707 + __le32 vdev_id; 3708 + __le32 hw_link_id; 3709 + __le32 flags; 3710 + __le32 logical_link_idx; 3711 + } __packed; 3712 + 3713 + struct wmi_peer_assoc_mlo_params { 3714 + __le32 tlv_header; 3715 + __le32 flags; 3716 + struct ath12k_wmi_mac_addr_params mld_addr; 3717 + __le32 logical_link_idx; 3718 + __le32 ml_peer_id; 3719 + __le32 ieee_link_id; 3720 + __le32 emlsr_trans_timeout_us; 3721 + __le32 emlsr_trans_delay_us; 3722 + __le32 emlsr_padding_delay_us; 3723 + } __packed; 3783 3724 3784 3725 struct wmi_peer_assoc_complete_cmd { 3785 3726 __le32 tlv_header;
+1
drivers/net/wireless/ath/ath6kl/cfg80211.c
··· 1441 1441 1442 1442 static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, 1443 1443 struct wireless_dev *wdev, 1444 + unsigned int link_id, 1444 1445 int *dbm) 1445 1446 { 1446 1447 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+1 -1
drivers/net/wireless/ath/ath9k/antenna.c
··· 193 193 static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb, 194 194 struct ath_hw_antcomb_conf *conf) 195 195 { 196 - /* set alt to the conf with maximun ratio */ 196 + /* set alt to the conf with maximum ratio */ 197 197 if (antcomb->first_ratio && antcomb->second_ratio) { 198 198 if (antcomb->rssi_second > antcomb->rssi_third) { 199 199 /* first alt*/
+1 -1
drivers/net/wireless/ath/ath9k/ar9002_hw.c
··· 395 395 ah->config.hw_hang_checks |= HW_MAC_HANG; 396 396 } 397 397 398 - /* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */ 398 + /* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */ 399 399 int ar9002_hw_attach_ops(struct ath_hw *ah) 400 400 { 401 401 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_hw.c
··· 1170 1170 return false; 1171 1171 } 1172 1172 1173 - /* Sets up the AR9003 hardware familiy callbacks */ 1173 + /* Sets up the AR9003 hardware family callbacks */ 1174 1174 void ar9003_hw_attach_ops(struct ath_hw *ah) 1175 1175 { 1176 1176 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
+2 -2
drivers/net/wireless/ath/ath9k/ar9003_mci.c
··· 637 637 * same time. Since BT's calibration doesn't happen 638 638 * that often, we'll let BT completes calibration then 639 639 * we continue to wait for cal_grant from BT. 640 - * Orginal: Wait BT_CAL_GRANT. 640 + * Original: Wait BT_CAL_GRANT. 641 641 * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait 642 642 * BT_CAL_DONE -> Wait BT_CAL_GRANT. 643 643 */ ··· 747 747 * BT is sleeping. Check if BT wakes up during 748 748 * WLAN calibration. If BT wakes up during 749 749 * WLAN calibration, need to go through all 750 - * message exchanges again and recal. 750 + * message exchanges again and recalibrate. 751 751 */ 752 752 REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, 753 753 (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+1 -1
drivers/net/wireless/ath/ath9k/ar9003_phy.h
··· 246 246 247 247 248 248 /* 249 - * MRC Feild Definitions 249 + * MRC Field Definitions 250 250 */ 251 251 #define AR_PHY_SGI_DSC_MAN 0x0007FFF0 252 252 #define AR_PHY_SGI_DSC_MAN_S 4
+2
drivers/net/wireless/ath/ath9k/ath9k.h
··· 1018 1018 1019 1019 u8 gtt_cnt; 1020 1020 u32 intrstatus; 1021 + u32 rx_active_check_time; 1022 + u32 rx_active_count; 1021 1023 u16 ps_flags; /* PS_* */ 1022 1024 bool ps_enabled; 1023 1025 bool ps_idle;
+1 -1
drivers/net/wireless/ath/ath9k/channel.c
··· 17 17 #include "ath9k.h" 18 18 19 19 /* Set/change channels. If the channel is really being changed, it's done 20 - * by reseting the chip. To accomplish this we must first cleanup any pending 20 + * by resetting the chip. To accomplish this we must first cleanup any pending 21 21 * DMA, then restart stuff. 22 22 */ 23 23 static int ath_set_channel(struct ath_softc *sc)
+1 -1
drivers/net/wireless/ath/ath9k/common-spectral.c
··· 734 734 ATH9K_RX_FILTER_PHYRADAR | 735 735 ATH9K_RX_FILTER_PHYERR); 736 736 737 - /* TODO: usually this should not be neccesary, but for some reason 737 + /* TODO: usually this should not be necessary, but for some reason 738 738 * (or in some mode?) the trigger must be called after the 739 739 * configuration, otherwise the register will have its values reset 740 740 * (on my ar9220 to value 0x01002310)
+1
drivers/net/wireless/ath/ath9k/debug.c
··· 750 750 [RESET_TYPE_CALIBRATION] = "Calibration error", 751 751 [RESET_TX_DMA_ERROR] = "Tx DMA stop error", 752 752 [RESET_RX_DMA_ERROR] = "Rx DMA stop error", 753 + [RESET_TYPE_RX_INACTIVE] = "Rx path inactive", 753 754 }; 754 755 int i; 755 756
+1
drivers/net/wireless/ath/ath9k/debug.h
··· 53 53 RESET_TYPE_CALIBRATION, 54 54 RESET_TX_DMA_ERROR, 55 55 RESET_RX_DMA_ERROR, 56 + RESET_TYPE_RX_INACTIVE, 56 57 __RESET_TYPE_MAX 57 58 }; 58 59
+1 -1
drivers/net/wireless/ath/ath9k/dfs.c
··· 79 79 const int DFS_UPPER_BIN_OFFSET = 64; 80 80 /* if detected radar on both channels, select the significant one */ 81 81 if (is_ctl && is_ext) { 82 - /* first check wether channels have 'strong' bins */ 82 + /* first check whether channels have 'strong' bins */ 83 83 is_ctl = fft_bitmap_weight(fft->lower_bins) != 0; 84 84 is_ext = fft_bitmap_weight(fft->upper_bins) != 0; 85 85
+1 -1
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 1198 1198 filename = FIRMWARE_AR9271; 1199 1199 1200 1200 /* expected fw locations: 1201 - * - htc_9271.fw (stable version 1.3, depricated) 1201 + * - htc_9271.fw (stable version 1.3, deprecated) 1202 1202 */ 1203 1203 snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name), 1204 1204 "%s", filename);
+2 -2
drivers/net/wireless/ath/ath9k/hw.c
··· 2149 2149 2150 2150 /* When chip goes into network sleep, it could be waken 2151 2151 * up by MCI_INT interrupt caused by BT's HW messages 2152 - * (LNA_xxx, CONT_xxx) which chould be in a very fast 2152 + * (LNA_xxx, CONT_xxx) which could be in a very fast 2153 2153 * rate (~100us). This will cause chip to leave and 2154 2154 * re-enter network sleep mode frequently, which in 2155 2155 * consequence will have WLAN MCI HW to generate lots of ··· 2544 2544 2545 2545 pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK); 2546 2546 /* 2547 - * For AR9271 we will temporarilly uses the rx chainmax as read from 2547 + * For AR9271 we will temporarily use the rx chainmax as read from 2548 2548 * the EEPROM. 2549 2549 */ 2550 2550 if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
+1 -1
drivers/net/wireless/ath/ath9k/hw.h
··· 282 282 * an exact user defined pattern or de-authentication/disassoc pattern. 283 283 * @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four 284 284 * bytes of the pattern for user defined pattern, de-authentication and 285 - * disassociation patterns for all types of possible frames recieved 285 + * disassociation patterns for all types of possible frames received 286 286 * of those types. 287 287 */ 288 288
+31 -2
drivers/net/wireless/ath/ath9k/link.c
··· 50 50 "tx hung, resetting the chip\n"); 51 51 ath9k_queue_reset(sc, RESET_TYPE_TX_HANG); 52 52 return false; 53 + } 53 54 55 + #define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC) 56 + 57 + static bool ath_hw_rx_inactive_check(struct ath_softc *sc) 58 + { 59 + struct ath_common *common = ath9k_hw_common(sc->sc_ah); 60 + u32 interval, count; 61 + 62 + interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time); 63 + count = sc->rx_active_count; 64 + 65 + if (interval < RX_INACTIVE_CHECK_INTERVAL) 66 + return true; /* too soon to check */ 67 + 68 + sc->rx_active_count = 0; 69 + sc->rx_active_check_time = jiffies; 70 + 71 + /* Need at least one interrupt per second, and we should only react if 72 + * we are within a factor two of the expected interval 73 + */ 74 + if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 || 75 + count >= interval / MSEC_PER_SEC) 76 + return true; 77 + 78 + ath_dbg(common, RESET, 79 + "RX inactivity detected. Schedule chip reset\n"); 80 + ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE); 81 + 82 + return false; 54 83 } 55 84 56 85 void ath_hw_check_work(struct work_struct *work) ··· 87 58 struct ath_softc *sc = container_of(work, struct ath_softc, 88 59 hw_check_work.work); 89 60 90 - if (!ath_hw_check(sc) || 91 - !ath_tx_complete_check(sc)) 61 + if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) || 62 + !ath_hw_rx_inactive_check(sc)) 92 63 return; 93 64 94 65 ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+1 -1
drivers/net/wireless/ath/ath9k/mac.h
··· 251 251 * when the descriptor is specifically marked to generate 252 252 * an interrupt with this flag. Descriptors should be 253 253 * marked periodically to insure timely replenishing of the 254 - * supply needed for sending frames. Defering interrupts 254 + * supply needed for sending frames. Deferring interrupts 255 255 * reduces system load and potentially allows more concurrent 256 256 * work to be done but if done to aggressively can cause 257 257 * senders to backup. When the hardware queue is left too
+3 -2
drivers/net/wireless/ath/ath9k/main.c
··· 453 453 ath_rx_tasklet(sc, 0, true); 454 454 455 455 ath_rx_tasklet(sc, 0, false); 456 + sc->rx_active_count++; 456 457 } 457 458 458 459 if (status & ATH9K_INT_TX) { ··· 1002 1001 static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data, 1003 1002 struct ieee80211_vif *vif) 1004 1003 { 1005 - /* Use the first (configured) interface, but prefering AP interfaces. */ 1004 + /* Use the first (configured) interface, but preferring AP interfaces. */ 1006 1005 if (!iter_data->primary_beacon_vif) { 1007 1006 iter_data->primary_beacon_vif = vif; 1008 1007 } else { ··· 2768 2767 #endif 2769 2768 2770 2769 static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2771 - int *dbm) 2770 + unsigned int link_id, int *dbm) 2772 2771 { 2773 2772 struct ath_softc *sc = hw->priv; 2774 2773 struct ath_vif *avp = (void *)vif->drv_priv;
+3 -3
drivers/net/wireless/ath/ath9k/wow.c
··· 60 60 memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE); 61 61 62 62 /* 63 - * Create Dissassociate / Deauthenticate packet filter 63 + * Create Disassociate / Deauthenticate packet filter 64 64 * 65 65 * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes 66 66 * +--------------+----------+---------+--------+--------+---- ··· 70 70 * The above is the management frame format for disassociate/ 71 71 * deauthenticate pattern, from this we need to match the first byte 72 72 * of 'Frame Control' and DA, SA, and BSSID fields 73 - * (skipping 2nd byte of FC and Duration feild. 73 + * (skipping 2nd byte of FC and Duration field. 74 74 * 75 75 * Disassociate pattern 76 76 * -------------------- ··· 225 225 ath9k_stop_btcoex(sc); 226 226 227 227 /* 228 - * Enable wake up on recieving disassoc/deauth 228 + * Enable wake up on receiving disassoc/deauth 229 229 * frame by default. 230 230 */ 231 231 ret = ath9k_wow_add_disassoc_deauth_pattern(sc);
+1 -1
drivers/net/wireless/ath/ath9k/xmit.c
··· 557 557 /* 558 558 * AR5416 can become deaf/mute when BA 559 559 * issue happens. Chip needs to be reset. 560 - * But AP code may have sychronization issues 560 + * But AP code may have synchronization issues 561 561 * when perform internal reset in this routine. 562 562 * Only enable reset in STA mode for now. 563 563 */
+4 -1
drivers/net/wireless/ath/wcn36xx/main.c
··· 1590 1590 } 1591 1591 1592 1592 n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels; 1593 - wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL); 1593 + wcn->chan_survey = devm_kcalloc(wcn->dev, 1594 + n_channels, 1595 + sizeof(struct wcn36xx_chan_survey), 1596 + GFP_KERNEL); 1594 1597 if (!wcn->chan_survey) { 1595 1598 ret = -ENOMEM; 1596 1599 goto out_wq;
+5
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
··· 455 455 if (sg_data_sz > max_req_sz - req_sz) 456 456 sg_data_sz = max_req_sz - req_sz; 457 457 458 + if (!sgl) { 459 + /* out of (pre-allocated) scatterlist entries */ 460 + ret = -ENOMEM; 461 + goto exit; 462 + } 458 463 sg_set_buf(sgl, pkt_data, sg_data_sz); 459 464 sg_cnt++; 460 465
+6 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 2676 2676 2677 2677 static s32 2678 2678 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, 2679 - s32 *dbm) 2679 + unsigned int link_id, s32 *dbm) 2680 2680 { 2681 2681 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2682 2682 struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); ··· 4999 4999 s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif) 5000 5000 { 5001 5001 static const s32 pktflags[] = { 5002 - BRCMF_VNDR_IE_PRBREQ_FLAG, 5003 5002 BRCMF_VNDR_IE_PRBRSP_FLAG, 5004 5003 BRCMF_VNDR_IE_BEACON_FLAG 5005 5004 }; 5006 5005 int i; 5006 + 5007 + if (vif->wdev.iftype == NL80211_IFTYPE_AP) 5008 + brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0); 5009 + else 5010 + brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG, NULL, 0); 5007 5011 5008 5012 for (i = 0; i < ARRAY_SIZE(pktflags); i++) 5009 5013 brcmf_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0);
+2 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
··· 327 327 if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { 328 328 head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); 329 329 330 - brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", 331 - brcmf_ifname(ifp), head_delta); 330 + brcmf_dbg(INFO, "%s: %s headroom\n", brcmf_ifname(ifp), 331 + head_delta ? "insufficient" : "unmodifiable"); 332 332 atomic_inc(&drvr->bus_if->stats.pktcowed); 333 333 ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0, 334 334 GFP_ATOMIC);
+2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
··· 6 6 #ifndef _fwil_h_ 7 7 #define _fwil_h_ 8 8 9 + #include "debug.h" 10 + 9 11 /******************************************************************************* 10 12 * Dongle command codes that are interpreted by firmware 11 13 ******************************************************************************/
+3
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
··· 23423 23423 break; 23424 23424 } 23425 23425 23426 + if (WARN_ON(k == NPHY_IQCAL_NUMGAINS)) 23427 + return; 23428 + 23426 23429 params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1]; 23427 23430 params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2]; 23428 23431 params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3];
+3 -3
drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
··· 104 104 }; 105 105 106 106 enum iwl_mvm_vendor_events_idx { 107 - /* 0x0 - 0x3 are deprecated */ 108 - IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, 109 - NUM_IWL_MVM_VENDOR_EVENT_IDX 107 + /* 0x0 - 0x3 are deprecated */ 108 + IWL_MVM_VENDOR_EVENT_IDX_ROAMING_FORBIDDEN = 4, 109 + NUM_IWL_MVM_VENDOR_EVENT_IDX 110 110 }; 111 111 112 112 static const struct nl80211_vendor_cmd_info
+1 -1
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 410 410 static int 411 411 mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy, 412 412 struct wireless_dev *wdev, 413 - int *dbm) 413 + unsigned int link_id, int *dbm) 414 414 { 415 415 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 416 416 struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+1 -1
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
··· 545 545 546 546 if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q, 547 547 adapter->hs_activate_wait_q_woken, 548 - (10 * HZ)) <= 0) { 548 + (5 * HZ)) <= 0) { 549 549 mwifiex_dbg(adapter, ERROR, 550 550 "hs_activate_wait_q terminated\n"); 551 551 return false;
+1 -1
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 1596 1596 EXPORT_SYMBOL_GPL(mt76_wcid_cleanup); 1597 1597 1598 1598 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1599 - int *dbm) 1599 + unsigned int link_id, int *dbm) 1600 1600 { 1601 1601 struct mt76_phy *phy = hw->priv; 1602 1602 int n_chains = hweight16(phy->chainmask);
+1 -1
drivers/net/wireless/mediatek/mt76/mt76.h
··· 1431 1431 int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy); 1432 1432 1433 1433 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1434 - int *dbm); 1434 + unsigned int link_id, int *dbm); 1435 1435 int mt76_init_sar_power(struct ieee80211_hw *hw, 1436 1436 const struct cfg80211_sar_specs *sar); 1437 1437 int mt76_get_sar_power(struct mt76_phy *phy,
+1 -1
drivers/net/wireless/microchip/wilc1000/cfg80211.c
··· 1669 1669 } 1670 1670 1671 1671 static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, 1672 - int *dbm) 1672 + unsigned int link_id, int *dbm) 1673 1673 { 1674 1674 int ret; 1675 1675 struct wilc_vif *vif = netdev_priv(wdev->netdev);
+1 -1
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
··· 881 881 } 882 882 883 883 static int qtnf_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, 884 - int *dbm) 884 + unsigned int link_id, int *dbm) 885 885 { 886 886 struct qtnf_vif *vif = qtnf_netdev_get_priv(wdev->netdev); 887 887 int ret;
+1 -1
drivers/net/wireless/quantenna/qtnfmac/core.h
··· 102 102 struct qtnf_mac_info macinfo; 103 103 struct qtnf_vif iflist[QTNF_MAX_INTF]; 104 104 struct cfg80211_scan_request *scan_req; 105 - struct mutex mac_lock; /* lock during wmac speicific ops */ 105 + struct mutex mac_lock; /* lock during wmac specific ops */ 106 106 struct delayed_work scan_timeout; 107 107 struct ieee80211_regdomain *rd; 108 108 struct platform_device *pdev;
+20
drivers/net/wireless/realtek/rtl8xxxu/core.c
··· 8147 8147 .driver_info = (unsigned long)&rtl8192cu_fops}, 8148 8148 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff), 8149 8149 .driver_info = (unsigned long)&rtl8192cu_fops}, 8150 + {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8186, 0xff, 0xff, 0xff), 8151 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8150 8152 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff), 8151 8153 .driver_info = (unsigned long)&rtl8192cu_fops}, 8152 8154 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff), ··· 8159 8157 .driver_info = (unsigned long)&rtl8192cu_fops}, 8160 8158 {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff), 8161 8159 .driver_info = (unsigned long)&rtl8192cu_fops}, 8160 + {USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x11f2, 0xff, 0xff, 0xff), 8161 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8162 8162 {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff), 8163 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8164 + {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff), 8163 8165 .driver_info = (unsigned long)&rtl8192cu_fops}, 8164 8166 {USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff), 8165 8167 .driver_info = (unsigned long)&rtl8192cu_fops}, 8166 8168 {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff), 8169 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8170 + {USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9043, 0xff, 0xff, 0xff), 8167 8171 .driver_info = (unsigned long)&rtl8192cu_fops}, 8168 8172 {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff), 8169 8173 .driver_info = (unsigned long)&rtl8192cu_fops}, ··· 8187 8179 .driver_info = (unsigned long)&rtl8192cu_fops}, 8188 8180 {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff), 8189 8181 .driver_info = (unsigned long)&rtl8192cu_fops}, 8182 + {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3358, 0xff, 0xff, 0xff), 8183 + .driver_info 
= (unsigned long)&rtl8192cu_fops}, 8184 + {USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3359, 0xff, 0xff, 0xff), 8185 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8190 8186 {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff), 8191 8187 .driver_info = (unsigned long)&rtl8192cu_fops}, 8192 8188 {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff), ··· 8204 8192 {USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff), 8205 8193 .driver_info = (unsigned long)&rtl8192cu_fops}, 8206 8194 {USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff), 8195 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8196 + {USB_DEVICE_AND_INTERFACE_INFO(0x9846, 0x9041, 0xff, 0xff, 0xff), 8207 8197 .driver_info = (unsigned long)&rtl8192cu_fops}, 8208 8198 {USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff), 8209 8199 .driver_info = (unsigned long)&rtl8192cu_fops}, ··· 8232 8218 .driver_info = (unsigned long)&rtl8192cu_fops}, 8233 8219 {USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff), 8234 8220 .driver_info = (unsigned long)&rtl8192cu_fops}, 8221 + {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff), 8222 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8235 8223 {USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff), 8236 8224 .driver_info = (unsigned long)&rtl8192cu_fops}, 8237 8225 {USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff), ··· 8241 8225 {USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff), 8242 8226 .driver_info = (unsigned long)&rtl8192cu_fops}, 8243 8227 {USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff), 8228 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8229 + {USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0077, 0xff, 0xff, 0xff), 8244 8230 .driver_info = (unsigned long)&rtl8192cu_fops}, 8245 8231 {USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff), 8246 8232 .driver_info = (unsigned long)&rtl8192cu_fops}, ··· 8265 8247 
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff), 8266 8248 .driver_info = (unsigned long)&rtl8192cu_fops}, 8267 8249 {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff), 8250 + .driver_info = (unsigned long)&rtl8192cu_fops}, 8251 + {USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330d, 0xff, 0xff, 0xff), 8268 8252 .driver_info = (unsigned long)&rtl8192cu_fops}, 8269 8253 {USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff), 8270 8254 .driver_info = (unsigned long)&rtl8192cu_fops},
+6 -7
drivers/net/wireless/realtek/rtlwifi/base.c
··· 575 575 576 576 void rtl_deinit_core(struct ieee80211_hw *hw) 577 577 { 578 + struct rtl_priv *rtlpriv = rtl_priv(hw); 579 + 578 580 rtl_c2hcmd_launcher(hw, 0); 579 581 rtl_free_entries_from_scan_list(hw); 580 582 rtl_free_entries_from_ack_queue(hw, false); 583 + if (rtlpriv->works.rtl_wq) { 584 + destroy_workqueue(rtlpriv->works.rtl_wq); 585 + rtlpriv->works.rtl_wq = NULL; 586 + } 581 587 } 582 588 EXPORT_SYMBOL_GPL(rtl_deinit_core); 583 589 ··· 2702 2696 MODULE_LICENSE("GPL"); 2703 2697 MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); 2704 2698 2705 - struct rtl_global_var rtl_global_var = {}; 2706 - EXPORT_SYMBOL_GPL(rtl_global_var); 2707 - 2708 2699 static int __init rtl_core_module_init(void) 2709 2700 { 2710 2701 BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION); ··· 2714 2711 2715 2712 /* add debugfs */ 2716 2713 rtl_debugfs_add_topdir(); 2717 - 2718 - /* init some global vars */ 2719 - INIT_LIST_HEAD(&rtl_global_var.glb_priv_list); 2720 - spin_lock_init(&rtl_global_var.glb_list_lock); 2721 2714 2722 2715 return 0; 2723 2716 }
-1
drivers/net/wireless/realtek/rtlwifi/base.h
··· 124 124 u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); 125 125 void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); 126 126 u8 rtl_tid_to_ac(u8 tid); 127 - extern struct rtl_global_var rtl_global_var; 128 127 void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation); 129 128 130 129 #endif
+9 -52
drivers/net/wireless/realtek/rtlwifi/pci.c
··· 295 295 return status; 296 296 } 297 297 298 - static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, 299 - struct rtl_priv **buddy_priv) 300 - { 301 - struct rtl_priv *rtlpriv = rtl_priv(hw); 302 - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); 303 - struct rtl_priv *tpriv = NULL, *iter; 304 - struct rtl_pci_priv *tpcipriv = NULL; 305 - 306 - if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { 307 - list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list, 308 - list) { 309 - tpcipriv = (struct rtl_pci_priv *)iter->priv; 310 - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 311 - "pcipriv->ndis_adapter.funcnumber %x\n", 312 - pcipriv->ndis_adapter.funcnumber); 313 - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 314 - "tpcipriv->ndis_adapter.funcnumber %x\n", 315 - tpcipriv->ndis_adapter.funcnumber); 316 - 317 - if (pcipriv->ndis_adapter.busnumber == 318 - tpcipriv->ndis_adapter.busnumber && 319 - pcipriv->ndis_adapter.devnumber == 320 - tpcipriv->ndis_adapter.devnumber && 321 - pcipriv->ndis_adapter.funcnumber != 322 - tpcipriv->ndis_adapter.funcnumber) { 323 - tpriv = iter; 324 - break; 325 - } 326 - } 327 - } 328 - 329 - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 330 - "find_buddy_priv %d\n", tpriv != NULL); 331 - 332 - if (tpriv) 333 - *buddy_priv = tpriv; 334 - 335 - return tpriv != NULL; 336 - } 337 - 338 298 static void rtl_pci_parse_configuration(struct pci_dev *pdev, 339 299 struct ieee80211_hw *hw) 340 300 { ··· 1656 1696 synchronize_irq(rtlpci->pdev->irq); 1657 1697 tasklet_kill(&rtlpriv->works.irq_tasklet); 1658 1698 cancel_work_sync(&rtlpriv->works.lps_change_work); 1659 - 1660 - destroy_workqueue(rtlpriv->works.rtl_wq); 1661 1699 } 1662 1700 1663 1701 static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) ··· 1969 2011 pcipriv->ndis_adapter.amd_l1_patch); 1970 2012 1971 2013 rtl_pci_parse_configuration(pdev, hw); 1972 - list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list); 1973 2014 1974 2015 return true; 1975 2016 } ··· 2115 2158 
rtlpriv->rtlhal.interface = INTF_PCI; 2116 2159 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); 2117 2160 rtlpriv->intf_ops = &rtl_pci_ops; 2118 - rtlpriv->glb_var = &rtl_global_var; 2119 2161 rtl_efuse_ops_init(hw); 2120 2162 2121 2163 /* MEM map */ ··· 2165 2209 if (rtlpriv->cfg->ops->init_sw_vars(hw)) { 2166 2210 pr_err("Can't init_sw_vars\n"); 2167 2211 err = -ENODEV; 2168 - goto fail3; 2212 + goto fail2; 2169 2213 } 2170 2214 rtl_init_sw_leds(hw); 2171 2215 ··· 2183 2227 err = rtl_pci_init(hw, pdev); 2184 2228 if (err) { 2185 2229 pr_err("Failed to init PCI\n"); 2186 - goto fail3; 2230 + goto fail4; 2187 2231 } 2188 2232 2189 2233 err = ieee80211_register_hw(hw); 2190 2234 if (err) { 2191 2235 pr_err("Can't register mac80211 hw.\n"); 2192 2236 err = -ENODEV; 2193 - goto fail3; 2237 + goto fail5; 2194 2238 } 2195 2239 rtlpriv->mac80211.mac80211_registered = 1; 2196 2240 ··· 2213 2257 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); 2214 2258 return 0; 2215 2259 2216 - fail3: 2217 - pci_set_drvdata(pdev, NULL); 2260 + fail5: 2261 + rtl_pci_deinit(hw); 2262 + fail4: 2218 2263 rtl_deinit_core(hw); 2264 + fail3: 2265 + wait_for_completion(&rtlpriv->firmware_loading_complete); 2266 + rtlpriv->cfg->ops->deinit_sw_vars(hw); 2219 2267 2220 2268 fail2: 2221 2269 if (rtlpriv->io.pci_mem_start != 0) 2222 2270 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); 2223 2271 2224 2272 pci_release_regions(pdev); 2225 - complete(&rtlpriv->firmware_loading_complete); 2226 2273 2227 2274 fail1: 2228 2275 if (hw) ··· 2276 2317 if (rtlpci->using_msi) 2277 2318 pci_disable_msi(rtlpci->pdev); 2278 2319 2279 - list_del(&rtlpriv->list); 2280 2320 if (rtlpriv->io.pci_mem_start != 0) { 2281 2321 pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); 2282 2322 pci_release_regions(pdev); ··· 2334 2376 const struct rtl_intf_ops rtl_pci_ops = { 2335 2377 .adapter_start = rtl_pci_start, 2336 2378 .adapter_stop = rtl_pci_stop, 2337 - .check_buddy_priv = 
rtl_pci_check_buddy_priv, 2338 2379 .adapter_tx = rtl_pci_tx, 2339 2380 .flush = rtl_pci_flush, 2340 2381 .reset_trx_ring = rtl_pci_reset_trx_ring,
+4 -3
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
··· 64 64 65 65 rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD, 66 66 "Firmware callback routine entered!\n"); 67 - complete(&rtlpriv->firmware_loading_complete); 68 67 if (!firmware) { 69 68 pr_err("Firmware %s not available\n", fw_name); 70 69 rtlpriv->max_fw_size = 0; 71 - return; 70 + goto exit; 72 71 } 73 72 if (firmware->size > rtlpriv->max_fw_size) { 74 73 pr_err("Firmware is too big!\n"); 75 74 rtlpriv->max_fw_size = 0; 76 75 release_firmware(firmware); 77 - return; 76 + goto exit; 78 77 } 79 78 pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware; 80 79 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); 81 80 pfirmware->sz_fw_tmpbufferlen = firmware->size; 82 81 release_firmware(firmware); 82 + exit: 83 + complete(&rtlpriv->firmware_loading_complete); 83 84 } 84 85 85 86 static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+3 -1
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
··· 2033 2033 if (!_rtl8821ae_check_condition(hw, v1)) { 2034 2034 i += 2; /* skip the pair of expression*/ 2035 2035 v2 = array[i+1]; 2036 - while (v2 != 0xDEAD) 2036 + while (v2 != 0xDEAD) { 2037 2037 i += 3; 2038 + v2 = array[i + 1]; 2039 + } 2038 2040 } 2039 2041 } 2040 2042 }
+5 -7
drivers/net/wireless/realtek/rtlwifi/usb.c
··· 629 629 tasklet_kill(&rtlusb->rx_work_tasklet); 630 630 cancel_work_sync(&rtlpriv->works.lps_change_work); 631 631 632 - if (rtlpriv->works.rtl_wq) { 633 - destroy_workqueue(rtlpriv->works.rtl_wq); 634 - rtlpriv->works.rtl_wq = NULL; 635 - } 636 - 637 632 skb_queue_purge(&rtlusb->rx_queue); 638 633 639 634 while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { ··· 1023 1028 err = ieee80211_register_hw(hw); 1024 1029 if (err) { 1025 1030 pr_err("Can't register mac80211 hw.\n"); 1026 - goto error_out; 1031 + goto error_init_vars; 1027 1032 } 1028 1033 rtlpriv->mac80211.mac80211_registered = 1; 1029 1034 1030 1035 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); 1031 1036 return 0; 1032 1037 1038 + error_init_vars: 1039 + wait_for_completion(&rtlpriv->firmware_loading_complete); 1040 + rtlpriv->cfg->ops->deinit_sw_vars(hw); 1033 1041 error_out: 1042 + rtl_usb_deinit(hw); 1034 1043 rtl_deinit_core(hw); 1035 1044 error_out2: 1036 1045 _rtl_usb_io_handler_release(hw); 1037 1046 usb_put_dev(udev); 1038 - complete(&rtlpriv->firmware_loading_complete); 1039 1047 kfree(rtlpriv->usb_data); 1040 1048 ieee80211_free_hw(hw); 1041 1049 return -ENODEV;
-12
drivers/net/wireless/realtek/rtlwifi/wifi.h
··· 2270 2270 /*com */ 2271 2271 int (*adapter_start)(struct ieee80211_hw *hw); 2272 2272 void (*adapter_stop)(struct ieee80211_hw *hw); 2273 - bool (*check_buddy_priv)(struct ieee80211_hw *hw, 2274 - struct rtl_priv **buddy_priv); 2275 2273 2276 2274 int (*adapter_tx)(struct ieee80211_hw *hw, 2277 2275 struct ieee80211_sta *sta, ··· 2512 2514 u32 rssi_max; 2513 2515 }; 2514 2516 2515 - struct rtl_global_var { 2516 - /* from this list we can get 2517 - * other adapter's rtl_priv 2518 - */ 2519 - struct list_head glb_priv_list; 2520 - spinlock_t glb_list_lock; 2521 - }; 2522 - 2523 2517 #define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */ 2524 2518 2525 2519 struct rtl_btc_info { ··· 2657 2667 struct rtl_priv { 2658 2668 struct ieee80211_hw *hw; 2659 2669 struct completion firmware_loading_complete; 2660 - struct list_head list; 2661 2670 struct rtl_priv *buddy_priv; 2662 - struct rtl_global_var *glb_var; 2663 2671 struct rtl_dmsp_ctl dmsp_ctl; 2664 2672 struct rtl_locks locks; 2665 2673 struct rtl_works works;
+67 -1
drivers/net/wireless/realtek/rtw88/rtw8812au.c
··· 9 9 #include "usb.h" 10 10 11 11 static const struct usb_device_id rtw_8812au_id_table[] = { 12 - { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff), 12 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8812, 0xff, 0xff, 0xff), 13 13 .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, 14 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881a, 0xff, 0xff, 0xff), 15 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, 16 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881b, 0xff, 0xff, 0xff), 17 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, 18 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x881c, 0xff, 0xff, 0xff), 19 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, 20 + { USB_DEVICE_AND_INTERFACE_INFO(0x0409, 0x0408, 0xff, 0xff, 0xff), 21 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* NEC */ 22 + { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x025d, 0xff, 0xff, 0xff), 23 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Buffalo */ 24 + { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0952, 0xff, 0xff, 0xff), 25 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* I-O DATA */ 26 + { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1106, 0xff, 0xff, 0xff), 27 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */ 28 + { USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1109, 0xff, 0xff, 0xff), 29 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Belkin */ 30 + { USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x3426, 0xff, 0xff, 0xff), 31 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ZyXEL */ 32 + { USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016e, 0xff, 0xff, 0xff), 33 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Logitec */ 34 + { USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8812, 0xff, 0xff, 0xff), 35 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Abocom */ 36 + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9051, 0xff, 0xff, 0xff), 37 + 
.driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Netgear */ 38 + { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17d2, 0xff, 0xff, 0xff), 39 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* ASUS */ 40 + { USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0074, 0xff, 0xff, 0xff), 41 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Sitecom */ 42 + { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0022, 0xff, 0xff, 0xff), 43 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Hawking */ 44 + { USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0632, 0xff, 0xff, 0xff), 45 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* WD */ 46 + { USB_DEVICE_AND_INTERFACE_INFO(0x13b1, 0x003f, 0xff, 0xff, 0xff), 47 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Linksys */ 48 + { USB_DEVICE_AND_INTERFACE_INFO(0x148f, 0x9097, 0xff, 0xff, 0xff), 49 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Amped Wireless */ 50 + { USB_DEVICE_AND_INTERFACE_INFO(0x1740, 0x0100, 0xff, 0xff, 0xff), 51 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* EnGenius */ 52 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330e, 0xff, 0xff, 0xff), 53 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */ 54 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3313, 0xff, 0xff, 0xff), 55 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */ 56 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3315, 0xff, 0xff, 0xff), 57 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */ 58 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3316, 0xff, 0xff, 0xff), 59 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* D-Link */ 60 + { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab30, 0xff, 0xff, 0xff), 61 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Planex */ 62 + { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805b, 0xff, 0xff, 0xff), 63 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TRENDnet */ 64 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 
0x0101, 0xff, 0xff, 0xff), 65 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 66 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0103, 0xff, 0xff, 0xff), 67 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 68 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010d, 0xff, 0xff, 0xff), 69 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 70 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010e, 0xff, 0xff, 0xff), 71 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 72 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010f, 0xff, 0xff, 0xff), 73 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 74 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0122, 0xff, 0xff, 0xff), 75 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* TP-Link */ 76 + { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff), 77 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Tenda */ 78 + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa822, 0xff, 0xff, 0xff), 79 + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, /* Edimax */ 14 80 {}, 15 81 }; 16 82 MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table);
+51 -1
drivers/net/wireless/realtek/rtw88/rtw8821au.c
··· 9 9 #include "usb.h" 10 10 11 11 static const struct usb_device_id rtw_8821au_id_table[] = { 12 - { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff), 12 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0811, 0xff, 0xff, 0xff), 13 13 .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 14 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0820, 0xff, 0xff, 0xff), 15 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 16 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0821, 0xff, 0xff, 0xff), 17 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 18 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8822, 0xff, 0xff, 0xff), 19 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 20 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x0823, 0xff, 0xff, 0xff), 21 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 22 + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xa811, 0xff, 0xff, 0xff), 23 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, 24 + { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x0242, 0xff, 0xff, 0xff), 25 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */ 26 + { USB_DEVICE_AND_INTERFACE_INFO(0x0411, 0x029b, 0xff, 0xff, 0xff), 27 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Buffalo */ 28 + { USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0953, 0xff, 0xff, 0xff), 29 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* I-O DATA */ 30 + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4007, 0xff, 0xff, 0xff), 31 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */ 32 + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400e, 0xff, 0xff, 0xff), 33 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */ 34 + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400f, 0xff, 0xff, 0xff), 35 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* ELECOM */ 36 + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9052, 0xff, 
0xff, 0xff), 37 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Netgear */ 38 + { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0023, 0xff, 0xff, 0xff), 39 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* HAWKING */ 40 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3314, 0xff, 0xff, 0xff), 41 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */ 42 + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3318, 0xff, 0xff, 0xff), 43 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* D-Link */ 44 + { USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab32, 0xff, 0xff, 0xff), 45 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Planex */ 46 + { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x804b, 0xff, 0xff, 0xff), 47 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TRENDnet */ 48 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff), 49 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */ 50 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011f, 0xff, 0xff, 0xff), 51 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */ 52 + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0120, 0xff, 0xff, 0xff), 53 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* TP Link */ 54 + { USB_DEVICE_AND_INTERFACE_INFO(0x3823, 0x6249, 0xff, 0xff, 0xff), 55 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Obihai */ 56 + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa811, 0xff, 0xff, 0xff), 57 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */ 58 + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa812, 0xff, 0xff, 0xff), 59 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */ 60 + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa813, 0xff, 0xff, 0xff), 61 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */ 62 + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb611, 0xff, 0xff, 0xff), 63 + .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) }, /* Edimax */ 14 64 {}, 15 65 }; 16 66 
MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
+6
drivers/net/wireless/realtek/rtw88/rtw8822bu.c
··· 67 67 .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* LiteOn */ 68 68 { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x808a, 0xff, 0xff, 0xff), 69 69 .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-808UBM */ 70 + { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x805a, 0xff, 0xff, 0xff), 71 + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* TRENDnet TEW-805UBH */ 72 + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4011, 0xff, 0xff, 0xff), 73 + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */ 74 + { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff), 75 + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */ 70 76 {}, 71 77 }; 72 78 MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
+70 -2
drivers/net/wireless/realtek/rtw88/usb.c
··· 789 789 rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16); 790 790 } 791 791 792 + static void rtw_usb_dynamic_rx_agg_v2(struct rtw_dev *rtwdev, bool enable) 793 + { 794 + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); 795 + u8 size, timeout; 796 + u16 val16; 797 + 798 + if (!enable) { 799 + size = 0x0; 800 + timeout = 0x1; 801 + } else if (rtwusb->udev->speed == USB_SPEED_SUPER) { 802 + size = 0x6; 803 + timeout = 0x1a; 804 + } else { 805 + size = 0x5; 806 + timeout = 0x20; 807 + } 808 + 809 + val16 = u16_encode_bits(size, BIT_RXDMA_AGG_PG_TH) | 810 + u16_encode_bits(timeout, BIT_DMA_AGG_TO_V1); 811 + 812 + rtw_write16(rtwdev, REG_RXDMA_AGG_PG_TH, val16); 813 + rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_AGG_EN); 814 + } 815 + 792 816 static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) 793 817 { 794 818 switch (rtwdev->chip->id) { ··· 820 796 case RTW_CHIP_TYPE_8822B: 821 797 case RTW_CHIP_TYPE_8821C: 822 798 rtw_usb_dynamic_rx_agg_v1(rtwdev, enable); 799 + break; 800 + case RTW_CHIP_TYPE_8821A: 801 + case RTW_CHIP_TYPE_8812A: 802 + rtw_usb_dynamic_rx_agg_v2(rtwdev, enable); 823 803 break; 824 804 case RTW_CHIP_TYPE_8723D: 825 805 /* Doesn't like aggregation. 
*/ ··· 958 930 usb_set_intfdata(intf, NULL); 959 931 } 960 932 933 + static int rtw_usb_switch_mode_old(struct rtw_dev *rtwdev) 934 + { 935 + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); 936 + enum usb_device_speed cur_speed = rtwusb->udev->speed; 937 + u8 hci_opt; 938 + 939 + if (cur_speed == USB_SPEED_HIGH) { 940 + hci_opt = rtw_read8(rtwdev, REG_HCI_OPT_CTRL); 941 + 942 + if ((hci_opt & (BIT(2) | BIT(3))) != BIT(3)) { 943 + rtw_write8(rtwdev, REG_HCI_OPT_CTRL, 0x8); 944 + rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x2); 945 + rtw_write8(rtwdev, REG_ACLK_MON, 0x1); 946 + rtw_write8(rtwdev, 0x3d, 0x3); 947 + /* usb disconnect */ 948 + rtw_write8(rtwdev, REG_SYS_PW_CTRL + 1, 0x80); 949 + return 1; 950 + } 951 + } else if (cur_speed == USB_SPEED_SUPER) { 952 + rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT(1)); 953 + rtw_write8_clr(rtwdev, REG_ACLK_MON, BIT(0)); 954 + } 955 + 956 + return 0; 957 + } 958 + 961 959 static int rtw_usb_switch_mode_new(struct rtw_dev *rtwdev) 962 960 { 963 961 enum usb_device_speed cur_speed; ··· 1033 979 return 1; 1034 980 } 1035 981 982 + static bool rtw_usb3_chip_old(u8 chip_id) 983 + { 984 + return chip_id == RTW_CHIP_TYPE_8812A; 985 + } 986 + 987 + static bool rtw_usb3_chip_new(u8 chip_id) 988 + { 989 + return chip_id == RTW_CHIP_TYPE_8822C || 990 + chip_id == RTW_CHIP_TYPE_8822B; 991 + } 992 + 1036 993 static int rtw_usb_switch_mode(struct rtw_dev *rtwdev) 1037 994 { 1038 995 u8 id = rtwdev->chip->id; 1039 996 1040 - if (id != RTW_CHIP_TYPE_8822C && id != RTW_CHIP_TYPE_8822B) 997 + if (!rtw_usb3_chip_new(id) && !rtw_usb3_chip_old(id)) 1041 998 return 0; 1042 999 1043 1000 if (!rtwdev->efuse.usb_mode_switch) { ··· 1063 998 return 0; 1064 999 } 1065 1000 1066 - return rtw_usb_switch_mode_new(rtwdev); 1001 + if (rtw_usb3_chip_old(id)) 1002 + return rtw_usb_switch_mode_old(rtwdev); 1003 + else 1004 + return rtw_usb_switch_mode_new(rtwdev); 1067 1005 } 1068 1006 1069 1007 int rtw_usb_probe(struct usb_interface *intf, const struct 
usb_device_id *id)
+47
drivers/net/wireless/realtek/rtw89/acpi.c
··· 148 148 ACPI_FREE(obj); 149 149 return ret; 150 150 } 151 + 152 + int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev, 153 + struct rtw89_acpi_rtag_result *res) 154 + { 155 + struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; 156 + acpi_handle root, handle; 157 + union acpi_object *obj; 158 + acpi_status status; 159 + u32 buf_len; 160 + int ret = 0; 161 + 162 + root = ACPI_HANDLE(rtwdev->dev); 163 + if (!root) 164 + return -EOPNOTSUPP; 165 + 166 + status = acpi_get_handle(root, (acpi_string)"RTAG", &handle); 167 + if (ACPI_FAILURE(status)) 168 + return -EIO; 169 + 170 + status = acpi_evaluate_object(handle, NULL, NULL, &buf); 171 + if (ACPI_FAILURE(status)) 172 + return -EIO; 173 + 174 + obj = buf.pointer; 175 + if (obj->type != ACPI_TYPE_BUFFER) { 176 + rtw89_debug(rtwdev, RTW89_DBG_ACPI, 177 + "acpi: expect buffer but type: %d\n", obj->type); 178 + ret = -EINVAL; 179 + goto out; 180 + } 181 + 182 + buf_len = obj->buffer.length; 183 + if (buf_len != sizeof(*res)) { 184 + rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n", 185 + __func__, buf_len); 186 + ret = -EINVAL; 187 + goto out; 188 + } 189 + 190 + *res = *(struct rtw89_acpi_rtag_result *)obj->buffer.pointer; 191 + 192 + rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "antenna_gain: ", res, sizeof(*res)); 193 + 194 + out: 195 + ACPI_FREE(obj); 196 + return ret; 197 + }
+9
drivers/net/wireless/realtek/rtw89/acpi.h
··· 63 63 } u; 64 64 }; 65 65 66 + struct rtw89_acpi_rtag_result { 67 + u8 tag[4]; 68 + u8 revision; 69 + __le32 domain; 70 + u8 ant_gain_table[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR]; 71 + } __packed; 72 + 66 73 int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev, 67 74 enum rtw89_acpi_dsm_func func, 68 75 struct rtw89_acpi_dsm_result *res); 76 + int rtw89_acpi_evaluate_rtag(struct rtw89_dev *rtwdev, 77 + struct rtw89_acpi_rtag_result *res); 69 78 70 79 #endif
+28 -4
drivers/net/wireless/realtek/rtw89/cam.c
··· 135 135 } 136 136 137 137 static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam, 138 - struct rtw89_sec_cam_entry *sec_cam, 139 - struct ieee80211_key_conf *key, 138 + const struct rtw89_sec_cam_entry *sec_cam, 139 + const struct ieee80211_key_conf *key, 140 140 u8 *key_idx) 141 141 { 142 142 u8 idx; ··· 246 246 static int __rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, 247 247 struct rtw89_vif_link *rtwvif_link, 248 248 struct rtw89_sta_link *rtwsta_link, 249 - struct ieee80211_key_conf *key, 250 - struct rtw89_sec_cam_entry *sec_cam) 249 + const struct ieee80211_key_conf *key, 250 + const struct rtw89_sec_cam_entry *sec_cam) 251 251 { 252 252 struct rtw89_addr_cam_entry *addr_cam; 253 253 u8 key_idx = 0; ··· 286 286 return 0; 287 287 } 288 288 289 + int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev, 290 + struct rtw89_vif_link *rtwvif_link, 291 + struct rtw89_sta_link *rtwsta_link, 292 + u8 sec_cam_idx) 293 + { 294 + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; 295 + const struct rtw89_sec_cam_entry *sec_cam; 296 + 297 + sec_cam = cam_info->sec_entries[sec_cam_idx]; 298 + if (!sec_cam) 299 + return -ENOENT; 300 + 301 + return __rtw89_cam_attach_sec_cam(rtwdev, rtwvif_link, rtwsta_link, 302 + sec_cam->key_conf, sec_cam); 303 + } 304 + 289 305 static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev, 290 306 struct ieee80211_vif *vif, 291 307 struct ieee80211_sta *sta, ··· 321 305 } 322 306 323 307 rtwvif = vif_to_rtwvif(vif); 308 + 309 + if (rtwsta) 310 + clear_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map); 324 311 325 312 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 326 313 rtwsta_link = rtwsta ? 
rtwsta->links[link_id] : NULL; ··· 388 369 return ret; 389 370 } 390 371 372 + set_bit(sec_cam->sec_cam_idx, rtwsta->pairwise_sec_cam_map); 373 + 391 374 return 0; 392 375 } 393 376 ··· 431 410 sec_cam->len = RTW89_SEC_CAM_LEN; 432 411 sec_cam->ext_key = ext_key; 433 412 memcpy(sec_cam->key, key->key, key->keylen); 413 + 414 + sec_cam->key_conf = key; 415 + 434 416 ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam); 435 417 if (ret) { 436 418 rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);
+5
drivers/net/wireless/realtek/rtw89/cam.h
··· 578 578 void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, 579 579 struct rtw89_vif_link *rtwvif_link); 580 580 void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev); 581 + int rtw89_cam_attach_link_sec_cam(struct rtw89_dev *rtwdev, 582 + struct rtw89_vif_link *rtwvif_link, 583 + struct rtw89_sta_link *rtwsta_link, 584 + u8 sec_cam_idx); 585 + 581 586 #endif
+140 -24
drivers/net/wireless/realtek/rtw89/core.c
··· 203 203 }, 204 204 }; 205 205 206 + #define RTW89_6GHZ_SPAN_HEAD 6145 207 + #define RTW89_6GHZ_SPAN_IDX(center_freq) \ 208 + ((((int)(center_freq) - RTW89_6GHZ_SPAN_HEAD) / 5) / 2) 209 + 210 + #define RTW89_DECL_6GHZ_SPAN(center_freq, subband_l, subband_h) \ 211 + [RTW89_6GHZ_SPAN_IDX(center_freq)] = { \ 212 + .sar_subband_low = RTW89_SAR_6GHZ_ ## subband_l, \ 213 + .sar_subband_high = RTW89_SAR_6GHZ_ ## subband_h, \ 214 + .ant_gain_subband_low = RTW89_ANT_GAIN_6GHZ_ ## subband_l, \ 215 + .ant_gain_subband_high = RTW89_ANT_GAIN_6GHZ_ ## subband_h, \ 216 + } 217 + 218 + /* Since 6GHz subbands are not edge aligned, some cases span two subbands. 219 + * In the following, we describe each of them with rtw89_6ghz_span. 220 + */ 221 + static const struct rtw89_6ghz_span rtw89_overlapping_6ghz[] = { 222 + RTW89_DECL_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H), 223 + RTW89_DECL_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H), 224 + RTW89_DECL_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H), 225 + RTW89_DECL_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L), 226 + RTW89_DECL_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L), 227 + RTW89_DECL_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L), 228 + RTW89_DECL_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H), 229 + RTW89_DECL_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H), 230 + RTW89_DECL_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8), 231 + RTW89_DECL_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8), 232 + RTW89_DECL_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8), 233 + RTW89_DECL_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8), 234 + }; 235 + 236 + const struct rtw89_6ghz_span * 237 + rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq) 238 + { 239 + int idx; 240 + 241 + if (center_freq >= RTW89_6GHZ_SPAN_HEAD) { 242 + idx = RTW89_6GHZ_SPAN_IDX(center_freq); 243 + /* To decrease size of rtw89_overlapping_6ghz[], 244 + * RTW89_6GHZ_SPAN_IDX() truncates the leading NULLs 245 + * to make first span as index 0 of the table. 
So, if center 246 + * frequency is less than the first one, it will get netative. 247 + */ 248 + if (idx >= 0 && idx < ARRAY_SIZE(rtw89_overlapping_6ghz)) 249 + return &rtw89_overlapping_6ghz[idx]; 250 + } 251 + 252 + return NULL; 253 + } 254 + 206 255 bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate) 207 256 { 208 257 struct ieee80211_rate rate; ··· 2189 2140 2190 2141 if (phy_ppdu) 2191 2142 ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg); 2143 + 2144 + pkt_stat->beacon_rate = desc_info->data_rate; 2192 2145 } 2193 2146 2194 2147 if (!ether_addr_equal(bss_conf->addr, hdr->addr1)) ··· 2368 2317 } 2369 2318 } 2370 2319 2320 + static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status) 2321 + { 2322 + if (!rx_status->signal) 2323 + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2324 + } 2325 + 2371 2326 static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, 2372 2327 struct rtw89_rx_phy_ppdu *phy_ppdu, 2373 2328 struct rtw89_rx_desc_info *desc_info, ··· 2390 2333 rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); 2391 2334 rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu); 2392 2335 rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status); 2336 + rtw89_core_validate_rx_signal(rx_status); 2337 + 2393 2338 /* In low power mode, it does RX in thread context. 
*/ 2394 2339 local_bh_disable(); 2395 2340 ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi); ··· 2527 2468 struct rtw89_rx_desc_info *desc_info, 2528 2469 u8 *data, u32 data_offset) 2529 2470 { 2471 + struct rtw89_rxdesc_phy_rpt_v2 *rxd_rpt; 2530 2472 struct rtw89_rxdesc_short_v2 *rxd_s; 2531 2473 struct rtw89_rxdesc_long_v2 *rxd_l; 2532 2474 u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len; ··· 2574 2514 else 2575 2515 desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2); 2576 2516 desc_info->ready = true; 2517 + 2518 + if (phy_rtp_len == sizeof(*rxd_rpt)) { 2519 + rxd_rpt = (struct rtw89_rxdesc_phy_rpt_v2 *)(data + data_offset + 2520 + desc_info->rxd_len); 2521 + desc_info->rssi = le32_get_bits(rxd_rpt->dword0, BE_RXD_PHY_RSSI); 2522 + } 2577 2523 2578 2524 if (!desc_info->long_rxdesc) 2579 2525 return; ··· 2723 2657 rx_status->flag |= RX_FLAG_MACTIME_START; 2724 2658 rx_status->mactime = desc_info->free_run_cnt; 2725 2659 2660 + rtw89_chip_phy_rpt_to_rssi(rtwdev, desc_info, rx_status); 2726 2661 rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status); 2727 2662 } 2728 2663 2729 2664 static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev) 2730 2665 { 2731 2666 const struct rtw89_chip_info *chip = rtwdev->chip; 2732 - 2733 - /* FIXME: Fix __rtw89_enter_ps_mode() to consider MLO cases. 
*/ 2734 - if (rtwdev->support_mlo) 2735 - return RTW89_PS_MODE_NONE; 2736 2667 2737 2668 if (rtw89_disable_ps_mode || !chip->ps_mode_supported || 2738 2669 RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw)) ··· 2763 2700 } 2764 2701 } 2765 2702 2703 + static 2704 + void rtw89_core_rx_pkt_hdl(struct rtw89_dev *rtwdev, const struct sk_buff *skb, 2705 + const struct rtw89_rx_desc_info *desc) 2706 + { 2707 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2708 + struct rtw89_sta_link *rtwsta_link; 2709 + struct ieee80211_sta *sta; 2710 + struct rtw89_sta *rtwsta; 2711 + u8 macid = desc->mac_id; 2712 + 2713 + if (!refcount_read(&rtwdev->refcount_ap_info)) 2714 + return; 2715 + 2716 + rcu_read_lock(); 2717 + 2718 + rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid); 2719 + if (!rtwsta_link) 2720 + goto out; 2721 + 2722 + rtwsta = rtwsta_link->rtwsta; 2723 + if (!test_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags)) 2724 + goto out; 2725 + 2726 + sta = rtwsta_to_sta(rtwsta); 2727 + if (ieee80211_is_pspoll(hdr->frame_control)) 2728 + ieee80211_sta_pspoll(sta); 2729 + else if (ieee80211_has_pm(hdr->frame_control) && 2730 + (ieee80211_is_data_qos(hdr->frame_control) || 2731 + ieee80211_is_qos_nullfunc(hdr->frame_control))) 2732 + ieee80211_sta_uapsd_trigger(sta, ieee80211_get_tid(hdr)); 2733 + 2734 + out: 2735 + rcu_read_unlock(); 2736 + } 2737 + 2766 2738 void rtw89_core_rx(struct rtw89_dev *rtwdev, 2767 2739 struct rtw89_rx_desc_info *desc_info, 2768 2740 struct sk_buff *skb) ··· 2820 2722 rx_status = IEEE80211_SKB_RXCB(skb); 2821 2723 memset(rx_status, 0, sizeof(*rx_status)); 2822 2724 rtw89_core_update_rx_status(rtwdev, desc_info, rx_status); 2725 + rtw89_core_rx_pkt_hdl(rtwdev, skb, desc_info); 2823 2726 if (desc_info->long_rxdesc && 2824 2727 BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) 2825 2728 skb_queue_tail(&ppdu_sts->rx_queue[band], skb); ··· 3230 3131 struct rtw89_vif_link *rtwvif_link, bool qos, bool ps) 3231 3132 { 3232 3133 struct 
ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3134 + int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1; 3233 3135 struct ieee80211_sta *sta; 3234 3136 struct ieee80211_hdr *hdr; 3235 3137 struct sk_buff *skb; ··· 3246 3146 goto out; 3247 3147 } 3248 3148 3249 - skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos); 3149 + skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, qos); 3250 3150 if (!skb) { 3251 3151 ret = -ENOMEM; 3252 3152 goto out; ··· 3467 3367 return tfc_changed; 3468 3368 } 3469 3369 3470 - static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, 3471 - struct rtw89_vif_link *rtwvif_link) 3472 - { 3473 - if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION && 3474 - rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) 3475 - return; 3476 - 3477 - rtw89_enter_lps(rtwdev, rtwvif_link, true); 3478 - } 3479 - 3480 3370 static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev) 3481 3371 { 3482 - struct rtw89_vif_link *rtwvif_link; 3372 + struct ieee80211_vif *vif; 3483 3373 struct rtw89_vif *rtwvif; 3484 - unsigned int link_id; 3485 3374 3486 3375 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 3487 3376 if (rtwvif->tdls_peer) ··· 3482 3393 rtwvif->stats.rx_tfc_lv != RTW89_TFC_IDLE) 3483 3394 continue; 3484 3395 3485 - rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 3486 - rtw89_vif_enter_lps(rtwdev, rtwvif_link); 3396 + vif = rtwvif_to_vif(rtwvif); 3397 + 3398 + if (!(vif->type == NL80211_IFTYPE_STATION || 3399 + vif->type == NL80211_IFTYPE_P2P_CLIENT)) 3400 + continue; 3401 + 3402 + rtw89_enter_lps(rtwdev, rtwvif, true); 3487 3403 } 3488 3404 } 3489 3405 ··· 3793 3699 { 3794 3700 const struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3795 3701 3702 + rtw89_assoc_link_clr(rtwsta_link); 3703 + 3796 3704 if (vif->type == NL80211_IFTYPE_STATION) 3797 3705 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false); 3798 3706 ··· 3844 3748 return ret; 3845 3749 } 3846 3750 3751 + static bool 
rtw89_sta_link_can_er(struct rtw89_dev *rtwdev, 3752 + struct ieee80211_bss_conf *bss_conf, 3753 + struct ieee80211_link_sta *link_sta) 3754 + { 3755 + if (!bss_conf->he_support || 3756 + bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE) 3757 + return false; 3758 + 3759 + if (rtwdev->chip->chip_id == RTL8852C && 3760 + rtw89_sta_link_has_su_mu_4xhe08(link_sta) && 3761 + !rtw89_sta_link_has_er_su_4xhe08(link_sta)) 3762 + return false; 3763 + 3764 + return true; 3765 + } 3766 + 3847 3767 int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev, 3848 3768 struct rtw89_vif_link *rtwvif_link, 3849 3769 struct rtw89_sta_link *rtwsta_link) ··· 3870 3758 rtwsta_link); 3871 3759 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3872 3760 rtwvif_link->chanctx_idx); 3761 + struct ieee80211_link_sta *link_sta; 3873 3762 int ret; 3874 3763 3875 3764 if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { 3876 3765 if (sta->tdls) { 3877 - struct ieee80211_link_sta *link_sta; 3878 - 3879 3766 rcu_read_lock(); 3880 3767 3881 3768 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); ··· 3925 3814 rcu_read_lock(); 3926 3815 3927 3816 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3928 - if (bss_conf->he_support && 3929 - !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE)) 3930 - rtwsta_link->er_cap = true; 3817 + link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3818 + rtwsta_link->er_cap = rtw89_sta_link_can_er(rtwdev, bss_conf, link_sta); 3931 3819 3932 3820 rcu_read_unlock(); 3933 3821 ··· 3944 3834 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); 3945 3835 } 3946 3836 3837 + rtw89_assoc_link_set(rtwsta_link); 3947 3838 return ret; 3948 3839 } 3949 3840 ··· 4544 4433 rtw89_phy_dm_init(rtwdev); 4545 4434 4546 4435 rtw89_mac_cfg_ppdu_status_bands(rtwdev, true); 4436 + rtw89_mac_cfg_phy_rpt_bands(rtwdev, true); 4547 4437 rtw89_mac_update_rts_threshold(rtwdev); 4548 4438 4549 4439 rtw89_tas_reset(rtwdev); ··· 
4867 4755 rtw89_ser_init(rtwdev); 4868 4756 rtw89_entity_init(rtwdev); 4869 4757 rtw89_tas_init(rtwdev); 4758 + rtw89_phy_ant_gain_init(rtwdev); 4870 4759 4871 4760 return 0; 4872 4761 } ··· 5212 5099 5213 5100 if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 5214 5101 ieee80211_hw_set(hw, CONNECTION_MONITOR); 5102 + 5103 + if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) 5104 + ieee80211_hw_set(hw, AP_LINK_PS); 5215 5105 5216 5106 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 5217 5107 BIT(NL80211_IFTYPE_AP) |
+145
drivers/net/wireless/realtek/rtw89/core.h
··· 830 830 }; 831 831 832 832 #define __RTW89_MLD_MAX_LINK_NUM 2 833 + #define RTW89_MLD_NON_STA_LINK_NUM 1 833 834 834 835 enum rtw89_chanctx_idx { 835 836 RTW89_CHANCTX_0 = 0, ··· 1084 1083 u16 offset; 1085 1084 u16 rxd_len; 1086 1085 bool ready; 1086 + u16 rssi; 1087 1087 }; 1088 1088 1089 1089 struct rtw89_rxdesc_short { ··· 1125 1123 __le32 dword7; 1126 1124 __le32 dword8; 1127 1125 __le32 dword9; 1126 + } __packed; 1127 + 1128 + struct rtw89_rxdesc_phy_rpt_v2 { 1129 + __le32 dword0; 1130 + __le32 dword1; 1128 1131 } __packed; 1129 1132 1130 1133 struct rtw89_tx_desc_info { ··· 3365 3358 u8 spp_mode : 1; 3366 3359 /* 256 bits */ 3367 3360 u8 key[32]; 3361 + 3362 + struct ieee80211_key_conf *key_conf; 3368 3363 }; 3369 3364 3370 3365 struct rtw89_sta_link { ··· 3630 3621 struct ieee80211_rx_status *status); 3631 3622 void (*convert_rpl_to_rssi)(struct rtw89_dev *rtwdev, 3632 3623 struct rtw89_rx_phy_ppdu *phy_ppdu); 3624 + void (*phy_rpt_to_rssi)(struct rtw89_dev *rtwdev, 3625 + struct rtw89_rx_desc_info *desc_info, 3626 + struct ieee80211_rx_status *rx_status); 3633 3627 void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, 3634 3628 enum rtw89_phy_idx phy_idx); 3635 3629 void (*cfg_txrx_path)(struct rtw89_dev *rtwdev); ··· 4267 4255 u16 support_bandwidths; 4268 4256 bool support_unii4; 4269 4257 bool support_rnr; 4258 + bool support_ant_gain; 4270 4259 bool ul_tb_waveform_ctrl; 4271 4260 bool ul_tb_pwr_diff; 4272 4261 bool hw_sec_hdr; ··· 4309 4296 const struct rtw89_rfe_parms *dflt_parms; 4310 4297 const struct rtw89_chanctx_listener *chanctx_listener; 4311 4298 4299 + u8 txpwr_factor_bb; 4312 4300 u8 txpwr_factor_rf; 4313 4301 u8 txpwr_factor_mac; 4314 4302 ··· 4462 4448 RTW89_FW_FEATURE_SCAN_OFFLOAD_BE_V0, 4463 4449 RTW89_FW_FEATURE_WOW_REASON_V1, 4464 4450 RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V0, 4451 + RTW89_FW_FEATURE_RFK_PRE_NOTIFY_V1, 4465 4452 RTW89_FW_FEATURE_RFK_RXDCK_V0, 4466 4453 RTW89_FW_FEATURE_NO_WOW_CPU_IO_RX, 4454 + 
RTW89_FW_FEATURE_NOTIFY_AP_INFO, 4455 + RTW89_FW_FEATURE_CH_INFO_BE_V0, 4456 + RTW89_FW_FEATURE_LPS_CH_INFO, 4467 4457 }; 4468 4458 4469 4459 struct rtw89_fw_suit { ··· 4614 4596 struct rtw89_sar_cfg_common cfg_common; 4615 4597 }; 4616 4598 }; 4599 + 4600 + enum rtw89_ant_gain_subband { 4601 + RTW89_ANT_GAIN_2GHZ_SUBBAND, 4602 + RTW89_ANT_GAIN_5GHZ_SUBBAND_1, /* U-NII-1 */ 4603 + RTW89_ANT_GAIN_5GHZ_SUBBAND_2, /* U-NII-2 */ 4604 + RTW89_ANT_GAIN_5GHZ_SUBBAND_2E, /* U-NII-2-Extended */ 4605 + RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */ 4606 + RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */ 4607 + RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */ 4608 + RTW89_ANT_GAIN_6GHZ_SUBBAND_6, /* U-NII-6 */ 4609 + RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L, /* U-NII-7 lower part */ 4610 + RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H, /* U-NII-7 higher part */ 4611 + RTW89_ANT_GAIN_6GHZ_SUBBAND_8, /* U-NII-8 */ 4612 + 4613 + RTW89_ANT_GAIN_SUBBAND_NR, 4614 + }; 4615 + 4616 + enum rtw89_ant_gain_domain_type { 4617 + RTW89_ANT_GAIN_ETSI = 0, 4618 + 4619 + RTW89_ANT_GAIN_DOMAIN_NUM, 4620 + }; 4621 + 4622 + #define RTW89_ANT_GAIN_CHAIN_NUM 2 4623 + struct rtw89_ant_gain_info { 4624 + s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR]; 4625 + u32 regd_enabled; 4626 + }; 4627 + 4628 + struct rtw89_6ghz_span { 4629 + enum rtw89_sar_subband sar_subband_low; 4630 + enum rtw89_sar_subband sar_subband_high; 4631 + enum rtw89_ant_gain_subband ant_gain_subband_low; 4632 + enum rtw89_ant_gain_subband ant_gain_subband_high; 4633 + }; 4634 + 4635 + #define RTW89_SAR_SPAN_VALID(span) ((span)->sar_subband_high) 4636 + #define RTW89_ANT_GAIN_SPAN_VALID(span) ((span)->ant_gain_subband_high) 4617 4637 4618 4638 enum rtw89_tas_state { 4619 4639 RTW89_TAS_STATE_DPR_OFF, ··· 4837 4781 4838 4782 struct rtw89_pkt_stat { 4839 4783 u16 beacon_nr; 4784 + u8 beacon_rate; 4840 4785 u32 rx_rate_cnt[RTW89_HW_RATE_NR]; 4841 4786 }; 4842 4787 ··· 5613 5556 struct rtw89_rfe_data *rfe_data; 
5614 5557 enum rtw89_custid custid; 5615 5558 5559 + struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM]; 5560 + refcount_t refcount_ap_info; 5561 + 5616 5562 /* ensures exclusive access from mac80211 callbacks */ 5617 5563 struct mutex mutex; 5618 5564 struct list_head rtwvifs_list; ··· 5696 5636 struct rtw89_regulatory_info regulatory; 5697 5637 struct rtw89_sar_info sar; 5698 5638 struct rtw89_tas_info tas; 5639 + struct rtw89_ant_gain_info ant_gain; 5699 5640 5700 5641 struct rtw89_btc btc; 5701 5642 enum rtw89_ps_mode ps_mode; ··· 5715 5654 u8 priv[] __aligned(sizeof(void *)); 5716 5655 }; 5717 5656 5657 + struct rtw89_link_conf_container { 5658 + struct ieee80211_bss_conf *link_conf[IEEE80211_MLD_MAX_NUM_LINKS]; 5659 + }; 5660 + 5661 + #define RTW89_VIF_IDLE_LINK_ID 0 5662 + 5718 5663 struct rtw89_vif { 5719 5664 struct rtw89_dev *rtwdev; 5720 5665 struct list_head list; 5721 5666 struct list_head mgnt_entry; 5667 + struct rtw89_link_conf_container __rcu *snap_link_confs; 5722 5668 5723 5669 u8 mac_addr[ETH_ALEN]; 5724 5670 __be32 ip_addr; ··· 5757 5689 for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) \ 5758 5690 if (rtw89_vif_assign_link_is_valid(&(rtwvif_link), rtwvif, link_id)) 5759 5691 5692 + enum rtw89_sta_flags { 5693 + RTW89_REMOTE_STA_IN_PS, 5694 + 5695 + NUM_OF_RTW89_STA_FLAGS, 5696 + }; 5697 + 5760 5698 struct rtw89_sta { 5761 5699 struct rtw89_dev *rtwdev; 5762 5700 struct rtw89_vif *rtwvif; 5701 + 5702 + DECLARE_BITMAP(flags, NUM_OF_RTW89_STA_FLAGS); 5763 5703 5764 5704 bool disassoc; 5765 5705 ··· 5775 5699 5776 5700 struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS]; 5777 5701 DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS); 5702 + 5703 + DECLARE_BITMAP(pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM); 5778 5704 5779 5705 u8 links_inst_valid_num; 5780 5706 DECLARE_BITMAP(links_inst_map, __RTW89_MLD_MAX_LINK_NUM); ··· 5846 5768 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 5847 5769 5848 5770 return 
rtwsta_link - rtwsta->links_inst; 5771 + } 5772 + 5773 + static inline void rtw89_assoc_link_set(struct rtw89_sta_link *rtwsta_link) 5774 + { 5775 + struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 5776 + struct rtw89_dev *rtwdev = rtwsta->rtwdev; 5777 + 5778 + rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id], 5779 + rtwsta_link); 5780 + } 5781 + 5782 + static inline void rtw89_assoc_link_clr(struct rtw89_sta_link *rtwsta_link) 5783 + { 5784 + struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 5785 + struct rtw89_dev *rtwdev = rtwsta->rtwdev; 5786 + 5787 + rcu_assign_pointer(rtwdev->assoc_link_on_macid[rtwsta_link->mac_id], 5788 + NULL); 5789 + synchronize_rcu(); 5790 + } 5791 + 5792 + static inline struct rtw89_sta_link * 5793 + rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid) 5794 + { 5795 + return rcu_dereference(rtwdev->assoc_link_on_macid[macid]); 5849 5796 } 5850 5797 5851 5798 static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev, ··· 6297 6194 __rtw89_vif_rcu_dereference_link(struct rtw89_vif_link *rtwvif_link, bool *nolink) 6298 6195 { 6299 6196 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 6197 + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6198 + struct rtw89_link_conf_container *snap; 6300 6199 struct ieee80211_bss_conf *bss_conf; 6301 6200 6201 + snap = rcu_dereference(rtwvif->snap_link_confs); 6202 + if (snap) { 6203 + bss_conf = snap->link_conf[rtwvif_link->link_id]; 6204 + goto out; 6205 + } 6206 + 6302 6207 bss_conf = rcu_dereference(vif->link_conf[rtwvif_link->link_id]); 6208 + 6209 + out: 6303 6210 if (unlikely(!bss_conf)) { 6304 6211 *nolink = true; 6305 6212 return &vif->bss_conf; ··· 6718 6605 chip->ops->convert_rpl_to_rssi(rtwdev, phy_ppdu); 6719 6606 } 6720 6607 6608 + static inline void rtw89_chip_phy_rpt_to_rssi(struct rtw89_dev *rtwdev, 6609 + struct rtw89_rx_desc_info *desc_info, 6610 + struct ieee80211_rx_status *rx_status) 6611 + { 6612 + const struct rtw89_chip_info *chip = 
rtwdev->chip; 6613 + 6614 + if (chip->ops->phy_rpt_to_rssi) 6615 + chip->ops->phy_rpt_to_rssi(rtwdev, desc_info, rx_status); 6616 + } 6617 + 6721 6618 static inline void rtw89_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, 6722 6619 enum rtw89_phy_idx phy_idx) 6723 6620 { ··· 6873 6750 (link_sta->he_cap.he_cap_elem.phy_cap_info[4] & 6874 6751 IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) 6875 6752 return true; 6753 + return false; 6754 + } 6755 + 6756 + static inline 6757 + bool rtw89_sta_link_has_su_mu_4xhe08(struct ieee80211_link_sta *link_sta) 6758 + { 6759 + if (link_sta->he_cap.he_cap_elem.phy_cap_info[7] & 6760 + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI) 6761 + return true; 6762 + 6763 + return false; 6764 + } 6765 + 6766 + static inline 6767 + bool rtw89_sta_link_has_er_su_4xhe08(struct ieee80211_link_sta *link_sta) 6768 + { 6769 + if (link_sta->he_cap.he_cap_elem.phy_cap_info[8] & 6770 + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI) 6771 + return true; 6772 + 6876 6773 return false; 6877 6774 } 6878 6775 ··· 7051 6908 unsigned int link_id); 7052 6909 void rtw89_sta_unset_link(struct rtw89_sta *rtwsta, unsigned int link_id); 7053 6910 void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev); 6911 + const struct rtw89_6ghz_span * 6912 + rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq); 7054 6913 void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef); 7055 6914 void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, 7056 6915 struct rtw89_chan *chan);
+4
drivers/net/wireless/realtek/rtw89/debug.c
··· 9 9 #include "fw.h" 10 10 #include "mac.h" 11 11 #include "pci.h" 12 + #include "phy.h" 12 13 #include "ps.h" 13 14 #include "reg.h" 14 15 #include "sar.h" ··· 882 881 883 882 seq_puts(m, "[TAS]\n"); 884 883 rtw89_print_tas(m, rtwdev); 884 + 885 + seq_puts(m, "[DAG]\n"); 886 + rtw89_print_ant_gain(m, rtwdev, chan); 885 887 886 888 tbl = dbgfs_txpwr_tables[chip_gen]; 887 889 if (!tbl) {
+235 -31
drivers/net/wireless/realtek/rtw89/fw.c
··· 709 709 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 710 710 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 711 711 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 712 + __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 712 713 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 713 714 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 714 715 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 715 716 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 716 717 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 718 + __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 717 719 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 718 720 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 719 721 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), ··· 729 727 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 730 728 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 731 729 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 730 + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 732 731 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 732 + __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 733 + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 734 + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 733 735 }; 734 736 735 737 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, ··· 2420 2414 u8 *id) 2421 2415 { 2422 2416 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2417 + int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2423 2418 struct rtw89_pktofld_info *info; 2424 2419 struct sk_buff *skb; 2425 2420 int ret; ··· 2437 2430 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2438 2431 break; 2439 2432 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2440 - skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2433 + skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2441 2434 break; 2442 2435 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2443 - skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2436 + skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2444 2437 break; 2445 2438 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2446 2439 skb = rtw89_eapol_get(rtwdev, rtwvif_link); ··· 2596 2589 return ret; 2597 2590 } 2598 2591 2599 - int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 2592 + int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2600 2593 { 2601 - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2602 - rtwvif_link->chanctx_idx); 2603 2594 const struct rtw89_chip_info *chip = rtwdev->chip; 2595 + const struct rtw89_chan *chan; 2596 + struct rtw89_vif_link *rtwvif_link; 2604 2597 struct rtw89_h2c_lps_ch_info *h2c; 2605 2598 u32 len = sizeof(*h2c); 2599 + unsigned int link_id; 2606 2600 struct sk_buff *skb; 2601 + bool no_chan = true; 2602 + u8 phy_idx; 2607 2603 u32 done; 2608 2604 int ret; 2609 2605 ··· 2621 2611 skb_put(skb, len); 2622 2612 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2623 2613 2624 - h2c->info[0].central_ch = chan->channel; 2625 - h2c->info[0].pri_ch = chan->primary_channel; 2626 - h2c->info[0].band = chan->band_type; 2627 - h2c->info[0].bw = chan->band_width; 2628 - h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2614 + rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2615 + phy_idx = rtwvif_link->phy_idx; 2616 + if (phy_idx >= ARRAY_SIZE(h2c->info)) 2617 + continue; 2618 + 2619 + chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 
2620 + no_chan = false; 2621 + 2622 + h2c->info[phy_idx].central_ch = chan->channel; 2623 + h2c->info[phy_idx].pri_ch = chan->primary_channel; 2624 + h2c->info[phy_idx].band = chan->band_type; 2625 + h2c->info[phy_idx].bw = chan->band_width; 2626 + } 2627 + 2628 + if (no_chan) { 2629 + rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2630 + ret = -ENOENT; 2631 + goto fail; 2632 + } 2633 + 2634 + h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2629 2635 2630 2636 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2631 2637 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, ··· 2658 2632 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2659 2633 if (ret) 2660 2634 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2635 + 2636 + return 0; 2637 + fail: 2638 + dev_kfree_skb_any(skb); 2639 + 2640 + return ret; 2641 + } 2642 + 2643 + int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2644 + struct rtw89_vif *rtwvif) 2645 + { 2646 + const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2647 + struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2648 + const struct rtw89_chip_info *chip = rtwdev->chip; 2649 + struct rtw89_h2c_lps_ml_cmn_info *h2c; 2650 + struct rtw89_vif_link *rtwvif_link; 2651 + const struct rtw89_chan *chan; 2652 + u8 bw_idx = RTW89_BB_BW_20_40; 2653 + u32 len = sizeof(*h2c); 2654 + unsigned int link_id; 2655 + struct sk_buff *skb; 2656 + u8 gain_band; 2657 + u32 done; 2658 + u8 path; 2659 + int ret; 2660 + int i; 2661 + 2662 + if (chip->chip_gen != RTW89_CHIP_BE) 2663 + return 0; 2664 + 2665 + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2666 + if (!skb) { 2667 + rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2668 + return -ENOMEM; 2669 + } 2670 + skb_put(skb, len); 2671 + h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2672 + 2673 + h2c->fmt_id = 0x1; 2674 + 2675 + h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2676 + 2677 + rtw89_vif_for_each_link(rtwvif, rtwvif_link, 
link_id) { 2678 + path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2679 + chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2680 + gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2681 + 2682 + h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2683 + h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2684 + h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2685 + h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2686 + if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2687 + h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2688 + else 2689 + h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2690 + 2691 + /* Fill BW20 RX gain table for beacon mode */ 2692 + for (i = 0; i < TIA_GAIN_NUM; i++) { 2693 + h2c->tia_gain[rtwvif_link->phy_idx][i] = 2694 + cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2695 + } 2696 + memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2697 + gain->lna_gain[gain_band][bw_idx][path], 2698 + LNA_GAIN_NUM); 2699 + } 2700 + 2701 + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2702 + H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2703 + H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2704 + 2705 + rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2706 + ret = rtw89_h2c_tx(rtwdev, skb, false); 2707 + if (ret) { 2708 + rtw89_err(rtwdev, "failed to send h2c\n"); 2709 + goto fail; 2710 + } 2711 + 2712 + ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2713 + true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2714 + if (ret) 2715 + rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2661 2716 2662 2717 return 0; 2663 2718 fail: ··· 5061 4954 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5062 4955 struct rtw89_h2c_chinfo_elem_be *elem; 5063 4956 struct rtw89_mac_chinfo_be *ch_info; 5064 - struct rtw89_h2c_chinfo *h2c; 4957 + struct rtw89_h2c_chinfo_be *h2c; 5065 4958 struct sk_buff *skb; 5066 4959 unsigned int cond; 4960 + u8 ver = U8_MAX; 5067 4961 int 
skb_len; 5068 4962 int ret; 5069 4963 5070 - static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4964 + static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5071 4965 5072 4966 skb_len = struct_size(h2c, elem, ch_num); 5073 4967 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); ··· 5077 4969 return -ENOMEM; 5078 4970 } 5079 4971 4972 + if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 4973 + ver = 0; 4974 + 5080 4975 skb_put(skb, sizeof(*h2c)); 5081 - h2c = (struct rtw89_h2c_chinfo *)skb->data; 4976 + h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5082 4977 5083 4978 h2c->ch_num = ch_num; 5084 4979 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ ··· 5091 4980 list_for_each_entry(ch_info, chan_list, list) { 5092 4981 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5093 4982 5094 - elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 5095 - le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4983 + elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5096 4984 le32_encode_bits(ch_info->central_ch, 5097 4985 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5098 4986 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); ··· 5138 5028 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5139 5029 le32_encode_bits(ch_info->fw_probe0_bssids, 5140 5030 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5031 + if (ver == 0) 5032 + elem->w0 |= 5033 + le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5034 + else 5035 + elem->w7 = le32_encode_bits(ch_info->period, 5036 + RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5141 5037 } 5142 5038 5143 5039 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, ··· 5287 5171 u8 probe_id[NUM_NL80211_BANDS]; 5288 5172 u8 cfg_len = sizeof(*h2c); 5289 5173 unsigned int cond; 5174 + u8 ver = U8_MAX; 5290 5175 void *ptr; 5291 5176 int ret; 5292 5177 u32 len; ··· 5307 5190 ptr = skb->data; 5308 5191 5309 5192 memset(probe_id, 
RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5193 + 5194 + if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5195 + ver = 0; 5310 5196 5311 5197 if (!wowlan) { 5312 5198 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { ··· 5406 5286 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5407 5287 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5408 5288 5409 - opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME, 5410 - RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 5411 - le32_encode_bits(op->band_type, 5289 + opch->w1 = le32_encode_bits(op->band_type, 5412 5290 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5413 5291 le32_encode_bits(op->band_width, 5414 5292 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | ··· 5432 5314 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5433 5315 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5434 5316 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5317 + 5318 + if (ver == 0) 5319 + opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5320 + RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5321 + else 5322 + opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5323 + RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5435 5324 ptr += sizeof(*opch); 5436 5325 } 5437 5326 ··· 5541 5416 enum rtw89_phy_idx phy_idx) 5542 5417 { 5543 5418 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5419 + struct rtw89_fw_h2c_rfk_pre_info_common *common; 5544 5420 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5421 + struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 5545 5422 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5546 5423 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5547 5424 u32 len = sizeof(*h2c); ··· 5553 5426 u32 val32; 5554 5427 int ret; 5555 5428 5556 - if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5429 + if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 5430 + len = sizeof(*h2c_v1); 5431 + ver = 1; 5432 + } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5557 5433 len = sizeof(*h2c_v0); 5558 5434 ver = 0; 5559 5435 } ··· 5568 5438 } 5569 5439 skb_put(skb, len); 5570 5440 
h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5441 + common = &h2c->base_v1.common; 5571 5442 5572 - h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5443 + common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5573 5444 5574 5445 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5575 5446 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5576 5447 5577 5448 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5578 5449 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5579 - h2c->common.dbcc.ch[path][tbl] = 5450 + common->dbcc.ch[path][tbl] = 5580 5451 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5581 - h2c->common.dbcc.band[path][tbl] = 5452 + common->dbcc.band[path][tbl] = 5582 5453 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5583 5454 } 5584 5455 } ··· 5587 5456 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5588 5457 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5589 5458 5590 - h2c->common.tbl.cur_ch[path] = 5459 + common->tbl.cur_ch[path] = 5591 5460 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5592 - h2c->common.tbl.cur_band[path] = 5461 + common->tbl.cur_band[path] = 5593 5462 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5463 + 5464 + if (ver <= 1) 5465 + continue; 5466 + 5467 + h2c->cur_bandwidth[path] = 5468 + cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5594 5469 } 5595 5470 5596 - h2c->common.phy_idx = cpu_to_le32(phy_idx); 5471 + common->phy_idx = cpu_to_le32(phy_idx); 5597 5472 5598 5473 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5599 5474 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; ··· 5625 5488 goto done; 5626 5489 } 5627 5490 5628 - if (rtw89_is_mlo_1_1(rtwdev)) 5629 - h2c->mlo_1_1 = cpu_to_le32(1); 5491 + if (rtw89_is_mlo_1_1(rtwdev)) { 5492 + h2c_v1 = &h2c->base_v1; 5493 + h2c_v1->mlo_1_1 = cpu_to_le32(1); 5494 + } 5630 5495 done: 5631 5496 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5632 5497 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, ··· 6635 
6496 6636 6497 INIT_LIST_HEAD(&chan_list); 6637 6498 for (idx = 0, list_len = 0; 6638 - idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6499 + idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6639 6500 idx++, list_len++) { 6640 6501 channel = nd_config->channels[idx]; 6641 6502 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); ··· 6686 6547 6687 6548 INIT_LIST_HEAD(&chan_list); 6688 6549 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6689 - idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6550 + idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6690 6551 idx++, list_len++) { 6691 6552 channel = req->channels[idx]; 6692 6553 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); ··· 6763 6624 INIT_LIST_HEAD(&chan_list); 6764 6625 6765 6626 for (idx = 0, list_len = 0; 6766 - idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6627 + idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6767 6628 idx++, list_len++) { 6768 6629 channel = nd_config->channels[idx]; 6769 6630 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); ··· 6818 6679 INIT_LIST_HEAD(&chan_list); 6819 6680 6820 6681 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6821 - idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6682 + idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6822 6683 idx++, list_len++) { 6823 6684 channel = req->channels[idx]; 6824 6685 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); ··· 8299 8160 dev_kfree_skb_any(skb); 8300 8161 return -EBUSY; 8301 8162 } 8163 + 8164 + return 0; 8165 + } 8166 + 8167 + static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8168 + { 8169 + struct rtw89_h2c_ap_info *h2c; 8170 + u32 len = sizeof(*h2c); 8171 + struct sk_buff *skb; 8172 + int ret; 8173 + 8174 + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8175 + if (!skb) { 8176 + rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8177 + return -ENOMEM; 8178 + } 
8179 + 8180 + skb_put(skb, len); 8181 + h2c = (struct rtw89_h2c_ap_info *)skb->data; 8182 + 8183 + h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8184 + 8185 + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8186 + H2C_CAT_MAC, 8187 + H2C_CL_AP, 8188 + H2C_FUNC_AP_INFO, 0, 0, 8189 + len); 8190 + 8191 + ret = rtw89_h2c_tx(rtwdev, skb, false); 8192 + if (ret) { 8193 + rtw89_err(rtwdev, "failed to send h2c\n"); 8194 + dev_kfree_skb_any(skb); 8195 + return -EBUSY; 8196 + } 8197 + 8198 + return 0; 8199 + } 8200 + 8201 + int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8202 + { 8203 + int ret; 8204 + 8205 + if (en) { 8206 + if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8207 + return 0; 8208 + } else { 8209 + if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8210 + return 0; 8211 + } 8212 + 8213 + ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8214 + if (ret) { 8215 + if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8216 + return ret; 8217 + 8218 + /* During recovery, neither driver nor stack has full error 8219 + * handling, so show a warning, but return 0 with refcount 8220 + * increased normally. It can avoid underflow when calling 8221 + * with @en == false later. 8222 + */ 8223 + rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8224 + } 8225 + 8226 + if (en) 8227 + refcount_set(&rtwdev->refcount_ap_info, 1); 8302 8228 8303 8229 return 0; 8304 8230 }
+64 -5
drivers/net/wireless/realtek/rtw89/fw.h
··· 310 310 #define RTW89_SCANOFLD_DEBUG_MASK 0x1F 311 311 #define RTW89_CHAN_INVALID 0xFF 312 312 #define RTW89_MAC_CHINFO_SIZE 28 313 + #define RTW89_MAC_CHINFO_SIZE_BE 32 313 314 #define RTW89_SCAN_LIST_GUARD 4 314 - #define RTW89_SCAN_LIST_LIMIT \ 315 - ((RTW89_H2C_MAX_SIZE / RTW89_MAC_CHINFO_SIZE) - RTW89_SCAN_LIST_GUARD) 315 + #define RTW89_SCAN_LIST_LIMIT(size) \ 316 + ((RTW89_H2C_MAX_SIZE / (size)) - RTW89_SCAN_LIST_GUARD) 317 + #define RTW89_SCAN_LIST_LIMIT_AX RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE) 318 + #define RTW89_SCAN_LIST_LIMIT_BE RTW89_SCAN_LIST_LIMIT(RTW89_MAC_CHINFO_SIZE_BE) 316 319 317 320 #define RTW89_BCN_LOSS_CNT 10 318 321 ··· 1783 1780 __le32 mlo_dbcc_mode_lps; 1784 1781 } __packed; 1785 1782 1783 + struct rtw89_h2c_lps_ml_cmn_info { 1784 + u8 fmt_id; 1785 + u8 rsvd0[3]; 1786 + __le32 mlo_dbcc_mode; 1787 + u8 central_ch[RTW89_PHY_MAX]; 1788 + u8 pri_ch[RTW89_PHY_MAX]; 1789 + u8 bw[RTW89_PHY_MAX]; 1790 + u8 band[RTW89_PHY_MAX]; 1791 + u8 bcn_rate_type[RTW89_PHY_MAX]; 1792 + u8 rsvd1[2]; 1793 + __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM]; 1794 + u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM]; 1795 + u8 rsvd2[2]; 1796 + } __packed; 1797 + 1786 1798 static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val) 1787 1799 { 1788 1800 le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0)); ··· 2665 2647 __le32 w4; 2666 2648 __le32 w5; 2667 2649 __le32 w6; 2650 + __le32 w7; 2668 2651 } __packed; 2669 2652 2670 2653 #define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0) ··· 2697 2678 #define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16) 2698 2679 #define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0) 2699 2680 #define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16) 2681 + #define RTW89_H2C_CHINFO_BE_W7_PERIOD_V1 GENMASK(15, 0) 2700 2682 2701 2683 struct rtw89_h2c_chinfo { 2702 2684 u8 ch_num; ··· 2705 2685 u8 arg; 2706 2686 u8 rsvd0; 2707 2687 struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num); 2688 + } 
__packed; 2689 + 2690 + struct rtw89_h2c_chinfo_be { 2691 + u8 ch_num; 2692 + u8 elem_size; 2693 + u8 arg; 2694 + u8 rsvd0; 2695 + struct rtw89_h2c_chinfo_elem_be elem[] __counted_by(ch_num); 2708 2696 } __packed; 2709 2697 2710 2698 #define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0) ··· 2761 2733 __le32 w1; 2762 2734 __le32 w2; 2763 2735 __le32 w3; 2736 + __le32 w4; 2764 2737 } __packed; 2765 2738 2766 2739 #define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0) ··· 2783 2754 #define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8) 2784 2755 #define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16) 2785 2756 #define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24) 2757 + #define RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1 GENMASK(15, 0) 2786 2758 2787 2759 struct rtw89_h2c_scanofld_be { 2788 2760 __le32 w0; ··· 3496 3466 __le32 w0; 3497 3467 } __packed; 3498 3468 3469 + struct rtw89_h2c_ap_info { 3470 + __le32 w0; 3471 + } __packed; 3472 + 3473 + #define RTW89_H2C_AP_INFO_W0_PWR_INT_EN BIT(0) 3474 + 3499 3475 #define RTW89_C2H_HEADER_LEN 8 3500 3476 3501 3477 struct rtw89_c2h_hdr { ··· 3626 3590 __le32 w5; 3627 3591 __le32 w6; 3628 3592 __le32 w7; 3593 + __le32 w8; 3629 3594 } __packed; 3630 3595 3631 3596 #define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0) ··· 3641 3604 #define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8) 3642 3605 #define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16) 3643 3606 #define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0) 3607 + #define RTW89_C2H_SCANOFLD_W8_PERIOD_V1 GENMASK(15, 0) 3608 + #define RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1 GENMASK(31, 16) 3644 3609 3645 3610 #define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \ 3646 3611 le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0)) ··· 3763 3724 } __packed; 3764 3725 3765 3726 #define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0) 3727 + 3728 + struct rtw89_c2h_pwr_int_notify { 3729 + struct rtw89_c2h_hdr hdr; 3730 + __le32 w2; 3731 + } __packed; 3732 + 3733 + #define 
RTW89_C2H_PWR_INT_NOTIFY_W2_MACID GENMASK(15, 0) 3734 + #define RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS BIT(16) 3766 3735 3767 3736 struct rtw89_h2c_tx_duty { 3768 3737 __le32 w0; ··· 4215 4168 #define RTW89_MRC_WAIT_COND_REQ_TSF \ 4216 4169 RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF) 4217 4170 4171 + /* CLASS 36 - AP */ 4172 + #define H2C_CL_AP 0x24 4173 + #define H2C_FUNC_AP_INFO 0x0 4174 + 4218 4175 #define H2C_CAT_OUTSRC 0x2 4219 4176 4220 4177 #define H2C_CL_OUTSRC_RA 0x1 ··· 4226 4175 4227 4176 #define H2C_CL_OUTSRC_DM 0x2 4228 4177 #define H2C_FUNC_FW_LPS_CH_INFO 0xb 4178 + #define H2C_FUNC_FW_LPS_ML_CMN_INFO 0xe 4229 4179 4230 4180 #define H2C_CL_OUTSRC_RF_REG_A 0x8 4231 4181 #define H2C_CL_OUTSRC_RF_REG_B 0x9 ··· 4293 4241 } __packed mlo; 4294 4242 } __packed; 4295 4243 4296 - struct rtw89_fw_h2c_rfk_pre_info { 4244 + struct rtw89_fw_h2c_rfk_pre_info_v1 { 4297 4245 struct rtw89_fw_h2c_rfk_pre_info_common common; 4298 4246 __le32 mlo_1_1; 4247 + } __packed; 4248 + 4249 + struct rtw89_fw_h2c_rfk_pre_info { 4250 + struct rtw89_fw_h2c_rfk_pre_info_v1 base_v1; 4251 + __le32 cur_bandwidth[NUM_OF_RTW89_FW_RFK_PATH]; 4299 4252 } __packed; 4300 4253 4301 4254 struct rtw89_h2c_rf_tssi { ··· 4659 4602 4660 4603 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 4661 4604 struct rtw89_lps_parm *lps_param); 4662 - int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, 4663 - struct rtw89_vif_link *rtwvif_link); 4605 + int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); 4606 + int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 4607 + struct rtw89_vif *rtwvif); 4664 4608 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4665 4609 bool enable); 4666 4610 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len); ··· 4755 4697 const struct rtw89_fw_mrc_sync_arg *arg); 4756 4698 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 4757 4699 const struct 
rtw89_fw_mrc_upd_duration_arg *arg); 4700 + int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en); 4758 4701 4759 4702 static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev) 4760 4703 {
+71 -8
drivers/net/wireless/realtek/rtw89/mac.c
··· 4788 4788 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 4789 4789 struct rtw89_vif *rtwvif; 4790 4790 struct rtw89_chan new; 4791 - u8 reason, status, tx_fail, band, actual_period, expect_period; 4792 4791 u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf; 4792 + u16 actual_period, expect_period; 4793 + u8 reason, status, tx_fail, band; 4793 4794 u8 mac_idx, sw_def, fw_def; 4795 + u8 ver = U8_MAX; 4794 4796 u16 chan; 4795 4797 int ret; 4796 4798 ··· 4800 4798 return; 4801 4799 4802 4800 rtwvif = rtwvif_link->rtwvif; 4801 + 4802 + if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 4803 + ver = 0; 4803 4804 4804 4805 tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL); 4805 4806 status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS); ··· 4816 4811 if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ))) 4817 4812 band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G; 4818 4813 4819 - rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4820 - "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n", 4821 - mac_idx, band, chan, reason, status, tx_fail, actual_period); 4822 - 4823 4814 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 4824 4815 sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF); 4825 - expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD); 4826 4816 fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF); 4827 4817 report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF); 4818 + if (ver == 0) { 4819 + expect_period = 4820 + le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD); 4821 + } else { 4822 + actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1); 4823 + expect_period = 4824 + le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1); 4825 + } 4828 4826 4829 4827 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4830 4828 "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n", 4831 4829 sw_def, fw_def, report_tsf, 
expect_period); 4832 4830 } 4831 + 4832 + rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4833 + "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n", 4834 + mac_idx, band, chan, reason, status, tx_fail, actual_period); 4833 4835 4834 4836 switch (reason) { 4835 4837 case RTW89_SCAN_LEAVE_OP_NOTIFY: ··· 5376 5364 rtw89_complete_cond(wait, cond, &data); 5377 5365 } 5378 5366 5367 + static void 5368 + rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len) 5369 + { 5370 + const struct rtw89_c2h_pwr_int_notify *c2h; 5371 + struct rtw89_sta_link *rtwsta_link; 5372 + struct ieee80211_sta *sta; 5373 + struct rtw89_sta *rtwsta; 5374 + u16 macid; 5375 + bool ps; 5376 + 5377 + c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data; 5378 + macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID); 5379 + ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS); 5380 + 5381 + rcu_read_lock(); 5382 + 5383 + rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid); 5384 + if (unlikely(!rtwsta_link)) 5385 + goto out; 5386 + 5387 + rtwsta = rtwsta_link->rtwsta; 5388 + if (ps) 5389 + set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags); 5390 + else 5391 + clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags); 5392 + 5393 + sta = rtwsta_to_sta(rtwsta); 5394 + ieee80211_sta_ps_transition(sta, ps); 5395 + 5396 + out: 5397 + rcu_read_unlock(); 5398 + } 5399 + 5379 5400 static 5380 5401 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 5381 5402 struct sk_buff *c2h, u32 len) = { ··· 5452 5407 void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev, 5453 5408 struct sk_buff *c2h, u32 len) = { 5454 5409 [RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt, 5410 + }; 5411 + 5412 + static 5413 + void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev, 5414 + struct sk_buff *c2h, u32 len) = { 5415 + [RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify, 5455 
5416 }; 5456 5417 5457 5418 static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev, ··· 5514 5463 return true; 5515 5464 case RTW89_MAC_C2H_CLASS_WOW: 5516 5465 return true; 5466 + case RTW89_MAC_C2H_CLASS_AP: 5467 + switch (func) { 5468 + default: 5469 + return false; 5470 + case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY: 5471 + return true; 5472 + } 5517 5473 } 5518 5474 } 5519 5475 ··· 5551 5493 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW) 5552 5494 handler = rtw89_mac_c2h_wow_handler[func]; 5553 5495 break; 5496 + case RTW89_MAC_C2H_CLASS_AP: 5497 + if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP) 5498 + handler = rtw89_mac_c2h_ap_handler[func]; 5499 + break; 5554 5500 case RTW89_MAC_C2H_CLASS_FWDBG: 5555 5501 return; 5556 5502 default: 5557 - rtw89_info(rtwdev, "c2h class %d not support\n", class); 5503 + rtw89_info(rtwdev, "MAC c2h class %d not support\n", class); 5558 5504 return; 5559 5505 } 5560 5506 if (!handler) { 5561 - rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 5507 + rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class, 5562 5508 func); 5563 5509 return; 5564 5510 } ··· 6736 6674 6737 6675 .typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax, 6738 6676 .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax, 6677 + .cfg_phy_rpt = NULL, 6739 6678 6740 6679 .dle_mix_cfg = dle_mix_cfg_ax, 6741 6680 .chk_dle_rdy = chk_dle_rdy_ax,
+43
drivers/net/wireless/realtek/rtw89/mac.h
··· 169 169 MAC_AX_L0_TO_L1_EVENT_MAX = 15, 170 170 }; 171 171 172 + enum rtw89_mac_phy_rpt_size { 173 + MAC_AX_PHY_RPT_SIZE_0 = 0, 174 + MAC_AX_PHY_RPT_SIZE_8 = 1, 175 + MAC_AX_PHY_RPT_SIZE_16 = 2, 176 + MAC_AX_PHY_RPT_SIZE_24 = 3, 177 + }; 178 + 179 + enum rtw89_mac_hdr_cnv_size { 180 + MAC_AX_HDR_CNV_SIZE_0 = 0, 181 + MAC_AX_HDR_CNV_SIZE_32 = 1, 182 + MAC_AX_HDR_CNV_SIZE_64 = 2, 183 + MAC_AX_HDR_CNV_SIZE_96 = 3, 184 + }; 185 + 172 186 enum rtw89_mac_wow_fw_status { 173 187 WOWLAN_NOT_READY = 0x00, 174 188 WOWLAN_SLEEP_READY = 0x01, ··· 440 426 NUM_OF_RTW89_MAC_C2H_FUNC_WOW, 441 427 }; 442 428 429 + enum rtw89_mac_c2h_ap_func { 430 + RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY = 0, 431 + 432 + NUM_OF_RTW89_MAC_C2H_FUNC_AP, 433 + }; 434 + 443 435 enum rtw89_mac_c2h_class { 444 436 RTW89_MAC_C2H_CLASS_INFO = 0x0, 445 437 RTW89_MAC_C2H_CLASS_OFLD = 0x1, ··· 454 434 RTW89_MAC_C2H_CLASS_MCC = 0x4, 455 435 RTW89_MAC_C2H_CLASS_FWDBG = 0x5, 456 436 RTW89_MAC_C2H_CLASS_MRC = 0xe, 437 + RTW89_MAC_C2H_CLASS_AP = 0x18, 457 438 RTW89_MAC_C2H_CLASS_MAX, 458 439 }; 459 440 ··· 982 961 enum rtw89_mac_fwd_target fwd_target, 983 962 u8 mac_idx); 984 963 int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable); 964 + void (*cfg_phy_rpt)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable); 985 965 986 966 int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg); 987 967 int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple); ··· 1238 1216 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en); 1239 1217 int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en); 1240 1218 int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en); 1219 + void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable); 1220 + 1221 + static inline 1222 + void rtw89_mac_cfg_phy_rpt(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 1223 + { 1224 + const struct rtw89_mac_gen_def *mac = 
rtwdev->chip->mac_def; 1225 + 1226 + if (mac->cfg_phy_rpt) 1227 + mac->cfg_phy_rpt(rtwdev, mac_idx, enable); 1228 + } 1229 + 1230 + static inline 1231 + void rtw89_mac_cfg_phy_rpt_bands(struct rtw89_dev *rtwdev, bool enable) 1232 + { 1233 + rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_0, enable); 1234 + 1235 + if (!rtwdev->dbcc_en) 1236 + return; 1237 + 1238 + rtw89_mac_cfg_phy_rpt(rtwdev, RTW89_MAC_1, enable); 1239 + } 1241 1240 1242 1241 static inline 1243 1242 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+281 -8
drivers/net/wireless/realtek/rtw89/mac80211.c
··· 202 202 203 203 rtw89_traffic_stats_init(rtwdev, &rtwvif->stats); 204 204 205 - rtwvif_link = rtw89_vif_set_link(rtwvif, 0); 205 + rtwvif_link = rtw89_vif_set_link(rtwvif, RTW89_VIF_IDLE_LINK_ID); 206 206 if (!rtwvif_link) { 207 207 ret = -EINVAL; 208 208 goto release_port; ··· 218 218 return 0; 219 219 220 220 unset_link: 221 - rtw89_vif_unset_link(rtwvif, 0); 221 + rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID); 222 222 release_port: 223 223 list_del_init(&rtwvif->list); 224 224 rtw89_core_release_bit_map(rtwdev->hw_port, port); ··· 246 246 247 247 mutex_lock(&rtwdev->mutex); 248 248 249 - rtwvif_link = rtwvif->links[0]; 249 + rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID]; 250 250 if (unlikely(!rtwvif_link)) { 251 251 rtw89_err(rtwdev, 252 252 "%s: rtwvif link (link_id %u) is not active\n", 253 - __func__, 0); 253 + __func__, RTW89_VIF_IDLE_LINK_ID); 254 254 goto bottom; 255 255 } 256 256 257 257 __rtw89_ops_remove_iface_link(rtwdev, rtwvif_link); 258 258 259 - rtw89_vif_unset_link(rtwvif, 0); 259 + rtw89_vif_unset_link(rtwvif, RTW89_VIF_IDLE_LINK_ID); 260 260 261 261 bottom: 262 262 list_del_init(&rtwvif->list); ··· 509 509 rtw89_core_txq_init(rtwdev, sta->txq[i]); 510 510 511 511 skb_queue_head_init(&rtwsta->roc_queue); 512 + bitmap_zero(rtwsta->pairwise_sec_cam_map, RTW89_MAX_SEC_CAM_NUM); 512 513 513 514 rtwsta_link = rtw89_sta_set_link(rtwsta, sta->deflink.link_id); 514 515 if (!rtwsta_link) { ··· 776 775 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 777 776 struct rtw89_vif_link *rtwvif_link; 778 777 const struct rtw89_chan *chan; 778 + int ret = 0; 779 779 780 780 mutex_lock(&rtwdev->mutex); 781 781 ··· 785 783 rtw89_err(rtwdev, 786 784 "%s: rtwvif link (link_id %u) is not active\n", 787 785 __func__, link_conf->link_id); 786 + ret = -ENOLINK; 788 787 goto out; 789 788 } 790 789 ··· 807 804 rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL); 808 805 rtw89_chip_rfk_channel(rtwdev, rtwvif_link); 809 806 807 + if 
(RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) { 808 + ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true); 809 + if (ret) 810 + goto out; 811 + } 812 + 810 813 rtw89_queue_chanctx_work(rtwdev); 811 814 812 815 out: 813 816 mutex_unlock(&rtwdev->mutex); 814 817 815 - return 0; 818 + return ret; 816 819 } 817 820 818 821 static ··· 838 829 __func__, link_conf->link_id); 839 830 goto out; 840 831 } 832 + 833 + if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) 834 + rtw89_fw_h2c_ap_info_refcount(rtwdev, false); 841 835 842 836 rtw89_mac_stop_ap(rtwdev, rtwvif_link); 843 837 rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL); ··· 1307 1295 struct ieee80211_link_sta *link_sta, 1308 1296 u32 changed) 1309 1297 { 1310 - struct ieee80211_sta *sta = link_sta->sta; 1298 + struct rtw89_sta *rtwsta = sta_to_rtwsta(link_sta->sta); 1311 1299 struct rtw89_dev *rtwdev = hw->priv; 1300 + struct rtw89_sta_link *rtwsta_link; 1312 1301 1313 - rtw89_phy_ra_update_sta(rtwdev, sta, changed); 1302 + rtwsta_link = rtwsta->links[link_sta->link_id]; 1303 + if (unlikely(!rtwsta_link)) 1304 + return; 1305 + 1306 + rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed); 1314 1307 } 1315 1308 1316 1309 static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw, ··· 1490 1473 return 0; 1491 1474 } 1492 1475 1476 + static bool rtw89_can_work_on_links(struct rtw89_dev *rtwdev, 1477 + struct ieee80211_vif *vif, u16 links) 1478 + { 1479 + struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 1480 + u8 w = hweight16(links); 1481 + 1482 + if (vif->type != NL80211_IFTYPE_STATION && 1483 + w > RTW89_MLD_NON_STA_LINK_NUM) 1484 + return false; 1485 + 1486 + return w <= rtwvif->links_inst_valid_num; 1487 + } 1488 + 1489 + static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw, 1490 + struct ieee80211_vif *vif, 1491 + u16 active_links) 1492 + { 1493 + struct rtw89_dev *rtwdev = hw->priv; 1494 + 1495 + guard(mutex)(&rtwdev->mutex); 1496 + 1497 + return rtw89_can_work_on_links(rtwdev, vif, 
active_links); 1498 + } 1499 + 1500 + static void __rtw89_ops_clr_vif_links(struct rtw89_dev *rtwdev, 1501 + struct rtw89_vif *rtwvif, 1502 + unsigned long clr_links) 1503 + { 1504 + struct rtw89_vif_link *rtwvif_link; 1505 + unsigned int link_id; 1506 + 1507 + for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1508 + rtwvif_link = rtwvif->links[link_id]; 1509 + if (unlikely(!rtwvif_link)) 1510 + continue; 1511 + 1512 + __rtw89_ops_remove_iface_link(rtwdev, rtwvif_link); 1513 + 1514 + rtw89_vif_unset_link(rtwvif, link_id); 1515 + } 1516 + } 1517 + 1518 + static int __rtw89_ops_set_vif_links(struct rtw89_dev *rtwdev, 1519 + struct rtw89_vif *rtwvif, 1520 + unsigned long set_links) 1521 + { 1522 + struct rtw89_vif_link *rtwvif_link; 1523 + unsigned int link_id; 1524 + int ret; 1525 + 1526 + for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1527 + rtwvif_link = rtw89_vif_set_link(rtwvif, link_id); 1528 + if (!rtwvif_link) 1529 + return -EINVAL; 1530 + 1531 + ret = __rtw89_ops_add_iface_link(rtwdev, rtwvif_link); 1532 + if (ret) { 1533 + rtw89_err(rtwdev, "%s: failed to add iface (link id %u)\n", 1534 + __func__, link_id); 1535 + return ret; 1536 + } 1537 + } 1538 + 1539 + return 0; 1540 + } 1541 + 1542 + static 1543 + int rtw89_ops_change_vif_links(struct ieee80211_hw *hw, 1544 + struct ieee80211_vif *vif, 1545 + u16 old_links, u16 new_links, 1546 + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) 1547 + { 1548 + struct rtw89_dev *rtwdev = hw->priv; 1549 + struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 1550 + unsigned long clr_links = old_links & ~new_links; 1551 + unsigned long set_links = new_links & ~old_links; 1552 + bool removing_links = !old_links || clr_links; 1553 + struct rtw89_link_conf_container *snap; 1554 + int ret = 0; 1555 + int i; 1556 + 1557 + guard(mutex)(&rtwdev->mutex); 1558 + 1559 + rtw89_debug(rtwdev, RTW89_DBG_STATE, 1560 + "%s: old_links (0x%08x) -> new_links (0x%08x)\n", 1561 + __func__, 
old_links, new_links); 1562 + 1563 + if (!rtw89_can_work_on_links(rtwdev, vif, new_links)) 1564 + return -EOPNOTSUPP; 1565 + 1566 + if (removing_links) { 1567 + snap = kzalloc(sizeof(*snap), GFP_KERNEL); 1568 + if (!snap) 1569 + return -ENOMEM; 1570 + 1571 + for (i = 0; i < ARRAY_SIZE(snap->link_conf); i++) 1572 + snap->link_conf[i] = old[i]; 1573 + 1574 + rcu_assign_pointer(rtwvif->snap_link_confs, snap); 1575 + } 1576 + 1577 + /* might depend on @snap; don't change order */ 1578 + rtw89_leave_ips_by_hwflags(rtwdev); 1579 + 1580 + if (rtwdev->scanning) 1581 + rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); 1582 + 1583 + if (!old_links) 1584 + __rtw89_ops_clr_vif_links(rtwdev, rtwvif, 1585 + BIT(RTW89_VIF_IDLE_LINK_ID)); 1586 + else if (clr_links) 1587 + __rtw89_ops_clr_vif_links(rtwdev, rtwvif, clr_links); 1588 + 1589 + if (removing_links) { 1590 + /* @snap is required if and only if during removing links. 1591 + * However, it's done here. So, cleanup @snap immediately. 1592 + */ 1593 + rcu_assign_pointer(rtwvif->snap_link_confs, NULL); 1594 + 1595 + /* The pointers in @old will free after this function return, 1596 + * so synchronously wait for all readers of snap to be done. 
1597 + */ 1598 + synchronize_rcu(); 1599 + kfree(snap); 1600 + } 1601 + 1602 + if (set_links) { 1603 + ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, set_links); 1604 + if (ret) 1605 + __rtw89_ops_clr_vif_links(rtwdev, rtwvif, set_links); 1606 + } else if (!new_links) { 1607 + ret = __rtw89_ops_set_vif_links(rtwdev, rtwvif, 1608 + BIT(RTW89_VIF_IDLE_LINK_ID)); 1609 + if (ret) 1610 + __rtw89_ops_clr_vif_links(rtwdev, rtwvif, 1611 + BIT(RTW89_VIF_IDLE_LINK_ID)); 1612 + } 1613 + 1614 + rtw89_enter_ips_by_hwflags(rtwdev); 1615 + return ret; 1616 + } 1617 + 1618 + static void __rtw89_ops_clr_sta_links(struct rtw89_dev *rtwdev, 1619 + struct rtw89_sta *rtwsta, 1620 + unsigned long clr_links) 1621 + { 1622 + struct rtw89_vif_link *rtwvif_link; 1623 + struct rtw89_sta_link *rtwsta_link; 1624 + unsigned int link_id; 1625 + 1626 + for_each_set_bit(link_id, &clr_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1627 + rtwsta_link = rtwsta->links[link_id]; 1628 + if (unlikely(!rtwsta_link)) 1629 + continue; 1630 + 1631 + rtwvif_link = rtwsta_link->rtwvif_link; 1632 + 1633 + rtw89_core_sta_link_disassoc(rtwdev, rtwvif_link, rtwsta_link); 1634 + rtw89_core_sta_link_disconnect(rtwdev, rtwvif_link, rtwsta_link); 1635 + rtw89_core_sta_link_remove(rtwdev, rtwvif_link, rtwsta_link); 1636 + 1637 + rtw89_sta_unset_link(rtwsta, link_id); 1638 + } 1639 + } 1640 + 1641 + static int __rtw89_ops_set_sta_links(struct rtw89_dev *rtwdev, 1642 + struct rtw89_sta *rtwsta, 1643 + unsigned long set_links) 1644 + { 1645 + struct rtw89_vif_link *rtwvif_link; 1646 + struct rtw89_sta_link *rtwsta_link; 1647 + unsigned int link_id; 1648 + u8 sec_cam_idx; 1649 + int ret; 1650 + 1651 + for_each_set_bit(link_id, &set_links, IEEE80211_MLD_MAX_NUM_LINKS) { 1652 + rtwsta_link = rtw89_sta_set_link(rtwsta, link_id); 1653 + if (!rtwsta_link) 1654 + return -EINVAL; 1655 + 1656 + rtwvif_link = rtwsta_link->rtwvif_link; 1657 + 1658 + ret = rtw89_core_sta_link_add(rtwdev, rtwvif_link, rtwsta_link); 1659 + if (ret) { 1660 + 
rtw89_err(rtwdev, "%s: failed to add sta (link id %u)\n", 1661 + __func__, link_id); 1662 + return ret; 1663 + } 1664 + 1665 + rtw89_vif_type_mapping(rtwvif_link, true); 1666 + 1667 + ret = rtw89_core_sta_link_assoc(rtwdev, rtwvif_link, rtwsta_link); 1668 + if (ret) { 1669 + rtw89_err(rtwdev, "%s: failed to assoc sta (link id %u)\n", 1670 + __func__, link_id); 1671 + return ret; 1672 + } 1673 + 1674 + __rtw89_ops_bss_link_assoc(rtwdev, rtwvif_link); 1675 + 1676 + for_each_set_bit(sec_cam_idx, rtwsta->pairwise_sec_cam_map, 1677 + RTW89_MAX_SEC_CAM_NUM) { 1678 + ret = rtw89_cam_attach_link_sec_cam(rtwdev, 1679 + rtwvif_link, 1680 + rtwsta_link, 1681 + sec_cam_idx); 1682 + if (ret) { 1683 + rtw89_err(rtwdev, 1684 + "%s: failed to apply pairwise key (link id %u)\n", 1685 + __func__, link_id); 1686 + return ret; 1687 + } 1688 + } 1689 + } 1690 + 1691 + return 0; 1692 + } 1693 + 1694 + static 1695 + int rtw89_ops_change_sta_links(struct ieee80211_hw *hw, 1696 + struct ieee80211_vif *vif, 1697 + struct ieee80211_sta *sta, 1698 + u16 old_links, u16 new_links) 1699 + { 1700 + struct rtw89_dev *rtwdev = hw->priv; 1701 + struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 1702 + unsigned long clr_links = old_links & ~new_links; 1703 + unsigned long set_links = new_links & ~old_links; 1704 + int ret = 0; 1705 + 1706 + guard(mutex)(&rtwdev->mutex); 1707 + 1708 + rtw89_debug(rtwdev, RTW89_DBG_STATE, 1709 + "%s: old_links (0x%08x) -> new_links (0x%08x)\n", 1710 + __func__, old_links, new_links); 1711 + 1712 + if (!rtw89_can_work_on_links(rtwdev, vif, new_links)) 1713 + return -EOPNOTSUPP; 1714 + 1715 + rtw89_leave_ps_mode(rtwdev); 1716 + 1717 + if (clr_links) 1718 + __rtw89_ops_clr_sta_links(rtwdev, rtwsta, clr_links); 1719 + 1720 + if (set_links) { 1721 + ret = __rtw89_ops_set_sta_links(rtwdev, rtwsta, set_links); 1722 + if (ret) 1723 + __rtw89_ops_clr_sta_links(rtwdev, rtwsta, set_links); 1724 + } 1725 + 1726 + return ret; 1727 + } 1728 + 1493 1729 #ifdef CONFIG_PM 1494 1730 static 
int rtw89_ops_suspend(struct ieee80211_hw *hw, 1495 1731 struct cfg80211_wowlan *wowlan) ··· 1870 1600 .set_sar_specs = rtw89_ops_set_sar_specs, 1871 1601 .link_sta_rc_update = rtw89_ops_sta_rc_update, 1872 1602 .set_tid_config = rtw89_ops_set_tid_config, 1603 + .can_activate_links = rtw89_ops_can_activate_links, 1604 + .change_vif_links = rtw89_ops_change_vif_links, 1605 + .change_sta_links = rtw89_ops_change_sta_links, 1873 1606 #ifdef CONFIG_PM 1874 1607 .suspend = rtw89_ops_suspend, 1875 1608 .resume = rtw89_ops_resume,
+15
drivers/net/wireless/realtek/rtw89/mac_be.c
··· 1988 1988 } 1989 1989 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2); 1990 1990 1991 + void rtw89_mac_cfg_phy_rpt_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 1992 + { 1993 + u32 reg, val; 1994 + 1995 + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RCR, mac_idx); 1996 + val = enable ? MAC_AX_PHY_RPT_SIZE_8 : MAC_AX_PHY_RPT_SIZE_0; 1997 + rtw89_write32_mask(rtwdev, reg, B_BE_PHY_RPT_SZ_MASK, val); 1998 + rtw89_write32_mask(rtwdev, reg, B_BE_HDR_CNV_SZ_MASK, MAC_AX_HDR_CNV_SIZE_0); 1999 + 2000 + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_DRV_INFO_OPTION, mac_idx); 2001 + rtw89_write32_mask(rtwdev, reg, B_BE_DRV_INFO_PHYRPT_EN, enable); 2002 + } 2003 + EXPORT_SYMBOL(rtw89_mac_cfg_phy_rpt_be); 2004 + 1991 2005 static 1992 2006 int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 1993 2007 { ··· 2597 2583 2598 2584 .typ_fltr_opt = rtw89_mac_typ_fltr_opt_be, 2599 2585 .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be, 2586 + .cfg_phy_rpt = rtw89_mac_cfg_phy_rpt_be, 2600 2587 2601 2588 .dle_mix_cfg = dle_mix_cfg_be, 2602 2589 .chk_dle_rdy = chk_dle_rdy_be,
+13 -3
drivers/net/wireless/realtek/rtw89/pci.c
··· 2516 2516 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2517 2517 } 2518 2518 2519 - static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2519 + static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up) 2520 2520 { 2521 2521 if (pwr_up) 2522 2522 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); ··· 2825 2825 { 2826 2826 const struct rtw89_pci_info *info = rtwdev->pci_info; 2827 2827 2828 + rtw89_pci_power_wake(rtwdev, false); 2829 + 2828 2830 if (rtwdev->chip->chip_id == RTL8852A) { 2829 2831 /* ltr sw trigger */ 2830 2832 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); ··· 2869 2867 return ret; 2870 2868 } 2871 2869 2872 - rtw89_pci_power_wake(rtwdev, true); 2870 + rtw89_pci_power_wake_ax(rtwdev, true); 2873 2871 rtw89_pci_autoload_hang(rtwdev); 2874 2872 rtw89_pci_l12_vmain(rtwdev); 2875 2873 rtw89_pci_gen2_force_ib(rtwdev); ··· 2910 2908 2911 2909 /* start DMA activities */ 2912 2910 rtw89_pci_ctrl_dma_all(rtwdev, true); 2911 + 2912 + return 0; 2913 + } 2914 + 2915 + static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev) 2916 + { 2917 + rtw89_pci_power_wake_ax(rtwdev, false); 2913 2918 2914 2919 return 0; 2915 2920 } ··· 4334 4325 B_AX_RDU_INT}, 4335 4326 4336 4327 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4337 - .mac_pre_deinit = NULL, 4328 + .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax, 4338 4329 .mac_post_init = rtw89_pci_ops_mac_post_init_ax, 4339 4330 4340 4331 .clr_idx_all = rtw89_pci_clr_idx_all_ax, ··· 4352 4343 .l1ss_set = rtw89_pci_l1ss_set_ax, 4353 4344 4354 4345 .disable_eq = rtw89_pci_disable_eq_ax, 4346 + .power_wake = rtw89_pci_power_wake_ax, 4355 4347 }; 4356 4348 EXPORT_SYMBOL(rtw89_pci_gen_ax); 4357 4349
+9
drivers/net/wireless/realtek/rtw89/pci.h
··· 1290 1290 void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable); 1291 1291 1292 1292 void (*disable_eq)(struct rtw89_dev *rtwdev); 1293 + void (*power_wake)(struct rtw89_dev *rtwdev, bool pwr_up); 1293 1294 }; 1294 1295 1295 1296 #define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \ ··· 1804 1803 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 1805 1804 1806 1805 gen_def->disable_eq(rtwdev); 1806 + } 1807 + 1808 + static inline void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 1809 + { 1810 + const struct rtw89_pci_info *info = rtwdev->pci_info; 1811 + const struct rtw89_pci_gen_def *gen_def = info->gen_def; 1812 + 1813 + gen_def->power_wake(rtwdev, pwr_up); 1807 1814 } 1808 1815 1809 1816 #endif
+1
drivers/net/wireless/realtek/rtw89/pci_be.c
··· 691 691 .l1ss_set = rtw89_pci_l1ss_set_be, 692 692 693 693 .disable_eq = rtw89_pci_disable_eq_be, 694 + .power_wake = _patch_pcie_power_wake_be, 694 695 }; 695 696 EXPORT_SYMBOL(rtw89_pci_gen_be);
+272 -43
drivers/net/wireless/realtek/rtw89/phy.c
··· 2 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 3 */ 4 4 5 + #include "acpi.h" 5 6 #include "chan.h" 6 7 #include "coex.h" 7 8 #include "debug.h" ··· 264 263 265 264 static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev, 266 265 struct rtw89_sta_link *rtwsta_link, 266 + struct ieee80211_link_sta *link_sta, 267 267 const struct rtw89_chan *chan, 268 268 bool *fix_giltf_en, u8 *fix_giltf) 269 269 { 270 270 struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask; 271 271 u8 band = chan->band_type; 272 272 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 273 - u8 he_gi = mask->control[nl_band].he_gi; 274 273 u8 he_ltf = mask->control[nl_band].he_ltf; 274 + u8 he_gi = mask->control[nl_band].he_gi; 275 275 276 - if (!rtwsta_link->use_cfg_mask) 276 + *fix_giltf_en = true; 277 + 278 + if (rtwdev->chip->chip_id == RTL8852C && 279 + chan->band_width == RTW89_CHANNEL_WIDTH_160 && 280 + rtw89_sta_link_has_su_mu_4xhe08(link_sta)) 281 + *fix_giltf = RTW89_GILTF_SGI_4XHE08; 282 + else 283 + *fix_giltf = RTW89_GILTF_2XHE08; 284 + 285 + if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he)) 277 286 return; 278 287 279 288 if (he_ltf == 2 && he_gi == 2) { ··· 298 287 *fix_giltf = RTW89_GILTF_1XHE16; 299 288 } else if (he_ltf == 0 && he_gi == 0) { 300 289 *fix_giltf = RTW89_GILTF_1XHE08; 301 - } else { 302 - *fix_giltf_en = false; 303 - return; 304 290 } 305 - 306 - *fix_giltf_en = true; 307 291 } 308 292 309 293 static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, ··· 331 325 mode |= RTW89_RA_MODE_EHT; 332 326 ra_mask |= get_eht_ra_mask(link_sta); 333 327 high_rate_masks = rtw89_ra_mask_eht_rates; 328 + rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta, 329 + chan, &fix_giltf_en, &fix_giltf); 334 330 } else if (link_sta->he_cap.has_he) { 335 331 mode |= RTW89_RA_MODE_HE; 336 332 csi_mode = RTW89_RA_RPT_MODE_HE; ··· 344 336 if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] & 345 337 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) 346 338 ldpc_en = 1; 
347 - rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf); 339 + rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta, 340 + chan, &fix_giltf_en, &fix_giltf); 348 341 } else if (link_sta->vht_cap.vht_supported) { 349 342 u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map); 350 343 ··· 475 466 ra->csi_mode = csi_mode; 476 467 } 477 468 478 - static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, 479 - struct rtw89_vif_link *rtwvif_link, 480 - struct rtw89_sta_link *rtwsta_link, 481 - u32 changed) 469 + void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev, 470 + struct rtw89_sta_link *rtwsta_link, 471 + u32 changed) 482 472 { 473 + struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link; 483 474 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 484 475 struct rtw89_ra_info *ra = &rtwsta_link->ra; 485 476 struct ieee80211_link_sta *link_sta; ··· 512 503 u32 changed) 513 504 { 514 505 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 515 - struct rtw89_vif_link *rtwvif_link; 516 506 struct rtw89_sta_link *rtwsta_link; 517 507 unsigned int link_id; 518 508 519 - rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) { 520 - rtwvif_link = rtwsta_link->rtwvif_link; 521 - __rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed); 522 - } 509 + rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) 510 + rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed); 523 511 } 524 512 525 513 static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next, ··· 1860 1854 } 1861 1855 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl); 1862 1856 1857 + static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd) 1858 + { 1859 + switch (ant_gain_regd) { 1860 + case RTW89_ANT_GAIN_ETSI: 1861 + return RTW89_ETSI; 1862 + default: 1863 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1864 + "unknown antenna gain domain: %d\n", 1865 + ant_gain_regd); 1866 + return RTW89_REGD_NUM; 1867 + } 1868 + } 1869 + 
1870 + /* antenna gain in unit of 0.25 dbm */ 1871 + #define RTW89_ANT_GAIN_2GHZ_MIN -8 1872 + #define RTW89_ANT_GAIN_2GHZ_MAX 14 1873 + #define RTW89_ANT_GAIN_5GHZ_MIN -8 1874 + #define RTW89_ANT_GAIN_5GHZ_MAX 20 1875 + #define RTW89_ANT_GAIN_6GHZ_MIN -8 1876 + #define RTW89_ANT_GAIN_6GHZ_MAX 20 1877 + 1878 + #define RTW89_ANT_GAIN_REF_2GHZ 14 1879 + #define RTW89_ANT_GAIN_REF_5GHZ 20 1880 + #define RTW89_ANT_GAIN_REF_6GHZ 20 1881 + 1882 + void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev) 1883 + { 1884 + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; 1885 + const struct rtw89_chip_info *chip = rtwdev->chip; 1886 + struct rtw89_acpi_rtag_result res = {}; 1887 + u32 domain; 1888 + int ret; 1889 + u8 i, j; 1890 + u8 regd; 1891 + u8 val; 1892 + 1893 + if (!chip->support_ant_gain) 1894 + return; 1895 + 1896 + ret = rtw89_acpi_evaluate_rtag(rtwdev, &res); 1897 + if (ret) { 1898 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1899 + "acpi: cannot eval rtag: %d\n", ret); 1900 + return; 1901 + } 1902 + 1903 + if (res.revision != 0) { 1904 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1905 + "unknown rtag revision: %d\n", res.revision); 1906 + return; 1907 + } 1908 + 1909 + domain = get_unaligned_le32(&res.domain); 1910 + 1911 + for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) { 1912 + if (!(domain & BIT(i))) 1913 + continue; 1914 + 1915 + regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i); 1916 + if (regd >= RTW89_REGD_NUM) 1917 + continue; 1918 + ant_gain->regd_enabled |= BIT(regd); 1919 + } 1920 + 1921 + for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) { 1922 + for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) { 1923 + val = res.ant_gain_table[i][j]; 1924 + switch (j) { 1925 + default: 1926 + case RTW89_ANT_GAIN_2GHZ_SUBBAND: 1927 + val = RTW89_ANT_GAIN_REF_2GHZ - 1928 + clamp_t(s8, val, 1929 + RTW89_ANT_GAIN_2GHZ_MIN, 1930 + RTW89_ANT_GAIN_2GHZ_MAX); 1931 + break; 1932 + case RTW89_ANT_GAIN_5GHZ_SUBBAND_1: 1933 + case RTW89_ANT_GAIN_5GHZ_SUBBAND_2: 1934 + case 
RTW89_ANT_GAIN_5GHZ_SUBBAND_2E: 1935 + case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4: 1936 + val = RTW89_ANT_GAIN_REF_5GHZ - 1937 + clamp_t(s8, val, 1938 + RTW89_ANT_GAIN_5GHZ_MIN, 1939 + RTW89_ANT_GAIN_5GHZ_MAX); 1940 + break; 1941 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L: 1942 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H: 1943 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_6: 1944 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L: 1945 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H: 1946 + case RTW89_ANT_GAIN_6GHZ_SUBBAND_8: 1947 + val = RTW89_ANT_GAIN_REF_6GHZ - 1948 + clamp_t(s8, val, 1949 + RTW89_ANT_GAIN_6GHZ_MIN, 1950 + RTW89_ANT_GAIN_6GHZ_MAX); 1951 + } 1952 + ant_gain->offset[i][j] = val; 1953 + } 1954 + } 1955 + } 1956 + 1957 + static 1958 + enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev, 1959 + u32 center_freq) 1960 + { 1961 + switch (center_freq) { 1962 + default: 1963 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1964 + "center freq: %u to antenna gain subband is unhandled\n", 1965 + center_freq); 1966 + fallthrough; 1967 + case 2412 ... 2484: 1968 + return RTW89_ANT_GAIN_2GHZ_SUBBAND; 1969 + case 5180 ... 5240: 1970 + return RTW89_ANT_GAIN_5GHZ_SUBBAND_1; 1971 + case 5250 ... 5320: 1972 + return RTW89_ANT_GAIN_5GHZ_SUBBAND_2; 1973 + case 5500 ... 5720: 1974 + return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E; 1975 + case 5745 ... 5885: 1976 + return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4; 1977 + case 5955 ... 6155: 1978 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L; 1979 + case 6175 ... 6415: 1980 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H; 1981 + case 6435 ... 6515: 1982 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_6; 1983 + case 6535 ... 6695: 1984 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L; 1985 + case 6715 ... 6855: 1986 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H; 1987 + 1988 + /* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H 1989 + * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with 1990 + * struct rtw89_6ghz_span. 1991 + */ 1992 + 1993 + case 6895 ... 
7115: 1994 + return RTW89_ANT_GAIN_6GHZ_SUBBAND_8; 1995 + } 1996 + } 1997 + 1998 + static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev, 1999 + enum rtw89_rf_path path, u32 center_freq) 2000 + { 2001 + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; 2002 + enum rtw89_ant_gain_subband subband_l, subband_h; 2003 + const struct rtw89_6ghz_span *span; 2004 + 2005 + span = rtw89_get_6ghz_span(rtwdev, center_freq); 2006 + 2007 + if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) { 2008 + subband_l = span->ant_gain_subband_low; 2009 + subband_h = span->ant_gain_subband_high; 2010 + } else { 2011 + subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq); 2012 + subband_h = subband_l; 2013 + } 2014 + 2015 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2016 + "center_freq %u: antenna gain subband {%u, %u}\n", 2017 + center_freq, subband_l, subband_h); 2018 + 2019 + return min(ant_gain->offset[path][subband_l], 2020 + ant_gain->offset[path][subband_h]); 2021 + } 2022 + 2023 + static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq) 2024 + { 2025 + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; 2026 + const struct rtw89_chip_info *chip = rtwdev->chip; 2027 + u8 regd = rtw89_regd_get(rtwdev, band); 2028 + s8 offset_patha, offset_pathb; 2029 + 2030 + if (!chip->support_ant_gain) 2031 + return 0; 2032 + 2033 + if (!(ant_gain->regd_enabled & BIT(regd))) 2034 + return 0; 2035 + 2036 + offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq); 2037 + offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq); 2038 + 2039 + return max(offset_patha, offset_pathb); 2040 + } 2041 + 2042 + s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, 2043 + const struct rtw89_chan *chan) 2044 + { 2045 + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; 2046 + u8 regd = rtw89_regd_get(rtwdev, chan->band_type); 2047 + s8 offset_patha, offset_pathb; 2048 + 2049 + if (!(ant_gain->regd_enabled & BIT(regd))) 
2050 + return 0; 2051 + 2052 + offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq); 2053 + offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq); 2054 + 2055 + return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb); 2056 + } 2057 + EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset); 2058 + 2059 + void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev, 2060 + const struct rtw89_chan *chan) 2061 + { 2062 + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; 2063 + const struct rtw89_chip_info *chip = rtwdev->chip; 2064 + u8 regd = rtw89_regd_get(rtwdev, chan->band_type); 2065 + s8 offset_patha, offset_pathb; 2066 + 2067 + if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) { 2068 + seq_puts(m, "no DAG is applied\n"); 2069 + return; 2070 + } 2071 + 2072 + offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq); 2073 + offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq); 2074 + 2075 + seq_printf(m, "ChainA offset: %d dBm\n", offset_patha); 2076 + seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb); 2077 + } 2078 + 1863 2079 static const u8 rtw89_rs_idx_num_ax[] = { 1864 2080 [RTW89_RS_CCK] = RTW89_RATE_CCK_NUM, 1865 2081 [RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM, ··· 2144 1916 } 2145 1917 } 2146 1918 EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate); 2147 - 2148 - static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) 2149 - { 2150 - const struct rtw89_chip_info *chip = rtwdev->chip; 2151 - 2152 - return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); 2153 - } 2154 - 2155 - static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm) 2156 - { 2157 - const struct rtw89_chip_info *chip = rtwdev->chip; 2158 - 2159 - return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63); 2160 - } 2161 1919 2162 1920 static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm) 2163 1921 { ··· 2241 2027 u8 ch_idx = 
rtw89_channel_to_idx(rtwdev, band, ch); 2242 2028 u8 regd = rtw89_regd_get(rtwdev, band); 2243 2029 u8 reg6 = regulatory->reg_6ghz_power; 2244 - s8 lmt = 0, sar; 2030 + s8 lmt = 0, sar, offset; 2245 2031 s8 cstr; 2246 2032 2247 2033 switch (band) { ··· 2273 2059 return 0; 2274 2060 } 2275 2061 2276 - lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); 2062 + offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq); 2063 + lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset); 2277 2064 sar = rtw89_query_sar(rtwdev, freq); 2278 2065 cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); 2279 2066 ··· 2501 2286 u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); 2502 2287 u8 regd = rtw89_regd_get(rtwdev, band); 2503 2288 u8 reg6 = regulatory->reg_6ghz_power; 2504 - s8 lmt_ru = 0, sar; 2289 + s8 lmt_ru = 0, sar, offset; 2505 2290 s8 cstr; 2506 2291 2507 2292 switch (band) { ··· 2533 2318 return 0; 2534 2319 } 2535 2320 2536 - lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); 2321 + offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq); 2322 + lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset); 2537 2323 sar = rtw89_query_sar(rtwdev, freq); 2538 2324 cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); 2539 2325 ··· 3444 3228 (int)(len - sizeof(report->hdr)), &report->state); 3445 3229 } 3446 3230 3231 + static void 3232 + rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3233 + { 3234 + } 3235 + 3447 3236 static 3448 3237 void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev, 3449 3238 struct sk_buff *c2h, u32 len) = { 3450 3239 [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state, 3240 + [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr, 3451 3241 }; 3452 3242 3453 3243 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func) ··· 3507 3285 return; 3508 3286 fallthrough; 3509 3287 default: 3510 - rtw89_info(rtwdev, "c2h class %d not support\n", class); 3288 + 
rtw89_info(rtwdev, "PHY c2h class %d not support\n", class); 3511 3289 return; 3512 3290 } 3513 3291 if (!handler) { 3514 - rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 3292 + rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class, 3515 3293 func); 3516 3294 return; 3517 3295 } ··· 4280 4058 4281 4059 if (!force && cfo->crystal_cap == crystal_cap) 4282 4060 return; 4283 - crystal_cap = clamp_t(u8, crystal_cap, 0, 127); 4284 4061 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) { 4285 4062 rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap); 4286 4063 rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap); ··· 4402 4181 s32 curr_cfo) 4403 4182 { 4404 4183 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4405 - s8 crystal_cap = cfo->crystal_cap; 4184 + int crystal_cap = cfo->crystal_cap; 4406 4185 s32 cfo_abs = abs(curr_cfo); 4407 4186 int sign; 4408 4187 ··· 4423 4202 } 4424 4203 sign = curr_cfo > 0 ? 1 : -1; 4425 4204 if (cfo_abs > CFO_TRK_STOP_TH_4) 4426 - crystal_cap += 7 * sign; 4427 - else if (cfo_abs > CFO_TRK_STOP_TH_3) 4428 - crystal_cap += 5 * sign; 4429 - else if (cfo_abs > CFO_TRK_STOP_TH_2) 4430 4205 crystal_cap += 3 * sign; 4206 + else if (cfo_abs > CFO_TRK_STOP_TH_3) 4207 + crystal_cap += 3 * sign; 4208 + else if (cfo_abs > CFO_TRK_STOP_TH_2) 4209 + crystal_cap += 1 * sign; 4431 4210 else if (cfo_abs > CFO_TRK_STOP_TH_1) 4432 4211 crystal_cap += 1 * sign; 4433 4212 else 4434 4213 return; 4214 + 4215 + crystal_cap = clamp(crystal_cap, 0, 127); 4435 4216 rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false); 4436 4217 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4437 4218 "X_cap{Curr,Default}={0x%x,0x%x}\n", ··· 6531 6308 rtw89_chip_set_txpwr_ctrl(rtwdev); 6532 6309 rtw89_chip_power_trim(rtwdev); 6533 6310 rtw89_chip_cfg_txrx_path(rtwdev); 6311 + } 6312 + 6313 + void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev) 6314 + { 6315 + rtw89_phy_env_monitor_init(rtwdev); 6316 + 
rtw89_physts_parsing_init(rtwdev); 6534 6317 } 6535 6318 6536 6319 void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
+32 -1
drivers/net/wireless/realtek/rtw89/phy.h
··· 57 57 #define CFO_TRK_STOP_TH_4 (30 << 2) 58 58 #define CFO_TRK_STOP_TH_3 (20 << 2) 59 59 #define CFO_TRK_STOP_TH_2 (10 << 2) 60 - #define CFO_TRK_STOP_TH_1 (00 << 2) 60 + #define CFO_TRK_STOP_TH_1 (03 << 2) 61 61 #define CFO_TRK_STOP_TH (2 << 2) 62 62 #define CFO_SW_COMP_FINE_TUNE (2 << 2) 63 63 #define CFO_PERIOD_CNT 15 ··· 151 151 152 152 enum rtw89_phy_c2h_rfk_report_func { 153 153 RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0, 154 + RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6, 154 155 }; 155 156 156 157 enum rtw89_phy_c2h_dm_func { ··· 814 813 enum rtw89_rf_path rf_path, 815 814 void *extra_data); 816 815 void rtw89_phy_dm_init(struct rtw89_dev *rtwdev); 816 + void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev); 817 817 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, 818 818 u32 data, enum rtw89_phy_idx phy_idx); 819 819 void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits, ··· 828 826 const struct rtw89_rate_desc *desc); 829 827 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, 830 828 const struct rtw89_rate_desc *rate_desc); 829 + void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev); 830 + s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, 831 + const struct rtw89_chan *chan); 832 + void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev, 833 + const struct rtw89_chan *chan); 831 834 void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, 832 835 const struct rtw89_txpwr_table *tbl); 833 836 s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, ··· 903 896 phy->set_txpwr_limit_ru(rtwdev, chan, phy_idx); 904 897 } 905 898 899 + static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf) 900 + { 901 + const struct rtw89_chip_info *chip = rtwdev->chip; 902 + 903 + return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf); 904 + } 905 + 906 + static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) 907 + { 908 
+ const struct rtw89_chip_info *chip = rtwdev->chip; 909 + 910 + return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); 911 + } 912 + 913 + static inline s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm) 914 + { 915 + const struct rtw89_chip_info *chip = rtwdev->chip; 916 + 917 + return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63); 918 + } 919 + 906 920 void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link); 907 921 void rtw89_phy_ra_update(struct rtw89_dev *rtwdev); 908 922 void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, 909 923 u32 changed); 924 + void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev, 925 + struct rtw89_sta_link *rtwsta_link, 926 + u32 changed); 910 927 void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, 911 928 struct ieee80211_vif *vif, 912 929 const struct cfg80211_bitrate_mask *mask);
+25 -17
drivers/net/wireless/realtek/rtw89/ps.c
··· 8 8 #include "debug.h" 9 9 #include "fw.h" 10 10 #include "mac.h" 11 + #include "phy.h" 11 12 #include "ps.h" 12 13 #include "reg.h" 13 14 #include "util.h" ··· 63 62 rtw89_mac_power_mode_change(rtwdev, enter); 64 63 } 65 64 66 - void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 65 + void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev) 67 66 { 68 - if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 69 - return; 70 - 71 67 if (!rtwdev->ps_mode) 72 68 return; 73 69 ··· 83 85 rtw89_ps_power_mode_change(rtwdev, false); 84 86 } 85 87 86 - static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, 87 - struct rtw89_vif_link *rtwvif_link) 88 + static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev, 89 + struct rtw89_vif_link *rtwvif_link) 88 90 { 89 91 struct rtw89_lps_parm lps_param = { 90 92 .macid = rtwvif_link->mac_id, ··· 94 96 95 97 rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL); 96 98 rtw89_fw_h2c_lps_parm(rtwdev, &lps_param); 97 - rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif_link); 98 99 } 99 100 100 101 static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, ··· 118 121 __rtw89_leave_ps_mode(rtwdev); 119 122 } 120 123 121 - void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 124 + void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 122 125 bool ps_mode) 123 126 { 127 + struct rtw89_vif_link *rtwvif_link; 128 + bool can_ps_mode = true; 129 + unsigned int link_id; 130 + 124 131 lockdep_assert_held(&rtwdev->mutex); 125 132 126 133 if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) 127 134 return; 128 135 129 - __rtw89_enter_lps(rtwdev, rtwvif_link); 130 - if (ps_mode) 131 - __rtw89_enter_ps_mode(rtwdev, rtwvif_link); 136 + rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 137 + __rtw89_enter_lps_link(rtwdev, rtwvif_link); 138 + 139 + if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 140 + can_ps_mode = false; 141 + } 142 + 143 + if 
(RTW89_CHK_FW_FEATURE(LPS_CH_INFO, &rtwdev->fw)) 144 + rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif); 145 + else 146 + rtw89_fw_h2c_lps_ml_cmn_info(rtwdev, rtwvif); 147 + 148 + if (ps_mode && can_ps_mode) 149 + __rtw89_enter_ps_mode(rtwdev); 132 150 } 133 151 134 152 static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, ··· 168 156 return; 169 157 170 158 __rtw89_leave_ps_mode(rtwdev); 159 + 160 + rtw89_phy_dm_reinit(rtwdev); 171 161 172 162 rtw89_for_each_rtwvif(rtwdev, rtwvif) 173 163 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) ··· 295 281 struct rtw89_vif *rtwvif; 296 282 enum rtw89_entity_mode mode; 297 283 int count = 0; 298 - 299 - /* FIXME: Fix rtw89_enter_lps() and __rtw89_enter_ps_mode() 300 - * to take MLO cases into account before doing the following. 301 - */ 302 - if (rtwdev->support_mlo) 303 - goto disable_lps; 304 284 305 285 mode = rtw89_get_entity_mode(rtwdev); 306 286 if (mode == RTW89_ENTITY_MODE_MCC)
+2 -2
drivers/net/wireless/realtek/rtw89/ps.h
··· 5 5 #ifndef __RTW89_PS_H_ 6 6 #define __RTW89_PS_H_ 7 7 8 - void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8 + void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 9 9 bool ps_mode); 10 10 void rtw89_leave_lps(struct rtw89_dev *rtwdev); 11 11 void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev); 12 - void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); 12 + void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev); 13 13 void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev); 14 14 void rtw89_enter_ips(struct rtw89_dev *rtwdev); 15 15 void rtw89_leave_ips(struct rtw89_dev *rtwdev);
+4
drivers/net/wireless/realtek/rtw89/reg.h
··· 7447 7447 #define B_BE_CSIPRT_HESU_AID_EN BIT(25) 7448 7448 #define B_BE_CSIPRT_VHTSU_AID_EN BIT(24) 7449 7449 7450 + #define R_BE_DRV_INFO_OPTION 0x11470 7451 + #define R_BE_DRV_INFO_OPTION_C1 0x15470 7452 + #define B_BE_DRV_INFO_PHYRPT_EN BIT(0) 7453 + 7450 7454 #define R_BE_RX_ERR_ISR 0x114F4 7451 7455 #define R_BE_RX_ERR_ISR_C1 0x154F4 7452 7456 #define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
+30 -27
drivers/net/wireless/realtek/rtw89/regd.c
··· 17 17 18 18 static const struct rtw89_regd rtw89_regd_map[] = { 19 19 COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), 20 - COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_FCC), 20 + COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA), 21 21 COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC), 22 22 COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE), 23 23 COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC), ··· 35 35 COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA), 36 36 COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA), 37 37 COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA), 38 - COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_NA), 38 + COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC), 39 39 COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 40 40 COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 41 41 COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), ··· 72 72 COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 73 73 COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 74 74 COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 75 - COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 75 + COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 76 76 COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 77 77 COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 78 78 COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), ··· 82 82 COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 83 83 COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 84 84 COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 85 - COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 85 + COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 86 86 COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 87 87 COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 88 - COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 88 + COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 89 89 COUNTRY_REGD("NA", RTW89_ETSI, 
RTW89_ETSI, RTW89_NA), 90 - COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 91 - COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 90 + COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 91 + COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 92 92 COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR), 93 93 COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 94 94 COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), ··· 101 101 COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE), 102 102 COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 103 103 COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 104 - COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 104 + COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 105 105 COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 106 106 COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 107 107 COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN), ··· 110 110 COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 111 111 COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC), 112 112 COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 113 - COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 114 - COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 113 + COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 114 + COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 115 115 COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 116 116 COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 117 117 COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI), 118 - COUNTRY_REGD("TH", RTW89_ETSI, RTW89_ETSI, RTW89_THAILAND), 118 + COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND), 119 119 COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 120 120 COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), 121 121 COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), ··· 158 158 COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 159 159 
COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA), 160 160 COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA), 161 - COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 162 - COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 163 - COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 161 + COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 162 + COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 163 + COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 164 164 COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 165 165 COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 166 166 COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), ··· 176 176 COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 177 177 COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 178 178 COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 179 - COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 180 - COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 179 + COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 180 + COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 181 181 COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 182 182 COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC), 183 183 COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 184 - COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_NA), 184 + COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC), 185 185 COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 186 186 COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 187 187 COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), ··· 194 194 COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 195 195 COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 196 196 COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 197 - COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 197 + COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 198 198 COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 199 199 
COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 200 200 COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 201 201 COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 202 202 COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 203 203 COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 204 - COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_NA), 204 + COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC), 205 205 COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 206 206 COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 207 207 COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 208 208 COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 209 - COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_NA), 209 + COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC), 210 210 COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 211 211 COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 212 212 COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), ··· 216 216 COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 217 217 COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA), 218 218 COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA), 219 - COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_NA), 220 - COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_NA), 219 + COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC), 220 + COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC), 221 221 COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 222 222 COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 223 223 COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 224 224 COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC), 225 225 COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC), 226 - COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_NA), 227 - COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_NA), 226 + COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC), 227 + COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC), 228 228 COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, 
RTW89_NA), 229 229 COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA), 230 230 COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA), ··· 237 237 COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 238 238 COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC), 239 239 COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 240 - COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 240 + COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 241 241 COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 242 - COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 242 + COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 243 243 COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 244 244 COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA), 245 245 COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), ··· 247 247 COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 248 248 COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA), 249 249 COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 250 - COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_NA), 250 + COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC), 251 251 COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), 252 252 COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 253 253 COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 254 254 COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 255 255 COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 256 + COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 256 257 COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 258 + COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA), 259 + COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 257 260 COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), 258 261 }; 259 262
+5 -1
drivers/net/wireless/realtek/rtw89/rtw8851b.c
··· 2298 2298 u8 path; 2299 2299 u8 *rx_power = phy_ppdu->rssi; 2300 2300 2301 - status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]); 2301 + if (!status->signal) 2302 + status->signal = RTW89_RSSI_RAW_TO_DBM(rx_power[RF_PATH_A]); 2302 2303 2303 2304 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 2304 2305 status->chains |= BIT(path); ··· 2392 2391 .ctrl_btg_bt_rx = rtw8851b_ctrl_btg_bt_rx, 2393 2392 .query_ppdu = rtw8851b_query_ppdu, 2394 2393 .convert_rpl_to_rssi = NULL, 2394 + .phy_rpt_to_rssi = NULL, 2395 2395 .ctrl_nbtg_bt_tx = rtw8851b_ctrl_nbtg_bt_tx, 2396 2396 .cfg_txrx_path = rtw8851b_bb_cfg_txrx_path, 2397 2397 .set_txpwr_ul_tb_offset = rtw8851b_set_txpwr_ul_tb_offset, ··· 2466 2464 .nctl_post_table = &rtw8851b_nctl_post_defs_tbl, 2467 2465 .dflt_parms = &rtw89_8851b_dflt_parms, 2468 2466 .rfe_parms_conf = rtw89_8851b_rfe_parms_conf, 2467 + .txpwr_factor_bb = 3, 2469 2468 .txpwr_factor_rf = 2, 2470 2469 .txpwr_factor_mac = 1, 2471 2470 .dig_table = NULL, ··· 2482 2479 BIT(NL80211_CHAN_WIDTH_40) | 2483 2480 BIT(NL80211_CHAN_WIDTH_80), 2484 2481 .support_unii4 = true, 2482 + .support_ant_gain = false, 2485 2483 .ul_tb_waveform_ctrl = true, 2486 2484 .ul_tb_pwr_diff = false, 2487 2485 .hw_sec_hdr = false,
+1 -1
drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
··· 2199 2199 2200 2200 if (dgain > 0x5fc || dgain < 0x556) { 2201 2201 _dpk_one_shot(rtwdev, phy, path, D_SYNC); 2202 - dgain = _dpk_dgain_read(rtwdev); 2202 + _dpk_dgain_read(rtwdev); 2203 2203 } 2204 2204 2205 2205 if (agc_cnt == 0) {
+6 -1
drivers/net/wireless/realtek/rtw89/rtw8852a.c
··· 2068 2068 u8 path; 2069 2069 u8 *rx_power = phy_ppdu->rssi; 2070 2070 2071 - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); 2071 + if (!status->signal) 2072 + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], 2073 + rx_power[RF_PATH_B])); 2072 2074 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 2073 2075 status->chains |= BIT(path); 2074 2076 status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); ··· 2118 2116 .ctrl_btg_bt_rx = rtw8852a_ctrl_btg_bt_rx, 2119 2117 .query_ppdu = rtw8852a_query_ppdu, 2120 2118 .convert_rpl_to_rssi = NULL, 2119 + .phy_rpt_to_rssi = NULL, 2121 2120 .ctrl_nbtg_bt_tx = rtw8852a_ctrl_nbtg_bt_tx, 2122 2121 .cfg_txrx_path = NULL, 2123 2122 .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset, ··· 2184 2181 .nctl_post_table = NULL, 2185 2182 .dflt_parms = &rtw89_8852a_dflt_parms, 2186 2183 .rfe_parms_conf = NULL, 2184 + .txpwr_factor_bb = 3, 2187 2185 .txpwr_factor_rf = 2, 2188 2186 .txpwr_factor_mac = 1, 2189 2187 .dig_table = &rtw89_8852a_phy_dig_table, ··· 2200 2196 BIT(NL80211_CHAN_WIDTH_40) | 2201 2197 BIT(NL80211_CHAN_WIDTH_80), 2202 2198 .support_unii4 = false, 2199 + .support_ant_gain = false, 2203 2200 .ul_tb_waveform_ctrl = false, 2204 2201 .ul_tb_pwr_diff = false, 2205 2202 .hw_sec_hdr = false,
+3
drivers/net/wireless/realtek/rtw89/rtw8852b.c
··· 745 745 .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx, 746 746 .query_ppdu = rtw8852bx_query_ppdu, 747 747 .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi, 748 + .phy_rpt_to_rssi = NULL, 748 749 .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx, 749 750 .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path, 750 751 .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset, ··· 820 819 .nctl_post_table = NULL, 821 820 .dflt_parms = &rtw89_8852b_dflt_parms, 822 821 .rfe_parms_conf = NULL, 822 + .txpwr_factor_bb = 3, 823 823 .txpwr_factor_rf = 2, 824 824 .txpwr_factor_mac = 1, 825 825 .dig_table = NULL, ··· 836 834 BIT(NL80211_CHAN_WIDTH_40) | 837 835 BIT(NL80211_CHAN_WIDTH_80), 838 836 .support_unii4 = true, 837 + .support_ant_gain = true, 839 838 .ul_tb_waveform_ctrl = true, 840 839 .ul_tb_pwr_diff = false, 841 840 .hw_sec_hdr = false,
+33 -17
drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
··· 1206 1206 } 1207 1207 1208 1208 static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, 1209 - enum rtw89_phy_idx phy_idx, s16 ref) 1209 + enum rtw89_phy_idx phy_idx, 1210 + s16 ref, u16 pwr_ofst_decrease) 1210 1211 { 1211 1212 const u16 tssi_16dbm_cw = 0x12c; 1212 1213 const u8 base_cw_0db = 0x27; 1213 - const s8 ofst_int = 0; 1214 1214 s16 pwr_s10_3; 1215 1215 s16 rf_pwr_cw; 1216 1216 u16 bb_pwr_cw; 1217 1217 u32 pwr_cw; 1218 1218 u32 tssi_ofst_cw; 1219 1219 1220 - pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); 1220 + pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease; 1221 1221 bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0)); 1222 1222 rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3)); 1223 1223 rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); 1224 1224 pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; 1225 1225 1226 - tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); 1226 + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) - 1227 + pwr_ofst_decrease; 1227 1228 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1228 1229 "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", 1229 1230 tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); ··· 1235 1234 } 1236 1235 1237 1236 static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev, 1238 - enum rtw89_phy_idx phy_idx) 1237 + enum rtw89_phy_idx phy_idx, s16 pwr_ofst) 1239 1238 { 1240 1239 static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800}; 1241 1240 const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF; 1241 + u16 ofst_dec[RF_PATH_NUM_8852BX]; 1242 1242 const u8 ofst_ofdm = 0x4; 1243 1243 const u8 ofst_cck = 0x8; 1244 1244 const s16 ref_ofdm = 0; ··· 1252 1250 rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, 1253 1251 B_AX_PWR_REF, 0x0); 1254 1252 1255 - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); 1256 - val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); 1253 + ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 
0 : abs(pwr_ofst); 1254 + ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ofst : 0; 1257 1255 1258 - for (i = 0; i < RF_PATH_NUM_8852BX; i++) 1259 - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, 1260 - phy_idx); 1256 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); 1257 + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { 1258 + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]); 1259 + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx); 1260 + } 1261 1261 1262 1262 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); 1263 - val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); 1264 - 1265 - for (i = 0; i < RF_PATH_NUM_8852BX; i++) 1266 - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, 1267 - phy_idx); 1263 + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { 1264 + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]); 1265 + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx); 1266 + } 1268 1267 } 1269 1268 1270 1269 static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, ··· 1336 1333 tx_shape_ofdm); 1337 1334 } 1338 1335 1336 + static void rtw8852bx_set_txpwr_diff(struct rtw89_dev *rtwdev, 1337 + const struct rtw89_chan *chan, 1338 + enum rtw89_phy_idx phy_idx) 1339 + { 1340 + s16 pwr_ofst; 1341 + 1342 + pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan); 1343 + rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst); 1344 + } 1345 + 1339 1346 static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev, 1340 1347 const struct rtw89_chan *chan, 1341 1348 enum rtw89_phy_idx phy_idx) ··· 1355 1342 rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx); 1356 1343 rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); 1357 1344 rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); 1345 + rtw8852bx_set_txpwr_diff(rtwdev, chan, phy_idx); 1358 1346 } 1359 1347 1360 1348 static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev, 1361 
1349 enum rtw89_phy_idx phy_idx) 1362 1350 { 1363 - rtw8852bx_set_txpwr_ref(rtwdev, phy_idx); 1351 + rtw8852bx_set_txpwr_ref(rtwdev, phy_idx, 0); 1364 1352 } 1365 1353 1366 1354 static ··· 1950 1936 u8 path; 1951 1937 u8 *rx_power = phy_ppdu->rssi; 1952 1938 1953 - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); 1939 + if (!status->signal) 1940 + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], 1941 + rx_power[RF_PATH_B])); 1954 1942 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 1955 1943 status->chains |= BIT(path); 1956 1944 status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+3
drivers/net/wireless/realtek/rtw89/rtw8852bt.c
··· 679 679 .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx, 680 680 .query_ppdu = rtw8852bx_query_ppdu, 681 681 .convert_rpl_to_rssi = rtw8852bx_convert_rpl_to_rssi, 682 + .phy_rpt_to_rssi = NULL, 682 683 .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx, 683 684 .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path, 684 685 .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset, ··· 753 752 .nctl_post_table = NULL, 754 753 .dflt_parms = NULL, 755 754 .rfe_parms_conf = NULL, 755 + .txpwr_factor_bb = 3, 756 756 .txpwr_factor_rf = 2, 757 757 .txpwr_factor_mac = 1, 758 758 .dig_table = NULL, ··· 769 767 BIT(NL80211_CHAN_WIDTH_40) | 770 768 BIT(NL80211_CHAN_WIDTH_80), 771 769 .support_unii4 = true, 770 + .support_ant_gain = true, 772 771 .ul_tb_waveform_ctrl = true, 773 772 .ul_tb_pwr_diff = false, 774 773 .hw_sec_hdr = false,
+37 -17
drivers/net/wireless/realtek/rtw89/rtw8852c.c
··· 1882 1882 } 1883 1883 1884 1884 static u32 rtw8852c_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, 1885 - enum rtw89_phy_idx phy_idx, s16 ref) 1885 + enum rtw89_phy_idx phy_idx, 1886 + s16 ref, u16 pwr_ofst_decrease) 1886 1887 { 1887 - s8 ofst_int = 0; 1888 1888 u8 base_cw_0db = 0x27; 1889 1889 u16 tssi_16dbm_cw = 0x12c; 1890 1890 s16 pwr_s10_3 = 0; ··· 1893 1893 u32 pwr_cw = 0; 1894 1894 u32 tssi_ofst_cw = 0; 1895 1895 1896 - pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); 1896 + pwr_s10_3 = (ref << 1) + (s16)(base_cw_0db << 3) - pwr_ofst_decrease; 1897 1897 bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3); 1898 1898 rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3); 1899 1899 rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); 1900 1900 pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; 1901 1901 1902 - tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); 1902 + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)) - 1903 + pwr_ofst_decrease; 1903 1904 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 1904 1905 "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", 1905 1906 tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); ··· 1944 1943 } 1945 1944 1946 1945 static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev, 1947 - enum rtw89_phy_idx phy_idx) 1946 + enum rtw89_phy_idx phy_idx, s16 pwr_ofst) 1948 1947 { 1949 1948 static const u32 addr[RF_PATH_NUM_8852C] = {0x5800, 0x7800}; 1949 + u16 ofst_dec[RF_PATH_NUM_8852C]; 1950 1950 const u32 mask = 0x7FFFFFF; 1951 1951 const u8 ofst_ofdm = 0x4; 1952 1952 const u8 ofst_cck = 0x8; ··· 1961 1959 rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, 1962 1960 GENMASK(27, 10), 0x0); 1963 1961 1964 - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); 1965 - val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); 1962 + ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? 0 : abs(pwr_ofst); 1963 + ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? 
pwr_ofst : 0; 1966 1964 1967 - for (i = 0; i < RF_PATH_NUM_8852C; i++) 1968 - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, 1969 - phy_idx); 1965 + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); 1966 + for (i = 0; i < RF_PATH_NUM_8852C; i++) { 1967 + val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm, ofst_dec[i]); 1968 + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, phy_idx); 1969 + } 1970 1970 1971 1971 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); 1972 - val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); 1973 - 1974 - for (i = 0; i < RF_PATH_NUM_8852C; i++) 1975 - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, 1976 - phy_idx); 1972 + for (i = 0; i < RF_PATH_NUM_8852C; i++) { 1973 + val = rtw8852c_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck, ofst_dec[i]); 1974 + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, phy_idx); 1975 + } 1977 1976 } 1978 1977 1979 1978 static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, ··· 2055 2052 B_P1_DAC_COMP_POST_DPD_EN); 2056 2053 } 2057 2054 2055 + static void rtw8852c_set_txpwr_diff(struct rtw89_dev *rtwdev, 2056 + const struct rtw89_chan *chan, 2057 + enum rtw89_phy_idx phy_idx) 2058 + { 2059 + s16 pwr_ofst; 2060 + 2061 + pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan); 2062 + rtw8852c_set_txpwr_ref(rtwdev, phy_idx, pwr_ofst); 2063 + } 2064 + 2058 2065 static void rtw8852c_set_txpwr(struct rtw89_dev *rtwdev, 2059 2066 const struct rtw89_chan *chan, 2060 2067 enum rtw89_phy_idx phy_idx) ··· 2074 2061 rtw8852c_set_tx_shape(rtwdev, chan, phy_idx); 2075 2062 rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); 2076 2063 rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); 2064 + rtw8852c_set_txpwr_diff(rtwdev, chan, phy_idx); 2077 2065 } 2078 2066 2079 2067 static void rtw8852c_set_txpwr_ctrl(struct rtw89_dev *rtwdev, 2080 2068 enum rtw89_phy_idx phy_idx) 2081 2069 { 2082 - 
rtw8852c_set_txpwr_ref(rtwdev, phy_idx); 2070 + rtw8852c_set_txpwr_ref(rtwdev, phy_idx, 0); 2083 2071 } 2084 2072 2085 2073 static void ··· 2807 2793 u8 path; 2808 2794 u8 *rx_power = phy_ppdu->rssi; 2809 2795 2810 - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); 2796 + if (!status->signal) 2797 + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], 2798 + rx_power[RF_PATH_B])); 2799 + 2811 2800 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 2812 2801 status->chains |= BIT(path); 2813 2802 status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); ··· 2910 2893 .ctrl_btg_bt_rx = rtw8852c_ctrl_btg_bt_rx, 2911 2894 .query_ppdu = rtw8852c_query_ppdu, 2912 2895 .convert_rpl_to_rssi = NULL, 2896 + .phy_rpt_to_rssi = NULL, 2913 2897 .ctrl_nbtg_bt_tx = rtw8852c_ctrl_nbtg_bt_tx, 2914 2898 .cfg_txrx_path = rtw8852c_bb_cfg_txrx_path, 2915 2899 .set_txpwr_ul_tb_offset = rtw8852c_set_txpwr_ul_tb_offset, ··· 2977 2959 .dflt_parms = &rtw89_8852c_dflt_parms, 2978 2960 .rfe_parms_conf = NULL, 2979 2961 .chanctx_listener = &rtw8852c_chanctx_listener, 2962 + .txpwr_factor_bb = 3, 2980 2963 .txpwr_factor_rf = 2, 2981 2964 .txpwr_factor_mac = 1, 2982 2965 .dig_table = NULL, ··· 2995 2976 BIT(NL80211_CHAN_WIDTH_80) | 2996 2977 BIT(NL80211_CHAN_WIDTH_160), 2997 2978 .support_unii4 = true, 2979 + .support_ant_gain = true, 2998 2980 .ul_tb_waveform_ctrl = false, 2999 2981 .ul_tb_pwr_diff = true, 3000 2982 .hw_sec_hdr = true,
+3 -3
drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
··· 1769 1769 target_ch = chan->channel - 33; 1770 1770 } 1771 1771 } else if (chan->band_type == RTW89_BAND_6G) { 1772 - if (chan->channel >= 1 && chan->channel <= 125) 1773 - target_ch = chan->channel + 32; 1774 - else 1772 + if (chan->channel > 125) 1775 1773 target_ch = chan->channel - 32; 1774 + else 1775 + target_ch = chan->channel + 32; 1776 1776 } else { 1777 1777 target_ch = chan->channel; 1778 1778 }
+18 -3
drivers/net/wireless/realtek/rtw89/rtw8922a.c
··· 14 14 #include "rtw8922a_rfk.h" 15 15 #include "util.h" 16 16 17 - #define RTW8922A_FW_FORMAT_MAX 2 17 + #define RTW8922A_FW_FORMAT_MAX 3 18 18 #define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw" 19 19 #define RTW8922A_MODULE_FIRMWARE \ 20 20 RTW8922A_FW_BASENAME "-" __stringify(RTW8922A_FW_FORMAT_MAX) ".bin" ··· 2565 2565 u8 path; 2566 2566 u8 *rx_power = phy_ppdu->rssi; 2567 2567 2568 - status->signal = 2569 - RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); 2568 + if (!status->signal) 2569 + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], 2570 + rx_power[RF_PATH_B])); 2571 + 2570 2572 for (path = 0; path < rtwdev->chip->rf_path_num; path++) { 2571 2573 status->chains |= BIT(path); 2572 2574 status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); ··· 2607 2605 } 2608 2606 2609 2607 phy_ppdu->rssi_avg = phy_ppdu->rpl_avg; 2608 + } 2609 + 2610 + static void rtw8922a_phy_rpt_to_rssi(struct rtw89_dev *rtwdev, 2611 + struct rtw89_rx_desc_info *desc_info, 2612 + struct ieee80211_rx_status *rx_status) 2613 + { 2614 + if (desc_info->rssi <= 0x1 || (desc_info->rssi >> 2) > MAX_RSSI) 2615 + return; 2616 + 2617 + rx_status->signal = (desc_info->rssi >> 2) - MAX_RSSI; 2610 2618 } 2611 2619 2612 2620 static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev) ··· 2677 2665 .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx, 2678 2666 .query_ppdu = rtw8922a_query_ppdu, 2679 2667 .convert_rpl_to_rssi = rtw8922a_convert_rpl_to_rssi, 2668 + .phy_rpt_to_rssi = rtw8922a_phy_rpt_to_rssi, 2680 2669 .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx, 2681 2670 .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path, 2682 2671 .set_txpwr_ul_tb_offset = NULL, ··· 2742 2729 .nctl_post_table = NULL, 2743 2730 .dflt_parms = NULL, /* load parm from fw */ 2744 2731 .rfe_parms_conf = NULL, /* load parm from fw */ 2732 + .txpwr_factor_bb = 3, 2745 2733 .txpwr_factor_rf = 2, 2746 2734 .txpwr_factor_mac = 1, 2747 2735 .dig_table = NULL, ··· 2760 2746 
BIT(NL80211_CHAN_WIDTH_80) | 2761 2747 BIT(NL80211_CHAN_WIDTH_160), 2762 2748 .support_unii4 = true, 2749 + .support_ant_gain = false, 2763 2750 .ul_tb_waveform_ctrl = false, 2764 2751 .ul_tb_pwr_diff = false, 2765 2752 .hw_sec_hdr = true,
+5 -50
drivers/net/wireless/realtek/rtw89/sar.c
··· 42 42 43 43 /* freq 6875 (ch 185, 20MHz) spans RTW89_SAR_6GHZ_SUBBAND_7_H 44 44 * and RTW89_SAR_6GHZ_SUBBAND_8, so directly describe it with 45 - * struct rtw89_sar_span in the following. 45 + * struct rtw89_6ghz_span. 46 46 */ 47 47 48 48 case 6895 ... 7115: ··· 50 50 } 51 51 } 52 52 53 - struct rtw89_sar_span { 54 - enum rtw89_sar_subband subband_low; 55 - enum rtw89_sar_subband subband_high; 56 - }; 57 - 58 - #define RTW89_SAR_SPAN_VALID(span) ((span)->subband_high) 59 - 60 - #define RTW89_SAR_6GHZ_SPAN_HEAD 6145 61 - #define RTW89_SAR_6GHZ_SPAN_IDX(center_freq) \ 62 - ((((int)(center_freq) - RTW89_SAR_6GHZ_SPAN_HEAD) / 5) / 2) 63 - 64 - #define RTW89_DECL_SAR_6GHZ_SPAN(center_freq, subband_l, subband_h) \ 65 - [RTW89_SAR_6GHZ_SPAN_IDX(center_freq)] = { \ 66 - .subband_low = RTW89_SAR_6GHZ_ ## subband_l, \ 67 - .subband_high = RTW89_SAR_6GHZ_ ## subband_h, \ 68 - } 69 - 70 - /* Since 6GHz SAR subbands are not edge aligned, some cases span two SAR 71 - * subbands. In the following, we describe each of them with rtw89_sar_span. 
72 - */ 73 - static const struct rtw89_sar_span rtw89_sar_overlapping_6ghz[] = { 74 - RTW89_DECL_SAR_6GHZ_SPAN(6145, SUBBAND_5_L, SUBBAND_5_H), 75 - RTW89_DECL_SAR_6GHZ_SPAN(6165, SUBBAND_5_L, SUBBAND_5_H), 76 - RTW89_DECL_SAR_6GHZ_SPAN(6185, SUBBAND_5_L, SUBBAND_5_H), 77 - RTW89_DECL_SAR_6GHZ_SPAN(6505, SUBBAND_6, SUBBAND_7_L), 78 - RTW89_DECL_SAR_6GHZ_SPAN(6525, SUBBAND_6, SUBBAND_7_L), 79 - RTW89_DECL_SAR_6GHZ_SPAN(6545, SUBBAND_6, SUBBAND_7_L), 80 - RTW89_DECL_SAR_6GHZ_SPAN(6665, SUBBAND_7_L, SUBBAND_7_H), 81 - RTW89_DECL_SAR_6GHZ_SPAN(6705, SUBBAND_7_L, SUBBAND_7_H), 82 - RTW89_DECL_SAR_6GHZ_SPAN(6825, SUBBAND_7_H, SUBBAND_8), 83 - RTW89_DECL_SAR_6GHZ_SPAN(6865, SUBBAND_7_H, SUBBAND_8), 84 - RTW89_DECL_SAR_6GHZ_SPAN(6875, SUBBAND_7_H, SUBBAND_8), 85 - RTW89_DECL_SAR_6GHZ_SPAN(6885, SUBBAND_7_H, SUBBAND_8), 86 - }; 87 - 88 53 static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, 89 54 u32 center_freq, s32 *cfg) 90 55 { 91 56 struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common; 92 - const struct rtw89_sar_span *span = NULL; 93 57 enum rtw89_sar_subband subband_l, subband_h; 94 - int idx; 58 + const struct rtw89_6ghz_span *span; 95 59 96 - if (center_freq >= RTW89_SAR_6GHZ_SPAN_HEAD) { 97 - idx = RTW89_SAR_6GHZ_SPAN_IDX(center_freq); 98 - /* To decrease size of rtw89_sar_overlapping_6ghz[], 99 - * RTW89_SAR_6GHZ_SPAN_IDX() truncates the leading NULLs 100 - * to make first span as index 0 of the table. So, if center 101 - * frequency is less than the first one, it will get netative. 
102 - */ 103 - if (idx >= 0 && idx < ARRAY_SIZE(rtw89_sar_overlapping_6ghz)) 104 - span = &rtw89_sar_overlapping_6ghz[idx]; 105 - } 60 + span = rtw89_get_6ghz_span(rtwdev, center_freq); 106 61 107 62 if (span && RTW89_SAR_SPAN_VALID(span)) { 108 - subband_l = span->subband_low; 109 - subband_h = span->subband_high; 63 + subband_l = span->sar_subband_low; 64 + subband_h = span->sar_subband_high; 110 65 } else { 111 66 subband_l = rtw89_sar_get_subband(rtwdev, center_freq); 112 67 subband_h = subband_l;
+1
drivers/net/wireless/realtek/rtw89/ser.c
··· 365 365 ser_reset_vif(rtwdev, rtwvif); 366 366 367 367 rtwdev->total_sta_assoc = 0; 368 + refcount_set(&rtwdev->refcount_ap_info, 0); 368 369 } 369 370 370 371 /* hal function */
+3
drivers/net/wireless/realtek/rtw89/txrx.h
··· 560 560 #define BE_RXD_HDR_OFFSET_MASK GENMASK(20, 16) 561 561 #define BE_RXD_WL_HD_IV_LEN_MASK GENMASK(26, 21) 562 562 563 + /* BE RXD - PHY RPT dword0 */ 564 + #define BE_RXD_PHY_RSSI GENMASK(11, 0) 565 + 563 566 struct rtw89_phy_sts_ie00 { 564 567 __le32 w0; 565 568 __le32 w1;
+6 -5
drivers/net/wireless/realtek/rtw89/wow.c
··· 620 620 * need to unlock mutex 621 621 */ 622 622 mutex_unlock(&rtwdev->mutex); 623 - key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1); 623 + if (ieee80211_vif_is_mld(wow_vif)) 624 + key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id); 625 + else 626 + key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1); 624 627 mutex_lock(&rtwdev->mutex); 625 628 626 629 kfree(rekey_conf); ··· 694 691 695 692 static void rtw89_wow_enter_deep_ps(struct rtw89_dev *rtwdev) 696 693 { 697 - struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link; 698 - 699 - __rtw89_enter_ps_mode(rtwdev, rtwvif_link); 694 + __rtw89_enter_ps_mode(rtwdev); 700 695 } 701 696 702 697 static void rtw89_wow_enter_ps(struct rtw89_dev *rtwdev) ··· 702 701 struct rtw89_vif_link *rtwvif_link = rtwdev->wow.rtwvif_link; 703 702 704 703 if (rtw89_wow_mgd_linked(rtwdev)) 705 - rtw89_enter_lps(rtwdev, rtwvif_link, false); 704 + rtw89_enter_lps(rtwdev, rtwvif_link->rtwvif, false); 706 705 else if (rtw89_wow_no_link(rtwdev)) 707 706 rtw89_fw_h2c_fwips(rtwdev, rtwvif_link, true); 708 707 }
+2 -2
drivers/net/wireless/ti/wlcore/sysfs.c
··· 88 88 static DEVICE_ATTR_RO(hw_pg_ver); 89 89 90 90 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, 91 - struct bin_attribute *bin_attr, 91 + const struct bin_attribute *bin_attr, 92 92 char *buffer, loff_t pos, size_t count) 93 93 { 94 94 struct device *dev = kobj_to_dev(kobj); ··· 121 121 122 122 static const struct bin_attribute fwlog_attr = { 123 123 .attr = { .name = "fwlog", .mode = 0400 }, 124 - .read = wl1271_sysfs_read_fwlog, 124 + .read_new = wl1271_sysfs_read_fwlog, 125 125 }; 126 126 127 127 int wlcore_sysfs_init(struct wl1271 *wl)
+1 -1
drivers/net/wireless/ti/wlcore/testmode.c
··· 45 45 }; 46 46 #define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1) 47 47 48 - static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = { 48 + static const struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = { 49 49 [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 }, 50 50 [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 }, 51 51 [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
+39
drivers/net/wireless/virtual/mac80211_hwsim.c
··· 5048 5048 .tx_mcs_80p80 = cpu_to_le16(0xfffa), 5049 5049 }, 5050 5050 }, 5051 + .eht_cap = { 5052 + .has_eht = true, 5053 + .eht_cap_elem = { 5054 + .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_OM_CONTROL | 5055 + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, 5056 + .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ, 5057 + /* Leave all the other PHY capability bytes 5058 + * unset, as DCM, beam forming, RU and PPE 5059 + * threshold information are not supported 5060 + */ 5061 + }, 5062 + /* For all MCS and bandwidth, set 8 NSS for both Tx and 5063 + * Rx 5064 + */ 5065 + .eht_mcs_nss_supp = { 5066 + /* As B1 and B2 are set in the supported 5067 + * channel width set field in the HE PHY 5068 + * capabilities information field and 320MHz in 5069 + * 6GHz is supported include all the following 5070 + * MCS/NSS. 5071 + */ 5072 + .bw._80 = { 5073 + .rx_tx_mcs9_max_nss = 0x88, 5074 + .rx_tx_mcs11_max_nss = 0x88, 5075 + .rx_tx_mcs13_max_nss = 0x88, 5076 + }, 5077 + .bw._160 = { 5078 + .rx_tx_mcs9_max_nss = 0x88, 5079 + .rx_tx_mcs11_max_nss = 0x88, 5080 + .rx_tx_mcs13_max_nss = 0x88, 5081 + }, 5082 + .bw._320 = { 5083 + .rx_tx_mcs9_max_nss = 0x88, 5084 + .rx_tx_mcs11_max_nss = 0x88, 5085 + .rx_tx_mcs13_max_nss = 0x88, 5086 + }, 5087 + }, 5088 + /* PPE threshold information is not supported */ 5089 + }, 5051 5090 }, 5052 5091 #endif 5053 5092 };
+2 -1
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
··· 1802 1802 } 1803 1803 1804 1804 static int cfg80211_rtw_get_txpower(struct wiphy *wiphy, 1805 - struct wireless_dev *wdev, int *dbm) 1805 + struct wireless_dev *wdev, 1806 + unsigned int link_id, int *dbm) 1806 1807 { 1807 1808 *dbm = (12); 1808 1809
+5 -1
include/net/cfg80211.h
··· 4733 4733 int (*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, 4734 4734 enum nl80211_tx_power_setting type, int mbm); 4735 4735 int (*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev, 4736 - int *dbm); 4736 + unsigned int link_id, int *dbm); 4737 4737 4738 4738 void (*rfkill_poll)(struct wiphy *wiphy); 4739 4739 ··· 6030 6030 __release(&wiphy->mtx); 6031 6031 mutex_unlock(&wiphy->mtx); 6032 6032 } 6033 + 6034 + DEFINE_GUARD(wiphy, struct wiphy *, 6035 + mutex_lock(&_T->mtx), 6036 + mutex_unlock(&_T->mtx)) 6033 6037 6034 6038 struct wiphy_work; 6035 6039 typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
+1 -1
include/net/mac80211.h
··· 4762 4762 u32 (*get_expected_throughput)(struct ieee80211_hw *hw, 4763 4763 struct ieee80211_sta *sta); 4764 4764 int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 4765 - int *dbm); 4765 + unsigned int link_id, int *dbm); 4766 4766 4767 4767 int (*tdls_channel_switch)(struct ieee80211_hw *hw, 4768 4768 struct ieee80211_vif *vif,
+12 -4
net/mac80211/cfg.c
··· 3190 3190 3191 3191 static int ieee80211_get_tx_power(struct wiphy *wiphy, 3192 3192 struct wireless_dev *wdev, 3193 + unsigned int link_id, 3193 3194 int *dbm) 3194 3195 { 3195 3196 struct ieee80211_local *local = wiphy_priv(wiphy); 3196 3197 struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); 3198 + struct ieee80211_link_data *link_data; 3197 3199 3198 3200 if (local->ops->get_txpower && 3199 3201 (sdata->flags & IEEE80211_SDATA_IN_DRIVER)) 3200 - return drv_get_txpower(local, sdata, dbm); 3202 + return drv_get_txpower(local, sdata, link_id, dbm); 3201 3203 3202 - if (local->emulate_chanctx) 3204 + if (local->emulate_chanctx) { 3203 3205 *dbm = local->hw.conf.power_level; 3204 - else 3205 - *dbm = sdata->vif.bss_conf.txpower; 3206 + } else { 3207 + link_data = wiphy_dereference(wiphy, sdata->link[link_id]); 3208 + 3209 + if (link_data) 3210 + *dbm = link_data->conf->txpower; 3211 + else 3212 + return -ENOLINK; 3213 + } 3206 3214 3207 3215 /* INT_MIN indicates no power level was set yet */ 3208 3216 if (*dbm == INT_MIN)
+2 -2
net/mac80211/debugfs.c
··· 284 284 q_limit_low_old = local->aql_txq_limit_low[ac]; 285 285 q_limit_high_old = local->aql_txq_limit_high[ac]; 286 286 287 - wiphy_lock(local->hw.wiphy); 287 + guard(wiphy)(local->hw.wiphy); 288 + 288 289 local->aql_txq_limit_low[ac] = q_limit_low; 289 290 local->aql_txq_limit_high[ac] = q_limit_high; 290 291 ··· 297 296 sta->airtime[ac].aql_limit_high = q_limit_high; 298 297 } 299 298 } 300 - wiphy_unlock(local->hw.wiphy); 301 299 302 300 return count; 303 301 }
+4 -3
net/mac80211/driver-ops.h
··· 1273 1273 } 1274 1274 1275 1275 static inline int drv_get_txpower(struct ieee80211_local *local, 1276 - struct ieee80211_sub_if_data *sdata, int *dbm) 1276 + struct ieee80211_sub_if_data *sdata, 1277 + unsigned int link_id, int *dbm) 1277 1278 { 1278 1279 int ret; 1279 1280 ··· 1284 1283 if (!local->ops->get_txpower) 1285 1284 return -EOPNOTSUPP; 1286 1285 1287 - ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm); 1288 - trace_drv_get_txpower(local, sdata, *dbm, ret); 1286 + ret = local->ops->get_txpower(&local->hw, &sdata->vif, link_id, dbm); 1287 + trace_drv_get_txpower(local, sdata, link_id, *dbm, ret); 1289 1288 1290 1289 return ret; 1291 1290 }
+10 -12
net/mac80211/ethtool.c
··· 19 19 struct netlink_ext_ack *extack) 20 20 { 21 21 struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); 22 - int ret; 23 22 24 23 if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) 25 24 return -EINVAL; 26 25 27 - wiphy_lock(local->hw.wiphy); 28 - ret = drv_set_ringparam(local, rp->tx_pending, rp->rx_pending); 29 - wiphy_unlock(local->hw.wiphy); 26 + guard(wiphy)(local->hw.wiphy); 30 27 31 - return ret; 28 + return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending); 32 29 } 33 30 34 31 static void ieee80211_get_ringparam(struct net_device *dev, ··· 37 40 38 41 memset(rp, 0, sizeof(*rp)); 39 42 40 - wiphy_lock(local->hw.wiphy); 43 + guard(wiphy)(local->hw.wiphy); 44 + 41 45 drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending, 42 46 &rp->rx_pending, &rp->rx_max_pending); 43 - wiphy_unlock(local->hw.wiphy); 44 47 } 45 48 46 49 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { ··· 106 109 * network device. 107 110 */ 108 111 109 - wiphy_lock(local->hw.wiphy); 112 + guard(wiphy)(local->hw.wiphy); 110 113 111 114 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 112 115 sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid); ··· 157 160 chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); 158 161 if (chanctx_conf) 159 162 channel = chanctx_conf->def.chan; 163 + else if (local->open_count > 0 && 164 + local->open_count == local->monitors && 165 + sdata->vif.type == NL80211_IFTYPE_MONITOR) 166 + channel = local->monitor_chanreq.oper.chan; 160 167 else 161 168 channel = NULL; 162 169 rcu_read_unlock(); ··· 206 205 else 207 206 data[i++] = -1LL; 208 207 209 - if (WARN_ON(i != STA_STATS_LEN)) { 210 - wiphy_unlock(local->hw.wiphy); 208 + if (WARN_ON(i != STA_STATS_LEN)) 211 209 return; 212 - } 213 210 214 211 drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); 215 - wiphy_unlock(local->hw.wiphy); 216 212 } 217 213 218 214 static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 
*data)
+1 -1
net/mac80211/ieee80211_i.h
··· 1204 1204 for (int ___link_id = 0; \ 1205 1205 ___link_id < ARRAY_SIZE(___sdata->link); \ 1206 1206 ___link_id++) \ 1207 - if ((_link = wiphy_dereference((local)->hw.wiphy, \ 1207 + if ((_link = wiphy_dereference((_local)->hw.wiphy, \ 1208 1208 ___sdata->link[___link_id]))) 1209 1209 1210 1210 static inline int
+9 -16
net/mac80211/iface.c
··· 295 295 { 296 296 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 297 297 struct ieee80211_local *local = sdata->local; 298 - int ret; 299 298 300 299 /* 301 300 * This happens during unregistration if there's a bond device ··· 304 305 if (!dev->ieee80211_ptr->registered) 305 306 return 0; 306 307 307 - wiphy_lock(local->hw.wiphy); 308 - ret = _ieee80211_change_mac(sdata, addr); 309 - wiphy_unlock(local->hw.wiphy); 308 + guard(wiphy)(local->hw.wiphy); 310 309 311 - return ret; 310 + return _ieee80211_change_mac(sdata, addr); 312 311 } 313 312 314 313 static inline int identical_mac_addr_allowed(int type1, int type2) ··· 442 445 if (!is_valid_ether_addr(dev->dev_addr)) 443 446 return -EADDRNOTAVAIL; 444 447 445 - wiphy_lock(sdata->local->hw.wiphy); 448 + guard(wiphy)(sdata->local->hw.wiphy); 449 + 446 450 err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); 447 451 if (err) 448 - goto out; 452 + return err; 449 453 450 - err = ieee80211_do_open(&sdata->wdev, true); 451 - out: 452 - wiphy_unlock(sdata->local->hw.wiphy); 453 - 454 - return err; 454 + return ieee80211_do_open(&sdata->wdev, true); 455 455 } 456 456 457 457 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) ··· 769 775 ieee80211_stop_mbssid(sdata); 770 776 } 771 777 772 - wiphy_lock(sdata->local->hw.wiphy); 778 + guard(wiphy)(sdata->local->hw.wiphy); 779 + 773 780 wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->activate_links_work); 774 781 775 782 ieee80211_do_stop(sdata, true); 776 - wiphy_unlock(sdata->local->hw.wiphy); 777 783 778 784 return 0; 779 785 } ··· 2273 2279 */ 2274 2280 cfg80211_shutdown_all_interfaces(local->hw.wiphy); 2275 2281 2276 - wiphy_lock(local->hw.wiphy); 2282 + guard(wiphy)(local->hw.wiphy); 2277 2283 2278 2284 WARN(local->open_count, "%s: open count remains %d\n", 2279 2285 wiphy_name(local->hw.wiphy), local->open_count); ··· 2303 2309 if (!netdev) 2304 2310 kfree(sdata); 2305 2311 } 2306 - 
wiphy_unlock(local->hw.wiphy); 2307 2312 } 2308 2313 2309 2314 static int netdev_notify(struct notifier_block *nb,
+7 -2
net/mac80211/main.c
··· 5 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 7 * Copyright (C) 2017 Intel Deutschland GmbH 8 - * Copyright (C) 2018-2023 Intel Corporation 8 + * Copyright (C) 2018-2024 Intel Corporation 9 9 */ 10 10 11 11 #include <net/mac80211.h> ··· 726 726 }, 727 727 [NL80211_IFTYPE_P2P_DEVICE] = { 728 728 .tx = 0xffff, 729 + /* 730 + * To support P2P PASN pairing let user space register to rx 731 + * also AUTH frames on P2P device interface. 732 + */ 729 733 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | 730 - BIT(IEEE80211_STYPE_PROBE_REQ >> 4), 734 + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | 735 + BIT(IEEE80211_STYPE_AUTH >> 4), 731 736 }, 732 737 }; 733 738
+3 -1
net/mac80211/rx.c
··· 4562 4562 return ieee80211_is_public_action(hdr, skb->len) || 4563 4563 ieee80211_is_probe_req(hdr->frame_control) || 4564 4564 ieee80211_is_probe_resp(hdr->frame_control) || 4565 - ieee80211_is_beacon(hdr->frame_control); 4565 + ieee80211_is_beacon(hdr->frame_control) || 4566 + (ieee80211_is_auth(hdr->frame_control) && 4567 + ether_addr_equal(sdata->vif.addr, hdr->addr1)); 4566 4568 case NL80211_IFTYPE_NAN: 4567 4569 /* Currently no frames on NAN interface are allowed */ 4568 4570 return false;
+6 -4
net/mac80211/trace.h
··· 2173 2173 TRACE_EVENT(drv_get_txpower, 2174 2174 TP_PROTO(struct ieee80211_local *local, 2175 2175 struct ieee80211_sub_if_data *sdata, 2176 - int dbm, int ret), 2176 + unsigned int link_id, int dbm, int ret), 2177 2177 2178 - TP_ARGS(local, sdata, dbm, ret), 2178 + TP_ARGS(local, sdata, link_id, dbm, ret), 2179 2179 2180 2180 TP_STRUCT__entry( 2181 2181 LOCAL_ENTRY 2182 2182 VIF_ENTRY 2183 + __field(unsigned int, link_id) 2183 2184 __field(int, dbm) 2184 2185 __field(int, ret) 2185 2186 ), ··· 2188 2187 TP_fast_assign( 2189 2188 LOCAL_ASSIGN; 2190 2189 VIF_ASSIGN; 2190 + __entry->link_id = link_id; 2191 2191 __entry->dbm = dbm; 2192 2192 __entry->ret = ret; 2193 2193 ), 2194 2194 2195 2195 TP_printk( 2196 - LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d", 2197 - LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret 2196 + LOCAL_PR_FMT VIF_PR_FMT " link_id:%d dbm:%d ret:%d", 2197 + LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->dbm, __entry->ret 2198 2198 ) 2199 2199 ); 2200 2200
+18 -11
net/mac80211/util.c
··· 2748 2748 { 2749 2749 struct ieee80211_he_operation *he_oper; 2750 2750 struct ieee80211_he_6ghz_oper *he_6ghz_op; 2751 + struct cfg80211_chan_def he_chandef; 2751 2752 u32 he_oper_params; 2752 2753 u8 ie_len = 1 + sizeof(struct ieee80211_he_operation); 2753 2754 ··· 2780 2779 if (chandef->chan->band != NL80211_BAND_6GHZ) 2781 2780 goto out; 2782 2781 2782 + cfg80211_chandef_create(&he_chandef, chandef->chan, NL80211_CHAN_NO_HT); 2783 + he_chandef.center_freq1 = chandef->center_freq1; 2784 + he_chandef.center_freq2 = chandef->center_freq2; 2785 + he_chandef.width = chandef->width; 2786 + 2783 2787 /* TODO add VHT operational */ 2784 2788 he_6ghz_op = (struct ieee80211_he_6ghz_oper *)pos; 2785 2789 he_6ghz_op->minrate = 6; /* 6 Mbps */ 2786 2790 he_6ghz_op->primary = 2787 - ieee80211_frequency_to_channel(chandef->chan->center_freq); 2791 + ieee80211_frequency_to_channel(he_chandef.chan->center_freq); 2788 2792 he_6ghz_op->ccfs0 = 2789 - ieee80211_frequency_to_channel(chandef->center_freq1); 2790 - if (chandef->center_freq2) 2793 + ieee80211_frequency_to_channel(he_chandef.center_freq1); 2794 + if (he_chandef.center_freq2) 2791 2795 he_6ghz_op->ccfs1 = 2792 - ieee80211_frequency_to_channel(chandef->center_freq2); 2796 + ieee80211_frequency_to_channel(he_chandef.center_freq2); 2793 2797 else 2794 2798 he_6ghz_op->ccfs1 = 0; 2795 2799 2796 - switch (chandef->width) { 2800 + switch (he_chandef.width) { 2797 2801 case NL80211_CHAN_WIDTH_320: 2798 - /* 2799 - * TODO: mesh operation is not defined over 6GHz 320 MHz 2800 - * channels. 2802 + /* Downgrade EHT 320 MHz BW to 160 MHz for HE and set new 2803 + * center_freq1 2801 2804 */ 2802 - WARN_ON(1); 2803 - break; 2805 + ieee80211_chandef_downgrade(&he_chandef, NULL); 2806 + he_6ghz_op->ccfs0 = 2807 + ieee80211_frequency_to_channel(he_chandef.center_freq1); 2808 + fallthrough; 2804 2809 case NL80211_CHAN_WIDTH_160: 2805 2810 /* Convert 160 MHz channel width to new style as interop 2806 2811 * workaround. 
··· 2814 2807 he_6ghz_op->control = 2815 2808 IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ; 2816 2809 he_6ghz_op->ccfs1 = he_6ghz_op->ccfs0; 2817 - if (chandef->chan->center_freq < chandef->center_freq1) 2810 + if (he_chandef.chan->center_freq < he_chandef.center_freq1) 2818 2811 he_6ghz_op->ccfs0 -= 8; 2819 2812 else 2820 2813 he_6ghz_op->ccfs0 += 8;
+2 -2
net/wireless/chan.c
··· 1039 1039 if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) 1040 1040 continue; 1041 1041 1042 - wiphy_lock(&rdev->wiphy); 1042 + guard(wiphy)(&rdev->wiphy); 1043 + 1043 1044 found = cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan) || 1044 1045 cfg80211_offchan_chain_is_active(rdev, chan); 1045 - wiphy_unlock(&rdev->wiphy); 1046 1046 1047 1047 if (found) 1048 1048 return true;
+19 -23
net/wireless/core.c
··· 191 191 return err; 192 192 } 193 193 194 - wiphy_lock(&rdev->wiphy); 194 + guard(wiphy)(&rdev->wiphy); 195 + 195 196 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 196 197 if (!wdev->netdev) 197 198 continue; ··· 213 212 continue; 214 213 nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE); 215 214 } 216 - wiphy_unlock(&rdev->wiphy); 217 215 218 216 return 0; 219 217 } ··· 221 221 { 222 222 struct cfg80211_registered_device *rdev = data; 223 223 224 - wiphy_lock(&rdev->wiphy); 224 + guard(wiphy)(&rdev->wiphy); 225 + 225 226 rdev_rfkill_poll(rdev); 226 - wiphy_unlock(&rdev->wiphy); 227 227 } 228 228 229 229 void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, ··· 283 283 284 284 /* otherwise, check iftype */ 285 285 286 - wiphy_lock(wiphy); 286 + guard(wiphy)(wiphy); 287 287 288 288 switch (wdev->iftype) { 289 289 case NL80211_IFTYPE_P2P_DEVICE: ··· 295 295 default: 296 296 break; 297 297 } 298 - 299 - wiphy_unlock(wiphy); 300 298 } 301 299 } 302 300 EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces); ··· 329 331 rdev = container_of(work, struct cfg80211_registered_device, 330 332 event_work); 331 333 332 - wiphy_lock(&rdev->wiphy); 334 + guard(wiphy)(&rdev->wiphy); 335 + 333 336 cfg80211_process_rdev_events(rdev); 334 - wiphy_unlock(&rdev->wiphy); 335 337 } 336 338 337 339 void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev) ··· 345 347 if (wdev->netdev) 346 348 dev_close(wdev->netdev); 347 349 348 - wiphy_lock(&rdev->wiphy); 350 + guard(wiphy)(&rdev->wiphy); 351 + 349 352 cfg80211_leave(rdev, wdev); 350 353 cfg80211_remove_virtual_intf(rdev, wdev); 351 - wiphy_unlock(&rdev->wiphy); 352 354 } 353 355 } 354 356 } ··· 421 423 422 424 trace_wiphy_work_worker_start(&rdev->wiphy); 423 425 424 - wiphy_lock(&rdev->wiphy); 426 + guard(wiphy)(&rdev->wiphy); 425 427 if (rdev->suspended) 426 - goto out; 428 + return; 427 429 428 430 spin_lock_irq(&rdev->wiphy_work_lock); 429 431 wk = 
list_first_entry_or_null(&rdev->wiphy_work_list, ··· 439 441 } else { 440 442 spin_unlock_irq(&rdev->wiphy_work_lock); 441 443 } 442 - out: 443 - wiphy_unlock(&rdev->wiphy); 444 444 } 445 445 446 446 /* exported functions */ ··· 1522 1526 break; 1523 1527 case NETDEV_REGISTER: 1524 1528 if (!wdev->registered) { 1525 - wiphy_lock(&rdev->wiphy); 1529 + guard(wiphy)(&rdev->wiphy); 1530 + 1526 1531 cfg80211_register_wdev(rdev, wdev); 1527 - wiphy_unlock(&rdev->wiphy); 1528 1532 } 1529 1533 break; 1530 1534 case NETDEV_UNREGISTER: ··· 1533 1537 * so check wdev->registered. 1534 1538 */ 1535 1539 if (wdev->registered && !wdev->registering) { 1536 - wiphy_lock(&rdev->wiphy); 1540 + guard(wiphy)(&rdev->wiphy); 1541 + 1537 1542 _cfg80211_unregister_wdev(wdev, false); 1538 - wiphy_unlock(&rdev->wiphy); 1539 1543 } 1540 1544 break; 1541 1545 case NETDEV_GOING_DOWN: 1542 - wiphy_lock(&rdev->wiphy); 1543 - cfg80211_leave(rdev, wdev); 1544 - cfg80211_remove_links(wdev); 1545 - wiphy_unlock(&rdev->wiphy); 1546 + scoped_guard(wiphy, &rdev->wiphy) { 1547 + cfg80211_leave(rdev, wdev); 1548 + cfg80211_remove_links(wdev); 1549 + } 1546 1550 /* since we just did cfg80211_leave() nothing to do there */ 1547 1551 cancel_work_sync(&wdev->disconnect_wk); 1548 1552 cancel_work_sync(&wdev->pmsr_free_wk);
+4 -4
net/wireless/mlme.c
··· 627 627 rdev = container_of(wk, struct cfg80211_registered_device, 628 628 mgmt_registrations_update_wk); 629 629 630 - wiphy_lock(&rdev->wiphy); 630 + guard(wiphy)(&rdev->wiphy); 631 + 631 632 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) 632 633 cfg80211_mgmt_registrations_update(wdev); 633 - wiphy_unlock(&rdev->wiphy); 634 634 } 635 635 636 636 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, ··· 1193 1193 const struct cfg80211_chan_def *chandef, 1194 1194 enum nl80211_radar_event event) 1195 1195 { 1196 - wiphy_lock(&rdev->wiphy); 1196 + guard(wiphy)(&rdev->wiphy); 1197 + 1197 1198 __cfg80211_background_cac_event(rdev, rdev->background_radar_wdev, 1198 1199 chandef, event); 1199 - wiphy_unlock(&rdev->wiphy); 1200 1200 } 1201 1201 1202 1202 void cfg80211_background_cac_done_wk(struct work_struct *work)
+82 -121
net/wireless/nl80211.c
··· 3626 3626 } else 3627 3627 wdev = netdev->ieee80211_ptr; 3628 3628 3629 - wiphy_lock(&rdev->wiphy); 3629 + guard(wiphy)(&rdev->wiphy); 3630 3630 3631 3631 /* 3632 3632 * end workaround code, by now the rdev is available ··· 3639 3639 rtnl_unlock(); 3640 3640 3641 3641 if (result) 3642 - goto out; 3642 + return result; 3643 3643 3644 3644 if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { 3645 3645 struct ieee80211_txq_params txq_params; 3646 3646 struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; 3647 3647 3648 - if (!rdev->ops->set_txq_params) { 3649 - result = -EOPNOTSUPP; 3650 - goto out; 3651 - } 3648 + if (!rdev->ops->set_txq_params) 3649 + return -EOPNOTSUPP; 3652 3650 3653 - if (!netdev) { 3654 - result = -EINVAL; 3655 - goto out; 3656 - } 3651 + if (!netdev) 3652 + return -EINVAL; 3657 3653 3658 3654 if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 3659 - netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) { 3660 - result = -EINVAL; 3661 - goto out; 3662 - } 3655 + netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 3656 + return -EINVAL; 3663 3657 3664 - if (!netif_running(netdev)) { 3665 - result = -ENETDOWN; 3666 - goto out; 3667 - } 3658 + if (!netif_running(netdev)) 3659 + return -ENETDOWN; 3668 3660 3669 3661 nla_for_each_nested(nl_txq_params, 3670 3662 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], ··· 3667 3675 txq_params_policy, 3668 3676 info->extack); 3669 3677 if (result) 3670 - goto out; 3678 + return result; 3679 + 3671 3680 result = parse_txq_params(tb, &txq_params); 3672 3681 if (result) 3673 - goto out; 3682 + return result; 3674 3683 3675 3684 txq_params.link_id = 3676 3685 nl80211_link_id_or_invalid(info->attrs); ··· 3687 3694 result = rdev_set_txq_params(rdev, netdev, 3688 3695 &txq_params); 3689 3696 if (result) 3690 - goto out; 3697 + return result; 3691 3698 } 3692 3699 } 3693 3700 ··· 3704 3711 } 3705 3712 3706 3713 if (result) 3707 - goto out; 3714 + return result; 3708 3715 } 3709 3716 3710 3717 if 
(info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { ··· 3715 3722 if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER)) 3716 3723 txp_wdev = NULL; 3717 3724 3718 - if (!rdev->ops->set_tx_power) { 3719 - result = -EOPNOTSUPP; 3720 - goto out; 3721 - } 3725 + if (!rdev->ops->set_tx_power) 3726 + return -EOPNOTSUPP; 3722 3727 3723 3728 idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING; 3724 3729 type = nla_get_u32(info->attrs[idx]); 3725 3730 3726 3731 if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] && 3727 - (type != NL80211_TX_POWER_AUTOMATIC)) { 3728 - result = -EINVAL; 3729 - goto out; 3730 - } 3732 + (type != NL80211_TX_POWER_AUTOMATIC)) 3733 + return -EINVAL; 3731 3734 3732 3735 if (type != NL80211_TX_POWER_AUTOMATIC) { 3733 3736 idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL; ··· 3732 3743 3733 3744 result = rdev_set_tx_power(rdev, txp_wdev, type, mbm); 3734 3745 if (result) 3735 - goto out; 3746 + return result; 3736 3747 } 3737 3748 3738 3749 if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && ··· 3741 3752 3742 3753 if ((!rdev->wiphy.available_antennas_tx && 3743 3754 !rdev->wiphy.available_antennas_rx) || 3744 - !rdev->ops->set_antenna) { 3745 - result = -EOPNOTSUPP; 3746 - goto out; 3747 - } 3755 + !rdev->ops->set_antenna) 3756 + return -EOPNOTSUPP; 3748 3757 3749 3758 tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]); 3750 3759 rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]); ··· 3750 3763 /* reject antenna configurations which don't match the 3751 3764 * available antenna masks, except for the "all" mask */ 3752 3765 if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) || 3753 - (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) { 3754 - result = -EINVAL; 3755 - goto out; 3756 - } 3766 + (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) 3767 + return -EINVAL; 3757 3768 3758 3769 tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; 3759 3770 rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; 3760 
3771 3761 3772 result = rdev_set_antenna(rdev, tx_ant, rx_ant); 3762 3773 if (result) 3763 - goto out; 3774 + return result; 3764 3775 } 3765 3776 3766 3777 changed = 0; ··· 3780 3795 if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { 3781 3796 frag_threshold = nla_get_u32( 3782 3797 info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); 3783 - if (frag_threshold < 256) { 3784 - result = -EINVAL; 3785 - goto out; 3786 - } 3798 + if (frag_threshold < 256) 3799 + return -EINVAL; 3787 3800 3788 3801 if (frag_threshold != (u32) -1) { 3789 3802 /* ··· 3802 3819 } 3803 3820 3804 3821 if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { 3805 - if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { 3806 - result = -EINVAL; 3807 - goto out; 3808 - } 3822 + if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) 3823 + return -EINVAL; 3809 3824 3810 3825 coverage_class = nla_get_u8( 3811 3826 info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); ··· 3811 3830 } 3812 3831 3813 3832 if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) { 3814 - if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) { 3815 - result = -EOPNOTSUPP; 3816 - goto out; 3817 - } 3833 + if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) 3834 + return -EOPNOTSUPP; 3818 3835 3819 3836 changed |= WIPHY_PARAM_DYN_ACK; 3820 3837 } 3821 3838 3822 3839 if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) { 3823 3840 if (!wiphy_ext_feature_isset(&rdev->wiphy, 3824 - NL80211_EXT_FEATURE_TXQS)) { 3825 - result = -EOPNOTSUPP; 3826 - goto out; 3827 - } 3841 + NL80211_EXT_FEATURE_TXQS)) 3842 + return -EOPNOTSUPP; 3843 + 3828 3844 txq_limit = nla_get_u32( 3829 3845 info->attrs[NL80211_ATTR_TXQ_LIMIT]); 3830 3846 changed |= WIPHY_PARAM_TXQ_LIMIT; ··· 3829 3851 3830 3852 if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) { 3831 3853 if (!wiphy_ext_feature_isset(&rdev->wiphy, 3832 - NL80211_EXT_FEATURE_TXQS)) { 3833 - result = -EOPNOTSUPP; 3834 - goto out; 3835 - } 3854 + NL80211_EXT_FEATURE_TXQS)) 3855 + return -EOPNOTSUPP; 3856 + 3836 3857 
txq_memory_limit = nla_get_u32( 3837 3858 info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]); 3838 3859 changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT; ··· 3839 3862 3840 3863 if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) { 3841 3864 if (!wiphy_ext_feature_isset(&rdev->wiphy, 3842 - NL80211_EXT_FEATURE_TXQS)) { 3843 - result = -EOPNOTSUPP; 3844 - goto out; 3845 - } 3865 + NL80211_EXT_FEATURE_TXQS)) 3866 + return -EOPNOTSUPP; 3867 + 3846 3868 txq_quantum = nla_get_u32( 3847 3869 info->attrs[NL80211_ATTR_TXQ_QUANTUM]); 3848 3870 changed |= WIPHY_PARAM_TXQ_QUANTUM; ··· 3853 3877 u8 old_coverage_class; 3854 3878 u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum; 3855 3879 3856 - if (!rdev->ops->set_wiphy_params) { 3857 - result = -EOPNOTSUPP; 3858 - goto out; 3859 - } 3880 + if (!rdev->ops->set_wiphy_params) 3881 + return -EOPNOTSUPP; 3860 3882 3861 3883 old_retry_short = rdev->wiphy.retry_short; 3862 3884 old_retry_long = rdev->wiphy.retry_long; ··· 3892 3918 rdev->wiphy.txq_limit = old_txq_limit; 3893 3919 rdev->wiphy.txq_memory_limit = old_txq_memory_limit; 3894 3920 rdev->wiphy.txq_quantum = old_txq_quantum; 3895 - goto out; 3921 + return result; 3896 3922 } 3897 3923 } 3898 3924 3899 - result = 0; 3900 - 3901 - out: 3902 - wiphy_unlock(&rdev->wiphy); 3903 - return result; 3925 + return 0; 3904 3926 } 3905 3927 3906 3928 int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef) ··· 3980 4010 goto nla_put_failure; 3981 4011 } 3982 4012 3983 - if (rdev->ops->get_tx_power) { 4013 + if (rdev->ops->get_tx_power && !wdev->valid_links) { 3984 4014 int dbm, ret; 3985 4015 3986 - ret = rdev_get_tx_power(rdev, wdev, &dbm); 4016 + ret = rdev_get_tx_power(rdev, wdev, 0, &dbm); 3987 4017 if (ret == 0 && 3988 4018 nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 3989 4019 DBM_TO_MBM(dbm))) ··· 4052 4082 if (ret == 0 && nl80211_send_chandef(msg, &chandef)) 4053 4083 goto nla_put_failure; 4054 4084 4085 + if (rdev->ops->get_tx_power) { 4086 + int dbm, ret; 
4087 + 4088 + ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm); 4089 + if (ret == 0 && 4090 + nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 4091 + DBM_TO_MBM(dbm))) 4092 + goto nla_put_failure; 4093 + } 4055 4094 nla_nest_end(msg, link); 4056 4095 } 4057 4096 ··· 4123 4144 4124 4145 if_idx = 0; 4125 4146 4126 - wiphy_lock(&rdev->wiphy); 4147 + guard(wiphy)(&rdev->wiphy); 4148 + 4127 4149 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 4128 4150 if (if_idx < if_start) { 4129 4151 if_idx++; 4130 4152 continue; 4131 4153 } 4154 + 4132 4155 if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, 4133 4156 cb->nlh->nlmsg_seq, NLM_F_MULTI, 4134 4157 rdev, wdev, 4135 - NL80211_CMD_NEW_INTERFACE) < 0) { 4136 - wiphy_unlock(&rdev->wiphy); 4158 + NL80211_CMD_NEW_INTERFACE) < 0) 4137 4159 goto out; 4138 - } 4160 + 4139 4161 if_idx++; 4140 4162 } 4141 - wiphy_unlock(&rdev->wiphy); 4142 4163 4143 4164 if_start = 0; 4144 4165 wp_idx++; ··· 4496 4517 static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) 4497 4518 { 4498 4519 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 4499 - int ret; 4500 4520 4501 4521 /* to avoid failing a new interface creation due to pending removal */ 4502 4522 cfg80211_destroy_ifaces(rdev); 4503 4523 4504 - wiphy_lock(&rdev->wiphy); 4505 - ret = _nl80211_new_interface(skb, info); 4506 - wiphy_unlock(&rdev->wiphy); 4524 + guard(wiphy)(&rdev->wiphy); 4507 4525 4508 - return ret; 4526 + return _nl80211_new_interface(skb, info); 4509 4527 } 4510 4528 4511 4529 static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) ··· 10074 10098 struct cfg80211_chan_def chandef; 10075 10099 enum nl80211_dfs_regions dfs_region; 10076 10100 unsigned int cac_time_ms; 10077 - int err = -EINVAL; 10101 + int err; 10078 10102 10079 10103 flush_delayed_work(&rdev->dfs_update_channels_wk); 10080 10104 ··· 10089 10113 return -EINVAL; 10090 10114 } 10091 10115 10092 - wiphy_lock(wiphy); 10116 + 
guard(wiphy)(wiphy); 10093 10117 10094 10118 dfs_region = reg_get_dfs_region(wiphy); 10095 10119 if (dfs_region == NL80211_DFS_UNSET) 10096 - goto unlock; 10120 + return -EINVAL; 10097 10121 10098 10122 err = nl80211_parse_chandef(rdev, info, &chandef); 10099 10123 if (err) 10100 - goto unlock; 10124 + return err; 10101 10125 10102 10126 err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype); 10103 10127 if (err < 0) 10104 - goto unlock; 10128 + return err; 10105 10129 10106 - if (err == 0) { 10107 - err = -EINVAL; 10108 - goto unlock; 10109 - } 10130 + if (err == 0) 10131 + return -EINVAL; 10110 10132 10111 - if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) { 10112 - err = -EINVAL; 10113 - goto unlock; 10114 - } 10133 + if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) 10134 + return -EINVAL; 10115 10135 10116 - if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) { 10117 - err = cfg80211_start_background_radar_detection(rdev, wdev, 10118 - &chandef); 10119 - goto unlock; 10120 - } 10136 + if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) 10137 + return cfg80211_start_background_radar_detection(rdev, wdev, 10138 + &chandef); 10121 10139 10122 10140 if (cfg80211_beaconing_iface_active(wdev)) { 10123 10141 /* During MLO other link(s) can beacon, only the current link ··· 10121 10151 !wdev->links[link_id].ap.beacon_interval) { 10122 10152 /* nothing */ 10123 10153 } else { 10124 - err = -EBUSY; 10125 - goto unlock; 10154 + return -EBUSY; 10126 10155 } 10127 10156 } 10128 10157 10129 - if (wdev->links[link_id].cac_started) { 10130 - err = -EBUSY; 10131 - goto unlock; 10132 - } 10158 + if (wdev->links[link_id].cac_started) 10159 + return -EBUSY; 10133 10160 10134 10161 /* CAC start is offloaded to HW and can't be started manually */ 10135 - if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) { 10136 - err = -EOPNOTSUPP; 10137 - goto unlock; 10138 - } 10162 + if (wiphy_ext_feature_isset(wiphy, 
NL80211_EXT_FEATURE_DFS_OFFLOAD)) 10163 + return -EOPNOTSUPP; 10139 10164 10140 - if (!rdev->ops->start_radar_detection) { 10141 - err = -EOPNOTSUPP; 10142 - goto unlock; 10143 - } 10165 + if (!rdev->ops->start_radar_detection) 10166 + return -EOPNOTSUPP; 10144 10167 10145 10168 cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef); 10146 10169 if (WARN_ON(!cac_time_ms)) ··· 10160 10197 wdev->links[link_id].cac_start_time = jiffies; 10161 10198 wdev->links[link_id].cac_time_ms = cac_time_ms; 10162 10199 } 10163 - unlock: 10164 - wiphy_unlock(wiphy); 10165 10200 10166 - return err; 10201 + return 0; 10167 10202 } 10168 10203 10169 10204 static int nl80211_notify_radar_detection(struct sk_buff *skb,
+2 -2
net/wireless/pmsr.c
··· 630 630 struct wireless_dev *wdev = container_of(work, struct wireless_dev, 631 631 pmsr_free_wk); 632 632 633 - wiphy_lock(wdev->wiphy); 633 + guard(wiphy)(wdev->wiphy); 634 + 634 635 cfg80211_pmsr_process_abort(wdev); 635 - wiphy_unlock(wdev->wiphy); 636 636 } 637 637 638 638 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
+4 -3
net/wireless/rdev-ops.h
··· 600 600 } 601 601 602 602 static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, 603 - struct wireless_dev *wdev, int *dbm) 603 + struct wireless_dev *wdev, unsigned int link_id, 604 + int *dbm) 604 605 { 605 606 int ret; 606 - trace_rdev_get_tx_power(&rdev->wiphy, wdev); 607 - ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm); 607 + trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id); 608 + ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm); 608 609 trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); 609 610 return ret; 610 611 }
+24 -29
net/wireless/reg.c
··· 2465 2465 struct wireless_dev *wdev; 2466 2466 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); 2467 2467 2468 - wiphy_lock(wiphy); 2468 + guard(wiphy)(wiphy); 2469 + 2469 2470 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) 2470 2471 if (!reg_wdev_chan_valid(wiphy, wdev)) 2471 2472 cfg80211_leave(rdev, wdev); 2472 - wiphy_unlock(wiphy); 2473 2473 } 2474 2474 2475 2475 static void reg_check_chans_work(struct work_struct *work) ··· 2649 2649 return; 2650 2650 2651 2651 rtnl_lock(); 2652 - wiphy_lock(wiphy); 2653 - 2654 - tmp = get_wiphy_regdom(wiphy); 2655 - rcu_assign_pointer(wiphy->regd, new_regd); 2656 - rcu_free_regdom(tmp); 2657 - 2658 - wiphy_unlock(wiphy); 2652 + scoped_guard(wiphy, wiphy) { 2653 + tmp = get_wiphy_regdom(wiphy); 2654 + rcu_assign_pointer(wiphy->regd, new_regd); 2655 + rcu_free_regdom(tmp); 2656 + } 2659 2657 rtnl_unlock(); 2660 2658 } 2661 2659 EXPORT_SYMBOL(wiphy_apply_custom_regulatory); ··· 2823 2825 2824 2826 tmp = get_wiphy_regdom(wiphy); 2825 2827 ASSERT_RTNL(); 2826 - wiphy_lock(wiphy); 2827 - rcu_assign_pointer(wiphy->regd, regd); 2828 - wiphy_unlock(wiphy); 2828 + scoped_guard(wiphy, wiphy) { 2829 + rcu_assign_pointer(wiphy->regd, regd); 2830 + } 2829 2831 rcu_free_regdom(tmp); 2830 2832 } 2831 2833 ··· 3203 3205 ASSERT_RTNL(); 3204 3206 3205 3207 for_each_rdev(rdev) { 3206 - wiphy_lock(&rdev->wiphy); 3208 + guard(wiphy)(&rdev->wiphy); 3209 + 3207 3210 reg_process_self_managed_hint(&rdev->wiphy); 3208 - wiphy_unlock(&rdev->wiphy); 3209 3211 } 3210 3212 3211 3213 reg_check_channels(); ··· 3598 3600 struct wireless_dev *wdev; 3599 3601 3600 3602 for_each_rdev(rdev) { 3601 - wiphy_lock(&rdev->wiphy); 3603 + guard(wiphy)(&rdev->wiphy); 3604 + 3602 3605 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 3603 - if (!(wdev->wiphy->regulatory_flags & flag)) { 3604 - wiphy_unlock(&rdev->wiphy); 3606 + if (!(wdev->wiphy->regulatory_flags & flag)) 3605 3607 return false; 3606 - } 3607 3608 } 3608 - 
wiphy_unlock(&rdev->wiphy); 3609 3609 } 3610 3610 3611 3611 return true; ··· 3879 3883 3880 3884 if (!driver_request->intersect) { 3881 3885 ASSERT_RTNL(); 3882 - wiphy_lock(request_wiphy); 3883 - if (request_wiphy->regd) 3884 - tmp = get_wiphy_regdom(request_wiphy); 3886 + scoped_guard(wiphy, request_wiphy) { 3887 + if (request_wiphy->regd) 3888 + tmp = get_wiphy_regdom(request_wiphy); 3885 3889 3886 - regd = reg_copy_regd(rd); 3887 - if (IS_ERR(regd)) { 3888 - wiphy_unlock(request_wiphy); 3889 - return PTR_ERR(regd); 3890 + regd = reg_copy_regd(rd); 3891 + if (IS_ERR(regd)) 3892 + return PTR_ERR(regd); 3893 + 3894 + rcu_assign_pointer(request_wiphy->regd, regd); 3895 + rcu_free_regdom(tmp); 3890 3896 } 3891 3897 3892 - rcu_assign_pointer(request_wiphy->regd, regd); 3893 - rcu_free_regdom(tmp); 3894 - wiphy_unlock(request_wiphy); 3895 3898 reset_regdomains(false, rd); 3896 3899 return 0; 3897 3900 }
+19 -21
net/wireless/scan.c
··· 1238 1238 rdev = container_of(work, struct cfg80211_registered_device, 1239 1239 sched_scan_res_wk); 1240 1240 1241 - wiphy_lock(&rdev->wiphy); 1241 + guard(wiphy)(&rdev->wiphy); 1242 + 1242 1243 list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { 1243 1244 if (req->report_results) { 1244 1245 req->report_results = false; ··· 1254 1253 NL80211_CMD_SCHED_SCAN_RESULTS); 1255 1254 } 1256 1255 } 1257 - wiphy_unlock(&rdev->wiphy); 1258 1256 } 1259 1257 1260 1258 void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid) ··· 1288 1288 1289 1289 void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid) 1290 1290 { 1291 - wiphy_lock(wiphy); 1291 + guard(wiphy)(wiphy); 1292 + 1292 1293 cfg80211_sched_scan_stopped_locked(wiphy, reqid); 1293 - wiphy_unlock(wiphy); 1294 1294 } 1295 1295 EXPORT_SYMBOL(cfg80211_sched_scan_stopped); 1296 1296 ··· 3565 3565 /* translate "Scan for SSID" request */ 3566 3566 if (wreq) { 3567 3567 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 3568 - if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) { 3569 - err = -EINVAL; 3570 - goto out; 3571 - } 3568 + if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) 3569 + return -EINVAL; 3572 3570 memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len); 3573 3571 creq->ssids[0].ssid_len = wreq->essid_len; 3574 3572 } ··· 3582 3584 3583 3585 eth_broadcast_addr(creq->bssid); 3584 3586 3585 - wiphy_lock(&rdev->wiphy); 3586 - 3587 - rdev->scan_req = creq; 3588 - err = rdev_scan(rdev, creq); 3589 - if (err) { 3590 - rdev->scan_req = NULL; 3591 - /* creq will be freed below */ 3592 - } else { 3593 - nl80211_send_scan_start(rdev, dev->ieee80211_ptr); 3594 - /* creq now owned by driver */ 3595 - creq = NULL; 3596 - dev_hold(dev); 3587 + scoped_guard(wiphy, &rdev->wiphy) { 3588 + rdev->scan_req = creq; 3589 + err = rdev_scan(rdev, creq); 3590 + if (err) { 3591 + rdev->scan_req = NULL; 3592 + /* creq will be freed below */ 3593 + } else { 3594 + nl80211_send_scan_start(rdev, 
dev->ieee80211_ptr); 3595 + /* creq now owned by driver */ 3596 + creq = NULL; 3597 + dev_hold(dev); 3598 + } 3597 3599 } 3598 - wiphy_unlock(&rdev->wiphy); 3600 + 3599 3601 out: 3600 3602 kfree(creq); 3601 3603 return err;
+4 -8
net/wireless/sme.c
··· 252 252 u8 bssid_buf[ETH_ALEN], *bssid = NULL; 253 253 enum nl80211_timeout_reason treason; 254 254 255 - wiphy_lock(&rdev->wiphy); 255 + guard(wiphy)(&rdev->wiphy); 256 256 257 257 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 258 258 if (!wdev->netdev) ··· 280 280 __cfg80211_connect_result(wdev->netdev, &cr, false); 281 281 } 282 282 } 283 - 284 - wiphy_unlock(&rdev->wiphy); 285 283 } 286 284 287 285 static void cfg80211_step_auth_next(struct cfg80211_conn *conn, ··· 691 693 * as chan dfs state, etc. 692 694 */ 693 695 for_each_rdev(rdev) { 694 - wiphy_lock(&rdev->wiphy); 696 + guard(wiphy)(&rdev->wiphy); 697 + 695 698 list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { 696 699 if (wdev->conn || wdev->connected || 697 700 cfg80211_beaconing_iface_active(wdev)) 698 701 is_all_idle = false; 699 702 } 700 - wiphy_unlock(&rdev->wiphy); 701 703 } 702 704 703 705 return is_all_idle; ··· 1581 1583 container_of(work, struct wireless_dev, disconnect_wk); 1582 1584 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1583 1585 1584 - wiphy_lock(wdev->wiphy); 1586 + guard(wiphy)(wdev->wiphy); 1585 1587 1586 1588 if (wdev->conn_owner_nlportid) { 1587 1589 switch (wdev->iftype) { ··· 1617 1619 break; 1618 1620 } 1619 1621 } 1620 - 1621 - wiphy_unlock(wdev->wiphy); 1622 1622 }
+2
net/wireless/tests/scan.c
··· 810 810 skb_put_data(input, "123", 3); 811 811 812 812 ies = kunit_kzalloc(test, struct_size(ies, data, input->len), GFP_KERNEL); 813 + KUNIT_ASSERT_NOT_NULL(test, ies); 814 + 813 815 ies->len = input->len; 814 816 memcpy(ies->data, input->data, input->len); 815 817
+22 -22
net/wireless/trace.h
··· 1690 1690 WIPHY_PR_ARG, __entry->changed) 1691 1691 ); 1692 1692 1693 - DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power, 1694 - TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), 1695 - TP_ARGS(wiphy, wdev) 1693 + DECLARE_EVENT_CLASS(wiphy_wdev_link_evt, 1694 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 1695 + unsigned int link_id), 1696 + TP_ARGS(wiphy, wdev, link_id), 1697 + TP_STRUCT__entry( 1698 + WIPHY_ENTRY 1699 + WDEV_ENTRY 1700 + __field(unsigned int, link_id) 1701 + ), 1702 + TP_fast_assign( 1703 + WIPHY_ASSIGN; 1704 + WDEV_ASSIGN; 1705 + __entry->link_id = link_id; 1706 + ), 1707 + TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u", 1708 + WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id) 1709 + ); 1710 + 1711 + DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power, 1712 + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 1713 + unsigned int link_id), 1714 + TP_ARGS(wiphy, wdev, link_id) 1696 1715 ); 1697 1716 1698 1717 TRACE_EVENT(rdev_set_tx_power, ··· 2211 2192 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u", 2212 2193 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map) 2213 2194 ); 2214 - 2215 - DECLARE_EVENT_CLASS(wiphy_wdev_link_evt, 2216 - TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 2217 - unsigned int link_id), 2218 - TP_ARGS(wiphy, wdev, link_id), 2219 - TP_STRUCT__entry( 2220 - WIPHY_ENTRY 2221 - WDEV_ENTRY 2222 - __field(unsigned int, link_id) 2223 - ), 2224 - TP_fast_assign( 2225 - WIPHY_ASSIGN; 2226 - WDEV_ASSIGN; 2227 - __entry->link_id = link_id; 2228 - ), 2229 - TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u", 2230 - WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id) 2231 - ); 2232 - 2233 2195 DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_channel, 2234 2196 TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, 2235 2197 unsigned int link_id),
+2 -5
net/wireless/util.c
··· 2572 2572 { 2573 2573 struct cfg80211_registered_device *rdev; 2574 2574 struct wireless_dev *wdev; 2575 - int ret; 2576 2575 2577 2576 wdev = dev->ieee80211_ptr; 2578 2577 if (!wdev) ··· 2583 2584 2584 2585 memset(sinfo, 0, sizeof(*sinfo)); 2585 2586 2586 - wiphy_lock(&rdev->wiphy); 2587 - ret = rdev_get_station(rdev, dev, mac_addr, sinfo); 2588 - wiphy_unlock(&rdev->wiphy); 2587 + guard(wiphy)(&rdev->wiphy); 2589 2588 2590 - return ret; 2589 + return rdev_get_station(rdev, dev, mac_addr, sinfo); 2591 2590 } 2592 2591 EXPORT_SYMBOL(cfg80211_get_station); 2593 2592
+111 -206
net/wireless/wext-compat.c
··· 39 39 struct cfg80211_registered_device *rdev; 40 40 struct vif_params vifparams; 41 41 enum nl80211_iftype type; 42 - int ret; 43 42 44 43 rdev = wiphy_to_rdev(wdev->wiphy); 45 44 ··· 61 62 62 63 memset(&vifparams, 0, sizeof(vifparams)); 63 64 64 - wiphy_lock(wdev->wiphy); 65 - ret = cfg80211_change_iface(rdev, dev, type, &vifparams); 66 - wiphy_unlock(wdev->wiphy); 65 + guard(wiphy)(wdev->wiphy); 67 66 68 - return ret; 67 + return cfg80211_change_iface(rdev, dev, type, &vifparams); 69 68 } 70 69 71 70 int cfg80211_wext_giwmode(struct net_device *dev, struct iw_request_info *info, ··· 255 258 u32 orts = wdev->wiphy->rts_threshold; 256 259 int err; 257 260 258 - wiphy_lock(&rdev->wiphy); 259 - if (rts->disabled || !rts->fixed) { 261 + guard(wiphy)(&rdev->wiphy); 262 + if (rts->disabled || !rts->fixed) 260 263 wdev->wiphy->rts_threshold = (u32) -1; 261 - } else if (rts->value < 0) { 262 - err = -EINVAL; 263 - goto out; 264 - } else { 264 + else if (rts->value < 0) 265 + return -EINVAL; 266 + else 265 267 wdev->wiphy->rts_threshold = rts->value; 266 - } 267 268 268 269 err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_RTS_THRESHOLD); 269 - 270 270 if (err) 271 271 wdev->wiphy->rts_threshold = orts; 272 - 273 - out: 274 - wiphy_unlock(&rdev->wiphy); 275 272 return err; 276 273 } 277 274 ··· 293 302 u32 ofrag = wdev->wiphy->frag_threshold; 294 303 int err; 295 304 296 - wiphy_lock(&rdev->wiphy); 305 + guard(wiphy)(&rdev->wiphy); 306 + 297 307 if (frag->disabled || !frag->fixed) { 298 308 wdev->wiphy->frag_threshold = (u32) -1; 299 309 } else if (frag->value < 256) { 300 - err = -EINVAL; 301 - goto out; 310 + return -EINVAL; 302 311 } else { 303 312 /* Fragment length must be even, so strip LSB. 
*/ 304 313 wdev->wiphy->frag_threshold = frag->value & ~0x1; ··· 307 316 err = rdev_set_wiphy_params(rdev, WIPHY_PARAM_FRAG_THRESHOLD); 308 317 if (err) 309 318 wdev->wiphy->frag_threshold = ofrag; 310 - out: 311 - wiphy_unlock(&rdev->wiphy); 312 - 313 319 return err; 314 320 } 315 321 ··· 340 352 (retry->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT) 341 353 return -EINVAL; 342 354 343 - wiphy_lock(&rdev->wiphy); 355 + guard(wiphy)(&rdev->wiphy); 356 + 344 357 if (retry->flags & IW_RETRY_LONG) { 345 358 wdev->wiphy->retry_long = retry->value; 346 359 changed |= WIPHY_PARAM_RETRY_LONG; ··· 360 371 wdev->wiphy->retry_short = oshort; 361 372 wdev->wiphy->retry_long = olong; 362 373 } 363 - wiphy_unlock(&rdev->wiphy); 364 374 365 375 return err; 366 376 } ··· 566 578 struct iw_point *erq = &wrqu->encoding; 567 579 struct wireless_dev *wdev = dev->ieee80211_ptr; 568 580 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 569 - int idx, err; 570 - bool remove = false; 571 581 struct key_params params; 582 + bool remove = false; 583 + int idx; 572 584 573 585 if (wdev->iftype != NL80211_IFTYPE_STATION && 574 586 wdev->iftype != NL80211_IFTYPE_ADHOC) ··· 580 592 !rdev->ops->set_default_key) 581 593 return -EOPNOTSUPP; 582 594 583 - wiphy_lock(&rdev->wiphy); 584 - if (wdev->valid_links) { 585 - err = -EOPNOTSUPP; 586 - goto out; 587 - } 595 + guard(wiphy)(&rdev->wiphy); 596 + if (wdev->valid_links) 597 + return -EOPNOTSUPP; 588 598 589 599 idx = erq->flags & IW_ENCODE_INDEX; 590 600 if (idx == 0) { ··· 590 604 if (idx < 0) 591 605 idx = 0; 592 606 } else if (idx < 1 || idx > 4) { 593 - err = -EINVAL; 594 - goto out; 607 + return -EINVAL; 595 608 } else { 596 609 idx--; 597 610 } ··· 599 614 remove = true; 600 615 else if (erq->length == 0) { 601 616 /* No key data - just set the default TX key index */ 602 - err = 0; 617 + int err = 0; 618 + 603 619 if (wdev->connected || 604 620 (wdev->iftype == NL80211_IFTYPE_ADHOC && 605 621 wdev->u.ibss.current_bss)) ··· 
608 622 true); 609 623 if (!err) 610 624 wdev->wext.default_key = idx; 611 - goto out; 625 + return err; 612 626 } 613 627 614 628 memset(&params, 0, sizeof(params)); 615 629 params.key = keybuf; 616 630 params.key_len = erq->length; 617 - if (erq->length == 5) { 631 + if (erq->length == 5) 618 632 params.cipher = WLAN_CIPHER_SUITE_WEP40; 619 - } else if (erq->length == 13) { 633 + else if (erq->length == 13) 620 634 params.cipher = WLAN_CIPHER_SUITE_WEP104; 621 - } else if (!remove) { 622 - err = -EINVAL; 623 - goto out; 624 - } 635 + else if (!remove) 636 + return -EINVAL; 625 637 626 - err = cfg80211_set_encryption(rdev, dev, false, NULL, remove, 627 - wdev->wext.default_key == -1, 628 - idx, &params); 629 - out: 630 - wiphy_unlock(&rdev->wiphy); 631 - 632 - return err; 638 + return cfg80211_set_encryption(rdev, dev, false, NULL, remove, 639 + wdev->wext.default_key == -1, 640 + idx, &params); 633 641 } 634 642 635 643 static int cfg80211_wext_siwencodeext(struct net_device *dev, ··· 639 659 bool remove = false; 640 660 struct key_params params; 641 661 u32 cipher; 642 - int ret; 643 662 644 663 if (wdev->iftype != NL80211_IFTYPE_STATION && 645 664 wdev->iftype != NL80211_IFTYPE_ADHOC) ··· 713 734 params.seq_len = 6; 714 735 } 715 736 716 - wiphy_lock(wdev->wiphy); 717 - ret = cfg80211_set_encryption( 718 - rdev, dev, 719 - !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY), 720 - addr, remove, 721 - ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, 722 - idx, &params); 723 - wiphy_unlock(wdev->wiphy); 737 + guard(wiphy)(wdev->wiphy); 724 738 725 - return ret; 739 + return cfg80211_set_encryption(rdev, dev, 740 + !(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY), 741 + addr, remove, 742 + ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, 743 + idx, &params); 726 744 } 727 745 728 746 static int cfg80211_wext_giwencode(struct net_device *dev, ··· 770 794 struct cfg80211_chan_def chandef = { 771 795 .width = NL80211_CHAN_WIDTH_20_NOHT, 772 796 }; 773 - int freq, ret; 797 + int freq; 774 798 
775 - wiphy_lock(&rdev->wiphy); 799 + guard(wiphy)(&rdev->wiphy); 776 800 777 801 switch (wdev->iftype) { 778 802 case NL80211_IFTYPE_STATION: 779 - ret = cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); 780 - break; 803 + return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra); 781 804 case NL80211_IFTYPE_ADHOC: 782 - ret = cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); 783 - break; 805 + return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra); 784 806 case NL80211_IFTYPE_MONITOR: 785 807 freq = cfg80211_wext_freq(wextfreq); 786 - if (freq < 0) { 787 - ret = freq; 788 - break; 789 - } 790 - if (freq == 0) { 791 - ret = -EINVAL; 792 - break; 793 - } 808 + if (freq < 0) 809 + return freq; 810 + if (freq == 0) 811 + return -EINVAL; 812 + 794 813 chandef.center_freq1 = freq; 795 814 chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); 796 - if (!chandef.chan) { 797 - ret = -EINVAL; 798 - break; 799 - } 800 - ret = cfg80211_set_monitor_channel(rdev, dev, &chandef); 801 - break; 815 + if (!chandef.chan) 816 + return -EINVAL; 817 + return cfg80211_set_monitor_channel(rdev, dev, &chandef); 802 818 case NL80211_IFTYPE_MESH_POINT: 803 819 freq = cfg80211_wext_freq(wextfreq); 804 - if (freq < 0) { 805 - ret = freq; 806 - break; 807 - } 808 - if (freq == 0) { 809 - ret = -EINVAL; 810 - break; 811 - } 820 + if (freq < 0) 821 + return freq; 822 + if (freq == 0) 823 + return -EINVAL; 812 824 chandef.center_freq1 = freq; 813 825 chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq); 814 - if (!chandef.chan) { 815 - ret = -EINVAL; 816 - break; 817 - } 818 - ret = cfg80211_set_mesh_channel(rdev, wdev, &chandef); 819 - break; 826 + if (!chandef.chan) 827 + return -EINVAL; 828 + return cfg80211_set_mesh_channel(rdev, wdev, &chandef); 820 829 default: 821 - ret = -EOPNOTSUPP; 822 - break; 830 + return -EOPNOTSUPP; 823 831 } 824 - 825 - wiphy_unlock(&rdev->wiphy); 826 - 827 - return ret; 828 832 } 829 833 830 834 static int cfg80211_wext_giwfreq(struct 
net_device *dev, ··· 817 861 struct cfg80211_chan_def chandef = {}; 818 862 int ret; 819 863 820 - wiphy_lock(&rdev->wiphy); 864 + guard(wiphy)(&rdev->wiphy); 865 + 821 866 switch (wdev->iftype) { 822 867 case NL80211_IFTYPE_STATION: 823 - ret = cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); 824 - break; 868 + return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); 825 869 case NL80211_IFTYPE_ADHOC: 826 - ret = cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); 827 - break; 870 + return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); 828 871 case NL80211_IFTYPE_MONITOR: 829 - if (!rdev->ops->get_channel) { 830 - ret = -EINVAL; 831 - break; 832 - } 872 + if (!rdev->ops->get_channel) 873 + return -EINVAL; 833 874 834 875 ret = rdev_get_channel(rdev, wdev, 0, &chandef); 835 876 if (ret) 836 - break; 877 + return ret; 837 878 freq->m = chandef.chan->center_freq; 838 879 freq->e = 6; 839 - ret = 0; 840 - break; 880 + return ret; 841 881 default: 842 - ret = -EINVAL; 843 - break; 882 + return -EINVAL; 844 883 } 845 - 846 - wiphy_unlock(&rdev->wiphy); 847 - 848 - return ret; 849 884 } 850 885 851 886 static int cfg80211_wext_siwtxpower(struct net_device *dev, ··· 847 900 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 848 901 enum nl80211_tx_power_setting type; 849 902 int dbm = 0; 850 - int ret; 851 903 852 904 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) 853 905 return -EINVAL; ··· 888 942 return 0; 889 943 } 890 944 891 - wiphy_lock(&rdev->wiphy); 892 - ret = rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm)); 893 - wiphy_unlock(&rdev->wiphy); 945 + guard(wiphy)(&rdev->wiphy); 894 946 895 - return ret; 947 + return rdev_set_tx_power(rdev, wdev, type, DBM_TO_MBM(dbm)); 896 948 } 897 949 898 950 static int cfg80211_wext_giwtxpower(struct net_device *dev, ··· 909 965 if (!rdev->ops->get_tx_power) 910 966 return -EOPNOTSUPP; 911 967 912 - wiphy_lock(&rdev->wiphy); 913 - err = rdev_get_tx_power(rdev, wdev, &val); 914 - 
wiphy_unlock(&rdev->wiphy); 968 + scoped_guard(wiphy, &rdev->wiphy) { 969 + err = rdev_get_tx_power(rdev, wdev, 0, &val); 970 + } 915 971 if (err) 916 972 return err; 917 973 ··· 1153 1209 timeout = wrq->value / 1000; 1154 1210 } 1155 1211 1156 - wiphy_lock(&rdev->wiphy); 1212 + guard(wiphy)(&rdev->wiphy); 1213 + 1157 1214 err = rdev_set_power_mgmt(rdev, dev, ps, timeout); 1158 - wiphy_unlock(&rdev->wiphy); 1159 1215 if (err) 1160 1216 return err; 1161 1217 ··· 1188 1244 struct cfg80211_bitrate_mask mask; 1189 1245 u32 fixed, maxrate; 1190 1246 struct ieee80211_supported_band *sband; 1191 - int band, ridx, ret; 1192 1247 bool match = false; 1248 + int band, ridx; 1193 1249 1194 1250 if (!rdev->ops->set_bitrate_mask) 1195 1251 return -EOPNOTSUPP; ··· 1227 1283 if (!match) 1228 1284 return -EINVAL; 1229 1285 1230 - wiphy_lock(&rdev->wiphy); 1231 - if (dev->ieee80211_ptr->valid_links) 1232 - ret = -EOPNOTSUPP; 1233 - else 1234 - ret = rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask); 1235 - wiphy_unlock(&rdev->wiphy); 1286 + guard(wiphy)(&rdev->wiphy); 1236 1287 1237 - return ret; 1288 + if (dev->ieee80211_ptr->valid_links) 1289 + return -EOPNOTSUPP; 1290 + 1291 + return rdev_set_bitrate_mask(rdev, dev, 0, NULL, &mask); 1238 1292 } 1239 1293 1240 1294 static int cfg80211_wext_giwrate(struct net_device *dev, ··· 1261 1319 if (err) 1262 1320 return err; 1263 1321 1264 - wiphy_lock(&rdev->wiphy); 1265 - err = rdev_get_station(rdev, dev, addr, &sinfo); 1266 - wiphy_unlock(&rdev->wiphy); 1322 + scoped_guard(wiphy, &rdev->wiphy) { 1323 + err = rdev_get_station(rdev, dev, addr, &sinfo); 1324 + } 1267 1325 if (err) 1268 1326 return err; 1269 1327 ··· 1362 1420 struct sockaddr *ap_addr = &wrqu->ap_addr; 1363 1421 struct wireless_dev *wdev = dev->ieee80211_ptr; 1364 1422 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1365 - int ret; 1366 1423 1367 - wiphy_lock(&rdev->wiphy); 1424 + guard(wiphy)(&rdev->wiphy); 1425 + 1368 1426 switch (wdev->iftype) { 1369 
1427 case NL80211_IFTYPE_ADHOC: 1370 - ret = cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); 1371 - break; 1428 + return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra); 1372 1429 case NL80211_IFTYPE_STATION: 1373 - ret = cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra); 1374 - break; 1430 + return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra); 1375 1431 default: 1376 - ret = -EOPNOTSUPP; 1377 - break; 1432 + return -EOPNOTSUPP; 1378 1433 } 1379 - wiphy_unlock(&rdev->wiphy); 1380 - 1381 - return ret; 1382 1434 } 1383 1435 1384 1436 static int cfg80211_wext_giwap(struct net_device *dev, ··· 1382 1446 struct sockaddr *ap_addr = &wrqu->ap_addr; 1383 1447 struct wireless_dev *wdev = dev->ieee80211_ptr; 1384 1448 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1385 - int ret; 1386 1449 1387 - wiphy_lock(&rdev->wiphy); 1450 + guard(wiphy)(&rdev->wiphy); 1451 + 1388 1452 switch (wdev->iftype) { 1389 1453 case NL80211_IFTYPE_ADHOC: 1390 - ret = cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); 1391 - break; 1454 + return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra); 1392 1455 case NL80211_IFTYPE_STATION: 1393 - ret = cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra); 1394 - break; 1456 + return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra); 1395 1457 default: 1396 - ret = -EOPNOTSUPP; 1397 - break; 1458 + return -EOPNOTSUPP; 1398 1459 } 1399 - wiphy_unlock(&rdev->wiphy); 1400 - 1401 - return ret; 1402 1460 } 1403 1461 1404 1462 static int cfg80211_wext_siwessid(struct net_device *dev, ··· 1402 1472 struct iw_point *data = &wrqu->data; 1403 1473 struct wireless_dev *wdev = dev->ieee80211_ptr; 1404 1474 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1405 - int ret; 1406 1475 1407 - wiphy_lock(&rdev->wiphy); 1476 + guard(wiphy)(&rdev->wiphy); 1477 + 1408 1478 switch (wdev->iftype) { 1409 1479 case NL80211_IFTYPE_ADHOC: 1410 - ret = cfg80211_ibss_wext_siwessid(dev, info, data, ssid); 1411 - break; 
1480 + return cfg80211_ibss_wext_siwessid(dev, info, data, ssid); 1412 1481 case NL80211_IFTYPE_STATION: 1413 - ret = cfg80211_mgd_wext_siwessid(dev, info, data, ssid); 1414 - break; 1482 + return cfg80211_mgd_wext_siwessid(dev, info, data, ssid); 1415 1483 default: 1416 - ret = -EOPNOTSUPP; 1417 - break; 1484 + return -EOPNOTSUPP; 1418 1485 } 1419 - wiphy_unlock(&rdev->wiphy); 1420 - 1421 - return ret; 1422 1486 } 1423 1487 1424 1488 static int cfg80211_wext_giwessid(struct net_device *dev, ··· 1422 1498 struct iw_point *data = &wrqu->data; 1423 1499 struct wireless_dev *wdev = dev->ieee80211_ptr; 1424 1500 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1425 - int ret; 1426 1501 1427 1502 data->flags = 0; 1428 1503 data->length = 0; 1429 1504 1430 - wiphy_lock(&rdev->wiphy); 1505 + guard(wiphy)(&rdev->wiphy); 1506 + 1431 1507 switch (wdev->iftype) { 1432 1508 case NL80211_IFTYPE_ADHOC: 1433 - ret = cfg80211_ibss_wext_giwessid(dev, info, data, ssid); 1434 - break; 1509 + return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); 1435 1510 case NL80211_IFTYPE_STATION: 1436 - ret = cfg80211_mgd_wext_giwessid(dev, info, data, ssid); 1437 - break; 1511 + return cfg80211_mgd_wext_giwessid(dev, info, data, ssid); 1438 1512 default: 1439 - ret = -EOPNOTSUPP; 1440 - break; 1513 + return -EOPNOTSUPP; 1441 1514 } 1442 - wiphy_unlock(&rdev->wiphy); 1443 - 1444 - return ret; 1445 1515 } 1446 1516 1447 1517 static int cfg80211_wext_siwpmksa(struct net_device *dev, ··· 1446 1528 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 1447 1529 struct cfg80211_pmksa cfg_pmksa; 1448 1530 struct iw_pmksa *pmksa = (struct iw_pmksa *)extra; 1449 - int ret; 1450 1531 1451 1532 memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa)); 1452 1533 ··· 1455 1538 cfg_pmksa.bssid = pmksa->bssid.sa_data; 1456 1539 cfg_pmksa.pmkid = pmksa->pmkid; 1457 1540 1458 - wiphy_lock(&rdev->wiphy); 1541 + guard(wiphy)(&rdev->wiphy); 1542 + 1459 1543 switch (pmksa->cmd) { 
1460 1544 case IW_PMKSA_ADD: 1461 - if (!rdev->ops->set_pmksa) { 1462 - ret = -EOPNOTSUPP; 1463 - break; 1464 - } 1545 + if (!rdev->ops->set_pmksa) 1546 + return -EOPNOTSUPP; 1465 1547 1466 - ret = rdev_set_pmksa(rdev, dev, &cfg_pmksa); 1467 - break; 1548 + return rdev_set_pmksa(rdev, dev, &cfg_pmksa); 1468 1549 case IW_PMKSA_REMOVE: 1469 - if (!rdev->ops->del_pmksa) { 1470 - ret = -EOPNOTSUPP; 1471 - break; 1472 - } 1550 + if (!rdev->ops->del_pmksa) 1551 + return -EOPNOTSUPP; 1473 1552 1474 - ret = rdev_del_pmksa(rdev, dev, &cfg_pmksa); 1475 - break; 1553 + return rdev_del_pmksa(rdev, dev, &cfg_pmksa); 1476 1554 case IW_PMKSA_FLUSH: 1477 - if (!rdev->ops->flush_pmksa) { 1478 - ret = -EOPNOTSUPP; 1479 - break; 1480 - } 1555 + if (!rdev->ops->flush_pmksa) 1556 + return -EOPNOTSUPP; 1481 1557 1482 - ret = rdev_flush_pmksa(rdev, dev); 1483 - break; 1558 + return rdev_flush_pmksa(rdev, dev); 1484 1559 default: 1485 - ret = -EOPNOTSUPP; 1486 - break; 1560 + return -EOPNOTSUPP; 1487 1561 } 1488 - wiphy_unlock(&rdev->wiphy); 1489 - 1490 - return ret; 1491 1562 } 1492 1563 1493 1564 static const iw_handler cfg80211_handlers[] = {
+15 -28
net/wireless/wext-sme.c
··· 302 302 struct iw_point *data = &wrqu->data; 303 303 struct wireless_dev *wdev = dev->ieee80211_ptr; 304 304 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 305 + int ie_len = data->length; 305 306 u8 *ie = extra; 306 - int ie_len = data->length, err; 307 307 308 308 if (wdev->iftype != NL80211_IFTYPE_STATION) 309 309 return -EOPNOTSUPP; ··· 311 311 if (!ie_len) 312 312 ie = NULL; 313 313 314 - wiphy_lock(wdev->wiphy); 314 + guard(wiphy)(wdev->wiphy); 315 315 316 316 /* no change */ 317 - err = 0; 318 317 if (wdev->wext.ie_len == ie_len && 319 318 memcmp(wdev->wext.ie, ie, ie_len) == 0) 320 - goto out; 319 + return 0; 321 320 322 321 if (ie_len) { 323 322 ie = kmemdup(extra, ie_len, GFP_KERNEL); 324 - if (!ie) { 325 - err = -ENOMEM; 326 - goto out; 327 - } 328 - } else 323 + if (!ie) 324 + return -ENOMEM; 325 + } else { 329 326 ie = NULL; 327 + } 330 328 331 329 kfree(wdev->wext.ie); 332 330 wdev->wext.ie = ie; 333 331 wdev->wext.ie_len = ie_len; 334 332 335 - if (wdev->conn) { 336 - err = cfg80211_disconnect(rdev, dev, 337 - WLAN_REASON_DEAUTH_LEAVING, false); 338 - if (err) 339 - goto out; 340 - } 333 + if (wdev->conn) 334 + return cfg80211_disconnect(rdev, dev, 335 + WLAN_REASON_DEAUTH_LEAVING, false); 341 336 342 337 /* userspace better not think we'll reconnect */ 343 - err = 0; 344 - out: 345 - wiphy_unlock(wdev->wiphy); 346 - return err; 338 + return 0; 347 339 } 348 340 349 341 int cfg80211_wext_siwmlme(struct net_device *dev, ··· 345 353 struct wireless_dev *wdev = dev->ieee80211_ptr; 346 354 struct iw_mlme *mlme = (struct iw_mlme *)extra; 347 355 struct cfg80211_registered_device *rdev; 348 - int err; 349 356 350 357 if (!wdev) 351 358 return -EOPNOTSUPP; ··· 357 366 if (mlme->addr.sa_family != ARPHRD_ETHER) 358 367 return -EINVAL; 359 368 360 - wiphy_lock(&rdev->wiphy); 369 + guard(wiphy)(&rdev->wiphy); 370 + 361 371 switch (mlme->cmd) { 362 372 case IW_MLME_DEAUTH: 363 373 case IW_MLME_DISASSOC: 364 - err = 
cfg80211_disconnect(rdev, dev, mlme->reason_code, true); 365 - break; 374 + return cfg80211_disconnect(rdev, dev, mlme->reason_code, true); 366 375 default: 367 - err = -EOPNOTSUPP; 368 - break; 376 + return -EOPNOTSUPP; 369 377 } 370 - wiphy_unlock(&rdev->wiphy); 371 - 372 - return err; 373 378 }