Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ath-next-20240130' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath

ath.git patches for v6.9

We have new features for both ath11k and ath12k. ath12k is now under
heavy refactoring in preparation for MLO support.

Major changes:

ath12k

* refactoring in preparation for Multi-Link Operation (MLO) support

* 1024 Block Ack window size support

* provide firmware wmi logs via a trace event

ath11k

* 36 bit DMA mask support

* support 6 GHz station power modes: Low Power Indoor (LPI), Standard
Power (SP) and Very Low Power (VLP)

+2739 -1060
+2 -2
drivers/net/wireless/ath/ath10k/core.c
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 9 9 #include <linux/module.h> ··· 3613 3613 default: 3614 3614 ath10k_err(ar, "unsupported core hardware revision %d\n", 3615 3615 hw_rev); 3616 - ret = -ENOTSUPP; 3616 + ret = -EOPNOTSUPP; 3617 3617 goto err_free_mac; 3618 3618 } 3619 3619
+6 -2
drivers/net/wireless/ath/ath10k/coredump.h
··· 1 1 /* SPDX-License-Identifier: ISC */ 2 2 /* 3 3 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 4 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef _COREDUMP_H_ ··· 13 13 14 14 /** 15 15 * enum ath10k_fw_crash_dump_type - types of data in the dump file 16 - * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format 16 + * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format 17 + * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data 18 + * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple 19 + * struct ath10k_dump_ram_data_hdr 20 + * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration 17 21 */ 18 22 enum ath10k_fw_crash_dump_type { 19 23 ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+2 -1
drivers/net/wireless/ath/ath10k/htt.c
··· 2 2 /* 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 6 */ 6 7 7 8 #include <linux/slab.h> ··· 382 381 htt->target_version_major != 3) { 383 382 ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n", 384 383 htt->target_version_major); 385 - return -ENOTSUPP; 384 + return -EOPNOTSUPP; 386 385 } 387 386 388 387 return 0;
+8 -4
drivers/net/wireless/ath/ath10k/htt.h
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 9 9 #ifndef _HTT_H_ ··· 906 906 __le16 msdus_rssi[]; 907 907 } __packed; 908 908 909 - /** 909 + /* 910 910 * @brief target -> host TX completion indication message definition 911 911 * 912 912 * @details ··· 1474 1474 #define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 1475 1475 1476 1476 /** 1477 - * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config 1477 + * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config 1478 1478 * 1479 1479 * Defines host q state format and behavior. See htt_q_state. 1480 1480 * 1481 + * @paddr: Queue physical address 1482 + * @num_peers: Number of supported peers 1483 + * @num_tids: Number of supported TIDs 1481 1484 * @record_size: Defines the size of each host q entry in bytes. In practice 1482 1485 * however firmware (at least 10.4.3-00191) ignores this host 1483 1486 * configuration value and uses hardcoded value of 1. 1484 1487 * @record_multiplier: This is valid only when q depth type is MSDUs. It 1485 1488 * defines the exponent for the power of 2 multiplication. 1489 + * @pad: struct padding for 32-bit alignment 1486 1490 */ 1487 1491 struct htt_q_state_conf { 1488 1492 __le32 paddr; ··· 1522 1518 #define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 1523 1519 1524 1520 /** 1525 - * htt_q_state - shared between host and firmware via DMA 1521 + * struct htt_q_state - shared between host and firmware via DMA 1526 1522 * 1527 1523 * This structure is used for the host to expose it's software queue state to 1528 1524 * firmware so that its rate control can schedule fetch requests for optimized
+3 -3
drivers/net/wireless/ath/ath10k/mac.c
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 9 9 #include "mac.h" ··· 4056 4056 !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { 4057 4057 WARN_ON_ONCE(1); 4058 4058 ieee80211_free_txskb(hw, skb); 4059 - return -ENOTSUPP; 4059 + return -EOPNOTSUPP; 4060 4060 } 4061 4061 } 4062 4062 ··· 7065 7065 7066 7066 if (sta) { 7067 7067 if (!sta->wme) 7068 - return -ENOTSUPP; 7068 + return -EOPNOTSUPP; 7069 7069 7070 7070 arsta = (struct ath10k_sta *)sta->drv_priv; 7071 7071
+5 -5
drivers/net/wireless/ath/ath10k/pci.c
··· 2 2 /* 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 5 + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 6 6 */ 7 7 8 8 #include <linux/pci.h> ··· 889 889 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 890 890 891 891 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) 892 - return -ENOTSUPP; 892 + return -EOPNOTSUPP; 893 893 894 894 return ar_pci->targ_cpu_to_ce_addr(ar, addr); 895 895 } ··· 2668 2668 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2669 2669 2670 2670 if (!ar_pci->pci_soft_reset) 2671 - return -ENOTSUPP; 2671 + return -EOPNOTSUPP; 2672 2672 2673 2673 return ar_pci->pci_soft_reset(ar); 2674 2674 } ··· 2808 2808 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2809 2809 2810 2810 if (WARN_ON(!ar_pci->pci_hard_reset)) 2811 - return -ENOTSUPP; 2811 + return -EOPNOTSUPP; 2812 2812 2813 2813 return ar_pci->pci_hard_reset(ar); 2814 2814 } ··· 3594 3594 break; 3595 3595 default: 3596 3596 WARN_ON(1); 3597 - return -ENOTSUPP; 3597 + return -EOPNOTSUPP; 3598 3598 } 3599 3599 3600 3600 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+8 -3
drivers/net/wireless/ath/ath10k/wmi-tlv.c
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 6 7 */ 7 8 #include "core.h" 8 9 #include "debug.h" ··· 852 851 } 853 852 854 853 ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; 854 + if (!ev) { 855 + kfree(tb); 856 + return -EPROTO; 857 + } 855 858 856 859 arg->desc_id = ev->desc_id; 857 860 arg->status = ev->status; ··· 1352 1347 __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 || 1353 1348 __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 || 1354 1349 __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) { 1355 - return -ENOTSUPP; 1350 + return -EOPNOTSUPP; 1356 1351 } 1357 1352 1358 1353 arg->min_tx_power = ev->hw_min_tx_power; ··· 2124 2119 case WMI_VDEV_SUBTYPE_MESH_11S: 2125 2120 return WMI_TLV_VDEV_SUBTYPE_MESH_11S; 2126 2121 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 2127 - return -ENOTSUPP; 2122 + return -EOPNOTSUPP; 2128 2123 } 2129 - return -ENOTSUPP; 2124 + return -EOPNOTSUPP; 2130 2125 } 2131 2126 2132 2127 static struct sk_buff *
+2 -2
drivers/net/wireless/ath/ath10k/wmi-tlv.h
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 #ifndef _WMI_TLV_H 9 9 #define _WMI_TLV_H ··· 2343 2343 } __packed; 2344 2344 2345 2345 /** 2346 - * wmi_tlv_tx_pause_id - firmware tx queue pause reason types 2346 + * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types 2347 2347 * 2348 2348 * @WMI_TLV_TX_PAUSE_ID_MCC: used for by multi-channel firmware scheduler. 2349 2349 * Only vdev_map is valid.
+10 -12
drivers/net/wireless/ath/ath10k/wmi.c
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 9 9 #include <linux/skbuff.h> ··· 6927 6927 } 6928 6928 6929 6929 static void 6930 - ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs, 6930 + ath10k_wmi_put_start_scan_tlvs(u8 *tlvs, 6931 6931 const struct wmi_start_scan_arg *arg) 6932 6932 { 6933 6933 struct wmi_ie_data *ie; 6934 6934 struct wmi_chan_list *channels; 6935 6935 struct wmi_ssid_list *ssids; 6936 6936 struct wmi_bssid_list *bssids; 6937 - void *ptr = tlvs->tlvs; 6937 + void *ptr = tlvs; 6938 6938 int i; 6939 6939 6940 6940 if (arg->n_channels) { ··· 7012 7012 cmd = (struct wmi_start_scan_cmd *)skb->data; 7013 7013 7014 7014 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 7015 - ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); 7015 + ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); 7016 7016 7017 7017 cmd->burst_duration_ms = __cpu_to_le32(0); 7018 7018 ··· 7041 7041 cmd = (struct wmi_10x_start_scan_cmd *)skb->data; 7042 7042 7043 7043 ath10k_wmi_put_start_scan_common(&cmd->common, arg); 7044 - ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg); 7044 + ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg); 7045 7045 7046 7046 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n"); 7047 7047 return skb; ··· 8733 8733 return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; 8734 8734 case WMI_VDEV_SUBTYPE_MESH_11S: 8735 8735 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8736 - return -ENOTSUPP; 8736 + return -EOPNOTSUPP; 8737 8737 } 8738 - return -ENOTSUPP; 8738 + return -EOPNOTSUPP; 8739 8739 } 8740 8740 8741 8741 static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, ··· 8755 8755 case WMI_VDEV_SUBTYPE_MESH_11S: 8756 8756 return 
WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; 8757 8757 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8758 - return -ENOTSUPP; 8758 + return -EOPNOTSUPP; 8759 8759 } 8760 - return -ENOTSUPP; 8760 + return -EOPNOTSUPP; 8761 8761 } 8762 8762 8763 8763 static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, ··· 8779 8779 case WMI_VDEV_SUBTYPE_MESH_NON_11S: 8780 8780 return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; 8781 8781 } 8782 - return -ENOTSUPP; 8782 + return -EOPNOTSUPP; 8783 8783 } 8784 8784 8785 8785 static struct sk_buff * ··· 8917 8917 skb = ath10k_wmi_alloc_skb(ar, len); 8918 8918 if (!skb) 8919 8919 return ERR_PTR(-ENOMEM); 8920 - 8921 - memset(skb->data, 0, sizeof(*cmd)); 8922 8920 8923 8921 cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data; 8924 8922 cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+18 -44
drivers/net/wireless/ath/ath10k/wmi.h
··· 3 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 6 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 7 7 */ 8 8 9 9 #ifndef _WMI_H_ ··· 3008 3008 * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable 3009 3009 * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host 3010 3010 * enable/disable 3011 - * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable 3011 + * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable 3012 3012 * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable 3013 + * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer 3014 + * TID configuration for QoS related settings 3015 + * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting 3013 3016 */ 3014 3017 enum wmi_10_4_feature_mask { 3015 3018 WMI_10_4_LTEU_SUPPORT = BIT(0), ··· 3072 3069 struct wmi_host_mem_chunks { 3073 3070 __le32 count; 3074 3071 /* some fw revisions require at least 1 chunk regardless of count */ 3075 - struct host_memory_chunk items[1]; 3072 + union { 3073 + struct host_memory_chunk item; 3074 + DECLARE_FLEX_ARRAY(struct host_memory_chunk, items); 3075 + }; 3076 3076 } __packed; 3077 3077 3078 3078 struct wmi_init_cmd { ··· 3221 3215 __le32 scan_ctrl_flags; 3222 3216 } __packed; 3223 3217 3224 - struct wmi_start_scan_tlvs { 3225 - /* TLV parameters. These includes channel list, ssid list, bssid list, 3226 - * extra ies. 
3227 - */ 3228 - u8 tlvs[0]; 3229 - } __packed; 3230 - 3231 3218 struct wmi_start_scan_cmd { 3232 3219 struct wmi_start_scan_common common; 3233 3220 __le32 burst_duration_ms; 3234 - struct wmi_start_scan_tlvs tlvs; 3221 + u8 tlvs[]; 3235 3222 } __packed; 3236 3223 3237 3224 /* This is the definition from 10.X firmware branch */ 3238 3225 struct wmi_10x_start_scan_cmd { 3239 3226 struct wmi_start_scan_common common; 3240 - struct wmi_start_scan_tlvs tlvs; 3227 + u8 tlvs[]; 3241 3228 } __packed; 3242 3229 3243 3230 struct wmi_ssid_arg { ··· 4257 4258 struct wmi_peer_sta_ps_state_chg_event { 4258 4259 struct wmi_mac_addr peer_macaddr; 4259 4260 __le32 peer_ps_state; 4260 - } __packed; 4261 - 4262 - struct wmi_pdev_chanlist_update_event { 4263 - /* number of channels */ 4264 - __le32 num_chan; 4265 - /* array of channels */ 4266 - struct wmi_channel channel_list[1]; 4267 4261 } __packed; 4268 4262 4269 4263 #define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32) ··· 5785 5793 /* app IE */ 5786 5794 } __packed; 5787 5795 5788 - struct wmi_bcn_tmpl_cmd { 5789 - /* unique id identifying the VDEV, generated by the caller */ 5790 - __le32 vdev_id; 5791 - /* TIM IE offset from the beginning of the template. 
*/ 5792 - __le32 tim_ie_offset; 5793 - /* beacon probe capabilities and IEs */ 5794 - struct wmi_bcn_prb_info bcn_prb_info; 5795 - /* beacon buffer length */ 5796 - __le32 buf_len; 5797 - /* variable length data */ 5798 - u8 data[1]; 5799 - } __packed; 5800 - 5801 - struct wmi_prb_tmpl_cmd { 5802 - /* unique id identifying the VDEV, generated by the caller */ 5803 - __le32 vdev_id; 5804 - /* beacon probe capabilities and IEs */ 5805 - struct wmi_bcn_prb_info bcn_prb_info; 5806 - /* beacon buffer length */ 5807 - __le32 buf_len; 5808 - /* Variable length data */ 5809 - u8 data[1]; 5810 - } __packed; 5811 - 5812 5796 enum wmi_sta_ps_mode { 5813 5797 /* enable power save for the given STA VDEV */ 5814 5798 WMI_STA_PS_MODE_DISABLED = 0, ··· 7165 7197 __le32 is_peer_responder; 7166 7198 __le32 pref_offchan_num; 7167 7199 __le32 pref_offchan_bw; 7168 - struct wmi_channel peer_chan_list[1]; 7200 + union { 7201 + /* to match legacy implementation allocate room for 7202 + * at least one record even if peer_chan_len is 0 7203 + */ 7204 + struct wmi_channel peer_chan_min_allocation; 7205 + DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list); 7206 + }; 7169 7207 } __packed; 7170 7208 7171 7209 struct wmi_10_4_tdls_peer_update_cmd {
+41
drivers/net/wireless/ath/ath11k/core.h
··· 314 314 bool enable_offload; 315 315 }; 316 316 317 + /** 318 + * struct ath11k_chan_power_info - TPE containing power info per channel chunk 319 + * @chan_cfreq: channel center freq (MHz) 320 + * e.g. 321 + * channel 37/20 MHz, it is 6135 322 + * channel 37/40 MHz, it is 6125 323 + * channel 37/80 MHz, it is 6145 324 + * channel 37/160 MHz, it is 6185 325 + * @tx_power: transmit power (dBm) 326 + */ 327 + struct ath11k_chan_power_info { 328 + u16 chan_cfreq; 329 + s8 tx_power; 330 + }; 331 + 332 + /** 333 + * struct ath11k_reg_tpc_power_info - regulatory TPC power info 334 + * @is_psd_power: is PSD power or not 335 + * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD 336 + * @ap_power_type: type of power (SP/LPI/VLP) 337 + * @num_pwr_levels: number of power levels 338 + * @reg_max: Array of maximum TX power (dBm) per PSD value 339 + * @ap_constraint_power: AP constraint power (dBm) 340 + * @tpe: TPE values processed from TPE IE 341 + * @chan_power_info: power info to send to firmware 342 + */ 343 + struct ath11k_reg_tpc_power_info { 344 + bool is_psd_power; 345 + u8 eirp_power; 346 + enum wmi_reg_6ghz_ap_type ap_power_type; 347 + u8 num_pwr_levels; 348 + u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL]; 349 + u8 ap_constraint_power; 350 + s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL]; 351 + struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL]; 352 + }; 353 + 317 354 struct ath11k_vif { 318 355 u32 vdev_id; 319 356 enum wmi_vdev_type vdev_type; ··· 405 368 struct ieee80211_chanctx_conf chanctx; 406 369 struct ath11k_arp_ns_offload arp_ns_offload; 407 370 struct ath11k_rekey_data rekey_data; 371 + 372 + struct ath11k_reg_tpc_power_info reg_tpc_info; 408 373 }; 409 374 410 375 struct ath11k_vif_iter { ··· 774 735 /* protected by conf_mutex */ 775 736 bool ps_state_enable; 776 737 bool ps_timekeeper_enable; 738 + s8 max_allowed_tx_power; 777 739 }; 778 740 779 741 struct ath11k_band_cap { ··· 958 918 * This may or may not be used during the 
runtime 959 919 */ 960 920 struct ieee80211_regdomain *new_regd[MAX_RADIOS]; 921 + struct cur_regulatory_info *reg_info_store; 961 922 962 923 /* Current DFS Regulatory */ 963 924 enum ath11k_dfs_region dfs_region;
+17 -3
drivers/net/wireless/ath/ath11k/dp.c
··· 104 104 if (!ring->vaddr_unaligned) 105 105 return; 106 106 107 - if (ring->cached) 107 + if (ring->cached) { 108 + dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size, 109 + DMA_FROM_DEVICE); 108 110 kfree(ring->vaddr_unaligned); 109 - else 111 + } else { 110 112 dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, 111 113 ring->paddr_unaligned); 114 + } 112 115 113 116 ring->vaddr_unaligned = NULL; 114 117 } ··· 252 249 253 250 if (cached) { 254 251 ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); 255 - ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned); 252 + if (!ring->vaddr_unaligned) 253 + return -ENOMEM; 254 + 255 + ring->paddr_unaligned = dma_map_single(ab->dev, 256 + ring->vaddr_unaligned, 257 + ring->size, 258 + DMA_FROM_DEVICE); 259 + if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) { 260 + kfree(ring->vaddr_unaligned); 261 + ring->vaddr_unaligned = NULL; 262 + return -ENOMEM; 263 + } 256 264 } 257 265 } 258 266
+3 -3
drivers/net/wireless/ath/ath11k/dp_tx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" ··· 103 103 104 104 if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 105 105 !ieee80211_is_data(hdr->frame_control))) 106 - return -ENOTSUPP; 106 + return -EOPNOTSUPP; 107 107 108 108 pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1); 109 109 ··· 1018 1018 if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) { 1019 1019 ath11k_err(ab, "unsupported htt major version %d supported version is %d\n", 1020 1020 dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); 1021 - return -ENOTSUPP; 1021 + return -EOPNOTSUPP; 1022 1022 } 1023 1023 1024 1024 return 0;
+17 -2
drivers/net/wireless/ath/ath11k/hal.c
··· 626 626 return NULL; 627 627 } 628 628 629 + static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab, 630 + struct hal_srng *srng, dma_addr_t *paddr) 631 + { 632 + lockdep_assert_held(&srng->lock); 633 + 634 + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) { 635 + *paddr = srng->ring_base_paddr + 636 + sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp; 637 + return srng->ring_base_vaddr + srng->u.dst_ring.tp; 638 + } 639 + 640 + return NULL; 641 + } 642 + 629 643 static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab, 630 644 struct hal_srng *srng) 631 645 { 646 + dma_addr_t desc_paddr; 632 647 u32 *desc; 633 648 634 649 /* prefetch only if desc is available */ 635 - desc = ath11k_hal_srng_dst_peek(ab, srng); 650 + desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr); 636 651 if (likely(desc)) { 637 - dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc), 652 + dma_sync_single_for_cpu(ab->dev, desc_paddr, 638 653 (srng->entry_size * sizeof(u32)), 639 654 DMA_FROM_DEVICE); 640 655 prefetch(desc);
+2 -1
drivers/net/wireless/ath/ath11k/hal.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_HAL_H ··· 674 674 * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host 675 675 * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host 676 676 * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host 677 + * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host 677 678 */ 678 679 679 680 enum hal_rx_buf_return_buf_manager {
+2 -2
drivers/net/wireless/ath/ath11k/hal_rx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "debug.h" ··· 246 246 case HAL_REO_CMD_UNBLOCK_CACHE: 247 247 case HAL_REO_CMD_FLUSH_TIMEOUT_LIST: 248 248 ath11k_warn(ab, "Unsupported reo command %d\n", type); 249 - ret = -ENOTSUPP; 249 + ret = -EOPNOTSUPP; 250 250 break; 251 251 default: 252 252 ath11k_warn(ab, "Unknown reo command %d\n", type);
+808 -278
drivers/net/wireless/ath/ath11k/mac.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <net/mac80211.h> ··· 254 254 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 255 255 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 256 256 }; 257 - 258 - static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, 259 - struct ieee80211_vif *vif); 260 257 261 258 enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy) 262 259 { ··· 3394 3397 return 0; 3395 3398 } 3396 3399 3400 + static bool ath11k_mac_supports_station_tpc(struct ath11k *ar, 3401 + struct ath11k_vif *arvif, 3402 + const struct cfg80211_chan_def *chandef) 3403 + { 3404 + return ath11k_wmi_supports_6ghz_cc_ext(ar) && 3405 + test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) && 3406 + arvif->vdev_type == WMI_VDEV_TYPE_STA && 3407 + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE && 3408 + chandef->chan && 3409 + chandef->chan->band == NL80211_BAND_6GHZ; 3410 + } 3411 + 3397 3412 static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 3398 3413 struct ieee80211_vif *vif, 3399 3414 struct ieee80211_bss_conf *info, ··· 3605 3596 if (changed & BSS_CHANGED_TXPOWER) { 3606 3597 ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n", 3607 3598 arvif->vdev_id, info->txpower); 3608 - 3609 3599 arvif->txpower = info->txpower; 3610 3600 ath11k_mac_txpower_recalc(ar); 3611 3601 } ··· 4914 4906 ar->num_stations--; 4915 4907 } 4916 4908 4917 - static int ath11k_mac_station_add(struct ath11k *ar, 4918 - struct ieee80211_vif *vif, 4919 - struct ieee80211_sta *sta) 4920 - { 4921 - struct ath11k_base *ab = ar->ab; 4922 - struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 4923 - struct ath11k_sta *arsta = 
ath11k_sta_to_arsta(sta); 4924 - struct peer_create_params peer_param; 4925 - int ret; 4926 - 4927 - lockdep_assert_held(&ar->conf_mutex); 4928 - 4929 - ret = ath11k_mac_inc_num_stations(arvif, sta); 4930 - if (ret) { 4931 - ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n", 4932 - ar->max_num_stations); 4933 - goto exit; 4934 - } 4935 - 4936 - arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); 4937 - if (!arsta->rx_stats) { 4938 - ret = -ENOMEM; 4939 - goto dec_num_station; 4940 - } 4941 - 4942 - peer_param.vdev_id = arvif->vdev_id; 4943 - peer_param.peer_addr = sta->addr; 4944 - peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 4945 - 4946 - ret = ath11k_peer_create(ar, arvif, sta, &peer_param); 4947 - if (ret) { 4948 - ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", 4949 - sta->addr, arvif->vdev_id); 4950 - goto free_rx_stats; 4951 - } 4952 - 4953 - ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 4954 - sta->addr, arvif->vdev_id); 4955 - 4956 - if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { 4957 - arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL); 4958 - if (!arsta->tx_stats) { 4959 - ret = -ENOMEM; 4960 - goto free_peer; 4961 - } 4962 - } 4963 - 4964 - if (ieee80211_vif_is_mesh(vif)) { 4965 - ath11k_dbg(ab, ATH11K_DBG_MAC, 4966 - "setting USE_4ADDR for mesh STA %pM\n", sta->addr); 4967 - ret = ath11k_wmi_set_peer_param(ar, sta->addr, 4968 - arvif->vdev_id, 4969 - WMI_PEER_USE_4ADDR, 1); 4970 - if (ret) { 4971 - ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", 4972 - sta->addr, ret); 4973 - goto free_tx_stats; 4974 - } 4975 - } 4976 - 4977 - ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); 4978 - if (ret) { 4979 - ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", 4980 - sta->addr, arvif->vdev_id, ret); 4981 - goto free_tx_stats; 4982 - } 4983 - 4984 - if (ab->hw_params.vdev_start_delay && 4985 - !arvif->is_started && 4986 - 
arvif->vdev_type != WMI_VDEV_TYPE_AP) { 4987 - ret = ath11k_start_vdev_delay(ar->hw, vif); 4988 - if (ret) { 4989 - ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); 4990 - goto free_tx_stats; 4991 - } 4992 - } 4993 - 4994 - ewma_avg_rssi_init(&arsta->avg_rssi); 4995 - return 0; 4996 - 4997 - free_tx_stats: 4998 - kfree(arsta->tx_stats); 4999 - arsta->tx_stats = NULL; 5000 - free_peer: 5001 - ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); 5002 - free_rx_stats: 5003 - kfree(arsta->rx_stats); 5004 - arsta->rx_stats = NULL; 5005 - dec_num_station: 5006 - ath11k_mac_dec_num_stations(arvif, sta); 5007 - exit: 5008 - return ret; 5009 - } 5010 - 5011 4909 static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar, 5012 4910 struct ieee80211_sta *sta) 5013 4911 { ··· 4940 5026 } 4941 5027 4942 5028 return bw; 4943 - } 4944 - 4945 - static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, 4946 - struct ieee80211_vif *vif, 4947 - struct ieee80211_sta *sta, 4948 - enum ieee80211_sta_state old_state, 4949 - enum ieee80211_sta_state new_state) 4950 - { 4951 - struct ath11k *ar = hw->priv; 4952 - struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 4953 - struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); 4954 - struct ath11k_peer *peer; 4955 - int ret = 0; 4956 - 4957 - /* cancel must be done outside the mutex to avoid deadlock */ 4958 - if ((old_state == IEEE80211_STA_NONE && 4959 - new_state == IEEE80211_STA_NOTEXIST)) { 4960 - cancel_work_sync(&arsta->update_wk); 4961 - cancel_work_sync(&arsta->set_4addr_wk); 4962 - } 4963 - 4964 - mutex_lock(&ar->conf_mutex); 4965 - 4966 - if (old_state == IEEE80211_STA_NOTEXIST && 4967 - new_state == IEEE80211_STA_NONE) { 4968 - memset(arsta, 0, sizeof(*arsta)); 4969 - arsta->arvif = arvif; 4970 - arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; 4971 - INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); 4972 - INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); 4973 - 4974 - ret = ath11k_mac_station_add(ar, 
vif, sta); 4975 - if (ret) 4976 - ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 4977 - sta->addr, arvif->vdev_id); 4978 - } else if ((old_state == IEEE80211_STA_NONE && 4979 - new_state == IEEE80211_STA_NOTEXIST)) { 4980 - bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay && 4981 - vif->type == NL80211_IFTYPE_STATION; 4982 - 4983 - ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); 4984 - 4985 - if (!skip_peer_delete) { 4986 - ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); 4987 - if (ret) 4988 - ath11k_warn(ar->ab, 4989 - "Failed to delete peer: %pM for VDEV: %d\n", 4990 - sta->addr, arvif->vdev_id); 4991 - else 4992 - ath11k_dbg(ar->ab, 4993 - ATH11K_DBG_MAC, 4994 - "Removed peer: %pM for VDEV: %d\n", 4995 - sta->addr, arvif->vdev_id); 4996 - } 4997 - 4998 - ath11k_mac_dec_num_stations(arvif, sta); 4999 - mutex_lock(&ar->ab->tbl_mtx_lock); 5000 - spin_lock_bh(&ar->ab->base_lock); 5001 - peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5002 - if (skip_peer_delete && peer) { 5003 - peer->sta = NULL; 5004 - } else if (peer && peer->sta == sta) { 5005 - ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", 5006 - vif->addr, arvif->vdev_id); 5007 - ath11k_peer_rhash_delete(ar->ab, peer); 5008 - peer->sta = NULL; 5009 - list_del(&peer->list); 5010 - kfree(peer); 5011 - ar->num_peers--; 5012 - } 5013 - spin_unlock_bh(&ar->ab->base_lock); 5014 - mutex_unlock(&ar->ab->tbl_mtx_lock); 5015 - 5016 - kfree(arsta->tx_stats); 5017 - arsta->tx_stats = NULL; 5018 - 5019 - kfree(arsta->rx_stats); 5020 - arsta->rx_stats = NULL; 5021 - } else if (old_state == IEEE80211_STA_AUTH && 5022 - new_state == IEEE80211_STA_ASSOC && 5023 - (vif->type == NL80211_IFTYPE_AP || 5024 - vif->type == NL80211_IFTYPE_MESH_POINT || 5025 - vif->type == NL80211_IFTYPE_ADHOC)) { 5026 - ret = ath11k_station_assoc(ar, vif, sta, false); 5027 - if (ret) 5028 - ath11k_warn(ar->ab, "Failed to associate station: %pM\n", 5029 - 
sta->addr); 5030 - 5031 - spin_lock_bh(&ar->data_lock); 5032 - /* Set arsta bw and prev bw */ 5033 - arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 5034 - arsta->bw_prev = arsta->bw; 5035 - spin_unlock_bh(&ar->data_lock); 5036 - } else if (old_state == IEEE80211_STA_ASSOC && 5037 - new_state == IEEE80211_STA_AUTHORIZED) { 5038 - spin_lock_bh(&ar->ab->base_lock); 5039 - 5040 - peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5041 - if (peer) 5042 - peer->is_authorized = true; 5043 - 5044 - spin_unlock_bh(&ar->ab->base_lock); 5045 - 5046 - if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 5047 - ret = ath11k_wmi_set_peer_param(ar, sta->addr, 5048 - arvif->vdev_id, 5049 - WMI_PEER_AUTHORIZE, 5050 - 1); 5051 - if (ret) 5052 - ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 5053 - sta->addr, arvif->vdev_id, ret); 5054 - } 5055 - } else if (old_state == IEEE80211_STA_AUTHORIZED && 5056 - new_state == IEEE80211_STA_ASSOC) { 5057 - spin_lock_bh(&ar->ab->base_lock); 5058 - 5059 - peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 5060 - if (peer) 5061 - peer->is_authorized = false; 5062 - 5063 - spin_unlock_bh(&ar->ab->base_lock); 5064 - } else if (old_state == IEEE80211_STA_ASSOC && 5065 - new_state == IEEE80211_STA_AUTH && 5066 - (vif->type == NL80211_IFTYPE_AP || 5067 - vif->type == NL80211_IFTYPE_MESH_POINT || 5068 - vif->type == NL80211_IFTYPE_ADHOC)) { 5069 - ret = ath11k_station_disassoc(ar, vif, sta); 5070 - if (ret) 5071 - ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", 5072 - sta->addr); 5073 - } 5074 - 5075 - mutex_unlock(&ar->conf_mutex); 5076 - return ret; 5077 5029 } 5078 5030 5079 5031 static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, ··· 6720 6940 ret); 6721 6941 } 6722 6942 6943 + if (ath11k_wmi_supports_6ghz_cc_ext(ar)) { 6944 + struct cur_regulatory_info *reg_info; 6945 + 6946 + reg_info = &ab->reg_info_store[ar->pdev_idx]; 6947 + ath11k_dbg(ab, ATH11K_DBG_MAC, "interface 
added to change reg rules\n"); 6948 + ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP); 6949 + } 6950 + 6723 6951 mutex_unlock(&ar->conf_mutex); 6724 6952 6725 6953 return 0; ··· 7054 7266 return ret; 7055 7267 } 7056 7268 7269 + /* TODO: For now we only set TPC power here. However when 7270 + * channel changes, say CSA, it should be updated again. 7271 + */ 7272 + if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) { 7273 + ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx); 7274 + ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id, 7275 + &arvif->reg_tpc_info); 7276 + } 7277 + 7057 7278 if (!restart) 7058 7279 ar->num_started_vdevs++; 7059 7280 ··· 7339 7542 mutex_unlock(&ar->conf_mutex); 7340 7543 } 7341 7544 7342 - static int ath11k_start_vdev_delay(struct ieee80211_hw *hw, 7343 - struct ieee80211_vif *vif) 7545 + static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw, 7546 + struct ieee80211_vif *vif) 7344 7547 { 7345 7548 struct ath11k *ar = hw->priv; 7346 7549 struct ath11k_base *ab = ar->ab; ··· 7386 7589 return 0; 7387 7590 } 7388 7591 7592 + static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw, 7593 + struct ieee80211_vif *vif) 7594 + { 7595 + struct ath11k *ar = hw->priv; 7596 + struct ath11k_base *ab = ar->ab; 7597 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 7598 + int ret; 7599 + 7600 + if (WARN_ON(!arvif->is_started)) 7601 + return -EBUSY; 7602 + 7603 + ret = ath11k_mac_vdev_stop(arvif); 7604 + if (ret) { 7605 + ath11k_warn(ab, "failed to stop vdev %i: %d\n", 7606 + arvif->vdev_id, ret); 7607 + return ret; 7608 + } 7609 + 7610 + arvif->is_started = false; 7611 + 7612 + /* TODO: Setup ps and cts/rts protection */ 7613 + return 0; 7614 + } 7615 + 7616 + static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt) 7617 + { 7618 + switch (txpwr_intrprt) { 7619 + /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield 7620 + * if the Maximum Transmit Power Interpretation 
subfield is 0 or 2" of 7621 + * "IEEE Std 802.11ax 2021". 7622 + */ 7623 + case IEEE80211_TPE_LOCAL_EIRP: 7624 + case IEEE80211_TPE_REG_CLIENT_EIRP: 7625 + txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3; 7626 + txpwr_cnt = txpwr_cnt + 1; 7627 + break; 7628 + /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield 7629 + * if Maximum Transmit Power Interpretation subfield is 1 or 3" of 7630 + * "IEEE Std 802.11ax 2021". 7631 + */ 7632 + case IEEE80211_TPE_LOCAL_EIRP_PSD: 7633 + case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: 7634 + txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4; 7635 + txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1; 7636 + break; 7637 + } 7638 + 7639 + return txpwr_cnt; 7640 + } 7641 + 7642 + static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def) 7643 + { 7644 + if (chan_def->chan->flags & IEEE80211_CHAN_PSD) { 7645 + switch (chan_def->width) { 7646 + case NL80211_CHAN_WIDTH_20: 7647 + return 1; 7648 + case NL80211_CHAN_WIDTH_40: 7649 + return 2; 7650 + case NL80211_CHAN_WIDTH_80: 7651 + return 4; 7652 + case NL80211_CHAN_WIDTH_80P80: 7653 + case NL80211_CHAN_WIDTH_160: 7654 + return 8; 7655 + default: 7656 + return 1; 7657 + } 7658 + } else { 7659 + switch (chan_def->width) { 7660 + case NL80211_CHAN_WIDTH_20: 7661 + return 1; 7662 + case NL80211_CHAN_WIDTH_40: 7663 + return 2; 7664 + case NL80211_CHAN_WIDTH_80: 7665 + return 3; 7666 + case NL80211_CHAN_WIDTH_80P80: 7667 + case NL80211_CHAN_WIDTH_160: 7668 + return 4; 7669 + default: 7670 + return 1; 7671 + } 7672 + } 7673 + } 7674 + 7675 + static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def) 7676 + { 7677 + u16 diff_seq; 7678 + 7679 + /* It is to get the lowest channel number's center frequency of the chan. 7680 + * For example, 7681 + * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1 7682 + * with center frequency 5955, its diff is 5965 - 5955 = 10. 
7683 + * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1 7684 + * with center frequency 5955, its diff is 5985 - 5955 = 30. 7685 + * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1 7686 + * with center frequency 5955, its diff is 6025 - 5955 = 70. 7687 + */ 7688 + switch (chan_def->width) { 7689 + case NL80211_CHAN_WIDTH_160: 7690 + diff_seq = 70; 7691 + break; 7692 + case NL80211_CHAN_WIDTH_80: 7693 + case NL80211_CHAN_WIDTH_80P80: 7694 + diff_seq = 30; 7695 + break; 7696 + case NL80211_CHAN_WIDTH_40: 7697 + diff_seq = 10; 7698 + break; 7699 + default: 7700 + diff_seq = 0; 7701 + } 7702 + 7703 + return chan_def->center_freq1 - diff_seq; 7704 + } 7705 + 7706 + static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def, 7707 + u16 start_seq, u8 seq) 7708 + { 7709 + u16 seg_seq; 7710 + 7711 + /* It is to get the center frequency of the specific bandwidth. 7712 + * start_seq means the lowest channel number's center frequency. 7713 + * seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz&80P80. 7714 + * For example, 7715 + * lowest channel is 1, its center frequency 5955, 7716 + * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0. 7717 + * lowest channel is 1, its center frequency 5955, 7718 + * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10. 7719 + * lowest channel is 1, its center frequency 5955, 7720 + * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30. 7721 + * lowest channel is 1, its center frequency 5955, 7722 + * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70. 
7723 + */ 7724 + if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3) 7725 + return chan_def->center_freq2; 7726 + 7727 + seg_seq = 10 * (BIT(seq) - 1); 7728 + return seg_seq + start_seq; 7729 + } 7730 + 7731 + static void ath11k_mac_get_psd_channel(struct ath11k *ar, 7732 + u16 step_freq, 7733 + u16 *start_freq, 7734 + u16 *center_freq, 7735 + u8 i, 7736 + struct ieee80211_channel **temp_chan, 7737 + s8 *tx_power) 7738 + { 7739 + /* It is to get the center frequency for each 20 MHz. 7740 + * For example, if the chan is 160 MHz and center frequency is 6025, 7741 + * then it include 8 channels, they are 1/5/9/13/17/21/25/29, 7742 + * channel number 1's center frequency is 5955, it is parameter start_freq. 7743 + * parameter i is the step of the 8 channels. i is 0~7 for the 8 channels. 7744 + * the channel 1/5/9/13/17/21/25/29 maps i=0/1/2/3/4/5/6/7, 7745 + * and maps its center frequency is 5955/5975/5995/6015/6035/6055/6075/6095, 7746 + * the gap is 20 for each channel, parameter step_freq means the gap. 7747 + * after get the center frequency of each channel, it is easy to find the 7748 + * struct ieee80211_channel of it and get the max_reg_power. 7749 + */ 7750 + *center_freq = *start_freq + i * step_freq; 7751 + *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); 7752 + *tx_power = (*temp_chan)->max_reg_power; 7753 + } 7754 + 7755 + static void ath11k_mac_get_eirp_power(struct ath11k *ar, 7756 + u16 *start_freq, 7757 + u16 *center_freq, 7758 + u8 i, 7759 + struct ieee80211_channel **temp_chan, 7760 + struct cfg80211_chan_def *def, 7761 + s8 *tx_power) 7762 + { 7763 + /* It is to get the center frequency for 20 MHz/40 MHz/80 MHz/ 7764 + * 160 MHz&80P80 bandwidth, and then plus 10 to the center frequency, 7765 + * it is the center frequency of a channel number. 7766 + * For example, when configured channel number is 1. 7767 + * center frequency is 5965 when bandwidth=40 MHz, after plus 10, it is 5975, 7768 + * then it is channel number 5. 
7769 + * center frequency is 5985 when bandwidth=80 MHz, after plus 10, it is 5995, 7770 + * then it is channel number 9. 7771 + * center frequency is 6025 when bandwidth=160 MHz, after plus 10, it is 6035, 7772 + * then it is channel number 17. 7773 + * after get the center frequency of each channel, it is easy to find the 7774 + * struct ieee80211_channel of it and get the max_reg_power. 7775 + */ 7776 + *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i); 7777 + 7778 + /* For the 20 MHz, its center frequency is same with same channel */ 7779 + if (i != 0) 7780 + *center_freq += 10; 7781 + 7782 + *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq); 7783 + *tx_power = (*temp_chan)->max_reg_power; 7784 + } 7785 + 7786 + void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, 7787 + struct ieee80211_vif *vif, 7788 + struct ieee80211_chanctx_conf *ctx) 7789 + { 7790 + struct ath11k_base *ab = ar->ab; 7791 + struct ath11k_vif *arvif = (void *)vif->drv_priv; 7792 + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 7793 + struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info; 7794 + struct ieee80211_channel *chan, *temp_chan; 7795 + u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction; 7796 + bool is_psd_power = false, is_tpe_present = false; 7797 + s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL], 7798 + psd_power, tx_power, eirp_power; 7799 + u16 start_freq, center_freq; 7800 + 7801 + chan = ctx->def.chan; 7802 + start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def); 7803 + pwr_reduction = bss_conf->pwr_reduction; 7804 + 7805 + if (arvif->reg_tpc_info.num_pwr_levels) { 7806 + is_tpe_present = true; 7807 + num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels; 7808 + } else { 7809 + num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def); 7810 + } 7811 + 7812 + for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) { 7813 + /* STA received TPE IE*/ 7814 + if (is_tpe_present) { 7815 + /* local power is PSD power*/ 7816 + if 
(chan->flags & IEEE80211_CHAN_PSD) { 7817 + /* Connecting AP is psd power */ 7818 + if (reg_tpc_info->is_psd_power) { 7819 + is_psd_power = true; 7820 + ath11k_mac_get_psd_channel(ar, 20, 7821 + &start_freq, 7822 + &center_freq, 7823 + pwr_lvl_idx, 7824 + &temp_chan, 7825 + &tx_power); 7826 + psd_power = temp_chan->psd; 7827 + eirp_power = tx_power; 7828 + max_tx_power[pwr_lvl_idx] = 7829 + min_t(s8, 7830 + psd_power, 7831 + reg_tpc_info->tpe[pwr_lvl_idx]); 7832 + /* Connecting AP is not psd power */ 7833 + } else { 7834 + ath11k_mac_get_eirp_power(ar, 7835 + &start_freq, 7836 + &center_freq, 7837 + pwr_lvl_idx, 7838 + &temp_chan, 7839 + &ctx->def, 7840 + &tx_power); 7841 + psd_power = temp_chan->psd; 7842 + /* convert psd power to EIRP power based 7843 + * on channel width 7844 + */ 7845 + tx_power = 7846 + min_t(s8, tx_power, 7847 + psd_power + 13 + pwr_lvl_idx * 3); 7848 + max_tx_power[pwr_lvl_idx] = 7849 + min_t(s8, 7850 + tx_power, 7851 + reg_tpc_info->tpe[pwr_lvl_idx]); 7852 + } 7853 + /* local power is not PSD power */ 7854 + } else { 7855 + /* Connecting AP is psd power */ 7856 + if (reg_tpc_info->is_psd_power) { 7857 + is_psd_power = true; 7858 + ath11k_mac_get_psd_channel(ar, 20, 7859 + &start_freq, 7860 + &center_freq, 7861 + pwr_lvl_idx, 7862 + &temp_chan, 7863 + &tx_power); 7864 + eirp_power = tx_power; 7865 + max_tx_power[pwr_lvl_idx] = 7866 + reg_tpc_info->tpe[pwr_lvl_idx]; 7867 + /* Connecting AP is not psd power */ 7868 + } else { 7869 + ath11k_mac_get_eirp_power(ar, 7870 + &start_freq, 7871 + &center_freq, 7872 + pwr_lvl_idx, 7873 + &temp_chan, 7874 + &ctx->def, 7875 + &tx_power); 7876 + max_tx_power[pwr_lvl_idx] = 7877 + min_t(s8, 7878 + tx_power, 7879 + reg_tpc_info->tpe[pwr_lvl_idx]); 7880 + } 7881 + } 7882 + /* STA not received TPE IE */ 7883 + } else { 7884 + /* local power is PSD power*/ 7885 + if (chan->flags & IEEE80211_CHAN_PSD) { 7886 + is_psd_power = true; 7887 + ath11k_mac_get_psd_channel(ar, 20, 7888 + &start_freq, 7889 + 
&center_freq, 7890 + pwr_lvl_idx, 7891 + &temp_chan, 7892 + &tx_power); 7893 + psd_power = temp_chan->psd; 7894 + eirp_power = tx_power; 7895 + max_tx_power[pwr_lvl_idx] = psd_power; 7896 + } else { 7897 + ath11k_mac_get_eirp_power(ar, 7898 + &start_freq, 7899 + &center_freq, 7900 + pwr_lvl_idx, 7901 + &temp_chan, 7902 + &ctx->def, 7903 + &tx_power); 7904 + max_tx_power[pwr_lvl_idx] = tx_power; 7905 + } 7906 + } 7907 + 7908 + if (is_psd_power) { 7909 + /* If AP local power constraint is present */ 7910 + if (pwr_reduction) 7911 + eirp_power = eirp_power - pwr_reduction; 7912 + 7913 + /* If firmware updated max tx power is non zero, then take 7914 + * the min of firmware updated ap tx power 7915 + * and max power derived from above mentioned parameters. 7916 + */ 7917 + ath11k_dbg(ab, ATH11K_DBG_MAC, 7918 + "eirp power : %d firmware report power : %d\n", 7919 + eirp_power, ar->max_allowed_tx_power); 7920 + /* Firmware reports lower max_allowed_tx_power during vdev 7921 + * start response. In case of 6 GHz, firmware is not aware 7922 + * of EIRP power unless driver sets EIRP power through WMI 7923 + * TPC command. So radio which does not support idle power 7924 + * save can set maximum calculated EIRP power directly to 7925 + * firmware through TPC command without min comparison with 7926 + * vdev start response's max_allowed_tx_power. 7927 + */ 7928 + if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) 7929 + eirp_power = min_t(s8, 7930 + eirp_power, 7931 + ar->max_allowed_tx_power); 7932 + } else { 7933 + /* If AP local power constraint is present */ 7934 + if (pwr_reduction) 7935 + max_tx_power[pwr_lvl_idx] = 7936 + max_tx_power[pwr_lvl_idx] - pwr_reduction; 7937 + /* If firmware updated max tx power is non zero, then take 7938 + * the min of firmware updated ap tx power 7939 + * and max power derived from above mentioned parameters. 
7940 + */ 7941 + if (ar->max_allowed_tx_power && ab->hw_params.idle_ps) 7942 + max_tx_power[pwr_lvl_idx] = 7943 + min_t(s8, 7944 + max_tx_power[pwr_lvl_idx], 7945 + ar->max_allowed_tx_power); 7946 + } 7947 + reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq; 7948 + reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power = 7949 + max_tx_power[pwr_lvl_idx]; 7950 + } 7951 + 7952 + reg_tpc_info->num_pwr_levels = num_pwr_levels; 7953 + reg_tpc_info->is_psd_power = is_psd_power; 7954 + reg_tpc_info->eirp_power = eirp_power; 7955 + reg_tpc_info->ap_power_type = 7956 + ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type); 7957 + } 7958 + 7959 + static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar, 7960 + struct ieee80211_vif *vif, 7961 + struct ieee80211_chanctx_conf *ctx) 7962 + { 7963 + struct ath11k_base *ab = ar->ab; 7964 + struct ath11k_vif *arvif = (void *)vif->drv_priv; 7965 + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 7966 + struct ieee80211_tx_pwr_env *single_tpe; 7967 + enum wmi_reg_6ghz_client_type client_type; 7968 + struct cur_regulatory_info *reg_info; 7969 + int i; 7970 + u8 pwr_count, pwr_interpret, pwr_category; 7971 + u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0; 7972 + bool use_local_tpe, non_psd_set = false, psd_set = false; 7973 + 7974 + reg_info = &ab->reg_info_store[ar->pdev_idx]; 7975 + client_type = reg_info->client_type; 7976 + 7977 + for (i = 0; i < bss_conf->tx_pwr_env_num; i++) { 7978 + single_tpe = &bss_conf->tx_pwr_env[i]; 7979 + pwr_category = u8_get_bits(single_tpe->tx_power_info, 7980 + IEEE80211_TX_PWR_ENV_INFO_CATEGORY); 7981 + pwr_interpret = u8_get_bits(single_tpe->tx_power_info, 7982 + IEEE80211_TX_PWR_ENV_INFO_INTERPRET); 7983 + 7984 + if (pwr_category == client_type) { 7985 + if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP || 7986 + pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) 7987 + local_tpe_count++; 7988 + else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP || 7989 + 
pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) 7990 + reg_tpe_count++; 7991 + } 7992 + } 7993 + 7994 + if (!reg_tpe_count && !local_tpe_count) { 7995 + ath11k_warn(ab, 7996 + "no transmit power envelope match client power type %d\n", 7997 + client_type); 7998 + return; 7999 + } else if (!reg_tpe_count) { 8000 + use_local_tpe = true; 8001 + } else { 8002 + use_local_tpe = false; 8003 + } 8004 + 8005 + for (i = 0; i < bss_conf->tx_pwr_env_num; i++) { 8006 + single_tpe = &bss_conf->tx_pwr_env[i]; 8007 + pwr_category = u8_get_bits(single_tpe->tx_power_info, 8008 + IEEE80211_TX_PWR_ENV_INFO_CATEGORY); 8009 + pwr_interpret = u8_get_bits(single_tpe->tx_power_info, 8010 + IEEE80211_TX_PWR_ENV_INFO_INTERPRET); 8011 + 8012 + if (pwr_category != client_type) 8013 + continue; 8014 + 8015 + /* get local transmit power envelope */ 8016 + if (use_local_tpe) { 8017 + if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) { 8018 + non_psd_index = i; 8019 + non_psd_set = true; 8020 + } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) { 8021 + psd_index = i; 8022 + psd_set = true; 8023 + } 8024 + /* get regulatory transmit power envelope */ 8025 + } else { 8026 + if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) { 8027 + non_psd_index = i; 8028 + non_psd_set = true; 8029 + } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) { 8030 + psd_index = i; 8031 + psd_set = true; 8032 + } 8033 + } 8034 + } 8035 + 8036 + if (non_psd_set && !psd_set) { 8037 + single_tpe = &bss_conf->tx_pwr_env[non_psd_index]; 8038 + pwr_count = u8_get_bits(single_tpe->tx_power_info, 8039 + IEEE80211_TX_PWR_ENV_INFO_COUNT); 8040 + pwr_interpret = u8_get_bits(single_tpe->tx_power_info, 8041 + IEEE80211_TX_PWR_ENV_INFO_INTERPRET); 8042 + arvif->reg_tpc_info.is_psd_power = false; 8043 + arvif->reg_tpc_info.eirp_power = 0; 8044 + 8045 + arvif->reg_tpc_info.num_pwr_levels = 8046 + ath11k_mac_get_tpe_count(pwr_interpret, pwr_count); 8047 + 8048 + for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) 
{ 8049 + ath11k_dbg(ab, ATH11K_DBG_MAC, 8050 + "non PSD power[%d] : %d\n", 8051 + i, single_tpe->tx_power[i]); 8052 + arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2; 8053 + } 8054 + } 8055 + 8056 + if (psd_set) { 8057 + single_tpe = &bss_conf->tx_pwr_env[psd_index]; 8058 + pwr_count = u8_get_bits(single_tpe->tx_power_info, 8059 + IEEE80211_TX_PWR_ENV_INFO_COUNT); 8060 + pwr_interpret = u8_get_bits(single_tpe->tx_power_info, 8061 + IEEE80211_TX_PWR_ENV_INFO_INTERPRET); 8062 + arvif->reg_tpc_info.is_psd_power = true; 8063 + 8064 + if (pwr_count == 0) { 8065 + ath11k_dbg(ab, ATH11K_DBG_MAC, 8066 + "TPE PSD power : %d\n", single_tpe->tx_power[0]); 8067 + arvif->reg_tpc_info.num_pwr_levels = 8068 + ath11k_mac_get_num_pwr_levels(&ctx->def); 8069 + 8070 + for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) 8071 + arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2; 8072 + } else { 8073 + arvif->reg_tpc_info.num_pwr_levels = 8074 + ath11k_mac_get_tpe_count(pwr_interpret, pwr_count); 8075 + 8076 + for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { 8077 + ath11k_dbg(ab, ATH11K_DBG_MAC, 8078 + "TPE PSD power[%d] : %d\n", 8079 + i, single_tpe->tx_power[i]); 8080 + arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2; 8081 + } 8082 + } 8083 + } 8084 + } 8085 + 7389 8086 static int 7390 8087 ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7391 8088 struct ieee80211_vif *vif, ··· 7890 7599 struct ath11k_base *ab = ar->ab; 7891 7600 struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 7892 7601 int ret; 7893 - struct peer_create_params param; 7602 + struct cur_regulatory_info *reg_info; 7603 + enum ieee80211_ap_reg_power power_type; 7894 7604 7895 7605 mutex_lock(&ar->conf_mutex); 7896 7606 7897 7607 ath11k_dbg(ab, ATH11K_DBG_MAC, 7898 7608 "chanctx assign ptr %p vdev_id %i\n", 7899 7609 ctx, arvif->vdev_id); 7610 + 7611 + if (ath11k_wmi_supports_6ghz_cc_ext(ar) && 7612 + ctx->def.chan->band == NL80211_BAND_6GHZ && 7613 + arvif->vdev_type 
== WMI_VDEV_TYPE_STA) { 7614 + reg_info = &ab->reg_info_store[ar->pdev_idx]; 7615 + power_type = vif->bss_conf.power_type; 7616 + 7617 + ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type); 7618 + 7619 + if (power_type == IEEE80211_REG_UNSET_AP) { 7620 + ret = -EINVAL; 7621 + goto out; 7622 + } 7623 + 7624 + ath11k_reg_handle_chan_list(ab, reg_info, power_type); 7625 + arvif->chanctx = *ctx; 7626 + ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); 7627 + } 7900 7628 7901 7629 /* for QCA6390 bss peer must be created before vdev_start */ 7902 7630 if (ab->hw_params.vdev_start_delay && ··· 7932 7622 goto out; 7933 7623 } 7934 7624 7935 - if (ab->hw_params.vdev_start_delay && 7936 - arvif->vdev_type != WMI_VDEV_TYPE_AP && 7937 - arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { 7938 - param.vdev_id = arvif->vdev_id; 7939 - param.peer_type = WMI_PEER_TYPE_DEFAULT; 7940 - param.peer_addr = ar->mac_addr; 7941 - 7942 - ret = ath11k_peer_create(ar, arvif, NULL, &param); 7943 - if (ret) { 7944 - ath11k_warn(ab, "failed to create peer after vdev start delay: %d", 7945 - ret); 7946 - goto out; 7947 - } 7948 - } 7949 - 7950 7625 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 7951 7626 ret = ath11k_mac_monitor_start(ar); 7952 7627 if (ret) { ··· 7944 7649 goto out; 7945 7650 } 7946 7651 7947 - ret = ath11k_mac_vdev_start(arvif, ctx); 7948 - if (ret) { 7949 - ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 7950 - arvif->vdev_id, vif->addr, 7951 - ctx->def.chan->center_freq, ret); 7952 - goto out; 7953 - } 7652 + if (!arvif->is_started) { 7653 + ret = ath11k_mac_vdev_start(arvif, ctx); 7654 + if (ret) { 7655 + ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 7656 + arvif->vdev_id, vif->addr, 7657 + ctx->def.chan->center_freq, ret); 7658 + goto out; 7659 + } 7954 7660 7955 - arvif->is_started = true; 7661 + arvif->is_started = true; 7662 + } 7956 7663 7957 7664 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 7958 7665 
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) { ··· 7994 7697 "chanctx unassign ptr %p vdev_id %i\n", 7995 7698 ctx, arvif->vdev_id); 7996 7699 7997 - WARN_ON(!arvif->is_started); 7998 - 7999 7700 if (ab->hw_params.vdev_start_delay && 8000 7701 arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 8001 7702 spin_lock_bh(&ab->base_lock); ··· 8017 7722 return; 8018 7723 } 8019 7724 8020 - ret = ath11k_mac_vdev_stop(arvif); 8021 - if (ret) 8022 - ath11k_warn(ab, "failed to stop vdev %i: %d\n", 8023 - arvif->vdev_id, ret); 8024 - 8025 - arvif->is_started = false; 8026 - 8027 - if (ab->hw_params.vdev_start_delay && 8028 - arvif->vdev_type == WMI_VDEV_TYPE_STA) { 8029 - ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid); 7725 + if (arvif->is_started) { 7726 + ret = ath11k_mac_vdev_stop(arvif); 8030 7727 if (ret) 8031 - ath11k_warn(ar->ab, 8032 - "failed to delete peer %pM for vdev %d: %d\n", 8033 - arvif->bssid, arvif->vdev_id, ret); 8034 - else 8035 - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, 8036 - "removed peer %pM vdev %d after vdev stop\n", 8037 - arvif->bssid, arvif->vdev_id); 7728 + ath11k_warn(ab, "failed to stop vdev %i: %d\n", 7729 + arvif->vdev_id, ret); 7730 + 7731 + arvif->is_started = false; 8038 7732 } 8039 7733 8040 7734 if (ab->hw_params.vdev_start_delay && ··· 9379 9095 ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n", 9380 9096 *dbm); 9381 9097 return 0; 9098 + } 9099 + 9100 + static int ath11k_mac_station_add(struct ath11k *ar, 9101 + struct ieee80211_vif *vif, 9102 + struct ieee80211_sta *sta) 9103 + { 9104 + struct ath11k_base *ab = ar->ab; 9105 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 9106 + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); 9107 + struct peer_create_params peer_param; 9108 + int ret; 9109 + 9110 + lockdep_assert_held(&ar->conf_mutex); 9111 + 9112 + ret = ath11k_mac_inc_num_stations(arvif, sta); 9113 + if (ret) { 9114 + ath11k_warn(ab, "refusing to associate station: too 
many connected already (%d)\n", 9115 + ar->max_num_stations); 9116 + goto exit; 9117 + } 9118 + 9119 + arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); 9120 + if (!arsta->rx_stats) { 9121 + ret = -ENOMEM; 9122 + goto dec_num_station; 9123 + } 9124 + 9125 + peer_param.vdev_id = arvif->vdev_id; 9126 + peer_param.peer_addr = sta->addr; 9127 + peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 9128 + 9129 + ret = ath11k_peer_create(ar, arvif, sta, &peer_param); 9130 + if (ret) { 9131 + ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", 9132 + sta->addr, arvif->vdev_id); 9133 + goto free_rx_stats; 9134 + } 9135 + 9136 + ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 9137 + sta->addr, arvif->vdev_id); 9138 + 9139 + if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) { 9140 + arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL); 9141 + if (!arsta->tx_stats) { 9142 + ret = -ENOMEM; 9143 + goto free_peer; 9144 + } 9145 + } 9146 + 9147 + if (ieee80211_vif_is_mesh(vif)) { 9148 + ath11k_dbg(ab, ATH11K_DBG_MAC, 9149 + "setting USE_4ADDR for mesh STA %pM\n", sta->addr); 9150 + ret = ath11k_wmi_set_peer_param(ar, sta->addr, 9151 + arvif->vdev_id, 9152 + WMI_PEER_USE_4ADDR, 1); 9153 + if (ret) { 9154 + ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n", 9155 + sta->addr, ret); 9156 + goto free_tx_stats; 9157 + } 9158 + } 9159 + 9160 + ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); 9161 + if (ret) { 9162 + ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", 9163 + sta->addr, arvif->vdev_id, ret); 9164 + goto free_tx_stats; 9165 + } 9166 + 9167 + if (ab->hw_params.vdev_start_delay && 9168 + !arvif->is_started && 9169 + arvif->vdev_type != WMI_VDEV_TYPE_AP) { 9170 + ret = ath11k_mac_start_vdev_delay(ar->hw, vif); 9171 + if (ret) { 9172 + ath11k_warn(ab, "failed to delay vdev start: %d\n", ret); 9173 + goto free_tx_stats; 9174 + } 9175 + } 9176 + 9177 + ewma_avg_rssi_init(&arsta->avg_rssi); 9178 
+ return 0; 9179 + 9180 + free_tx_stats: 9181 + kfree(arsta->tx_stats); 9182 + arsta->tx_stats = NULL; 9183 + free_peer: 9184 + ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); 9185 + free_rx_stats: 9186 + kfree(arsta->rx_stats); 9187 + arsta->rx_stats = NULL; 9188 + dec_num_station: 9189 + ath11k_mac_dec_num_stations(arvif, sta); 9190 + exit: 9191 + return ret; 9192 + } 9193 + 9194 + static int ath11k_mac_station_remove(struct ath11k *ar, 9195 + struct ieee80211_vif *vif, 9196 + struct ieee80211_sta *sta) 9197 + { 9198 + struct ath11k_base *ab = ar->ab; 9199 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 9200 + struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); 9201 + int ret; 9202 + 9203 + if (ab->hw_params.vdev_start_delay && 9204 + arvif->is_started && 9205 + arvif->vdev_type != WMI_VDEV_TYPE_AP) { 9206 + ret = ath11k_mac_stop_vdev_early(ar->hw, vif); 9207 + if (ret) { 9208 + ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret); 9209 + return ret; 9210 + } 9211 + } 9212 + 9213 + ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); 9214 + 9215 + ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); 9216 + if (ret) 9217 + ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n", 9218 + sta->addr, arvif->vdev_id); 9219 + else 9220 + ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", 9221 + sta->addr, arvif->vdev_id); 9222 + 9223 + ath11k_mac_dec_num_stations(arvif, sta); 9224 + 9225 + kfree(arsta->tx_stats); 9226 + arsta->tx_stats = NULL; 9227 + 9228 + kfree(arsta->rx_stats); 9229 + arsta->rx_stats = NULL; 9230 + 9231 + return ret; 9232 + } 9233 + 9234 + static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, 9235 + struct ieee80211_vif *vif, 9236 + struct ieee80211_sta *sta, 9237 + enum ieee80211_sta_state old_state, 9238 + enum ieee80211_sta_state new_state) 9239 + { 9240 + struct ath11k *ar = hw->priv; 9241 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 9242 + struct ath11k_sta *arsta = 
ath11k_sta_to_arsta(sta); 9243 + struct ath11k_peer *peer; 9244 + int ret = 0; 9245 + 9246 + /* cancel must be done outside the mutex to avoid deadlock */ 9247 + if ((old_state == IEEE80211_STA_NONE && 9248 + new_state == IEEE80211_STA_NOTEXIST)) { 9249 + cancel_work_sync(&arsta->update_wk); 9250 + cancel_work_sync(&arsta->set_4addr_wk); 9251 + } 9252 + 9253 + mutex_lock(&ar->conf_mutex); 9254 + 9255 + if (old_state == IEEE80211_STA_NOTEXIST && 9256 + new_state == IEEE80211_STA_NONE) { 9257 + memset(arsta, 0, sizeof(*arsta)); 9258 + arsta->arvif = arvif; 9259 + arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; 9260 + INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk); 9261 + INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk); 9262 + 9263 + ret = ath11k_mac_station_add(ar, vif, sta); 9264 + if (ret) 9265 + ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 9266 + sta->addr, arvif->vdev_id); 9267 + } else if ((old_state == IEEE80211_STA_NONE && 9268 + new_state == IEEE80211_STA_NOTEXIST)) { 9269 + ret = ath11k_mac_station_remove(ar, vif, sta); 9270 + if (ret) 9271 + ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n", 9272 + sta->addr, arvif->vdev_id); 9273 + 9274 + mutex_lock(&ar->ab->tbl_mtx_lock); 9275 + spin_lock_bh(&ar->ab->base_lock); 9276 + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 9277 + if (peer && peer->sta == sta) { 9278 + ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", 9279 + vif->addr, arvif->vdev_id); 9280 + ath11k_peer_rhash_delete(ar->ab, peer); 9281 + peer->sta = NULL; 9282 + list_del(&peer->list); 9283 + kfree(peer); 9284 + ar->num_peers--; 9285 + } 9286 + spin_unlock_bh(&ar->ab->base_lock); 9287 + mutex_unlock(&ar->ab->tbl_mtx_lock); 9288 + } else if (old_state == IEEE80211_STA_AUTH && 9289 + new_state == IEEE80211_STA_ASSOC && 9290 + (vif->type == NL80211_IFTYPE_AP || 9291 + vif->type == NL80211_IFTYPE_MESH_POINT || 9292 + vif->type == 
NL80211_IFTYPE_ADHOC)) { 9293 + ret = ath11k_station_assoc(ar, vif, sta, false); 9294 + if (ret) 9295 + ath11k_warn(ar->ab, "Failed to associate station: %pM\n", 9296 + sta->addr); 9297 + 9298 + spin_lock_bh(&ar->data_lock); 9299 + /* Set arsta bw and prev bw */ 9300 + arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 9301 + arsta->bw_prev = arsta->bw; 9302 + spin_unlock_bh(&ar->data_lock); 9303 + } else if (old_state == IEEE80211_STA_ASSOC && 9304 + new_state == IEEE80211_STA_AUTHORIZED) { 9305 + spin_lock_bh(&ar->ab->base_lock); 9306 + 9307 + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 9308 + if (peer) 9309 + peer->is_authorized = true; 9310 + 9311 + spin_unlock_bh(&ar->ab->base_lock); 9312 + 9313 + if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 9314 + ret = ath11k_wmi_set_peer_param(ar, sta->addr, 9315 + arvif->vdev_id, 9316 + WMI_PEER_AUTHORIZE, 9317 + 1); 9318 + if (ret) 9319 + ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 9320 + sta->addr, arvif->vdev_id, ret); 9321 + } 9322 + } else if (old_state == IEEE80211_STA_AUTHORIZED && 9323 + new_state == IEEE80211_STA_ASSOC) { 9324 + spin_lock_bh(&ar->ab->base_lock); 9325 + 9326 + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 9327 + if (peer) 9328 + peer->is_authorized = false; 9329 + 9330 + spin_unlock_bh(&ar->ab->base_lock); 9331 + } else if (old_state == IEEE80211_STA_ASSOC && 9332 + new_state == IEEE80211_STA_AUTH && 9333 + (vif->type == NL80211_IFTYPE_AP || 9334 + vif->type == NL80211_IFTYPE_MESH_POINT || 9335 + vif->type == NL80211_IFTYPE_ADHOC)) { 9336 + ret = ath11k_station_disassoc(ar, vif, sta); 9337 + if (ret) 9338 + ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n", 9339 + sta->addr); 9340 + } 9341 + 9342 + mutex_unlock(&ar->conf_mutex); 9343 + return ret; 9382 9344 } 9383 9345 9384 9346 static const struct ieee80211_ops ath11k_ops = {
+4 -1
drivers/net/wireless/ath/ath11k/mac.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH11K_MAC_H ··· 176 176 int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif, 177 177 enum wmi_sta_keepalive_method method, 178 178 u32 interval); 179 + void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, 180 + struct ieee80211_vif *vif, 181 + struct ieee80211_chanctx_conf *ctx); 179 182 #endif
+2 -2
drivers/net/wireless/ath/ath11k/mhi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2020 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/msi.h> ··· 423 423 goto free_controller; 424 424 } else { 425 425 mhi_ctrl->iova_start = 0; 426 - mhi_ctrl->iova_stop = 0xFFFFFFFF; 426 + mhi_ctrl->iova_stop = ab_pci->dma_mask; 427 427 } 428 428 429 429 mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
+15 -4
drivers/net/wireless/ath/ath11k/pci.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 18 18 #include "qmi.h" 19 19 20 20 #define ATH11K_PCI_BAR_NUM 0 21 - #define ATH11K_PCI_DMA_MASK 32 21 + #define ATH11K_PCI_DMA_MASK 36 22 + #define ATH11K_PCI_COHERENT_DMA_MASK 32 22 23 23 24 #define TCSR_SOC_HW_VERSION 0x0224 24 25 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) ··· 527 526 goto disable_device; 528 527 } 529 528 530 - ret = dma_set_mask_and_coherent(&pdev->dev, 531 - DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); 529 + ret = dma_set_mask(&pdev->dev, 530 + DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); 532 531 if (ret) { 533 532 ath11k_err(ab, "failed to set pci dma mask to %d: %d\n", 534 533 ATH11K_PCI_DMA_MASK, ret); 534 + goto release_region; 535 + } 536 + 537 + ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK); 538 + 539 + ret = dma_set_coherent_mask(&pdev->dev, 540 + DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK)); 541 + if (ret) { 542 + ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n", 543 + ATH11K_PCI_COHERENT_DMA_MASK, ret); 535 544 goto release_region; 536 545 } 537 546
+2 -1
drivers/net/wireless/ath/ath11k/pci.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #ifndef _ATH11K_PCI_H 7 7 #define _ATH11K_PCI_H ··· 72 72 /* enum ath11k_pci_flags */ 73 73 unsigned long flags; 74 74 u16 link_ctl; 75 + u64 dma_mask; 75 76 }; 76 77 77 78 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
+247 -20
drivers/net/wireless/ath/ath11k/reg.c
··· 425 425 /* Use the flags of both the rules */ 426 426 new_rule->flags = rule1->flags | rule2->flags; 427 427 428 + if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD)) 429 + new_rule->psd = min_t(s8, rule1->psd, rule2->psd); 430 + else 431 + new_rule->flags &= ~NL80211_RRF_PSD; 432 + 428 433 /* To be safe, lts use the max cac timeout of both rules */ 429 434 new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms, 430 435 rule2->dfs_cac_ms); ··· 532 527 static void 533 528 ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq, 534 529 u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr, 535 - u32 reg_flags) 530 + s8 psd, u32 reg_flags) 536 531 { 537 532 reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq); 538 533 reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq); 539 534 reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw); 540 535 reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain); 541 536 reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr); 537 + reg_rule->psd = psd; 542 538 reg_rule->flags = reg_flags; 543 539 } 544 540 ··· 569 563 reg_rule->start_freq, 570 564 ETSI_WEATHER_RADAR_BAND_LOW, bw, 571 565 reg_rule->ant_gain, reg_rule->reg_power, 572 - flags); 566 + reg_rule->psd_eirp, flags); 573 567 574 568 ath11k_dbg(ab, ATH11K_DBG_REG, 575 569 "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", ··· 590 584 591 585 ath11k_reg_update_rule(regd->reg_rules + i, start_freq, 592 586 end_freq, bw, reg_rule->ant_gain, 593 - reg_rule->reg_power, flags); 587 + reg_rule->reg_power, reg_rule->psd_eirp, flags); 594 588 595 589 regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; 596 590 ··· 611 605 ETSI_WEATHER_RADAR_BAND_HIGH, 612 606 reg_rule->end_freq, bw, 613 607 reg_rule->ant_gain, reg_rule->reg_power, 614 - flags); 608 + reg_rule->psd_eirp, flags); 615 609 616 610 ath11k_dbg(ab, ATH11K_DBG_REG, 617 611 "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", ··· 624 618 *rule_idx = i; 625 619 } 626 620 621 + enum wmi_reg_6ghz_ap_type 622 + ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type) 623 + { 624 + switch (power_type) { 625 + case IEEE80211_REG_LPI_AP: 626 + return WMI_REG_INDOOR_AP; 627 + case IEEE80211_REG_SP_AP: 628 + return WMI_REG_STANDARD_POWER_AP; 629 + case IEEE80211_REG_VLP_AP: 630 + return WMI_REG_VERY_LOW_POWER_AP; 631 + default: 632 + return WMI_REG_MAX_AP_TYPE; 633 + } 634 + } 635 + 627 636 struct ieee80211_regdomain * 628 637 ath11k_reg_build_regd(struct ath11k_base *ab, 629 - struct cur_regulatory_info *reg_info, bool intersect) 638 + struct cur_regulatory_info *reg_info, bool intersect, 639 + enum wmi_vdev_type vdev_type, 640 + enum ieee80211_ap_reg_power power_type) 630 641 { 631 642 struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL; 632 - struct cur_reg_rule *reg_rule; 643 + struct cur_reg_rule *reg_rule, *reg_rule_6ghz; 633 644 u8 i = 0, j = 0, k = 0; 634 645 u8 num_rules; 635 646 u16 max_bw; 636 - u32 flags; 647 + u32 flags, reg_6ghz_number, max_bw_6ghz; 637 648 char alpha2[3]; 638 649 639 650 num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules; 640 651 641 - /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list. 642 - * This can be updated after complete 6 GHz regulatory support is added. 
643 - */ 644 - if (reg_info->is_ext_reg_event) 645 - num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP]; 652 + if (reg_info->is_ext_reg_event) { 653 + if (vdev_type == WMI_VDEV_TYPE_STA) { 654 + enum wmi_reg_6ghz_ap_type ap_type; 655 + 656 + ap_type = ath11k_reg_ap_pwr_convert(power_type); 657 + 658 + if (ap_type == WMI_REG_MAX_AP_TYPE) 659 + ap_type = WMI_REG_INDOOR_AP; 660 + 661 + reg_6ghz_number = reg_info->num_6ghz_rules_client 662 + [ap_type][WMI_REG_DEFAULT_CLIENT]; 663 + 664 + if (reg_6ghz_number == 0) { 665 + ap_type = WMI_REG_INDOOR_AP; 666 + reg_6ghz_number = reg_info->num_6ghz_rules_client 667 + [ap_type][WMI_REG_DEFAULT_CLIENT]; 668 + } 669 + 670 + reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr 671 + [ap_type][WMI_REG_DEFAULT_CLIENT]; 672 + max_bw_6ghz = reg_info->max_bw_6ghz_client 673 + [ap_type][WMI_REG_DEFAULT_CLIENT]; 674 + } else { 675 + reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP]; 676 + reg_rule_6ghz = 677 + reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP]; 678 + max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]; 679 + } 680 + 681 + num_rules += reg_6ghz_number; 682 + } 646 683 647 684 if (!num_rules) 648 685 goto ret; ··· 732 683 * per other BW rule flags we pass from here 733 684 */ 734 685 flags = NL80211_RRF_AUTO_BW; 735 - } else if (reg_info->is_ext_reg_event && 736 - reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] && 737 - (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) { 738 - reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] + 739 - k++; 740 - max_bw = min_t(u16, reg_rule->max_bw, 741 - reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]); 686 + } else if (reg_info->is_ext_reg_event && reg_6ghz_number && 687 + k < reg_6ghz_number) { 688 + reg_rule = reg_rule_6ghz + k++; 689 + max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz); 742 690 flags = NL80211_RRF_AUTO_BW; 691 + if (reg_rule->psd_flag) 692 + flags |= NL80211_RRF_PSD; 743 693 } else { 744 694 break; 745 695 } ··· 750 702 
reg_rule->start_freq, 751 703 reg_rule->end_freq, max_bw, 752 704 reg_rule->ant_gain, reg_rule->reg_power, 753 - flags); 705 + reg_rule->psd_eirp, flags); 754 706 755 707 /* Update dfs cac timeout if the dfs domain is ETSI and the 756 708 * new rule covers weather radar band. ··· 806 758 return new_regd; 807 759 } 808 760 761 + static bool ath11k_reg_is_world_alpha(char *alpha) 762 + { 763 + if (alpha[0] == '0' && alpha[1] == '0') 764 + return true; 765 + 766 + if (alpha[0] == 'n' && alpha[1] == 'a') 767 + return true; 768 + 769 + return false; 770 + } 771 + 772 + static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar) 773 + { 774 + struct ath11k_vif *arvif; 775 + 776 + /* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy 777 + * and one struct ieee80211_regdomain, so it could only store one group 778 + * reg rules. It means multi-interface concurrency in the same ath11k is 779 + * not support for the regdomain. So get the vdev type of the first entry 780 + * now. After concurrency support for the regdomain, this should change. 781 + */ 782 + arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list); 783 + if (arvif) 784 + return arvif->vdev_type; 785 + 786 + return WMI_VDEV_TYPE_UNSPEC; 787 + } 788 + 789 + int ath11k_reg_handle_chan_list(struct ath11k_base *ab, 790 + struct cur_regulatory_info *reg_info, 791 + enum ieee80211_ap_reg_power power_type) 792 + { 793 + struct ieee80211_regdomain *regd; 794 + bool intersect = false; 795 + int pdev_idx; 796 + struct ath11k *ar; 797 + enum wmi_vdev_type vdev_type; 798 + 799 + ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list"); 800 + 801 + if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { 802 + /* In case of failure to set the requested ctry, 803 + * fw retains the current regd. We print a failure info 804 + * and return from here. 
805 + */ 806 + ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n"); 807 + return -EINVAL; 808 + } 809 + 810 + pdev_idx = reg_info->phy_id; 811 + 812 + /* Avoid default reg rule updates sent during FW recovery if 813 + * it is already available 814 + */ 815 + spin_lock_bh(&ab->base_lock); 816 + if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) && 817 + ab->default_regd[pdev_idx]) { 818 + spin_unlock_bh(&ab->base_lock); 819 + goto retfail; 820 + } 821 + spin_unlock_bh(&ab->base_lock); 822 + 823 + if (pdev_idx >= ab->num_radios) { 824 + /* Process the event for phy0 only if single_pdev_only 825 + * is true. If pdev_idx is valid but not 0, discard the 826 + * event. Otherwise, it goes to fallback. In either case 827 + * ath11k_reg_reset_info() needs to be called to avoid 828 + * memory leak issue. 829 + */ 830 + ath11k_reg_reset_info(reg_info); 831 + 832 + if (ab->hw_params.single_pdev_only && 833 + pdev_idx < ab->hw_params.num_rxmda_per_pdev) 834 + return 0; 835 + goto fallback; 836 + } 837 + 838 + /* Avoid multiple overwrites to default regd, during core 839 + * stop-start after mac registration. 840 + */ 841 + if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && 842 + !memcmp((char *)ab->default_regd[pdev_idx]->alpha2, 843 + (char *)reg_info->alpha2, 2)) 844 + goto retfail; 845 + 846 + /* Intersect new rules with default regd if a new country setting was 847 + * requested, i.e a default regd was already set during initialization 848 + * and the regd coming from this event has a valid country info. 
849 + */ 850 + if (ab->default_regd[pdev_idx] && 851 + !ath11k_reg_is_world_alpha((char *) 852 + ab->default_regd[pdev_idx]->alpha2) && 853 + !ath11k_reg_is_world_alpha((char *)reg_info->alpha2)) 854 + intersect = true; 855 + 856 + ar = ab->pdevs[pdev_idx].ar; 857 + vdev_type = ath11k_reg_get_ar_vdev_type(ar); 858 + 859 + ath11k_dbg(ab, ATH11K_DBG_WMI, 860 + "wmi handle chan list power type %d vdev type %d intersect %d\n", 861 + power_type, vdev_type, intersect); 862 + 863 + regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type); 864 + if (!regd) { 865 + ath11k_warn(ab, "failed to build regd from reg_info\n"); 866 + goto fallback; 867 + } 868 + 869 + if (power_type == IEEE80211_REG_UNSET_AP) { 870 + ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]); 871 + ab->reg_info_store[pdev_idx] = *reg_info; 872 + } 873 + 874 + spin_lock_bh(&ab->base_lock); 875 + if (ab->default_regd[pdev_idx]) { 876 + /* The initial rules from FW after WMI Init is to build 877 + * the default regd. From then on, any rules updated for 878 + * the pdev could be due to user reg changes. 879 + * Free previously built regd before assigning the newly 880 + * generated regd to ar. NULL pointer handling will be 881 + * taken care by kfree itself. 882 + */ 883 + ar = ab->pdevs[pdev_idx].ar; 884 + kfree(ab->new_regd[pdev_idx]); 885 + ab->new_regd[pdev_idx] = regd; 886 + queue_work(ab->workqueue, &ar->regd_update_work); 887 + } else { 888 + /* This regd would be applied during mac registration and is 889 + * held constant throughout for regd intersection purpose 890 + */ 891 + ab->default_regd[pdev_idx] = regd; 892 + } 893 + ab->dfs_region = reg_info->dfs_region; 894 + spin_unlock_bh(&ab->base_lock); 895 + 896 + return 0; 897 + 898 + fallback: 899 + /* Fallback to older reg (by sending previous country setting 900 + * again if fw has succeeded and we failed to process here. 901 + * The Regdomain should be uniform across driver and fw. 
Since the 902 + * FW has processed the command and sent a success status, we expect 903 + * this function to succeed as well. If it doesn't, CTRY needs to be 904 + * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent. 905 + */ 906 + /* TODO: This is rare, but still should also be handled */ 907 + WARN_ON(1); 908 + 909 + retfail: 910 + 911 + return -EINVAL; 912 + } 913 + 809 914 void ath11k_regd_update_work(struct work_struct *work) 810 915 { 811 916 struct ath11k *ar = container_of(work, struct ath11k, ··· 982 781 ar->hw->wiphy->reg_notifier = ath11k_reg_notifier; 983 782 } 984 783 784 + void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info) 785 + { 786 + int i, j; 787 + 788 + if (!reg_info) 789 + return; 790 + 791 + kfree(reg_info->reg_rules_2ghz_ptr); 792 + kfree(reg_info->reg_rules_5ghz_ptr); 793 + 794 + for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 795 + kfree(reg_info->reg_rules_6ghz_ap_ptr[i]); 796 + 797 + for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++) 798 + kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]); 799 + } 800 + 801 + memset(reg_info, 0, sizeof(*reg_info)); 802 + } 803 + 985 804 void ath11k_reg_free(struct ath11k_base *ab) 986 805 { 987 806 int i; 807 + 808 + for (i = 0; i < ab->num_radios; i++) 809 + ath11k_reg_reset_info(&ab->reg_info_store[i]); 810 + 811 + kfree(ab->reg_info_store); 812 + ab->reg_info_store = NULL; 988 813 989 814 for (i = 0; i < ab->hw_params.max_radios; i++) { 990 815 kfree(ab->default_regd[i]);
+10 -1
drivers/net/wireless/ath/ath11k/reg.h
··· 30 30 31 31 /* ATH11K Regulatory API's */ 32 32 void ath11k_reg_init(struct ath11k *ar); 33 + void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info); 33 34 void ath11k_reg_free(struct ath11k_base *ab); 34 35 void ath11k_regd_update_work(struct work_struct *work); 35 36 struct ieee80211_regdomain * 36 37 ath11k_reg_build_regd(struct ath11k_base *ab, 37 - struct cur_regulatory_info *reg_info, bool intersect); 38 + struct cur_regulatory_info *reg_info, bool intersect, 39 + enum wmi_vdev_type vdev_type, 40 + enum ieee80211_ap_reg_power power_type); 38 41 int ath11k_regd_update(struct ath11k *ar); 39 42 int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait); 43 + enum wmi_reg_6ghz_ap_type 44 + ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type); 45 + int ath11k_reg_handle_chan_list(struct ath11k_base *ab, 46 + struct cur_regulatory_info *reg_info, 47 + enum ieee80211_ap_reg_power power_type); 48 + 40 49 #endif
+1 -1
drivers/net/wireless/ath/ath11k/testmode.c
··· 198 198 u16 length; 199 199 int ret; 200 200 201 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 201 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 202 202 if (IS_ERR(tb)) { 203 203 ret = PTR_ERR(tb); 204 204 ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+129 -164
drivers/net/wireless/ath/ath11k/wmi.c
··· 238 238 (void *)tb); 239 239 } 240 240 241 - const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr, 242 - size_t len, gfp_t gfp) 241 + const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, 242 + struct sk_buff *skb, gfp_t gfp) 243 243 { 244 244 const void **tb; 245 245 int ret; ··· 248 248 if (!tb) 249 249 return ERR_PTR(-ENOMEM); 250 250 251 - ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len); 251 + ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len); 252 252 if (ret) { 253 253 kfree(tb); 254 254 return ERR_PTR(ret); ··· 2379 2379 return ret; 2380 2380 } 2381 2381 2382 + int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, 2383 + u32 vdev_id, 2384 + struct ath11k_reg_tpc_power_info *param) 2385 + { 2386 + struct ath11k_pdev_wmi *wmi = ar->wmi; 2387 + struct wmi_vdev_set_tpc_power_cmd *cmd; 2388 + struct wmi_vdev_ch_power_info *ch; 2389 + struct sk_buff *skb; 2390 + struct wmi_tlv *tlv; 2391 + u8 *ptr; 2392 + int i, ret, len, array_len; 2393 + 2394 + array_len = sizeof(*ch) * param->num_pwr_levels; 2395 + len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; 2396 + 2397 + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); 2398 + if (!skb) 2399 + return -ENOMEM; 2400 + 2401 + ptr = skb->data; 2402 + 2403 + cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; 2404 + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) | 2405 + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); 2406 + cmd->vdev_id = vdev_id; 2407 + cmd->psd_power = param->is_psd_power; 2408 + cmd->eirp_power = param->eirp_power; 2409 + cmd->power_type_6ghz = param->ap_power_type; 2410 + 2411 + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, 2412 + "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", 2413 + vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); 2414 + 2415 + ptr += sizeof(*cmd); 2416 + tlv = (struct wmi_tlv *)ptr; 2417 + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | 2418 + FIELD_PREP(WMI_TLV_LEN, 
array_len); 2419 + 2420 + ptr += TLV_HDR_SIZE; 2421 + ch = (struct wmi_vdev_ch_power_info *)ptr; 2422 + 2423 + for (i = 0; i < param->num_pwr_levels; i++, ch++) { 2424 + ch->tlv_header = FIELD_PREP(WMI_TLV_TAG, 2425 + WMI_TAG_VDEV_CH_POWER_INFO) | 2426 + FIELD_PREP(WMI_TLV_LEN, 2427 + sizeof(*ch) - TLV_HDR_SIZE); 2428 + 2429 + ch->chan_cfreq = param->chan_power_info[i].chan_cfreq; 2430 + ch->tx_power = param->chan_power_info[i].tx_power; 2431 + 2432 + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n", 2433 + ch->chan_cfreq, ch->tx_power); 2434 + } 2435 + 2436 + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); 2437 + if (ret) { 2438 + ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n"); 2439 + dev_kfree_skb(skb); 2440 + return ret; 2441 + } 2442 + 2443 + return 0; 2444 + } 2445 + 2382 2446 int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, 2383 2447 struct scan_cancel_param *param) 2384 2448 { ··· 3994 3930 struct ath11k_vif *arvif; 3995 3931 int ret; 3996 3932 3997 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 3933 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 3998 3934 if (IS_ERR(tb)) { 3999 3935 ret = PTR_ERR(tb); 4000 3936 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4813 4749 soc->pdevs[0].pdev_id = 0; 4814 4750 } 4815 4751 4752 + if (!soc->reg_info_store) { 4753 + soc->reg_info_store = kcalloc(soc->num_radios, 4754 + sizeof(*soc->reg_info_store), 4755 + GFP_ATOMIC); 4756 + if (!soc->reg_info_store) 4757 + return -ENOMEM; 4758 + } 4759 + 4816 4760 return 0; 4817 4761 } 4818 4762 ··· 5075 5003 const struct wmi_vdev_start_resp_event *ev; 5076 5004 int ret; 5077 5005 5078 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5006 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5079 5007 if (IS_ERR(tb)) { 5080 5008 ret = PTR_ERR(tb); 5081 5009 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5100 5028 vdev_rsp->mac_id = ev->mac_id; 
5101 5029 vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; 5102 5030 vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; 5031 + vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power; 5103 5032 5104 5033 kfree(tb); 5105 5034 return 0; ··· 5175 5102 5176 5103 ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n"); 5177 5104 5178 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5105 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5179 5106 if (IS_ERR(tb)) { 5180 5107 ret = PTR_ERR(tb); 5181 5108 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5351 5278 5352 5279 ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n"); 5353 5280 5354 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5281 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5355 5282 if (IS_ERR(tb)) { 5356 5283 ret = PTR_ERR(tb); 5357 5284 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5707 5634 const struct wmi_peer_delete_resp_event *ev; 5708 5635 int ret; 5709 5636 5710 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5637 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5711 5638 if (IS_ERR(tb)) { 5712 5639 ret = PTR_ERR(tb); 5713 5640 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5739 5666 const struct wmi_vdev_delete_resp_event *ev; 5740 5667 int ret; 5741 5668 5742 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5669 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5743 5670 if (IS_ERR(tb)) { 5744 5671 ret = PTR_ERR(tb); 5745 5672 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5759 5686 return 0; 5760 5687 } 5761 5688 5762 - static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf, 5763 - u32 len, u32 *vdev_id, 5764 - u32 *tx_status) 5689 + static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, 5690 + struct sk_buff *skb, 5691 + u32 *vdev_id, u32 *tx_status) 5765 5692 { 5766 5693 const 
void **tb; 5767 5694 const struct wmi_bcn_tx_status_event *ev; 5768 5695 int ret; 5769 5696 5770 - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 5697 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5771 5698 if (IS_ERR(tb)) { 5772 5699 ret = PTR_ERR(tb); 5773 5700 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5795 5722 const struct wmi_vdev_stopped_event *ev; 5796 5723 int ret; 5797 5724 5798 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5725 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5799 5726 if (IS_ERR(tb)) { 5800 5727 ret = PTR_ERR(tb); 5801 5728 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5949 5876 const struct wmi_mgmt_tx_compl_event *ev; 5950 5877 int ret; 5951 5878 5952 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5879 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5953 5880 if (IS_ERR(tb)) { 5954 5881 ret = PTR_ERR(tb); 5955 5882 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6125 6052 const struct wmi_scan_event *ev; 6126 6053 int ret; 6127 6054 6128 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6055 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6129 6056 if (IS_ERR(tb)) { 6130 6057 ret = PTR_ERR(tb); 6131 6058 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6158 6085 const struct wmi_peer_sta_kickout_event *ev; 6159 6086 int ret; 6160 6087 6161 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6088 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6162 6089 if (IS_ERR(tb)) { 6163 6090 ret = PTR_ERR(tb); 6164 6091 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6185 6112 const struct wmi_roam_event *ev; 6186 6113 int ret; 6187 6114 6188 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6115 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6189 6116 if (IS_ERR(tb)) { 6190 6117 ret = PTR_ERR(tb); 6191 6118 ath11k_warn(ab, 
"failed to parse tlv: %d\n", ret); ··· 6226 6153 return idx; 6227 6154 } 6228 6155 6229 - static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf, 6230 - u32 len, struct wmi_chan_info_event *ch_info_ev) 6156 + static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb, 6157 + struct wmi_chan_info_event *ch_info_ev) 6231 6158 { 6232 6159 const void **tb; 6233 6160 const struct wmi_chan_info_event *ev; 6234 6161 int ret; 6235 6162 6236 - tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 6163 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6237 6164 if (IS_ERR(tb)) { 6238 6165 ret = PTR_ERR(tb); 6239 6166 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6272 6199 const struct wmi_pdev_bss_chan_info_event *ev; 6273 6200 int ret; 6274 6201 6275 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6202 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6276 6203 if (IS_ERR(tb)) { 6277 6204 ret = PTR_ERR(tb); 6278 6205 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6312 6239 const struct wmi_vdev_install_key_compl_event *ev; 6313 6240 int ret; 6314 6241 6315 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6242 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6316 6243 if (IS_ERR(tb)) { 6317 6244 ret = PTR_ERR(tb); 6318 6245 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6343 6270 const struct wmi_peer_assoc_conf_event *ev; 6344 6271 int ret; 6345 6272 6346 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6273 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6347 6274 if (IS_ERR(tb)) { 6348 6275 ret = PTR_ERR(tb); 6349 6276 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 7068 6995 const void **tb; 7069 6996 int ret, i; 7070 6997 7071 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6998 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7072 6999 if (IS_ERR(tb)) { 7073 7000 
ret = PTR_ERR(tb); 7074 7001 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 7133 7060 wake_up(&wmi->tx_ce_desc_wq); 7134 7061 } 7135 7062 7136 - static bool ath11k_reg_is_world_alpha(char *alpha) 7137 - { 7138 - if (alpha[0] == '0' && alpha[1] == '0') 7139 - return true; 7140 - 7141 - if (alpha[0] == 'n' && alpha[1] == 'a') 7142 - return true; 7143 - 7144 - return false; 7145 - } 7146 - 7147 - static int ath11k_reg_chan_list_event(struct ath11k_base *ab, 7148 - struct sk_buff *skb, 7063 + static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb, 7149 7064 enum wmi_reg_chan_list_cmd_type id) 7150 7065 { 7151 - struct cur_regulatory_info *reg_info = NULL; 7152 - struct ieee80211_regdomain *regd = NULL; 7153 - bool intersect = false; 7154 - int ret = 0, pdev_idx, i, j; 7155 - struct ath11k *ar; 7066 + struct cur_regulatory_info *reg_info; 7067 + int ret; 7156 7068 7157 7069 reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC); 7158 - if (!reg_info) { 7159 - ret = -ENOMEM; 7160 - goto fallback; 7161 - } 7070 + if (!reg_info) 7071 + return -ENOMEM; 7162 7072 7163 7073 if (id == WMI_REG_CHAN_LIST_CC_ID) 7164 7074 ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); ··· 7149 7093 ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); 7150 7094 7151 7095 if (ret) { 7152 - ath11k_warn(ab, "failed to extract regulatory info from received event\n"); 7153 - goto fallback; 7154 - } 7155 - 7156 - ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id); 7157 - 7158 - if (reg_info->status_code != REG_SET_CC_STATUS_PASS) { 7159 - /* In case of failure to set the requested ctry, 7160 - * fw retains the current regd. We print a failure info 7161 - * and return from here. 
7162 - */ 7163 - ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n"); 7096 + ath11k_warn(ab, "failed to extract regulatory info\n"); 7164 7097 goto mem_free; 7165 7098 } 7166 7099 7167 - pdev_idx = reg_info->phy_id; 7168 - 7169 - /* Avoid default reg rule updates sent during FW recovery if 7170 - * it is already available 7171 - */ 7172 - spin_lock(&ab->base_lock); 7173 - if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) && 7174 - ab->default_regd[pdev_idx]) { 7175 - spin_unlock(&ab->base_lock); 7100 + ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP); 7101 + if (ret) { 7102 + ath11k_warn(ab, "failed to process regulatory info %d\n", ret); 7176 7103 goto mem_free; 7177 7104 } 7178 - spin_unlock(&ab->base_lock); 7179 7105 7180 - if (pdev_idx >= ab->num_radios) { 7181 - /* Process the event for phy0 only if single_pdev_only 7182 - * is true. If pdev_idx is valid but not 0, discard the 7183 - * event. Otherwise, it goes to fallback. 7184 - */ 7185 - if (ab->hw_params.single_pdev_only && 7186 - pdev_idx < ab->hw_params.num_rxmda_per_pdev) 7187 - goto mem_free; 7188 - else 7189 - goto fallback; 7190 - } 7106 + kfree(reg_info); 7107 + return 0; 7191 7108 7192 - /* Avoid multiple overwrites to default regd, during core 7193 - * stop-start after mac registration. 7194 - */ 7195 - if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && 7196 - !memcmp((char *)ab->default_regd[pdev_idx]->alpha2, 7197 - (char *)reg_info->alpha2, 2)) 7198 - goto mem_free; 7199 - 7200 - /* Intersect new rules with default regd if a new country setting was 7201 - * requested, i.e a default regd was already set during initialization 7202 - * and the regd coming from this event has a valid country info. 
7203 - */ 7204 - if (ab->default_regd[pdev_idx] && 7205 - !ath11k_reg_is_world_alpha((char *) 7206 - ab->default_regd[pdev_idx]->alpha2) && 7207 - !ath11k_reg_is_world_alpha((char *)reg_info->alpha2)) 7208 - intersect = true; 7209 - 7210 - regd = ath11k_reg_build_regd(ab, reg_info, intersect); 7211 - if (!regd) { 7212 - ath11k_warn(ab, "failed to build regd from reg_info\n"); 7213 - goto fallback; 7214 - } 7215 - 7216 - spin_lock(&ab->base_lock); 7217 - if (ab->default_regd[pdev_idx]) { 7218 - /* The initial rules from FW after WMI Init is to build 7219 - * the default regd. From then on, any rules updated for 7220 - * the pdev could be due to user reg changes. 7221 - * Free previously built regd before assigning the newly 7222 - * generated regd to ar. NULL pointer handling will be 7223 - * taken care by kfree itself. 7224 - */ 7225 - ar = ab->pdevs[pdev_idx].ar; 7226 - kfree(ab->new_regd[pdev_idx]); 7227 - ab->new_regd[pdev_idx] = regd; 7228 - queue_work(ab->workqueue, &ar->regd_update_work); 7229 - } else { 7230 - /* This regd would be applied during mac registration and is 7231 - * held constant throughout for regd intersection purpose 7232 - */ 7233 - ab->default_regd[pdev_idx] = regd; 7234 - } 7235 - ab->dfs_region = reg_info->dfs_region; 7236 - spin_unlock(&ab->base_lock); 7237 - 7238 - goto mem_free; 7239 - 7240 - fallback: 7241 - /* Fallback to older reg (by sending previous country setting 7242 - * again if fw has succeeded and we failed to process here. 7243 - * The Regdomain should be uniform across driver and fw. Since the 7244 - * FW has processed the command and sent a success status, we expect 7245 - * this function to succeed as well. If it doesn't, CTRY needs to be 7246 - * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent. 
7247 - */ 7248 - /* TODO: This is rare, but still should also be handled */ 7249 - WARN_ON(1); 7250 7109 mem_free: 7251 - if (reg_info) { 7252 - kfree(reg_info->reg_rules_2ghz_ptr); 7253 - kfree(reg_info->reg_rules_5ghz_ptr); 7254 - if (reg_info->is_ext_reg_event) { 7255 - for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) 7256 - kfree(reg_info->reg_rules_6ghz_ap_ptr[i]); 7257 - 7258 - for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) 7259 - for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) 7260 - kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]); 7261 - } 7262 - kfree(reg_info); 7263 - } 7110 + ath11k_reg_reset_info(reg_info); 7111 + kfree(reg_info); 7264 7112 return ret; 7265 7113 } 7266 7114 ··· 7322 7362 } 7323 7363 7324 7364 ar->last_wmi_vdev_start_status = 0; 7325 - 7365 + ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power; 7326 7366 status = vdev_start_resp.status; 7327 7367 7328 7368 if (WARN_ON_ONCE(status)) { ··· 7344 7384 struct ath11k_vif *arvif; 7345 7385 u32 vdev_id, tx_status; 7346 7386 7347 - if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, 7348 - &vdev_id, &tx_status) != 0) { 7387 + if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 7349 7388 ath11k_warn(ab, "failed to extract bcn tx status"); 7350 7389 return; 7351 7390 } ··· 7375 7416 enum ath11k_wmi_peer_ps_state peer_previous_ps_state; 7376 7417 int ret; 7377 7418 7378 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 7419 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7379 7420 if (IS_ERR(tb)) { 7380 7421 ret = PTR_ERR(tb); 7381 7422 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 7843 7884 /* HW channel counters frequency value in hertz */ 7844 7885 u32 cc_freq_hz = ab->cc_freq_hz; 7845 7886 7846 - if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { 7887 + if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7847 7888 ath11k_warn(ab, "failed to extract chan info event"); 7848 7889 return; 
7849 7890 } ··· 8175 8216 const struct wmi_pdev_ctl_failsafe_chk_event *ev; 8176 8217 int ret; 8177 8218 8178 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8219 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8179 8220 if (IS_ERR(tb)) { 8180 8221 ret = PTR_ERR(tb); 8181 8222 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 8240 8281 const u32 *vdev_ids; 8241 8282 int ret; 8242 8283 8243 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8284 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8244 8285 if (IS_ERR(tb)) { 8245 8286 ret = PTR_ERR(tb); 8246 8287 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 8274 8315 struct ath11k *ar; 8275 8316 int ret; 8276 8317 8277 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8318 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8278 8319 if (IS_ERR(tb)) { 8279 8320 ret = PTR_ERR(tb); 8280 8321 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 8328 8369 const struct wmi_pdev_temperature_event *ev; 8329 8370 int ret; 8330 8371 8331 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8372 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8332 8373 if (IS_ERR(tb)) { 8333 8374 ret = PTR_ERR(tb); 8334 8375 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 8368 8409 const struct wmi_fils_discovery_event *ev; 8369 8410 int ret; 8370 8411 8371 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8412 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8372 8413 if (IS_ERR(tb)) { 8373 8414 ret = PTR_ERR(tb); 8374 8415 ath11k_warn(ab, ··· 8400 8441 const struct wmi_probe_resp_tx_status_event *ev; 8401 8442 int ret; 8402 8443 8403 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8444 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8404 8445 if (IS_ERR(tb)) { 8405 8446 ret = PTR_ERR(tb); 8406 8447 ath11k_warn(ab, ··· 8526 8567 const struct 
wmi_twt_add_dialog_event *ev; 8527 8568 int ret; 8528 8569 8529 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8570 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8530 8571 if (IS_ERR(tb)) { 8531 8572 ret = PTR_ERR(tb); 8532 8573 ath11k_warn(ab, ··· 8563 8604 u64 replay_ctr; 8564 8605 int ret; 8565 8606 8566 - tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 8607 + tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8567 8608 if (IS_ERR(tb)) { 8568 8609 ret = PTR_ERR(tb); 8569 8610 ath11k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 9751 9792 arg->vdev_id, arg->enabled, arg->method, arg->interval); 9752 9793 9753 9794 return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); 9795 + } 9796 + 9797 + bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar) 9798 + { 9799 + return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, 9800 + ar->ab->wmi_ab.svc_map) && ar->supports_6ghz; 9754 9801 }
+63 -2
drivers/net/wireless/ath/ath11k/wmi.h
··· 15 15 struct ath11k_fw_stats; 16 16 struct ath11k_fw_dbglog; 17 17 struct ath11k_vif; 18 + struct ath11k_reg_tpc_power_info; 18 19 19 20 #define PSOC_HOST_MAX_NUM_SS (8) 20 21 ··· 328 327 WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID, 329 328 WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID, 330 329 WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID, 330 + WMI_VDEV_SET_ARP_STAT_CMDID, 331 + WMI_VDEV_GET_ARP_STAT_CMDID, 332 + WMI_VDEV_GET_TX_POWER_CMDID, 333 + WMI_VDEV_LIMIT_OFFCHAN_CMDID, 334 + WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID, 335 + WMI_VDEV_CHAINMASK_CONFIG_CMDID, 336 + WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID, 337 + WMI_VDEV_GET_MWS_COEX_INFO_CMDID, 338 + WMI_VDEV_DELETE_ALL_PEER_CMDID, 339 + WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID, 340 + WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID, 341 + WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID, 342 + WMI_VDEV_SET_PCL_CMDID, 343 + WMI_VDEV_GET_BIG_DATA_CMDID, 344 + WMI_VDEV_GET_BIG_DATA_P2_CMDID, 345 + WMI_VDEV_SET_TPC_POWER_CMDID, 331 346 WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER), 332 347 WMI_PEER_DELETE_CMDID, 333 348 WMI_PEER_FLUSH_TIDS_CMDID, ··· 1897 1880 WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD, 1898 1881 WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, 1899 1882 WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, 1883 + WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5, 1884 + WMI_TAG_VDEV_CH_POWER_INFO, 1900 1885 WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, 1901 1886 WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, 1902 1887 WMI_TAG_MAX ··· 2133 2114 /* The second 128 bits */ 2134 2115 WMI_MAX_EXT_SERVICE = 256, 2135 2116 WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265, 2117 + WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280, 2136 2118 WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281, 2137 2119 WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326, 2138 2120 WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357, ··· 3188 3168 u8 ssid[WLAN_SSID_MAX_LEN]; 3189 3169 }; 3190 3170 3171 + struct wmi_vdev_ch_power_info { 3172 + u32 tlv_header; 3173 + 3174 + /* Channel center frequency (MHz) */ 3175 + u32 chan_cfreq; 3176 + 3177 + 
/* Unit: dBm, either PSD/EIRP power for this frequency or 3178 + * incremental for non-PSD BW 3179 + */ 3180 + u32 tx_power; 3181 + } __packed; 3182 + 3183 + struct wmi_vdev_set_tpc_power_cmd { 3184 + u32 tlv_header; 3185 + u32 vdev_id; 3186 + 3187 + /* Value: 0 or 1, is PSD power or not */ 3188 + u32 psd_power; 3189 + 3190 + /* Maximum EIRP power (dBm units), valid only if power is PSD */ 3191 + u32 eirp_power; 3192 + 3193 + /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */ 3194 + u32 power_type_6ghz; 3195 + 3196 + /* This fixed_param TLV is followed by the below TLVs: 3197 + * num_pwr_levels of wmi_vdev_ch_power_info 3198 + * For PSD power, it is the PSD/EIRP power of the frequency (20 MHz chunks). 3199 + * For non-PSD power, the power values are for 20, 40, and till 3200 + * BSS BW power levels. 3201 + * The num_pwr_levels will be checked by sw how many elements present 3202 + * in the variable-length array. 3203 + */ 3204 + } __packed; 3205 + 3191 3206 #define WMI_IE_BITMAP_SIZE 8 3192 3207 3193 3208 /* prefix used by scan requestor ids on the host */ ··· 4174 4119 }; 4175 4120 u32 cfgd_tx_streams; 4176 4121 u32 cfgd_rx_streams; 4122 + s32 max_allowed_tx_power; 4177 4123 } __packed; 4178 4124 4179 4125 /* VDEV start response status codes */ ··· 5007 4951 }; 5008 4952 5009 4953 enum wmi_vdev_type { 4954 + WMI_VDEV_TYPE_UNSPEC = 0, 5010 4955 WMI_VDEV_TYPE_AP = 1, 5011 4956 WMI_VDEV_TYPE_STA = 2, 5012 4957 WMI_VDEV_TYPE_IBSS = 3, ··· 6352 6295 #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 6353 6296 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 6354 6297 6355 - const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr, 6356 - size_t len, gfp_t gfp); 6298 + const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, 6299 + struct sk_buff *skb, gfp_t gfp); 6357 6300 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, 6358 6301 u32 cmd_id); 6359 6302 struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base 
*wmi_sc, u32 len); ··· 6536 6479 int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar); 6537 6480 int ath11k_wmi_sta_keepalive(struct ath11k *ar, 6538 6481 const struct wmi_sta_keepalive_arg *arg); 6482 + bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar); 6483 + int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar, 6484 + u32 vdev_id, 6485 + struct ath11k_reg_tpc_power_info *param); 6539 6486 6540 6487 #endif
+174 -43
drivers/net/wireless/ath/ath12k/core.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 104 104 return 0; 105 105 } 106 106 107 - static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, 108 - size_t name_len) 107 + static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, 108 + size_t name_len, bool with_variant, 109 + bool bus_type_mode) 109 110 { 110 111 /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ 111 112 char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; 112 113 113 - if (ab->qmi.target.bdf_ext[0] != '\0') 114 + if (with_variant && ab->qmi.target.bdf_ext[0] != '\0') 114 115 scnprintf(variant, sizeof(variant), ",variant=%s", 115 116 ab->qmi.target.bdf_ext); 116 117 117 - scnprintf(name, name_len, 118 - "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", 119 - ath12k_bus_str(ab->hif.bus), 120 - ab->qmi.target.chip_id, 121 - ab->qmi.target.board_id, variant); 118 + switch (ab->id.bdf_search) { 119 + case ATH12K_BDF_SEARCH_BUS_AND_BOARD: 120 + if (bus_type_mode) 121 + scnprintf(name, name_len, 122 + "bus=%s", 123 + ath12k_bus_str(ab->hif.bus)); 124 + else 125 + scnprintf(name, name_len, 126 + "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s", 127 + ath12k_bus_str(ab->hif.bus), 128 + ab->id.vendor, ab->id.device, 129 + ab->id.subsystem_vendor, 130 + ab->id.subsystem_device, 131 + ab->qmi.target.chip_id, 132 + ab->qmi.target.board_id, 133 + variant); 134 + break; 135 + default: 136 + scnprintf(name, name_len, 137 + "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", 138 + ath12k_bus_str(ab->hif.bus), 139 + ab->qmi.target.chip_id, 140 + ab->qmi.target.board_id, variant); 141 + break; 142 + } 122 143 123 144 
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name); 124 145 125 146 return 0; 147 + } 148 + 149 + static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, 150 + size_t name_len) 151 + { 152 + return __ath12k_core_create_board_name(ab, name, name_len, true, false); 153 + } 154 + 155 + static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name, 156 + size_t name_len) 157 + { 158 + return __ath12k_core_create_board_name(ab, name, name_len, false, false); 159 + } 160 + 161 + static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name, 162 + size_t name_len) 163 + { 164 + return __ath12k_core_create_board_name(ab, name, name_len, false, true); 126 165 } 127 166 128 167 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, ··· 198 159 struct ath12k_board_data *bd, 199 160 const void *buf, size_t buf_len, 200 161 const char *boardname, 201 - int bd_ie_type) 162 + int ie_id, 163 + int name_id, 164 + int data_id) 202 165 { 203 166 const struct ath12k_fw_ie *hdr; 204 167 bool name_match_found; ··· 210 169 211 170 name_match_found = false; 212 171 213 - /* go through ATH12K_BD_IE_BOARD_ elements */ 172 + /* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */ 214 173 while (buf_len > sizeof(struct ath12k_fw_ie)) { 215 174 hdr = buf; 216 175 board_ie_id = le32_to_cpu(hdr->id); ··· 221 180 buf += sizeof(*hdr); 222 181 223 182 if (buf_len < ALIGN(board_ie_len, 4)) { 224 - ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n", 183 + ath12k_err(ab, "invalid %s length: %zu < %zu\n", 184 + ath12k_bd_ie_type_str(ie_id), 225 185 buf_len, ALIGN(board_ie_len, 4)); 226 186 ret = -EINVAL; 227 187 goto out; 228 188 } 229 189 230 - switch (board_ie_id) { 231 - case ATH12K_BD_IE_BOARD_NAME: 190 + if (board_ie_id == name_id) { 232 191 ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "", 233 192 board_ie_data, board_ie_len); 234 193 235 194 if (board_ie_len != 
strlen(boardname)) 236 - break; 195 + goto next; 237 196 238 197 ret = memcmp(board_ie_data, boardname, strlen(boardname)); 239 198 if (ret) 240 - break; 199 + goto next; 241 200 242 201 name_match_found = true; 243 202 ath12k_dbg(ab, ATH12K_DBG_BOOT, 244 - "boot found match for name '%s'", 203 + "boot found match %s for name '%s'", 204 + ath12k_bd_ie_type_str(ie_id), 245 205 boardname); 246 - break; 247 - case ATH12K_BD_IE_BOARD_DATA: 206 + } else if (board_ie_id == data_id) { 248 207 if (!name_match_found) 249 208 /* no match found */ 250 - break; 209 + goto next; 251 210 252 211 ath12k_dbg(ab, ATH12K_DBG_BOOT, 253 - "boot found board data for '%s'", boardname); 212 + "boot found %s for '%s'", 213 + ath12k_bd_ie_type_str(ie_id), 214 + boardname); 254 215 255 216 bd->data = board_ie_data; 256 217 bd->len = board_ie_len; 257 218 258 219 ret = 0; 259 220 goto out; 260 - default: 261 - ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n", 221 + } else { 222 + ath12k_warn(ab, "unknown %s id found: %d\n", 223 + ath12k_bd_ie_type_str(ie_id), 262 224 board_ie_id); 263 - break; 264 225 } 265 - 226 + next: 266 227 /* jump over the padding */ 267 228 board_ie_len = ALIGN(board_ie_len, 4); 268 229 ··· 281 238 282 239 static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab, 283 240 struct ath12k_board_data *bd, 284 - const char *boardname) 241 + const char *boardname, 242 + int ie_id_match, 243 + int name_id, 244 + int data_id) 285 245 { 286 246 size_t len, magic_len; 287 247 const u8 *data; ··· 349 303 goto err; 350 304 } 351 305 352 - switch (ie_id) { 353 - case ATH12K_BD_IE_BOARD: 306 + if (ie_id == ie_id_match) { 354 307 ret = ath12k_core_parse_bd_ie_board(ab, bd, data, 355 308 ie_len, 356 309 boardname, 357 - ATH12K_BD_IE_BOARD); 310 + ie_id_match, 311 + name_id, 312 + data_id); 358 313 if (ret == -ENOENT) 359 314 /* no match found, continue */ 360 - break; 315 + goto next; 361 316 else if (ret) 362 317 /* there was an error, bail out */ 363 318 goto 
err; 364 319 /* either found or error, so stop searching */ 365 320 goto out; 366 321 } 367 - 322 + next: 368 323 /* jump over the padding */ 369 324 ie_len = ALIGN(ie_len, 4); 370 325 ··· 375 328 376 329 out: 377 330 if (!bd->data || !bd->len) { 378 - ath12k_err(ab, 379 - "failed to fetch board data for %s from %s\n", 331 + ath12k_dbg(ab, ATH12K_DBG_BOOT, 332 + "failed to fetch %s for %s from %s\n", 333 + ath12k_bd_ie_type_str(ie_id_match), 380 334 boardname, filepath); 381 335 ret = -ENODATA; 382 336 goto err; ··· 404 356 return 0; 405 357 } 406 358 407 - #define BOARD_NAME_SIZE 100 359 + #define BOARD_NAME_SIZE 200 408 360 int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd) 409 361 { 410 - char boardname[BOARD_NAME_SIZE]; 362 + char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE]; 363 + char *filename, filepath[100]; 411 364 int bd_api; 412 365 int ret; 413 366 414 - ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); 367 + filename = ATH12K_BOARD_API2_FILE; 368 + 369 + ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname)); 415 370 if (ret) { 416 371 ath12k_err(ab, "failed to create board name: %d", ret); 417 372 return ret; 418 373 } 419 374 420 375 bd_api = 2; 421 - ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname); 376 + ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname, 377 + ATH12K_BD_IE_BOARD, 378 + ATH12K_BD_IE_BOARD_NAME, 379 + ATH12K_BD_IE_BOARD_DATA); 380 + if (!ret) 381 + goto success; 382 + 383 + ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname, 384 + sizeof(fallback_boardname)); 385 + if (ret) { 386 + ath12k_err(ab, "failed to create fallback board name: %d", ret); 387 + return ret; 388 + } 389 + 390 + ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname, 391 + ATH12K_BD_IE_BOARD, 392 + ATH12K_BD_IE_BOARD_NAME, 393 + ATH12K_BD_IE_BOARD_DATA); 422 394 if (!ret) 423 395 goto success; 424 396 425 397 bd_api = 1; 426 398 ret 
= ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE); 427 399 if (ret) { 428 - ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", 400 + ath12k_core_create_firmware_path(ab, filename, 401 + filepath, sizeof(filepath)); 402 + ath12k_err(ab, "failed to fetch board data for %s from %s\n", 403 + boardname, filepath); 404 + if (memcmp(boardname, fallback_boardname, strlen(boardname))) 405 + ath12k_err(ab, "failed to fetch board data for %s from %s\n", 406 + fallback_boardname, filepath); 407 + 408 + ath12k_err(ab, "failed to fetch board.bin from %s\n", 429 409 ab->hw_params->fw.dir); 430 410 return ret; 431 411 } ··· 461 385 success: 462 386 ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api); 463 387 return 0; 388 + } 389 + 390 + int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd) 391 + { 392 + char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE]; 393 + int ret; 394 + 395 + ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); 396 + if (ret) { 397 + ath12k_dbg(ab, ATH12K_DBG_BOOT, 398 + "failed to create board name for regdb: %d", ret); 399 + goto exit; 400 + } 401 + 402 + ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname, 403 + ATH12K_BD_IE_REGDB, 404 + ATH12K_BD_IE_REGDB_NAME, 405 + ATH12K_BD_IE_REGDB_DATA); 406 + if (!ret) 407 + goto exit; 408 + 409 + ret = ath12k_core_create_bus_type_board_name(ab, default_boardname, 410 + BOARD_NAME_SIZE); 411 + if (ret) { 412 + ath12k_dbg(ab, ATH12K_DBG_BOOT, 413 + "failed to create default board name for regdb: %d", ret); 414 + goto exit; 415 + } 416 + 417 + ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname, 418 + ATH12K_BD_IE_REGDB, 419 + ATH12K_BD_IE_REGDB_NAME, 420 + ATH12K_BD_IE_REGDB_DATA); 421 + if (!ret) 422 + goto exit; 423 + 424 + ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME); 425 + if (ret) 426 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n", 
427 + ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir); 428 + 429 + exit: 430 + if (!ret) 431 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n"); 432 + 433 + return ret; 464 434 } 465 435 466 436 static void ath12k_core_stop(struct ath12k_base *ab) ··· 714 592 715 593 ath12k_dp_cc_config(ab); 716 594 717 - ath12k_dp_pdev_pre_alloc(ab); 718 - 719 595 ret = ath12k_dp_rx_pdev_reo_setup(ab); 720 596 if (ret) { 721 597 ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret); ··· 879 759 { 880 760 struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work); 881 761 struct ath12k *ar; 762 + struct ieee80211_hw *hw; 882 763 bool rfkill_radio_on; 883 764 int i; 884 765 ··· 892 771 if (!ar) 893 772 continue; 894 773 774 + hw = ath12k_ar_to_hw(ar); 895 775 ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on); 896 - wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on); 776 + wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on); 897 777 } 898 778 } 899 779 ··· 923 801 { 924 802 struct ath12k *ar; 925 803 struct ath12k_pdev *pdev; 804 + struct ath12k_hw *ah; 926 805 int i; 927 806 928 807 spin_lock_bh(&ab->base_lock); ··· 933 810 if (ab->is_reset) 934 811 set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); 935 812 813 + for (i = 0; i < ab->num_hw; i++) { 814 + if (!ab->ah[i]) 815 + continue; 816 + 817 + ah = ab->ah[i]; 818 + ieee80211_stop_queues(ah->hw); 819 + } 820 + 936 821 for (i = 0; i < ab->num_radios; i++) { 937 822 pdev = &ab->pdevs[i]; 938 823 ar = pdev->ar; 939 824 if (!ar || ar->state == ATH12K_STATE_OFF) 940 825 continue; 941 826 942 - ieee80211_stop_queues(ar->hw); 943 827 ath12k_mac_drain_tx(ar); 944 828 complete(&ar->scan.started); 945 829 complete(&ar->scan.completed); ··· 986 856 case ATH12K_STATE_ON: 987 857 ar->state = ATH12K_STATE_RESTARTING; 988 858 ath12k_core_halt(ar); 989 - ieee80211_restart_hw(ar->hw); 859 + ieee80211_restart_hw(ath12k_ar_to_hw(ar)); 990 860 break; 991 861 case ATH12K_STATE_OFF: 992 862 
ath12k_warn(ab, ··· 1184 1054 1185 1055 ab->dev = dev; 1186 1056 ab->hif.bus = bus; 1057 + ab->qmi.num_radios = U8_MAX; 1187 1058 1188 1059 return ab; 1189 1060
+54 -3
drivers/net/wireless/ath/ath12k/core.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_CORE_H ··· 54 54 #define ATH12K_RESET_FAIL_TIMEOUT_HZ (20 * HZ) 55 55 #define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ) 56 56 #define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ) 57 + 58 + enum ath12k_bdf_search { 59 + ATH12K_BDF_SEARCH_DEFAULT, 60 + ATH12K_BDF_SEARCH_BUS_AND_BOARD, 61 + }; 57 62 58 63 enum wme_ac { 59 64 WME_AC_BE, ··· 425 420 }; 426 421 427 422 #define ATH12K_MIN_5G_FREQ 4150 428 - #define ATH12K_MIN_6G_FREQ 5945 423 + #define ATH12K_MIN_6G_FREQ 5925 429 424 #define ATH12K_MAX_6G_FREQ 7115 430 425 #define ATH12K_NUM_CHANS 100 431 426 #define ATH12K_MAX_5G_CHAN 173 ··· 473 468 struct ath12k { 474 469 struct ath12k_base *ab; 475 470 struct ath12k_pdev *pdev; 476 - struct ieee80211_hw *hw; 471 + struct ath12k_hw *ah; 477 472 struct ath12k_wmi_pdev *wmi; 478 473 struct ath12k_pdev_dp dp; 479 474 u8 mac_addr[ETH_ALEN]; ··· 537 532 /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */ 538 533 u8 pdev_idx; 539 534 u8 lmac_id; 535 + u8 hw_link_id; 540 536 541 537 struct completion peer_assoc_done; 542 538 struct completion peer_delete_done; ··· 595 589 bool monitor_vdev_created; 596 590 bool monitor_started; 597 591 int monitor_vdev_id; 592 + }; 593 + 594 + struct ath12k_hw { 595 + struct ieee80211_hw *hw; 596 + 597 + u8 num_radio; 598 + struct ath12k radio[] __aligned(sizeof(void *)); 598 599 }; 599 600 600 601 struct ath12k_band_cap { ··· 737 724 u8 fw_pdev_count; 738 725 739 726 struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS]; 727 + 728 + /* Holds information of wiphy (hw) registration. 729 + * 730 + * In Multi/Single Link Operation case, all pdevs are registered as 731 + * a single wiphy. 
In other (legacy/Non-MLO) cases, each pdev is 732 + * registered as separate wiphys. 733 + */ 734 + struct ath12k_hw *ah[MAX_RADIOS]; 735 + u8 num_hw; 736 + 740 737 struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS]; 741 738 unsigned long long free_vdev_map; 742 739 unsigned long long free_vdev_stats_id_map; ··· 816 793 /* true means radio is on */ 817 794 bool rfkill_radio_on; 818 795 796 + struct { 797 + enum ath12k_bdf_search bdf_search; 798 + u32 vendor; 799 + u32 device; 800 + u32 subsystem_vendor; 801 + u32 subsystem_device; 802 + } id; 803 + 819 804 /* must be last */ 820 805 u8 drv_priv[] __aligned(sizeof(void *)); 806 + }; 807 + 808 + struct ath12k_pdev_map { 809 + struct ath12k_base *ab; 810 + u8 pdev_idx; 821 811 }; 822 812 823 813 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); ··· 846 810 int ath12k_core_fetch_bdf(struct ath12k_base *ath12k, 847 811 struct ath12k_board_data *bd); 848 812 void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd); 813 + int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd); 849 814 int ath12k_core_check_dt(struct ath12k_base *ath12k); 850 815 int ath12k_core_check_smbios(struct ath12k_base *ab); 851 816 void ath12k_core_halt(struct ath12k *ar); ··· 919 882 return "unknown"; 920 883 } 921 884 885 + static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw) 886 + { 887 + return hw->priv; 888 + } 889 + 890 + static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah) 891 + { 892 + return ah->radio; 893 + } 894 + 895 + static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar) 896 + { 897 + return ar->ah->hw; 898 + } 922 899 #endif /* _CORE_H_ */
+2 -1
drivers/net/wireless/ath/ath12k/dp.h
··· 150 150 151 151 #define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */ 152 152 153 - #define DP_BA_WIN_SZ_MAX 256 153 + #define DP_BA_WIN_SZ_MAX 1024 154 154 155 155 #define DP_TCL_NUM_RING_MAX 4 156 156 ··· 170 170 #define DP_REO_CMD_RING_SIZE 128 171 171 #define DP_REO_STATUS_RING_SIZE 2048 172 172 #define DP_RXDMA_BUF_RING_SIZE 4096 173 + #define DP_RX_MAC_BUF_RING_SIZE 2048 173 174 #define DP_RXDMA_REFILL_RING_SIZE 2048 174 175 #define DP_RXDMA_ERR_DST_RING_SIZE 1024 175 176 #define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+2 -2
drivers/net/wireless/ath/ath12k/dp_mon.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "dp_mon.h" ··· 1130 1130 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 1131 1131 rx_status->flag |= RX_FLAG_8023; 1132 1132 1133 - ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); 1133 + ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 1134 1134 } 1135 1135 1136 1136 static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+4 -4
drivers/net/wireless/ath/ath12k/dp_rx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/ieee80211.h> ··· 2458 2458 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED)) 2459 2459 rx_status->flag |= RX_FLAG_8023; 2460 2460 2461 - ieee80211_rx_napi(ar->hw, pubsta, msdu, napi); 2461 + ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); 2462 2462 } 2463 2463 2464 2464 static int ath12k_dp_rx_process_msdu(struct ath12k *ar, ··· 2844 2844 ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); 2845 2845 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, 2846 2846 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); 2847 - ieee80211_rx(ar->hw, msdu); 2847 + ieee80211_rx(ath12k_ar_to_hw(ar), msdu); 2848 2848 return -EINVAL; 2849 2849 } 2850 2850 ··· 4086 4086 ret = ath12k_dp_srng_setup(ab, 4087 4087 &dp->rx_mac_buf_ring[i], 4088 4088 HAL_RXDMA_BUF, 1, 4089 - i, 1024); 4089 + i, DP_RX_MAC_BUF_RING_SIZE); 4090 4090 if (ret) { 4091 4091 ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n", 4092 4092 i);
+5 -5
drivers/net/wireless/ath/ath12k/dp_tx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "core.h" ··· 151 151 152 152 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && 153 153 !ieee80211_is_data(hdr->frame_control)) 154 - return -ENOTSUPP; 154 + return -EOPNOTSUPP; 155 155 156 156 pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1); 157 157 ··· 401 401 } 402 402 } 403 403 404 - ieee80211_tx_status_skb(ar->hw, msdu); 404 + ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); 405 405 } 406 406 407 407 static void ··· 498 498 * Might end up reporting it out-of-band from HTT stats. 499 499 */ 500 500 501 - ieee80211_tx_status_skb(ar->hw, msdu); 501 + ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); 502 502 503 503 exit: 504 504 rcu_read_unlock(); ··· 837 837 if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) { 838 838 ath12k_err(ab, "unsupported htt major version %d supported version is %d\n", 839 839 dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR); 840 - return -ENOTSUPP; 840 + return -EOPNOTSUPP; 841 841 } 842 842 843 843 return 0;
+13 -7
drivers/net/wireless/ath/ath12k/hal_desc.h
··· 2500 2500 #define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30) 2501 2501 #define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31) 2502 2502 2503 - #define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0) 2504 - #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8) 2505 - #define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10) 2506 - #define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11) 2507 - #define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23) 2508 - #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24) 2509 - #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25) 2503 + #define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0) 2504 + #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10) 2505 + #define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12) 2506 + #define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13) 2507 + #define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25) 2508 + #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26) 2509 + #define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27) 2510 2510 2511 2511 struct hal_reo_update_rx_queue { 2512 2512 struct hal_reo_cmd_hdr cmd; ··· 2515 2515 __le32 info1; 2516 2516 __le32 info2; 2517 2517 __le32 pn[4]; 2518 + } __packed; 2519 + 2520 + struct hal_rx_reo_queue_1k { 2521 + struct hal_desc_header desc_hdr; 2522 + __le32 rx_bitmap_1023_288[23]; 2523 + __le32 reserved[8]; 2518 2524 } __packed; 2519 2525 2520 2526 #define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+10 -5
drivers/net/wireless/ath/ath12k/hal_rx.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include "debug.h" ··· 247 247 case HAL_REO_CMD_UNBLOCK_CACHE: 248 248 case HAL_REO_CMD_FLUSH_TIMEOUT_LIST: 249 249 ath12k_warn(ab, "Unsupported reo command %d\n", type); 250 - ret = -ENOTSUPP; 250 + ret = -EOPNOTSUPP; 251 251 break; 252 252 default: 253 253 ath12k_warn(ab, "Unknown reo command %d\n", type); ··· 688 688 689 689 u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid) 690 690 { 691 - u32 num_ext_desc; 691 + u32 num_ext_desc, num_1k_desc = 0; 692 692 693 693 if (ba_window_size <= 1) { 694 694 if (tid != HAL_DESC_REO_NON_QOS_TID) 695 695 num_ext_desc = 1; 696 696 else 697 697 num_ext_desc = 0; 698 + 698 699 } else if (ba_window_size <= 105) { 699 700 num_ext_desc = 1; 700 701 } else if (ba_window_size <= 210) { 701 702 num_ext_desc = 2; 702 - } else { 703 + } else if (ba_window_size <= 256) { 703 704 num_ext_desc = 3; 705 + } else { 706 + num_ext_desc = 10; 707 + num_1k_desc = 1; 704 708 } 705 709 706 710 return sizeof(struct hal_rx_reo_queue) + 707 - (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)); 711 + (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) + 712 + (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k)); 708 713 } 709 714 710 715 void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+9
drivers/net/wireless/ath/ath12k/hw.c
··· 914 914 .rfkill_on_level = 0, 915 915 916 916 .rddm_size = 0, 917 + 918 + .def_num_link = 0, 919 + .max_mlo_peer = 256, 917 920 }, 918 921 { 919 922 .name = "wcn7850 hw2.0", ··· 981 978 .rfkill_on_level = 1, 982 979 983 980 .rddm_size = 0x780000, 981 + 982 + .def_num_link = 2, 983 + .max_mlo_peer = 32, 984 984 }, 985 985 { 986 986 .name = "qcn9274 hw2.0", ··· 1046 1040 .rfkill_on_level = 0, 1047 1041 1048 1042 .rddm_size = 0, 1043 + 1044 + .def_num_link = 0, 1045 + .max_mlo_peer = 256, 1049 1046 }, 1050 1047 }; 1051 1048
+22 -1
drivers/net/wireless/ath/ath12k/hw.h
··· 192 192 u32 rfkill_on_level; 193 193 194 194 u32 rddm_size; 195 + 196 + u8 def_num_link; 197 + u16 max_mlo_peer; 195 198 }; 196 199 197 200 struct ath12k_hw_ops { ··· 245 242 ATH12K_BD_IE_BOARD_DATA = 1, 246 243 }; 247 244 245 + enum ath12k_bd_ie_regdb_type { 246 + ATH12K_BD_IE_REGDB_NAME = 0, 247 + ATH12K_BD_IE_REGDB_DATA = 1, 248 + }; 249 + 248 250 enum ath12k_bd_ie_type { 249 251 /* contains sub IEs of enum ath12k_bd_ie_board_type */ 250 252 ATH12K_BD_IE_BOARD = 0, 251 - ATH12K_BD_IE_BOARD_EXT = 1, 253 + /* contains sub IEs of enum ath12k_bd_ie_regdb_type */ 254 + ATH12K_BD_IE_REGDB = 1, 252 255 }; 253 256 254 257 struct ath12k_hw_regs { ··· 323 314 324 315 u32 hal_reo_status_ring_base; 325 316 }; 317 + 318 + static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type) 319 + { 320 + switch (type) { 321 + case ATH12K_BD_IE_BOARD: 322 + return "board data"; 323 + case ATH12K_BD_IE_REGDB: 324 + return "regdb data"; 325 + } 326 + 327 + return "unknown"; 328 + } 326 329 327 330 int ath12k_hw_init(struct ath12k_base *ab); 328 331
+584 -276
drivers/net/wireless/ath/ath12k/mac.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <net/mac80211.h> ··· 241 241 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 242 242 }; 243 243 244 - static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, 245 - struct ieee80211_vif *vif); 244 + static int ath12k_start_vdev_delay(struct ath12k *ar, 245 + struct ath12k_vif *arvif); 246 246 247 247 static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) 248 248 { ··· 542 542 arvif_iter.vdev_id = vdev_id; 543 543 544 544 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 545 - ieee80211_iterate_active_interfaces_atomic(ar->hw, 545 + ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), 546 546 flags, 547 547 ath12k_get_arvif_iter, 548 548 &arvif_iter); ··· 1040 1040 if (ar->monitor_started) 1041 1041 return 0; 1042 1042 1043 - ieee80211_iter_chan_contexts_atomic(ar->hw, 1043 + ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), 1044 1044 ath12k_mac_get_any_chandef_iter, 1045 1045 &chandef); 1046 1046 if (!chandef) ··· 1083 1083 return ret; 1084 1084 } 1085 1085 1086 - static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) 1086 + static int ath12k_mac_config(struct ath12k *ar, u32 changed) 1087 1087 { 1088 - struct ath12k *ar = hw->priv; 1088 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1089 1089 struct ieee80211_conf *conf = &hw->conf; 1090 1090 int ret = 0; 1091 1091 ··· 1122 1122 return ret; 1123 1123 } 1124 1124 1125 + static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) 1126 + { 1127 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 1128 + struct ath12k *ar; 1129 + int ret; 1130 + 1131 + ar = ath12k_ah_to_ar(ah); 1132 + 1133 + ret = ath12k_mac_config(ar, changed); 1134 + if (ret) 1135 
+ ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n", 1136 + ar->pdev_idx, ret); 1137 + 1138 + return ret; 1139 + } 1140 + 1125 1141 static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) 1126 1142 { 1127 1143 struct ath12k *ar = arvif->ar; 1128 1144 struct ath12k_base *ab = ar->ab; 1129 - struct ieee80211_hw *hw = ar->hw; 1145 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1130 1146 struct ieee80211_vif *vif = arvif->vif; 1131 1147 struct ieee80211_mutable_offsets offs = {}; 1132 1148 struct sk_buff *bcn; ··· 1230 1214 struct ath12k_wmi_peer_assoc_arg *arg) 1231 1215 { 1232 1216 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1217 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1233 1218 u32 aid; 1234 1219 1235 1220 lockdep_assert_held(&ar->conf_mutex); ··· 1245 1228 arg->peer_associd = aid; 1246 1229 arg->auth_flag = true; 1247 1230 /* TODO: STA WAR in ath10k for listen interval required? */ 1248 - arg->peer_listen_intval = ar->hw->conf.listen_interval; 1231 + arg->peer_listen_intval = hw->conf.listen_interval; 1249 1232 arg->peer_nss = 1; 1250 1233 arg->peer_caps = vif->bss_conf.assoc_capability; 1251 1234 } ··· 1259 1242 struct cfg80211_chan_def def; 1260 1243 struct cfg80211_bss *bss; 1261 1244 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1245 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1262 1246 const u8 *rsnie = NULL; 1263 1247 const u8 *wpaie = NULL; 1264 1248 ··· 1268 1250 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 1269 1251 return; 1270 1252 1271 - bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 1253 + bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0, 1272 1254 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 1273 1255 1274 1256 if (arvif->rsnie_present || arvif->wpaie_present) { ··· 1288 1270 ies->data, 1289 1271 ies->len); 1290 1272 rcu_read_unlock(); 1291 - cfg80211_put_bss(ar->hw->wiphy, bss); 1273 + cfg80211_put_bss(hw->wiphy, bss); 1292 1274 } 1293 1275 1294 1276 /* 
FIXME: base on RSN IE/WPA IE is a correct idea? */ ··· 1322 1304 struct cfg80211_chan_def def; 1323 1305 const struct ieee80211_supported_band *sband; 1324 1306 const struct ieee80211_rate *rates; 1307 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1325 1308 enum nl80211_band band; 1326 1309 u32 ratemask; 1327 1310 u8 rate; ··· 1334 1315 return; 1335 1316 1336 1317 band = def.chan->band; 1337 - sband = ar->hw->wiphy->bands[band]; 1318 + sband = hw->wiphy->bands[band]; 1338 1319 ratemask = sta->deflink.supp_rates[band]; 1339 1320 ratemask &= arvif->bitrate_mask.control[band].legacy; 1340 1321 rates = sband->bitrates; ··· 2285 2266 ath12k_smps_map[smps]); 2286 2267 } 2287 2268 2288 - static void ath12k_bss_assoc(struct ieee80211_hw *hw, 2289 - struct ieee80211_vif *vif, 2269 + static void ath12k_bss_assoc(struct ath12k *ar, 2270 + struct ath12k_vif *arvif, 2290 2271 struct ieee80211_bss_conf *bss_conf) 2291 2272 { 2292 - struct ath12k *ar = hw->priv; 2293 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2273 + struct ieee80211_vif *vif = arvif->vif; 2294 2274 struct ath12k_wmi_peer_assoc_arg peer_arg; 2295 2275 struct ieee80211_sta *ap_sta; 2296 2276 struct ath12k_peer *peer; ··· 2379 2361 arvif->vdev_id, ret); 2380 2362 } 2381 2363 2382 - static void ath12k_bss_disassoc(struct ieee80211_hw *hw, 2383 - struct ieee80211_vif *vif) 2364 + static void ath12k_bss_disassoc(struct ath12k *ar, 2365 + struct ath12k_vif *arvif) 2384 2366 { 2385 - struct ath12k *ar = hw->priv; 2386 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2387 2367 int ret; 2388 2368 2389 2369 lockdep_assert_held(&ar->conf_mutex); ··· 2429 2413 struct cfg80211_chan_def *def) 2430 2414 { 2431 2415 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2416 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2432 2417 const struct ieee80211_supported_band *sband; 2433 2418 u8 basic_rate_idx; 2434 2419 int hw_rate_code; ··· 2439 2422 2440 2423 lockdep_assert_held(&ar->conf_mutex); 2441 2424 2442 - 
sband = ar->hw->wiphy->bands[def->chan->band]; 2425 + sband = hw->wiphy->bands[def->chan->band]; 2443 2426 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 2444 2427 bitrate = sband->bitrates[basic_rate_idx].bitrate; 2445 2428 ··· 2466 2449 struct ieee80211_bss_conf *info) 2467 2450 { 2468 2451 struct ath12k *ar = arvif->ar; 2452 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2469 2453 struct sk_buff *tmpl; 2470 2454 int ret; 2471 2455 u32 interval; ··· 2475 2457 if (info->fils_discovery.max_interval) { 2476 2458 interval = info->fils_discovery.max_interval; 2477 2459 2478 - tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif); 2460 + tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif); 2479 2461 if (tmpl) 2480 2462 ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, 2481 2463 tmpl); ··· 2483 2465 unsol_bcast_probe_resp_enabled = 1; 2484 2466 interval = info->unsol_bcast_probe_resp_interval; 2485 2467 2486 - tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw, 2468 + tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, 2487 2469 arvif->vif); 2488 2470 if (tmpl) 2489 2471 ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, ··· 2509 2491 return ret; 2510 2492 } 2511 2493 2512 - static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 2513 - struct ieee80211_vif *vif, 2514 - struct ieee80211_bss_conf *info, 2515 - u64 changed) 2494 + static void ath12k_mac_bss_info_changed(struct ath12k *ar, 2495 + struct ath12k_vif *arvif, 2496 + struct ieee80211_bss_conf *info, 2497 + u64 changed) 2516 2498 { 2517 - struct ath12k *ar = hw->priv; 2518 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2499 + struct ieee80211_vif *vif = arvif->vif; 2519 2500 struct cfg80211_chan_def def; 2520 2501 u32 param_id, param_value; 2521 2502 enum nl80211_band band; ··· 2527 2510 u8 rateidx; 2528 2511 u32 rate; 2529 2512 2530 - mutex_lock(&ar->conf_mutex); 2513 + lockdep_assert_held(&ar->conf_mutex); 2531 2514 2532 2515 if (changed & 
BSS_CHANGED_BEACON_INT) { 2533 2516 arvif->beacon_interval = info->beacon_int; ··· 2683 2666 2684 2667 if (changed & BSS_CHANGED_ASSOC) { 2685 2668 if (vif->cfg.assoc) 2686 - ath12k_bss_assoc(hw, vif, info); 2669 + ath12k_bss_assoc(ar, arvif, info); 2687 2670 else 2688 - ath12k_bss_disassoc(hw, vif); 2671 + ath12k_bss_disassoc(ar, arvif); 2689 2672 } 2690 2673 2691 2674 if (changed & BSS_CHANGED_TXPOWER) { ··· 2787 2770 2788 2771 if (changed & BSS_CHANGED_EHT_PUNCTURING) 2789 2772 arvif->punct_bitmap = info->eht_puncturing; 2773 + } 2774 + 2775 + static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 2776 + struct ieee80211_vif *vif, 2777 + struct ieee80211_bss_conf *info, 2778 + u64 changed) 2779 + { 2780 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 2781 + struct ath12k *ar; 2782 + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2783 + 2784 + ar = ath12k_ah_to_ar(ah); 2785 + 2786 + mutex_lock(&ar->conf_mutex); 2787 + 2788 + ath12k_mac_bss_info_changed(ar, arvif, info, changed); 2790 2789 2791 2790 mutex_unlock(&ar->conf_mutex); 2792 2791 } 2793 2792 2794 2793 void __ath12k_mac_scan_finish(struct ath12k *ar) 2795 2794 { 2795 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2796 + 2796 2797 lockdep_assert_held(&ar->data_lock); 2797 2798 2798 2799 switch (ar->scan.state) { ··· 2819 2784 case ATH12K_SCAN_RUNNING: 2820 2785 case ATH12K_SCAN_ABORTING: 2821 2786 if (ar->scan.is_roc && ar->scan.roc_notify) 2822 - ieee80211_remain_on_channel_expired(ar->hw); 2787 + ieee80211_remain_on_channel_expired(hw); 2823 2788 fallthrough; 2824 2789 case ATH12K_SCAN_STARTING: 2825 2790 if (!ar->scan.is_roc) { ··· 2830 2795 ATH12K_SCAN_STARTING)), 2831 2796 }; 2832 2797 2833 - ieee80211_scan_completed(ar->hw, &info); 2798 + ieee80211_scan_completed(hw, &info); 2834 2799 } 2835 2800 2836 2801 ar->scan.state = ATH12K_SCAN_IDLE; ··· 2975 2940 struct ieee80211_vif *vif, 2976 2941 struct ieee80211_scan_request *hw_req) 2977 2942 { 2978 - struct ath12k *ar = hw->priv; 2943 
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 2944 + struct ath12k *ar; 2979 2945 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2980 2946 struct cfg80211_scan_request *req = &hw_req->req; 2981 2947 struct ath12k_wmi_scan_req_arg arg = {}; 2982 2948 int ret; 2983 2949 int i; 2950 + 2951 + ar = ath12k_ah_to_ar(ah); 2984 2952 2985 2953 mutex_lock(&ar->conf_mutex); 2986 2954 ··· 3052 3014 } 3053 3015 3054 3016 /* Add a margin to account for event/command processing */ 3055 - ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 3017 + ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, 3056 3018 msecs_to_jiffies(arg.max_scan_time + 3057 3019 ATH12K_MAC_SCAN_TIMEOUT_MSECS)); 3058 3020 ··· 3063 3025 kfree(arg.extraie.ptr); 3064 3026 3065 3027 mutex_unlock(&ar->conf_mutex); 3028 + 3066 3029 return ret; 3067 3030 } 3068 3031 3069 3032 static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, 3070 3033 struct ieee80211_vif *vif) 3071 3034 { 3072 - struct ath12k *ar = hw->priv; 3035 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3036 + struct ath12k *ar; 3037 + 3038 + ar = ath12k_ah_to_ar(ah); 3073 3039 3074 3040 mutex_lock(&ar->conf_mutex); 3075 3041 ath12k_scan_abort(ar); ··· 3201 3159 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 3202 3160 struct ieee80211_key_conf *key) 3203 3161 { 3204 - struct ath12k *ar = hw->priv; 3205 - struct ath12k_base *ab = ar->ab; 3162 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3163 + struct ath12k *ar; 3164 + struct ath12k_base *ab; 3206 3165 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3207 3166 struct ath12k_peer *peer; 3208 3167 struct ath12k_sta *arsta; ··· 3217 3174 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 3218 3175 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 3219 3176 return 1; 3177 + 3178 + ar = ath12k_ah_to_ar(ah); 3179 + ab = ar->ab; 3220 3180 3221 3181 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) 3222 3182 return 1; ··· 3742 3696 if 
(ab->hw_params->vdev_start_delay && 3743 3697 !arvif->is_started && 3744 3698 arvif->vdev_type != WMI_VDEV_TYPE_AP) { 3745 - ret = ath12k_start_vdev_delay(ar->hw, vif); 3699 + ret = ath12k_start_vdev_delay(ar, arvif); 3746 3700 if (ret) { 3747 3701 ath12k_warn(ab, "failed to delay vdev start: %d\n", ret); 3748 3702 goto free_peer; ··· 3796 3750 enum ieee80211_sta_state old_state, 3797 3751 enum ieee80211_sta_state new_state) 3798 3752 { 3799 - struct ath12k *ar = hw->priv; 3753 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3754 + struct ath12k *ar; 3800 3755 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3801 3756 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 3802 3757 struct ath12k_peer *peer; ··· 3807 3760 if ((old_state == IEEE80211_STA_NONE && 3808 3761 new_state == IEEE80211_STA_NOTEXIST)) 3809 3762 cancel_work_sync(&arsta->update_wk); 3763 + 3764 + ar = ath12k_ah_to_ar(ah); 3810 3765 3811 3766 mutex_lock(&ar->conf_mutex); 3812 3767 ··· 3905 3856 } 3906 3857 3907 3858 mutex_unlock(&ar->conf_mutex); 3859 + 3908 3860 return ret; 3909 3861 } 3910 3862 ··· 3913 3863 struct ieee80211_vif *vif, 3914 3864 struct ieee80211_sta *sta) 3915 3865 { 3916 - struct ath12k *ar = hw->priv; 3866 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3867 + struct ath12k *ar; 3917 3868 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3918 3869 int ret; 3919 3870 s16 txpwr; ··· 3929 3878 3930 3879 if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) 3931 3880 return -EINVAL; 3881 + 3882 + ar = ath12k_ah_to_ar(ah); 3932 3883 3933 3884 mutex_lock(&ar->conf_mutex); 3934 3885 ··· 3952 3899 struct ieee80211_sta *sta, 3953 3900 u32 changed) 3954 3901 { 3955 - struct ath12k *ar = hw->priv; 3902 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3903 + struct ath12k *ar; 3956 3904 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 3957 3905 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3958 3906 struct ath12k_peer *peer; 3959 3907 u32 bw, smps; 3908 + 3909 
+ ar = ath12k_ah_to_ar(ah); 3960 3910 3961 3911 spin_lock_bh(&ar->ab->base_lock); 3962 3912 ··· 4020 3964 ieee80211_queue_work(hw, &arsta->update_wk); 4021 3965 } 4022 3966 4023 - static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif, 3967 + static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif, 4024 3968 u16 ac, bool enable) 4025 3969 { 4026 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3970 + struct ath12k *ar = arvif->ar; 4027 3971 u32 value; 4028 3972 int ret; 4029 3973 ··· 4077 4021 return ret; 4078 4022 } 4079 4023 4080 - static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, 4081 - struct ieee80211_vif *vif, 4082 - unsigned int link_id, u16 ac, 4083 - const struct ieee80211_tx_queue_params *params) 4024 + static int ath12k_mac_conf_tx(struct ath12k_vif *arvif, 4025 + unsigned int link_id, u16 ac, 4026 + const struct ieee80211_tx_queue_params *params) 4084 4027 { 4085 - struct ath12k *ar = hw->priv; 4086 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4087 4028 struct wmi_wmm_params_arg *p = NULL; 4029 + struct ath12k *ar = arvif->ar; 4030 + struct ath12k_base *ab = ar->ab; 4088 4031 int ret; 4089 4032 4090 - mutex_lock(&ar->conf_mutex); 4033 + lockdep_assert_held(&ar->conf_mutex); 4091 4034 4092 4035 switch (ac) { 4093 4036 case IEEE80211_AC_VO: ··· 4116 4061 ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, 4117 4062 &arvif->wmm_params); 4118 4063 if (ret) { 4119 - ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret); 4064 + ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n", 4065 + ar->pdev_idx, ret); 4120 4066 goto exit; 4121 4067 } 4122 4068 4123 - ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 4124 - 4069 + ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd); 4125 4070 if (ret) 4126 - ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret); 4071 + ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n", 4072 + ar->pdev_idx, ret); 4127 4073 4128 4074 exit: 4075 + return 
ret; 4076 + } 4077 + 4078 + static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, 4079 + struct ieee80211_vif *vif, 4080 + unsigned int link_id, u16 ac, 4081 + const struct ieee80211_tx_queue_params *params) 4082 + { 4083 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 4084 + struct ath12k *ar; 4085 + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4086 + int ret; 4087 + 4088 + ar = ath12k_ah_to_ar(ah); 4089 + 4090 + mutex_lock(&ar->conf_mutex); 4091 + ret = ath12k_mac_conf_tx(arvif, link_id, ac, params); 4129 4092 mutex_unlock(&ar->conf_mutex); 4093 + 4130 4094 return ret; 4131 4095 } 4132 4096 ··· 4856 4782 { 4857 4783 int num_mgmt; 4858 4784 4859 - ieee80211_free_txskb(ar->hw, skb); 4785 + ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb); 4860 4786 4861 4787 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 4862 4788 ··· 5033 4959 5034 4960 skb_queue_tail(q, skb); 5035 4961 atomic_inc(&ar->num_pending_mgmt_tx); 5036 - ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 4962 + ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work); 5037 4963 5038 4964 return 0; 5039 4965 } ··· 5043 4969 struct sk_buff *skb) 5044 4970 { 5045 4971 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 5046 - struct ath12k *ar = hw->priv; 5047 4972 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 5048 4973 struct ieee80211_vif *vif = info->control.vif; 5049 4974 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4975 + struct ath12k *ar = arvif->ar; 5050 4976 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5051 4977 struct ieee80211_key_conf *key = info->control.hw_key; 5052 4978 u32 info_flags = info->flags; ··· 5092 5018 5093 5019 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) 5094 5020 { 5095 - return -ENOTSUPP; 5021 + return -EOPNOTSUPP; 5096 5022 /* TODO: Need to support new monitor mode */ 5097 5023 } 5098 5024 ··· 5118 5044 ATH12K_RECONFIGURE_TIMEOUT_HZ); 5119 5045 } 5120 5046 5121 - static int 
ath12k_mac_op_start(struct ieee80211_hw *hw) 5047 + static int ath12k_mac_start(struct ath12k *ar) 5122 5048 { 5123 - struct ath12k *ar = hw->priv; 5124 5049 struct ath12k_base *ab = ar->ab; 5125 5050 struct ath12k_pdev *pdev = ar->pdev; 5126 5051 int ret; 5127 5052 5128 - ath12k_mac_drain_tx(ar); 5129 5053 mutex_lock(&ar->conf_mutex); 5130 5054 5131 5055 switch (ar->state) { ··· 5146 5074 1, pdev->pdev_id); 5147 5075 5148 5076 if (ret) { 5149 - ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret); 5077 + ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret); 5150 5078 goto err; 5151 5079 } 5152 5080 5153 5081 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1, 5154 5082 pdev->pdev_id); 5155 5083 if (ret) { 5156 - ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret); 5084 + ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret); 5157 5085 goto err; 5158 5086 } 5159 5087 ··· 5183 5111 1, pdev->pdev_id); 5184 5112 5185 5113 if (ret) { 5186 - ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret); 5114 + ath12k_err(ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret); 5187 5115 goto err; 5188 5116 } 5189 5117 ··· 5202 5130 * such as rssi, rx_duration. 
5203 5131 */ 5204 5132 ret = ath12k_mac_config_mon_status_default(ar, true); 5205 - if (ret && (ret != -ENOTSUPP)) { 5133 + if (ret && (ret != -EOPNOTSUPP)) { 5206 5134 ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", 5207 5135 ret); 5208 5136 goto err; 5209 5137 } 5210 5138 5211 - if (ret == -ENOTSUPP) 5212 - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 5139 + if (ret == -EOPNOTSUPP) 5140 + ath12k_dbg(ab, ATH12K_DBG_MAC, 5213 5141 "monitor status config is not yet supported"); 5214 5142 5215 5143 /* Configure the hash seed for hash based reo dest ring selection */ ··· 5231 5159 &ab->pdevs[ar->pdev_idx]); 5232 5160 5233 5161 return 0; 5234 - 5235 5162 err: 5236 5163 ar->state = ATH12K_STATE_OFF; 5237 5164 mutex_unlock(&ar->conf_mutex); 5238 5165 5239 5166 return ret; 5167 + } 5168 + 5169 + static int ath12k_mac_op_start(struct ieee80211_hw *hw) 5170 + { 5171 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5172 + struct ath12k *ar = ath12k_ah_to_ar(ah); 5173 + struct ath12k_base *ab = ar->ab; 5174 + int ret; 5175 + 5176 + ath12k_mac_drain_tx(ar); 5177 + 5178 + ret = ath12k_mac_start(ar); 5179 + if (ret) { 5180 + ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n", 5181 + ar->pdev_idx, ret); 5182 + return ret; 5183 + } 5184 + 5185 + return 0; 5240 5186 } 5241 5187 5242 5188 int ath12k_mac_rfkill_config(struct ath12k *ar) ··· 5314 5224 return 0; 5315 5225 } 5316 5226 5317 - static void ath12k_mac_op_stop(struct ieee80211_hw *hw) 5227 + static void ath12k_mac_stop(struct ath12k *ar) 5318 5228 { 5319 - struct ath12k *ar = hw->priv; 5320 5229 struct htt_ppdu_stats_info *ppdu_stats, *tmp; 5321 5230 int ret; 5322 5231 5323 - ath12k_mac_drain_tx(ar); 5324 - 5325 5232 mutex_lock(&ar->conf_mutex); 5326 5233 ret = ath12k_mac_config_mon_status_default(ar, false); 5327 - if (ret && (ret != -ENOTSUPP)) 5234 + if (ret && (ret != -EOPNOTSUPP)) 5328 5235 ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", 5329 
5236 ret); 5330 5237 ··· 5345 5258 synchronize_rcu(); 5346 5259 5347 5260 atomic_set(&ar->num_pending_mgmt_tx, 0); 5261 + } 5262 + 5263 + static void ath12k_mac_op_stop(struct ieee80211_hw *hw) 5264 + { 5265 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5266 + struct ath12k *ar = ath12k_ah_to_ar(ah); 5267 + 5268 + ath12k_mac_drain_tx(ar); 5269 + 5270 + ath12k_mac_stop(ar); 5348 5271 } 5349 5272 5350 5273 static u8 ··· 5473 5376 return ret; 5474 5377 } 5475 5378 5476 - static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, 5477 - struct ieee80211_vif *vif) 5379 + static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif) 5478 5380 { 5479 - struct ath12k *ar = hw->priv; 5381 + struct ieee80211_vif *vif = arvif->vif; 5382 + struct ath12k *ar = arvif->ar; 5480 5383 struct ath12k_base *ab = ar->ab; 5481 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5482 5384 u32 param_id, param_value; 5483 5385 int ret; 5484 5386 ··· 5519 5423 } 5520 5424 } 5521 5425 5426 + static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, 5427 + struct ieee80211_vif *vif) 5428 + { 5429 + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5430 + 5431 + ath12k_mac_update_vif_offload(arvif); 5432 + } 5433 + 5522 5434 static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, 5523 5435 struct ieee80211_vif *vif) 5524 5436 { 5525 - struct ath12k *ar = hw->priv; 5526 - struct ath12k_base *ab = ar->ab; 5437 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5438 + struct ath12k *ar; 5439 + struct ath12k_base *ab; 5527 5440 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5528 5441 struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; 5529 5442 struct ath12k_wmi_peer_create_arg peer_param; ··· 5543 5438 int bit; 5544 5439 5545 5440 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 5441 + 5442 + ar = ath12k_ah_to_ar(ah); 5443 + ab = ar->ab; 5546 5444 5547 5445 mutex_lock(&ar->conf_mutex); 5548 5446 ··· 5634 5526 list_add(&arvif->list, &ar->arvifs); 5635 
5527 spin_unlock_bh(&ar->data_lock); 5636 5528 5637 - ath12k_mac_op_update_vif_offload(hw, vif); 5529 + ath12k_mac_update_vif_offload(arvif); 5638 5530 5639 5531 nss = hweight32(ar->cfg_tx_chainmask) ? : 1; 5640 5532 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, ··· 5793 5685 static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, 5794 5686 struct ieee80211_vif *vif) 5795 5687 { 5796 - struct ath12k *ar = hw->priv; 5688 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5689 + struct ath12k *ar; 5797 5690 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5798 - struct ath12k_base *ab = ar->ab; 5691 + struct ath12k_base *ab; 5799 5692 unsigned long time_left; 5800 5693 int ret; 5694 + 5695 + ar = ath12k_ah_to_ar(ah); 5696 + ab = ar->ab; 5801 5697 5802 5698 mutex_lock(&ar->conf_mutex); 5803 5699 ··· 5878 5766 FIF_PROBE_REQ | \ 5879 5767 FIF_FCSFAIL) 5880 5768 5881 - static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, 5882 - unsigned int changed_flags, 5883 - unsigned int *total_flags, 5884 - u64 multicast) 5769 + static void ath12k_mac_configure_filter(struct ath12k *ar, 5770 + unsigned int total_flags) 5885 5771 { 5886 - struct ath12k *ar = hw->priv; 5887 5772 bool reset_flag; 5888 5773 int ret; 5889 5774 5890 - mutex_lock(&ar->conf_mutex); 5775 + lockdep_assert_held(&ar->conf_mutex); 5891 5776 5892 - *total_flags &= SUPPORTED_FILTERS; 5893 - ar->filter_flags = *total_flags; 5777 + ar->filter_flags = total_flags; 5894 5778 5895 5779 /* For monitor mode */ 5896 5780 reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); ··· 5901 5793 ath12k_warn(ar->ab, 5902 5794 "fail to set monitor filter: %d\n", ret); 5903 5795 } 5796 + 5904 5797 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 5905 5798 "total_flags:0x%x, reset_flag:%d\n", 5906 - *total_flags, reset_flag); 5799 + total_flags, reset_flag); 5800 + } 5801 + 5802 + static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, 5803 + unsigned int changed_flags, 5804 + unsigned int 
*total_flags, 5805 + u64 multicast) 5806 + { 5807 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5808 + struct ath12k *ar; 5809 + 5810 + ar = ath12k_ah_to_ar(ah); 5811 + 5812 + mutex_lock(&ar->conf_mutex); 5813 + 5814 + *total_flags &= SUPPORTED_FILTERS; 5815 + ath12k_mac_configure_filter(ar, *total_flags); 5907 5816 5908 5817 mutex_unlock(&ar->conf_mutex); 5909 5818 } 5910 5819 5911 5820 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 5912 5821 { 5913 - struct ath12k *ar = hw->priv; 5822 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5823 + struct ath12k *ar; 5824 + 5825 + ar = ath12k_ah_to_ar(ah); 5914 5826 5915 5827 mutex_lock(&ar->conf_mutex); 5916 5828 ··· 5944 5816 5945 5817 static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 5946 5818 { 5947 - struct ath12k *ar = hw->priv; 5819 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5820 + struct ath12k *ar; 5948 5821 int ret; 5822 + 5823 + ar = ath12k_ah_to_ar(ah); 5949 5824 5950 5825 mutex_lock(&ar->conf_mutex); 5951 5826 ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); ··· 5957 5826 return ret; 5958 5827 } 5959 5828 5960 - static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, 5961 - struct ieee80211_vif *vif, 5962 - struct ieee80211_ampdu_params *params) 5829 + static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif, 5830 + struct ieee80211_ampdu_params *params) 5963 5831 { 5964 - struct ath12k *ar = hw->priv; 5832 + struct ath12k *ar = arvif->ar; 5965 5833 int ret = -EINVAL; 5966 5834 5967 - mutex_lock(&ar->conf_mutex); 5835 + lockdep_assert_held(&ar->conf_mutex); 5968 5836 5969 5837 switch (params->action) { 5970 5838 case IEEE80211_AMPDU_RX_START: ··· 5984 5854 break; 5985 5855 } 5986 5856 5857 + return ret; 5858 + } 5859 + 5860 + static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, 5861 + struct ieee80211_vif *vif, 5862 + struct ieee80211_ampdu_params *params) 5863 + { 5864 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 
5865 + struct ath12k *ar; 5866 + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5867 + int ret = -EINVAL; 5868 + 5869 + ar = ath12k_ah_to_ar(ah); 5870 + 5871 + mutex_lock(&ar->conf_mutex); 5872 + ret = ath12k_mac_ampdu_action(arvif, params); 5987 5873 mutex_unlock(&ar->conf_mutex); 5874 + 5875 + if (ret) 5876 + ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n", 5877 + ar->pdev_idx, params->action, ret); 5988 5878 5989 5879 return ret; 5990 5880 } ··· 6012 5862 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, 6013 5863 struct ieee80211_chanctx_conf *ctx) 6014 5864 { 6015 - struct ath12k *ar = hw->priv; 6016 - struct ath12k_base *ab = ar->ab; 5865 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5866 + struct ath12k *ar; 5867 + struct ath12k_base *ab; 5868 + 5869 + ar = ath12k_ah_to_ar(ah); 5870 + ab = ar->ab; 6017 5871 6018 5872 ath12k_dbg(ab, ATH12K_DBG_MAC, 6019 5873 "mac chanctx add freq %u width %d ptr %pK\n", ··· 6040 5886 static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 6041 5887 struct ieee80211_chanctx_conf *ctx) 6042 5888 { 6043 - struct ath12k *ar = hw->priv; 6044 - struct ath12k_base *ab = ar->ab; 5889 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5890 + struct ath12k *ar; 5891 + struct ath12k_base *ab; 5892 + 5893 + ar = ath12k_ah_to_ar(ah); 5894 + ab = ar->ab; 6045 5895 6046 5896 ath12k_dbg(ab, ATH12K_DBG_MAC, 6047 5897 "mac chanctx remove freq %u width %d ptr %pK\n", ··· 6424 6266 struct ieee80211_chanctx_conf *ctx) 6425 6267 { 6426 6268 struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx }; 6427 - struct ieee80211_hw *hw = ar->hw; 6269 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6428 6270 6429 6271 lockdep_assert_held(&ar->conf_mutex); 6430 6272 ··· 6453 6295 struct ieee80211_chanctx_conf *ctx, 6454 6296 u32 changed) 6455 6297 { 6456 - struct ath12k *ar = hw->priv; 6457 - struct ath12k_base *ab = ar->ab; 6298 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6299 + struct ath12k *ar; 
6300 + struct ath12k_base *ab; 6301 + 6302 + ar = ath12k_ah_to_ar(ah); 6303 + ab = ar->ab; 6458 6304 6459 6305 mutex_lock(&ar->conf_mutex); 6460 6306 ··· 6482 6320 mutex_unlock(&ar->conf_mutex); 6483 6321 } 6484 6322 6485 - static int ath12k_start_vdev_delay(struct ieee80211_hw *hw, 6486 - struct ieee80211_vif *vif) 6323 + static int ath12k_start_vdev_delay(struct ath12k *ar, 6324 + struct ath12k_vif *arvif) 6487 6325 { 6488 - struct ath12k *ar = hw->priv; 6489 6326 struct ath12k_base *ab = ar->ab; 6490 - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6327 + struct ieee80211_vif *vif = arvif->vif; 6491 6328 int ret; 6492 6329 6493 6330 if (WARN_ON(arvif->is_started)) ··· 6520 6359 struct ieee80211_bss_conf *link_conf, 6521 6360 struct ieee80211_chanctx_conf *ctx) 6522 6361 { 6523 - struct ath12k *ar = hw->priv; 6524 - struct ath12k_base *ab = ar->ab; 6362 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6363 + struct ath12k *ar; 6364 + struct ath12k_base *ab; 6525 6365 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6526 6366 int ret; 6527 6367 struct ath12k_wmi_peer_create_arg param; 6368 + 6369 + ar = ath12k_ah_to_ar(ah); 6370 + ab = ar->ab; 6528 6371 6529 6372 mutex_lock(&ar->conf_mutex); 6530 6373 ··· 6603 6438 struct ieee80211_bss_conf *link_conf, 6604 6439 struct ieee80211_chanctx_conf *ctx) 6605 6440 { 6606 - struct ath12k *ar = hw->priv; 6607 - struct ath12k_base *ab = ar->ab; 6441 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6442 + struct ath12k *ar; 6443 + struct ath12k_base *ab; 6608 6444 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6609 6445 int ret; 6446 + 6447 + ar = ath12k_ah_to_ar(ah); 6448 + ab = ar->ab; 6610 6449 6611 6450 mutex_lock(&ar->conf_mutex); 6612 6451 ··· 6659 6490 int n_vifs, 6660 6491 enum ieee80211_chanctx_switch_mode mode) 6661 6492 { 6662 - struct ath12k *ar = hw->priv; 6493 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6494 + struct ath12k *ar; 6495 + 6496 + ar = ath12k_ah_to_ar(ah); 6663 6497 6664 6498 
mutex_lock(&ar->conf_mutex); 6665 6499 ··· 6704 6532 */ 6705 6533 static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6706 6534 { 6707 - struct ath12k *ar = hw->priv; 6708 - int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; 6535 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6536 + struct ath12k *ar; 6537 + int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret; 6709 6538 6710 - return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); 6539 + ar = ath12k_ah_to_ar(ah); 6540 + 6541 + ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); 6542 + 6543 + return ret; 6711 6544 } 6712 6545 6713 6546 static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) ··· 6730 6553 return -EOPNOTSUPP; 6731 6554 } 6732 6555 6733 - static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6734 - u32 queues, bool drop) 6556 + static void ath12k_mac_flush(struct ath12k *ar) 6735 6557 { 6736 - struct ath12k *ar = hw->priv; 6737 6558 long time_left; 6738 - 6739 - if (drop) 6740 - return; 6741 6559 6742 6560 time_left = wait_event_timeout(ar->dp.tx_empty_waitq, 6743 6561 (atomic_read(&ar->dp.num_tx_pending) == 0), ··· 6746 6574 if (time_left == 0) 6747 6575 ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", 6748 6576 time_left); 6577 + } 6578 + 6579 + static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6580 + u32 queues, bool drop) 6581 + { 6582 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6583 + struct ath12k *ar = ath12k_ah_to_ar(ah); 6584 + 6585 + if (drop) 6586 + return; 6587 + 6588 + ath12k_mac_flush(ar); 6749 6589 } 6750 6590 6751 6591 static int ··· 6962 6778 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6963 6779 spin_unlock_bh(&ar->data_lock); 6964 6780 6965 - ieee80211_queue_work(ar->hw, &arsta->update_wk); 6781 + ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk); 6966 6782 } 6967 6783 6968 6784 static void 
ath12k_mac_disable_peer_fixed_rate(void *data, ··· 7010 6826 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7011 6827 7012 6828 sgi = mask->control[band].gi; 7013 - if (sgi == NL80211_TXRATE_FORCE_LGI) 7014 - return -EINVAL; 6829 + if (sgi == NL80211_TXRATE_FORCE_LGI) { 6830 + ret = -EINVAL; 6831 + goto out; 6832 + } 7015 6833 7016 6834 /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it 7017 6835 * requires passing at least one of used basic rates along with them. ··· 7029 6843 if (ret) { 7030 6844 ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", 7031 6845 arvif->vdev_id, ret); 7032 - return ret; 6846 + goto out; 7033 6847 } 7034 6848 ieee80211_iterate_stations_atomic(hw, 7035 6849 ath12k_mac_disable_peer_fixed_rate, ··· 7074 6888 */ 7075 6889 ath12k_warn(ar->ab, 7076 6890 "Setting more than one MCS Value in bitrate mask not supported\n"); 7077 - return -EINVAL; 6891 + ret = -EINVAL; 6892 + goto out; 7078 6893 } 7079 6894 7080 6895 ieee80211_iterate_stations_atomic(hw, ··· 7102 6915 7103 6916 mutex_unlock(&ar->conf_mutex); 7104 6917 6918 + out: 7105 6919 return ret; 7106 6920 } 7107 6921 ··· 7110 6922 ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, 7111 6923 enum ieee80211_reconfig_type reconfig_type) 7112 6924 { 7113 - struct ath12k *ar = hw->priv; 7114 - struct ath12k_base *ab = ar->ab; 6925 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6926 + struct ath12k *ar; 6927 + struct ath12k_base *ab; 7115 6928 struct ath12k_vif *arvif; 7116 6929 int recovery_count; 7117 6930 7118 6931 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 7119 6932 return; 6933 + 6934 + ar = ath12k_ah_to_ar(ah); 6935 + ab = ar->ab; 7120 6936 7121 6937 mutex_lock(&ar->conf_mutex); 7122 6938 ··· 7205 7013 static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, 7206 7014 struct survey_info *survey) 7207 7015 { 7208 - struct ath12k *ar = hw->priv; 7016 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 7017 + struct ath12k *ar; 
7209 7018 struct ieee80211_supported_band *sband; 7210 7019 struct survey_info *ar_survey; 7211 7020 int ret = 0; 7212 7021 7213 7022 if (idx >= ATH12K_NUM_CHANS) 7214 7023 return -ENOENT; 7024 + 7025 + ar = ath12k_ah_to_ar(ah); 7215 7026 7216 7027 ar_survey = &ar->survey[idx]; 7217 7028 ··· 7247 7052 7248 7053 exit: 7249 7054 mutex_unlock(&ar->conf_mutex); 7055 + 7250 7056 return ret; 7251 7057 } 7252 7058 ··· 7354 7158 } 7355 7159 7356 7160 static int ath12k_mac_setup_channels_rates(struct ath12k *ar, 7357 - u32 supported_bands) 7161 + u32 supported_bands, 7162 + struct ieee80211_supported_band *bands[]) 7358 7163 { 7359 - struct ieee80211_hw *hw = ar->hw; 7360 7164 struct ieee80211_supported_band *band; 7361 7165 struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; 7362 7166 void *channels; ··· 7382 7186 band->channels = channels; 7383 7187 band->n_bitrates = ath12k_g_rates_size; 7384 7188 band->bitrates = ath12k_g_rates; 7385 - hw->wiphy->bands[NL80211_BAND_2GHZ] = band; 7189 + bands[NL80211_BAND_2GHZ] = band; 7386 7190 7387 7191 if (ar->ab->hw_params->single_pdev_only) { 7388 7192 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); ··· 7394 7198 } 7395 7199 7396 7200 if (supported_bands & WMI_HOST_WLAN_5G_CAP) { 7397 - if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) { 7201 + if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) { 7398 7202 channels = kmemdup(ath12k_6ghz_channels, 7399 7203 sizeof(ath12k_6ghz_channels), GFP_KERNEL); 7400 7204 if (!channels) { ··· 7409 7213 band->channels = channels; 7410 7214 band->n_bitrates = ath12k_a_rates_size; 7411 7215 band->bitrates = ath12k_a_rates; 7412 - hw->wiphy->bands[NL80211_BAND_6GHZ] = band; 7216 + bands[NL80211_BAND_6GHZ] = band; 7413 7217 ath12k_mac_update_ch_list(ar, band, 7414 7218 reg_cap->low_5ghz_chan, 7415 7219 reg_cap->high_5ghz_chan); ··· 7431 7235 band->channels = channels; 7432 7236 band->n_bitrates = ath12k_a_rates_size; 7433 7237 band->bitrates = ath12k_a_rates; 7434 - 
hw->wiphy->bands[NL80211_BAND_5GHZ] = band; 7238 + bands[NL80211_BAND_5GHZ] = band; 7435 7239 7436 7240 if (ar->ab->hw_params->single_pdev_only) { 7437 7241 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); ··· 7447 7251 return 0; 7448 7252 } 7449 7253 7450 - static int ath12k_mac_setup_iface_combinations(struct ath12k *ar) 7254 + static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) 7451 7255 { 7452 - struct ath12k_base *ab = ar->ab; 7453 - struct ieee80211_hw *hw = ar->hw; 7454 - struct wiphy *wiphy = hw->wiphy; 7256 + struct ath12k *ar = ath12k_ah_to_ar(ah); 7257 + u16 interface_modes = U16_MAX; 7258 + 7259 + interface_modes &= ar->ab->hw_params->interface_modes; 7260 + 7261 + return interface_modes == U16_MAX ? 0 : interface_modes; 7262 + } 7263 + 7264 + static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah, 7265 + enum nl80211_iftype type) 7266 + { 7267 + struct ath12k *ar = ath12k_ah_to_ar(ah); 7268 + u16 interface_modes, mode; 7269 + bool is_enable = true; 7270 + 7271 + mode = BIT(type); 7272 + 7273 + interface_modes = ar->ab->hw_params->interface_modes; 7274 + if (!(interface_modes & mode)) 7275 + is_enable = false; 7276 + 7277 + return is_enable; 7278 + } 7279 + 7280 + static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) 7281 + { 7282 + struct wiphy *wiphy = ah->hw->wiphy; 7455 7283 struct ieee80211_iface_combination *combinations; 7456 7284 struct ieee80211_iface_limit *limits; 7457 7285 int n_limits, max_interfaces; 7458 7286 bool ap, mesh; 7459 7287 7460 - ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP); 7288 + ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); 7461 7289 7462 7290 mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && 7463 - ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT); 7291 + ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); 7464 7292 7465 7293 combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); 7466 7294 if (!combinations) ··· 7569 7349 }, 7570 7350 }; 
7571 7351 7572 - static void __ath12k_mac_unregister(struct ath12k *ar) 7352 + static void ath12k_mac_cleanup_unregister(struct ath12k *ar) 7573 7353 { 7574 - struct ieee80211_hw *hw = ar->hw; 7575 - struct wiphy *wiphy = hw->wiphy; 7576 - 7577 - cancel_work_sync(&ar->regd_update_work); 7578 - 7579 - ieee80211_unregister_hw(hw); 7580 - 7581 7354 idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar); 7582 7355 idr_destroy(&ar->txmgmt_idr); 7583 7356 7584 7357 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 7585 7358 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); 7586 7359 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 7360 + } 7361 + 7362 + static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) 7363 + { 7364 + struct ieee80211_hw *hw = ah->hw; 7365 + struct wiphy *wiphy = hw->wiphy; 7366 + struct ath12k *ar = ath12k_ah_to_ar(ah); 7367 + 7368 + cancel_work_sync(&ar->regd_update_work); 7369 + 7370 + ieee80211_unregister_hw(hw); 7371 + 7372 + ath12k_mac_cleanup_unregister(ar); 7587 7373 7588 7374 kfree(wiphy->iface_combinations[0].limits); 7589 7375 kfree(wiphy->iface_combinations); ··· 7597 7371 SET_IEEE80211_DEV(hw, NULL); 7598 7372 } 7599 7373 7600 - void ath12k_mac_unregister(struct ath12k_base *ab) 7374 + static int ath12k_mac_setup_register(struct ath12k *ar, 7375 + u32 *ht_cap, 7376 + struct ieee80211_supported_band *bands[]) 7601 7377 { 7602 - struct ath12k *ar; 7603 - struct ath12k_pdev *pdev; 7604 - int i; 7378 + struct ath12k_pdev_cap *cap = &ar->pdev->cap; 7379 + int ret; 7605 7380 7606 - for (i = 0; i < ab->num_radios; i++) { 7607 - pdev = &ab->pdevs[i]; 7608 - ar = pdev->ar; 7609 - if (!ar) 7610 - continue; 7381 + init_waitqueue_head(&ar->txmgmt_empty_waitq); 7382 + idr_init(&ar->txmgmt_idr); 7383 + spin_lock_init(&ar->txmgmt_idr_lock); 7611 7384 7612 - __ath12k_mac_unregister(ar); 7613 - } 7385 + ath12k_pdev_caps_update(ar); 7386 + 7387 + ret = ath12k_mac_setup_channels_rates(ar, 7388 + cap->supported_bands, 7389 + bands); 
7390 + if (ret) 7391 + return ret; 7392 + 7393 + ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap); 7394 + ath12k_mac_setup_sband_iftype_data(ar, cap); 7395 + 7396 + ar->max_num_stations = TARGET_NUM_STATIONS; 7397 + ar->max_num_peers = TARGET_NUM_PEERS_PDEV; 7398 + 7399 + return 0; 7614 7400 } 7615 7401 7616 - static int __ath12k_mac_register(struct ath12k *ar) 7402 + static int ath12k_mac_hw_register(struct ath12k_hw *ah) 7617 7403 { 7618 - struct ath12k_base *ab = ar->ab; 7619 - struct ieee80211_hw *hw = ar->hw; 7404 + struct ieee80211_hw *hw = ah->hw; 7620 7405 struct wiphy *wiphy = hw->wiphy; 7621 - struct ath12k_pdev_cap *cap = &ar->pdev->cap; 7406 + struct ath12k *ar = ath12k_ah_to_ar(ah); 7407 + struct ath12k_base *ab = ar->ab; 7408 + struct ath12k_pdev *pdev; 7409 + struct ath12k_pdev_cap *cap; 7622 7410 static const u32 cipher_suites[] = { 7623 7411 WLAN_CIPHER_SUITE_TKIP, 7624 7412 WLAN_CIPHER_SUITE_CCMP, ··· 7647 7407 int ret; 7648 7408 u32 ht_cap = 0; 7649 7409 7650 - ath12k_pdev_caps_update(ar); 7410 + pdev = ar->pdev; 7651 7411 7652 - SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr); 7412 + if (ab->pdevs_macaddr_valid) 7413 + ether_addr_copy(ar->mac_addr, pdev->mac_addr); 7414 + else 7415 + ether_addr_copy(ar->mac_addr, ab->mac_addr); 7653 7416 7654 - SET_IEEE80211_DEV(hw, ab->dev); 7655 - 7656 - ret = ath12k_mac_setup_channels_rates(ar, 7657 - cap->supported_bands); 7417 + ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands); 7658 7418 if (ret) 7659 - goto err; 7419 + goto out; 7660 7420 7661 - ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); 7662 - ath12k_mac_setup_sband_iftype_data(ar, cap); 7421 + wiphy->max_ap_assoc_sta = ar->max_num_stations; 7663 7422 7664 - ret = ath12k_mac_setup_iface_combinations(ar); 7665 - if (ret) { 7666 - ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret); 7667 - goto err_free_channels; 7668 - } 7423 + cap = &pdev->cap; 7669 7424 7670 7425 wiphy->available_antennas_rx = cap->rx_chain_mask; 7671 7426 
wiphy->available_antennas_tx = cap->tx_chain_mask; 7672 7427 7673 - wiphy->interface_modes = ab->hw_params->interface_modes; 7428 + SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr); 7429 + SET_IEEE80211_DEV(hw, ab->dev); 7430 + 7431 + ret = ath12k_mac_setup_iface_combinations(ah); 7432 + if (ret) { 7433 + ath12k_err(ab, "failed to setup interface combinations: %d\n", ret); 7434 + goto err_cleanup_unregister; 7435 + } 7436 + 7437 + wiphy->interface_modes = ath12k_mac_get_ifmodes(ah); 7674 7438 7675 7439 if (wiphy->bands[NL80211_BAND_2GHZ] && 7676 7440 wiphy->bands[NL80211_BAND_5GHZ] && ··· 7727 7483 wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 7728 7484 NL80211_FEATURE_AP_SCAN; 7729 7485 7730 - ar->max_num_stations = TARGET_NUM_STATIONS; 7731 - ar->max_num_peers = TARGET_NUM_PEERS_PDEV; 7732 - 7733 - wiphy->max_ap_assoc_sta = ar->max_num_stations; 7734 - 7735 7486 hw->queues = ATH12K_HW_MAX_QUEUES; 7736 7487 wiphy->tx_queue_len = ATH12K_QUEUE_LEN; 7737 7488 hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; 7738 - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; 7489 + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; 7739 7490 7740 7491 hw->vif_data_size = sizeof(struct ath12k_vif); 7741 7492 hw->sta_data_size = sizeof(struct ath12k_sta); ··· 7763 7524 7764 7525 ret = ieee80211_register_hw(hw); 7765 7526 if (ret) { 7766 - ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret); 7527 + ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); 7767 7528 goto err_free_if_combs; 7768 7529 } 7769 7530 ··· 7791 7552 kfree(wiphy->iface_combinations[0].limits); 7792 7553 kfree(wiphy->iface_combinations); 7793 7554 7794 - err_free_channels: 7795 - kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 7796 - kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); 7797 - kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 7555 + err_cleanup_unregister: 7556 + ath12k_mac_cleanup_unregister(ar); 7798 7557 7799 - err: 7558 + out: 7800 
7559 SET_IEEE80211_DEV(hw, NULL); 7560 + 7801 7561 return ret; 7562 + } 7563 + 7564 + static void ath12k_mac_setup(struct ath12k *ar) 7565 + { 7566 + struct ath12k_base *ab = ar->ab; 7567 + struct ath12k_pdev *pdev = ar->pdev; 7568 + u8 pdev_idx = ar->pdev_idx; 7569 + 7570 + ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx); 7571 + 7572 + ar->wmi = &ab->wmi_ab.wmi[pdev_idx]; 7573 + /* FIXME: wmi[0] is already initialized during attach, 7574 + * Should we do this again? 7575 + */ 7576 + ath12k_wmi_pdev_attach(ab, pdev_idx); 7577 + 7578 + ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; 7579 + ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; 7580 + ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); 7581 + ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); 7582 + 7583 + spin_lock_init(&ar->data_lock); 7584 + INIT_LIST_HEAD(&ar->arvifs); 7585 + INIT_LIST_HEAD(&ar->ppdu_stats_info); 7586 + mutex_init(&ar->conf_mutex); 7587 + init_completion(&ar->vdev_setup_done); 7588 + init_completion(&ar->vdev_delete_done); 7589 + init_completion(&ar->peer_assoc_done); 7590 + init_completion(&ar->peer_delete_done); 7591 + init_completion(&ar->install_key_done); 7592 + init_completion(&ar->bss_survey_done); 7593 + init_completion(&ar->scan.started); 7594 + init_completion(&ar->scan.completed); 7595 + 7596 + INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); 7597 + INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); 7598 + 7599 + INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 7600 + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 7601 + clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 7802 7602 } 7803 7603 7804 7604 int ath12k_mac_register(struct ath12k_base *ab) 7805 7605 { 7806 - struct ath12k *ar; 7807 - struct ath12k_pdev *pdev; 7606 + struct ath12k_hw *ah; 7808 7607 int i; 7809 7608 int ret; 7810 7609 7811 7610 if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 7812 7611 return 0; 7813 - 7814 - for (i = 0; 
i < ab->num_radios; i++) { 7815 - pdev = &ab->pdevs[i]; 7816 - ar = pdev->ar; 7817 - if (ab->pdevs_macaddr_valid) { 7818 - ether_addr_copy(ar->mac_addr, pdev->mac_addr); 7819 - } else { 7820 - ether_addr_copy(ar->mac_addr, ab->mac_addr); 7821 - ar->mac_addr[4] += i; 7822 - } 7823 - 7824 - ret = __ath12k_mac_register(ar); 7825 - if (ret) 7826 - goto err_cleanup; 7827 - 7828 - init_waitqueue_head(&ar->txmgmt_empty_waitq); 7829 - idr_init(&ar->txmgmt_idr); 7830 - spin_lock_init(&ar->txmgmt_idr_lock); 7831 - } 7832 7612 7833 7613 /* Initialize channel counters frequency value in hertz */ 7834 7614 ab->cc_freq_hz = 320000; 7835 7615 ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; 7836 7616 7617 + for (i = 0; i < ab->num_hw; i++) { 7618 + ah = ab->ah[i]; 7619 + 7620 + ret = ath12k_mac_hw_register(ah); 7621 + if (ret) 7622 + goto err; 7623 + } 7624 + 7837 7625 return 0; 7838 7626 7839 - err_cleanup: 7627 + err: 7840 7628 for (i = i - 1; i >= 0; i--) { 7841 - pdev = &ab->pdevs[i]; 7842 - ar = pdev->ar; 7843 - __ath12k_mac_unregister(ar); 7629 + ah = ab->ah[i]; 7630 + if (!ah) 7631 + continue; 7632 + 7633 + ath12k_mac_hw_unregister(ah); 7844 7634 } 7845 7635 7846 7636 return ret; 7847 7637 } 7848 7638 7849 - int ath12k_mac_allocate(struct ath12k_base *ab) 7639 + void ath12k_mac_unregister(struct ath12k_base *ab) 7640 + { 7641 + struct ath12k_hw *ah; 7642 + int i; 7643 + 7644 + for (i = ab->num_hw - 1; i >= 0; i--) { 7645 + ah = ab->ah[i]; 7646 + if (!ah) 7647 + continue; 7648 + 7649 + ath12k_mac_hw_unregister(ah); 7650 + } 7651 + } 7652 + 7653 + static void ath12k_mac_hw_destroy(struct ath12k_hw *ah) 7654 + { 7655 + ieee80211_free_hw(ah->hw); 7656 + } 7657 + 7658 + static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, 7659 + struct ath12k_pdev_map *pdev_map, 7660 + u8 num_pdev_map) 7850 7661 { 7851 7662 struct ieee80211_hw *hw; 7852 7663 struct ath12k *ar; 7853 7664 struct ath12k_pdev *pdev; 7854 - int ret; 7665 + struct ath12k_hw 
*ah; 7855 7666 int i; 7667 + u8 pdev_idx; 7856 7668 7857 - if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 7858 - return 0; 7669 + hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map), 7670 + &ath12k_ops); 7671 + if (!hw) 7672 + return NULL; 7859 7673 7860 - for (i = 0; i < ab->num_radios; i++) { 7861 - pdev = &ab->pdevs[i]; 7862 - hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops); 7863 - if (!hw) { 7864 - ath12k_warn(ab, "failed to allocate mac80211 hw device\n"); 7865 - ret = -ENOMEM; 7866 - goto err_free_mac; 7867 - } 7674 + ah = ath12k_hw_to_ah(hw); 7675 + ah->hw = hw; 7676 + ah->num_radio = num_pdev_map; 7868 7677 7869 - ar = hw->priv; 7870 - ar->hw = hw; 7678 + for (i = 0; i < num_pdev_map; i++) { 7679 + ab = pdev_map[i].ab; 7680 + pdev_idx = pdev_map[i].pdev_idx; 7681 + pdev = &ab->pdevs[pdev_idx]; 7682 + 7683 + ar = ath12k_ah_to_ar(ah); 7684 + ar->ah = ah; 7871 7685 ar->ab = ab; 7686 + ar->hw_link_id = i; 7872 7687 ar->pdev = pdev; 7873 - ar->pdev_idx = i; 7874 - ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i); 7875 - 7876 - ar->wmi = &ab->wmi_ab.wmi[i]; 7877 - /* FIXME: wmi[0] is already initialized during attach, 7878 - * Should we do this again? 
7879 - */ 7880 - ath12k_wmi_pdev_attach(ab, i); 7881 - 7882 - ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; 7883 - ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; 7884 - ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); 7885 - ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); 7886 - 7688 + ar->pdev_idx = pdev_idx; 7887 7689 pdev->ar = ar; 7888 - spin_lock_init(&ar->data_lock); 7889 - INIT_LIST_HEAD(&ar->arvifs); 7890 - INIT_LIST_HEAD(&ar->ppdu_stats_info); 7891 - mutex_init(&ar->conf_mutex); 7892 - init_completion(&ar->vdev_setup_done); 7893 - init_completion(&ar->vdev_delete_done); 7894 - init_completion(&ar->peer_assoc_done); 7895 - init_completion(&ar->peer_delete_done); 7896 - init_completion(&ar->install_key_done); 7897 - init_completion(&ar->bss_survey_done); 7898 - init_completion(&ar->scan.started); 7899 - init_completion(&ar->scan.completed); 7900 7690 7901 - INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); 7902 - INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); 7903 - 7904 - INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 7905 - skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 7906 - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 7691 + ath12k_mac_setup(ar); 7907 7692 } 7908 7693 7909 - return 0; 7910 - 7911 - err_free_mac: 7912 - ath12k_mac_destroy(ab); 7913 - 7914 - return ret; 7694 + return ah; 7915 7695 } 7916 7696 7917 7697 void ath12k_mac_destroy(struct ath12k_base *ab) 7918 7698 { 7919 - struct ath12k *ar; 7920 7699 struct ath12k_pdev *pdev; 7921 7700 int i; 7922 7701 7923 7702 for (i = 0; i < ab->num_radios; i++) { 7924 7703 pdev = &ab->pdevs[i]; 7925 - ar = pdev->ar; 7926 - if (!ar) 7704 + if (!pdev->ar) 7927 7705 continue; 7928 7706 7929 - ieee80211_free_hw(ar->hw); 7930 7707 pdev->ar = NULL; 7931 7708 } 7709 + 7710 + for (i = 0; i < ab->num_hw; i++) { 7711 + if (!ab->ah[i]) 7712 + continue; 7713 + 7714 + ath12k_mac_hw_destroy(ab->ah[i]); 7715 + ab->ah[i] = NULL; 7716 + } 7717 + } 
7718 + 7719 + int ath12k_mac_allocate(struct ath12k_base *ab) 7720 + { 7721 + struct ath12k_hw *ah; 7722 + struct ath12k_pdev_map pdev_map[MAX_RADIOS]; 7723 + int ret, i, j; 7724 + u8 radio_per_hw; 7725 + 7726 + if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 7727 + return 0; 7728 + 7729 + ab->num_hw = ab->num_radios; 7730 + radio_per_hw = 1; 7731 + 7732 + for (i = 0; i < ab->num_hw; i++) { 7733 + for (j = 0; j < radio_per_hw; j++) { 7734 + pdev_map[j].ab = ab; 7735 + pdev_map[j].pdev_idx = (i * radio_per_hw) + j; 7736 + } 7737 + 7738 + ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw); 7739 + if (!ah) { 7740 + ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n", 7741 + i); 7742 + goto err; 7743 + } 7744 + 7745 + ab->ah[i] = ah; 7746 + } 7747 + 7748 + ath12k_dp_pdev_pre_alloc(ab); 7749 + 7750 + return 0; 7751 + 7752 + err: 7753 + for (i = i - 1; i >= 0; i--) { 7754 + if (!ab->ah[i]) 7755 + continue; 7756 + 7757 + ath12k_mac_hw_destroy(ab->ah[i]); 7758 + ab->ah[i] = NULL; 7759 + } 7760 + 7761 + return ret; 7932 7762 }
+3 -1
drivers/net/wireless/ath/ath12k/mac.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_MAC_H ··· 12 12 13 13 struct ath12k; 14 14 struct ath12k_base; 15 + struct ath12k_hw; 16 + struct ath12k_pdev_map; 15 17 16 18 struct ath12k_generic_iter { 17 19 struct ath12k *ar;
+10
drivers/net/wireless/ath/ath12k/pci.c
··· 1310 1310 goto err_free_core; 1311 1311 } 1312 1312 1313 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 1314 + pdev->vendor, pdev->device, 1315 + pdev->subsystem_vendor, pdev->subsystem_device); 1316 + 1317 + ab->id.vendor = pdev->vendor; 1318 + ab->id.device = pdev->device; 1319 + ab->id.subsystem_vendor = pdev->subsystem_vendor; 1320 + ab->id.subsystem_device = pdev->subsystem_device; 1321 + 1313 1322 switch (pci_dev->device) { 1314 1323 case QCN9274_DEVICE_ID: 1315 1324 ab_pci->msi_config = &ath12k_msi_config[0]; ··· 1342 1333 } 1343 1334 break; 1344 1335 case WCN7850_DEVICE_ID: 1336 + ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD; 1345 1337 ab_pci->msi_config = &ath12k_msi_config[0]; 1346 1338 ab->static_window_map = false; 1347 1339 ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+296 -81
drivers/net/wireless/ath/ath12k/qmi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/elf.h> ··· 17 17 #define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08 18 18 #define ATH12K_QMI_MAX_CHUNK_SIZE 2097152 19 19 20 - static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { 20 + static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = { 21 21 { 22 22 .data_type = QMI_UNSIGNED_1_BYTE, 23 23 .elem_len = 1, ··· 61 61 }, 62 62 }; 63 63 64 - static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { 64 + static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { 65 65 { 66 66 .data_type = QMI_OPT_FLAG, 67 67 .elem_len = 1, ··· 511 511 }, 512 512 }; 513 513 514 - static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { 514 + static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { 515 515 { 516 516 .data_type = QMI_STRUCT, 517 517 .elem_len = 1, ··· 528 528 }, 529 529 }; 530 530 531 - static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { 531 + static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = { 532 + { 533 + .data_type = QMI_EOTI, 534 + .array_type = NO_ARRAY, 535 + .tlv_type = QMI_COMMON_TLV_TYPE, 536 + }, 537 + }; 538 + 539 + static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = { 540 + { 541 + .data_type = QMI_STRUCT, 542 + .elem_len = 1, 543 + .elem_size = sizeof(struct qmi_response_type_v01), 544 + .array_type = NO_ARRAY, 545 + .tlv_type = 0x02, 546 + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp), 547 + .ei_array = qmi_response_type_v01_ei, 548 + }, 549 + { 550 + .data_type = QMI_OPT_FLAG, 551 + .elem_len = 1, 552 + .elem_size = sizeof(u8), 553 + .array_type = NO_ARRAY, 554 + 
.tlv_type = 0x10, 555 + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, 556 + num_phy_valid), 557 + }, 558 + { 559 + .data_type = QMI_UNSIGNED_1_BYTE, 560 + .elem_len = 1, 561 + .elem_size = sizeof(u8), 562 + .array_type = NO_ARRAY, 563 + .tlv_type = 0x10, 564 + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, 565 + num_phy), 566 + }, 567 + { 568 + .data_type = QMI_OPT_FLAG, 569 + .elem_len = 1, 570 + .elem_size = sizeof(u8), 571 + .array_type = NO_ARRAY, 572 + .tlv_type = 0x11, 573 + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, 574 + board_id_valid), 575 + }, 576 + { 577 + .data_type = QMI_UNSIGNED_4_BYTE, 578 + .elem_len = 1, 579 + .elem_size = sizeof(u32), 580 + .array_type = NO_ARRAY, 581 + .tlv_type = 0x11, 582 + .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, 583 + board_id), 584 + }, 585 + { 586 + .data_type = QMI_EOTI, 587 + .array_type = NO_ARRAY, 588 + .tlv_type = QMI_COMMON_TLV_TYPE, 589 + }, 590 + }; 591 + 592 + static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { 532 593 { 533 594 .data_type = QMI_OPT_FLAG, 534 595 .elem_len = 1, ··· 814 753 }, 815 754 }; 816 755 817 - static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { 756 + static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { 818 757 { 819 758 .data_type = QMI_STRUCT, 820 759 .elem_len = 1, ··· 850 789 }, 851 790 }; 852 791 853 - static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { 792 + static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { 854 793 { 855 794 .data_type = QMI_UNSIGNED_8_BYTE, 856 795 .elem_len = 1, ··· 882 821 }, 883 822 }; 884 823 885 - static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { 824 + static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { 886 825 { 887 826 .data_type = QMI_UNSIGNED_4_BYTE, 888 827 .elem_len = 1, ··· 924 863 }, 925 864 }; 926 865 927 - static struct qmi_elem_info 
qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { 866 + static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { 928 867 { 929 868 .data_type = QMI_DATA_LEN, 930 869 .elem_len = 1, ··· 951 890 }, 952 891 }; 953 892 954 - static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { 893 + static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { 955 894 { 956 895 .data_type = QMI_UNSIGNED_8_BYTE, 957 896 .elem_len = 1, ··· 991 930 }, 992 931 }; 993 932 994 - static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { 933 + static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { 995 934 { 996 935 .data_type = QMI_DATA_LEN, 997 936 .elem_len = 1, ··· 1018 957 }, 1019 958 }; 1020 959 1021 - static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { 960 + static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { 1022 961 { 1023 962 .data_type = QMI_STRUCT, 1024 963 .elem_len = 1, ··· 1036 975 }, 1037 976 }; 1038 977 1039 - static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { 978 + static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { 1040 979 { 1041 980 .data_type = QMI_EOTI, 1042 981 .array_type = NO_ARRAY, ··· 1044 983 }, 1045 984 }; 1046 985 1047 - static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { 986 + static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { 1048 987 { 1049 988 .data_type = QMI_UNSIGNED_4_BYTE, 1050 989 .elem_len = 1, ··· 1070 1009 }, 1071 1010 }; 1072 1011 1073 - static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { 1012 + static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { 1074 1013 { 1075 1014 .data_type = QMI_UNSIGNED_4_BYTE, 1076 1015 .elem_len = 1, ··· 1087 1026 }, 1088 1027 }; 1089 1028 1090 - static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { 1029 + static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { 1091 1030 { 1092 
1031 .data_type = QMI_UNSIGNED_4_BYTE, 1093 1032 .elem_len = 1, ··· 1103 1042 }, 1104 1043 }; 1105 1044 1106 - static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { 1045 + static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = { 1107 1046 { 1108 1047 .data_type = QMI_UNSIGNED_8_BYTE, 1109 1048 .elem_len = 1, ··· 1129 1068 }, 1130 1069 }; 1131 1070 1132 - static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { 1071 + static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { 1133 1072 { 1134 1073 .data_type = QMI_UNSIGNED_4_BYTE, 1135 1074 .elem_len = 1, ··· 1155 1094 }, 1156 1095 }; 1157 1096 1158 - static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { 1097 + static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { 1159 1098 { 1160 1099 .data_type = QMI_STRUCT, 1161 1100 .elem_len = 1, ··· 1409 1348 }, 1410 1349 }; 1411 1350 1412 - static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { 1351 + static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { 1413 1352 { 1414 1353 .data_type = QMI_UNSIGNED_1_BYTE, 1415 1354 .elem_len = 1, ··· 1544 1483 }, 1545 1484 }; 1546 1485 1547 - static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { 1486 + static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { 1548 1487 { 1549 1488 .data_type = QMI_STRUCT, 1550 1489 .elem_len = 1, ··· 1562 1501 }, 1563 1502 }; 1564 1503 1565 - static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { 1504 + static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { 1566 1505 { 1567 1506 .data_type = QMI_UNSIGNED_8_BYTE, 1568 1507 .elem_len = 1, ··· 1586 1525 }, 1587 1526 }; 1588 1527 1589 - static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { 1528 + static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { 1590 1529 { 1591 1530 .data_type = QMI_STRUCT, 1592 1531 .elem_len = 1, ··· 
1603 1542 }, 1604 1543 }; 1605 1544 1606 - static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { 1545 + static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { 1607 1546 { 1608 1547 .data_type = QMI_UNSIGNED_4_BYTE, 1609 1548 .elem_len = 1, ··· 1656 1595 }, 1657 1596 }; 1658 1597 1659 - static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { 1598 + static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { 1660 1599 { 1661 1600 .data_type = QMI_UNSIGNED_4_BYTE, 1662 1601 .elem_len = 1, ··· 1691 1630 }, 1692 1631 }; 1693 1632 1694 - static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { 1633 + static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { 1695 1634 { 1696 1635 .data_type = QMI_UNSIGNED_2_BYTE, 1697 1636 .elem_len = 1, ··· 1715 1654 }, 1716 1655 }; 1717 1656 1718 - static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { 1657 + static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = { 1719 1658 { 1720 1659 .data_type = QMI_UNSIGNED_4_BYTE, 1721 1660 .elem_len = 1, ··· 1732 1671 }, 1733 1672 }; 1734 1673 1735 - static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { 1674 + static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { 1736 1675 { 1737 1676 .data_type = QMI_UNSIGNED_4_BYTE, 1738 1677 .elem_len = 1, ··· 1767 1706 }, 1768 1707 }; 1769 1708 1770 - static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { 1709 + static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { 1771 1710 { 1772 1711 .data_type = QMI_STRUCT, 1773 1712 .elem_len = 1, ··· 1785 1724 }, 1786 1725 }; 1787 1726 1788 - static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { 1727 + static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { 1789 1728 { 1790 1729 .data_type = QMI_OPT_FLAG, 1791 1730 .elem_len = 1, ··· 1923 1862 }, 1924 1863 }; 1925 1864 1926 - 
static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { 1865 + static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { 1927 1866 { 1928 1867 .data_type = QMI_STRUCT, 1929 1868 .elem_len = 1, ··· 1940 1879 }, 1941 1880 }; 1942 1881 1943 - static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { 1882 + static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { 1944 1883 { 1945 1884 .data_type = QMI_EOTI, 1946 1885 .array_type = NO_ARRAY, 1947 1886 }, 1948 1887 }; 1949 1888 1950 - static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { 1889 + static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { 1951 1890 { 1952 1891 .data_type = QMI_EOTI, 1953 1892 .array_type = NO_ARRAY, 1954 1893 }, 1955 1894 }; 1956 1895 1957 - static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req) 1896 + static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = { 1897 + { 1898 + .data_type = QMI_OPT_FLAG, 1899 + .elem_len = 1, 1900 + .elem_size = sizeof(u8), 1901 + .array_type = NO_ARRAY, 1902 + .tlv_type = 0x10, 1903 + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, 1904 + enable_fwlog_valid), 1905 + }, 1906 + { 1907 + .data_type = QMI_UNSIGNED_1_BYTE, 1908 + .elem_len = 1, 1909 + .elem_size = sizeof(u8), 1910 + .array_type = NO_ARRAY, 1911 + .tlv_type = 0x10, 1912 + .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01, 1913 + enable_fwlog), 1914 + }, 1915 + { 1916 + .data_type = QMI_EOTI, 1917 + .array_type = NO_ARRAY, 1918 + .tlv_type = QMI_COMMON_TLV_TYPE, 1919 + }, 1920 + }; 1921 + 1922 + static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = { 1923 + { 1924 + .data_type = QMI_STRUCT, 1925 + .elem_len = 1, 1926 + .elem_size = sizeof(struct qmi_response_type_v01), 1927 + .array_type = NO_ARRAY, 1928 + .tlv_type = 0x02, 1929 + .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01, 1930 + resp), 1931 + .ei_array = 
qmi_response_type_v01_ei, 1932 + }, 1933 + { 1934 + .data_type = QMI_EOTI, 1935 + .array_type = NO_ARRAY, 1936 + .tlv_type = QMI_COMMON_TLV_TYPE, 1937 + }, 1938 + }; 1939 + 1940 + static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab, 1941 + struct qmi_wlanfw_host_cap_req_msg_v01 *req) 1958 1942 { 1943 + struct wlfw_host_mlo_chip_info_s_v01 *info; 1944 + u8 hw_link_id = 0; 1945 + int i; 1946 + 1947 + if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { 1948 + ath12k_dbg(ab, ATH12K_DBG_QMI, 1949 + "skip QMI MLO cap due to invalid num_radio %d\n", 1950 + ab->qmi.num_radios); 1951 + return; 1952 + } 1953 + 1959 1954 req->mlo_capable_valid = 1; 1960 1955 req->mlo_capable = 1; 1961 1956 req->mlo_chip_id_valid = 1; ··· 2022 1905 /* Max peer number generally won't change for the same device 2023 1906 * but needs to be synced with host driver. 2024 1907 */ 2025 - req->max_mlo_peer = 32; 1908 + req->max_mlo_peer = ab->hw_params->max_mlo_peer; 2026 1909 req->mlo_num_chips_valid = 1; 2027 1910 req->mlo_num_chips = 1; 1911 + 1912 + info = &req->mlo_chip_info[0]; 1913 + info->chip_id = 0; 1914 + info->num_local_links = ab->qmi.num_radios; 1915 + 1916 + for (i = 0; i < info->num_local_links; i++) { 1917 + info->hw_link_id[i] = hw_link_id; 1918 + info->valid_mlo_link_id[i] = 1; 1919 + 1920 + hw_link_id++; 1921 + } 1922 + 2028 1923 req->mlo_chip_info_valid = 1; 2029 - req->mlo_chip_info[0].chip_id = 0; 2030 - req->mlo_chip_info[0].num_local_links = 2; 2031 - req->mlo_chip_info[0].hw_link_id[0] = 0; 2032 - req->mlo_chip_info[0].hw_link_id[1] = 1; 2033 - req->mlo_chip_info[0].valid_mlo_link_id[0] = 1; 2034 - req->mlo_chip_info[0].valid_mlo_link_id[1] = 1; 2035 1924 } 2036 1925 2037 1926 static int ath12k_qmi_host_cap_send(struct ath12k_base *ab) 2038 1927 { 2039 - struct qmi_wlanfw_host_cap_req_msg_v01 req; 2040 - struct qmi_wlanfw_host_cap_resp_msg_v01 resp; 2041 - struct qmi_txn txn = {}; 1928 + struct qmi_wlanfw_host_cap_req_msg_v01 req = {}; 1929 + struct 
qmi_wlanfw_host_cap_resp_msg_v01 resp = {}; 1930 + struct qmi_txn txn; 2042 1931 int ret = 0; 2043 - 2044 - memset(&req, 0, sizeof(req)); 2045 - memset(&resp, 0, sizeof(resp)); 2046 1932 2047 1933 req.num_clients_valid = 1; 2048 1934 req.num_clients = 1; ··· 2083 1963 */ 2084 1964 req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; 2085 1965 req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; 2086 - 2087 - ath12k_host_cap_parse_mlo(&req); 2088 1966 } 1967 + 1968 + ath12k_host_cap_parse_mlo(ab, &req); 2089 1969 2090 1970 ret = qmi_txn_init(&ab->qmi.handle, &txn, 2091 1971 qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); ··· 2097 1977 QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN, 2098 1978 qmi_wlanfw_host_cap_req_msg_v01_ei, &req); 2099 1979 if (ret < 0) { 1980 + qmi_txn_cancel(&txn); 2100 1981 ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret); 2101 1982 goto out; 2102 1983 } ··· 2115 1994 2116 1995 out: 2117 1996 return ret; 1997 + } 1998 + 1999 + static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) 2000 + { 2001 + struct qmi_wlanfw_phy_cap_req_msg_v01 req = {}; 2002 + struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {}; 2003 + struct qmi_txn txn; 2004 + int ret; 2005 + 2006 + ret = qmi_txn_init(&ab->qmi.handle, &txn, 2007 + qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp); 2008 + if (ret < 0) 2009 + goto out; 2010 + 2011 + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, 2012 + QMI_WLANFW_PHY_CAP_REQ_V01, 2013 + QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN, 2014 + qmi_wlanfw_phy_cap_req_msg_v01_ei, &req); 2015 + if (ret < 0) { 2016 + qmi_txn_cancel(&txn); 2017 + ath12k_warn(ab, "failed to send phy capability request: %d\n", ret); 2018 + goto out; 2019 + } 2020 + 2021 + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); 2022 + if (ret < 0) 2023 + goto out; 2024 + 2025 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 2026 + ret = -EOPNOTSUPP; 2027 + goto out; 2028 + } 2029 + 2030 + if (!resp.num_phy_valid) { 2031 + ret = -ENODATA; 2032 + 
goto out; 2033 + } 2034 + 2035 + ab->qmi.num_radios = resp.num_phy; 2036 + 2037 + ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n", 2038 + resp.num_phy_valid, resp.num_phy, 2039 + resp.board_id_valid, resp.board_id); 2040 + 2041 + return; 2042 + 2043 + out: 2044 + /* If PHY capability not advertised then rely on default num link */ 2045 + ab->qmi.num_radios = ab->hw_params->def_num_link; 2046 + 2047 + ath12k_dbg(ab, ATH12K_DBG_QMI, 2048 + "no valid response from PHY capability, choose default num_phy %d\n", 2049 + ab->qmi.num_radios); 2118 2050 } 2119 2051 2120 2052 static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab) ··· 2214 2040 QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN, 2215 2041 qmi_wlanfw_ind_register_req_msg_v01_ei, req); 2216 2042 if (ret < 0) { 2043 + qmi_txn_cancel(&txn); 2217 2044 ath12k_warn(ab, "Failed to send indication register request, err = %d\n", 2218 2045 ret); 2219 2046 goto out; ··· 2243 2068 static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab) 2244 2069 { 2245 2070 struct qmi_wlanfw_respond_mem_req_msg_v01 *req; 2246 - struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; 2247 - struct qmi_txn txn = {}; 2071 + struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {}; 2072 + struct qmi_txn txn; 2248 2073 int ret = 0, i; 2249 2074 bool delayed; 2250 2075 2251 2076 req = kzalloc(sizeof(*req), GFP_KERNEL); 2252 2077 if (!req) 2253 2078 return -ENOMEM; 2254 - 2255 - memset(&resp, 0, sizeof(resp)); 2256 2079 2257 2080 /* Some targets by default request a block of big contiguous 2258 2081 * DMA memory, it's hard to allocate from kernel. 
So host returns ··· 2261 2088 delayed = true; 2262 2089 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n", 2263 2090 ab->qmi.mem_seg_count); 2264 - memset(req, 0, sizeof(*req)); 2265 2091 } else { 2266 2092 delayed = false; 2267 2093 req->mem_seg_len = ab->qmi.mem_seg_count; ··· 2286 2114 QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN, 2287 2115 qmi_wlanfw_respond_mem_req_msg_v01_ei, req); 2288 2116 if (ret < 0) { 2117 + qmi_txn_cancel(&txn); 2289 2118 ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n", 2290 2119 ret); 2291 2120 goto out; ··· 2381 2208 2382 2209 static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) 2383 2210 { 2384 - struct qmi_wlanfw_cap_req_msg_v01 req; 2385 - struct qmi_wlanfw_cap_resp_msg_v01 resp; 2386 - struct qmi_txn txn = {}; 2211 + struct qmi_wlanfw_cap_req_msg_v01 req = {}; 2212 + struct qmi_wlanfw_cap_resp_msg_v01 resp = {}; 2213 + struct qmi_txn txn; 2387 2214 unsigned int board_id = ATH12K_BOARD_ID_DEFAULT; 2388 2215 int ret = 0; 2389 2216 int r; 2390 2217 int i; 2391 - 2392 - memset(&req, 0, sizeof(req)); 2393 - memset(&resp, 0, sizeof(resp)); 2394 2218 2395 2219 ret = qmi_txn_init(&ab->qmi.handle, &txn, 2396 2220 qmi_wlanfw_cap_resp_msg_v01_ei, &resp); ··· 2399 2229 QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN, 2400 2230 qmi_wlanfw_cap_req_msg_v01_ei, &req); 2401 2231 if (ret < 0) { 2232 + qmi_txn_cancel(&txn); 2402 2233 ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n", 2403 2234 ret); 2404 2235 goto out; ··· 2481 2310 const u8 *data, u32 len, u8 type) 2482 2311 { 2483 2312 struct qmi_wlanfw_bdf_download_req_msg_v01 *req; 2484 - struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; 2485 - struct qmi_txn txn = {}; 2313 + struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {}; 2314 + struct qmi_txn txn; 2486 2315 const u8 *temp = data; 2487 2316 int ret; 2488 2317 u32 remaining = len; ··· 2490 2319 req = kzalloc(sizeof(*req), GFP_KERNEL); 2491 2320 if (!req) 2492 2321 return -ENOMEM; 2493 - 
memset(&resp, 0, sizeof(resp)); 2494 2322 2495 2323 while (remaining) { 2496 2324 req->valid = 1; ··· 2593 2423 2594 2424 break; 2595 2425 case ATH12K_QMI_BDF_TYPE_REGDB: 2596 - ret = ath12k_core_fetch_board_data_api_1(ab, &bd, 2597 - ATH12K_REGDB_FILE_NAME); 2426 + ret = ath12k_core_fetch_regdb(ab, &bd); 2598 2427 if (ret) { 2599 2428 ath12k_warn(ab, "qmi failed to load regdb bin:\n"); 2600 2429 goto out; ··· 2715 2546 static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab) 2716 2547 { 2717 2548 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem; 2718 - struct qmi_wlanfw_m3_info_req_msg_v01 req; 2719 - struct qmi_wlanfw_m3_info_resp_msg_v01 resp; 2720 - struct qmi_txn txn = {}; 2549 + struct qmi_wlanfw_m3_info_req_msg_v01 req = {}; 2550 + struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {}; 2551 + struct qmi_txn txn; 2721 2552 int ret = 0; 2722 - 2723 - memset(&req, 0, sizeof(req)); 2724 - memset(&resp, 0, sizeof(resp)); 2725 2553 2726 2554 ret = ath12k_qmi_m3_load(ab); 2727 2555 if (ret) { ··· 2739 2573 QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN, 2740 2574 qmi_wlanfw_m3_info_req_msg_v01_ei, &req); 2741 2575 if (ret < 0) { 2576 + qmi_txn_cancel(&txn); 2742 2577 ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n", 2743 2578 ret); 2744 2579 goto out; ··· 2764 2597 static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab, 2765 2598 u32 mode) 2766 2599 { 2767 - struct qmi_wlanfw_wlan_mode_req_msg_v01 req; 2768 - struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; 2769 - struct qmi_txn txn = {}; 2600 + struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {}; 2601 + struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {}; 2602 + struct qmi_txn txn; 2770 2603 int ret = 0; 2771 - 2772 - memset(&req, 0, sizeof(req)); 2773 - memset(&resp, 0, sizeof(resp)); 2774 2604 2775 2605 req.mode = mode; 2776 2606 req.hw_debug_valid = 1; ··· 2783 2619 QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN, 2784 2620 qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); 2785 2621 if (ret < 0) { 2622 
+ qmi_txn_cancel(&txn); 2786 2623 ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n", 2787 2624 mode, ret); 2788 2625 goto out; ··· 2814 2649 static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab) 2815 2650 { 2816 2651 struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; 2817 - struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; 2652 + struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {}; 2818 2653 struct ce_pipe_config *ce_cfg; 2819 2654 struct service_to_pipe *svc_cfg; 2820 - struct qmi_txn txn = {}; 2655 + struct qmi_txn txn; 2821 2656 int ret = 0, pipe_num; 2822 2657 2823 2658 ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; ··· 2826 2661 req = kzalloc(sizeof(*req), GFP_KERNEL); 2827 2662 if (!req) 2828 2663 return -ENOMEM; 2829 - 2830 - memset(&resp, 0, sizeof(resp)); 2831 2664 2832 2665 req->host_version_valid = 1; 2833 2666 strscpy(req->host_version, ATH12K_HOST_VERSION_STRING, ··· 2873 2710 QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN, 2874 2711 qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); 2875 2712 if (ret < 0) { 2713 + qmi_txn_cancel(&txn); 2876 2714 ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n", 2877 2715 ret); 2878 2716 goto out; ··· 2897 2733 return ret; 2898 2734 } 2899 2735 2736 + static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab) 2737 + { 2738 + struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {}; 2739 + struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {}; 2740 + struct qmi_txn txn; 2741 + int ret; 2742 + 2743 + req.enable_fwlog_valid = true; 2744 + req.enable_fwlog = 1; 2745 + 2746 + ret = qmi_txn_init(&ab->qmi.handle, &txn, 2747 + qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp); 2748 + if (ret < 0) 2749 + goto out; 2750 + 2751 + ret = qmi_send_request(&ab->qmi.handle, NULL, &txn, 2752 + ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01, 2753 + QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN, 2754 + qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req); 2755 + if (ret < 0) { 2756 + qmi_txn_cancel(&txn); 2757 + ath12k_warn(ab, "failed to 
send QMI wlan ini request: %d\n", 2758 + ret); 2759 + goto out; 2760 + } 2761 + 2762 + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS)); 2763 + if (ret < 0) { 2764 + ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret); 2765 + goto out; 2766 + } 2767 + 2768 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 2769 + ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n", 2770 + resp.resp.result, resp.resp.error); 2771 + ret = -EINVAL; 2772 + goto out; 2773 + } 2774 + 2775 + out: 2776 + return ret; 2777 + } 2778 + 2900 2779 void ath12k_qmi_firmware_stop(struct ath12k_base *ab) 2901 2780 { 2902 2781 int ret; ··· 2955 2748 u32 mode) 2956 2749 { 2957 2750 int ret; 2751 + 2752 + ret = ath12k_qmi_wlanfw_wlan_ini_send(ab); 2753 + if (ret < 0) { 2754 + ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret); 2755 + return ret; 2756 + } 2958 2757 2959 2758 ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab); 2960 2759 if (ret < 0) { ··· 3004 2791 { 3005 2792 struct ath12k_base *ab = qmi->ab; 3006 2793 int ret; 2794 + 2795 + ath12k_qmi_phy_cap_send(ab); 3007 2796 3008 2797 ret = ath12k_qmi_fw_ind_register_send(ab); 3009 2798 if (ret < 0) {
+33 -1
drivers/net/wireless/ath/ath12k/qmi.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_QMI_H ··· 141 141 u32 target_mem_mode; 142 142 bool target_mem_delayed; 143 143 u8 cal_done; 144 + u8 num_radios; 144 145 struct target_info target; 145 146 struct m3_mem_region m3_mem; 146 147 unsigned int service_ins_id; ··· 250 249 251 250 struct qmi_wlanfw_host_cap_resp_msg_v01 { 252 251 struct qmi_response_type_v01 resp; 252 + }; 253 + 254 + #define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0 255 + #define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057 256 + #define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18 257 + #define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057 258 + 259 + struct qmi_wlanfw_phy_cap_req_msg_v01 { 260 + }; 261 + 262 + struct qmi_wlanfw_phy_cap_resp_msg_v01 { 263 + struct qmi_response_type_v01 resp; 264 + u8 num_phy_valid; 265 + u8 num_phy; 266 + u8 board_id_valid; 267 + u32 board_id; 253 268 }; 254 269 255 270 #define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54 ··· 573 556 }; 574 557 575 558 struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { 559 + struct qmi_response_type_v01 resp; 560 + }; 561 + 562 + #define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F 563 + #define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F 564 + #define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7 565 + #define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7 566 + 567 + struct qmi_wlanfw_wlan_ini_req_msg_v01 { 568 + /* Must be set to true if enable_fwlog is being passed */ 569 + u8 enable_fwlog_valid; 570 + u8 enable_fwlog; 571 + }; 572 + 573 + struct qmi_wlanfw_wlan_ini_resp_msg_v01 { 576 574 struct qmi_response_type_v01 resp; 577 575 }; 578 576
+7 -6
drivers/net/wireless/ath/ath12k/reg.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #include <linux/rtnetlink.h> 7 7 #include "core.h" ··· 48 48 { 49 49 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 50 50 struct ath12k_wmi_init_country_arg arg; 51 - struct ath12k *ar = hw->priv; 51 + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 52 + struct ath12k *ar = ath12k_ah_to_ar(ah); 52 53 int ret; 53 54 54 55 ath12k_dbg(ar->ab, ATH12K_DBG_REG, ··· 96 95 struct ieee80211_supported_band **bands; 97 96 struct ath12k_wmi_scan_chan_list_arg *arg; 98 97 struct ieee80211_channel *channel; 99 - struct ieee80211_hw *hw = ar->hw; 98 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 100 99 struct ath12k_wmi_channel_arg *ch; 101 100 enum nl80211_band band; 102 101 int num_channels = 0; ··· 104 103 105 104 bands = hw->wiphy->bands; 106 105 for (band = 0; band < NUM_NL80211_BANDS; band++) { 107 - if (!bands[band]) 106 + if (!(ar->mac.sbands[band].channels && bands[band])) 108 107 continue; 109 108 110 109 for (i = 0; i < bands[band]->n_channels; i++) { ··· 130 129 ch = arg->channel; 131 130 132 131 for (band = 0; band < NUM_NL80211_BANDS; band++) { 133 - if (!bands[band]) 132 + if (!(ar->mac.sbands[band].channels && bands[band])) 134 133 continue; 135 134 136 135 for (i = 0; i < bands[band]->n_channels; i++) { ··· 200 199 201 200 int ath12k_regd_update(struct ath12k *ar, bool init) 202 201 { 203 - struct ieee80211_hw *hw = ar->hw; 202 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 204 203 struct ieee80211_regdomain *regd, *regd_copy = NULL; 205 204 int ret, regd_len, pdev_id; 206 205 struct ath12k_base *ab;
+28 -1
drivers/net/wireless/ath/ath12k/trace.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) ··· 138 138 __get_str(device), 139 139 __entry->len 140 140 ) 141 + ); 142 + 143 + TRACE_EVENT(ath12k_wmi_diag, 144 + TP_PROTO(struct ath12k_base *ab, const void *data, size_t len), 145 + 146 + TP_ARGS(ab, data, len), 147 + 148 + TP_STRUCT__entry( 149 + __string(device, dev_name(ab->dev)) 150 + __string(driver, dev_driver_string(ab->dev)) 151 + __field(u16, len) 152 + __dynamic_array(u8, data, len) 153 + ), 154 + 155 + TP_fast_assign( 156 + __assign_str(device, dev_name(ab->dev)); 157 + __assign_str(driver, dev_driver_string(ab->dev)); 158 + __entry->len = len; 159 + memcpy(__get_dynamic_array(data), data, len); 160 + ), 161 + 162 + TP_printk( 163 + "%s %s tlv diag len %d", 164 + __get_str(driver), 165 + __get_str(device), 166 + __entry->len 167 + ) 141 168 ); 142 169 143 170 #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
+53 -42
drivers/net/wireless/ath/ath12k/wmi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 #include <linux/skbuff.h> 7 7 #include <linux/ctype.h> ··· 359 359 } 360 360 361 361 static const void ** 362 - ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr, 363 - size_t len, gfp_t gfp) 362 + ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, 363 + struct sk_buff *skb, gfp_t gfp) 364 364 { 365 365 const void **tb; 366 366 int ret; ··· 369 369 if (!tb) 370 370 return ERR_PTR(-ENOMEM); 371 371 372 - ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len); 372 + ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len); 373 373 if (ret) { 374 374 kfree(tb); 375 375 return ERR_PTR(ret); ··· 4374 4374 const struct wmi_vdev_start_resp_event *ev; 4375 4375 int ret; 4376 4376 4377 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4377 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4378 4378 if (IS_ERR(tb)) { 4379 4379 ret = PTR_ERR(tb); 4380 4380 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4452 4452 4453 4453 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 4454 4454 4455 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4455 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4456 4456 if (IS_ERR(tb)) { 4457 4457 ret = PTR_ERR(tb); 4458 4458 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4738 4738 const struct wmi_peer_delete_resp_event *ev; 4739 4739 int ret; 4740 4740 4741 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4741 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4742 4742 if (IS_ERR(tb)) { 4743 4743 ret = PTR_ERR(tb); 4744 4744 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4770 4770 const struct 
wmi_vdev_delete_resp_event *ev; 4771 4771 int ret; 4772 4772 4773 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4773 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4774 4774 if (IS_ERR(tb)) { 4775 4775 ret = PTR_ERR(tb); 4776 4776 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4790 4790 return 0; 4791 4791 } 4792 4792 4793 - static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf, 4794 - u32 len, u32 *vdev_id, 4795 - u32 *tx_status) 4793 + static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, 4794 + struct sk_buff *skb, 4795 + u32 *vdev_id, u32 *tx_status) 4796 4796 { 4797 4797 const void **tb; 4798 4798 const struct wmi_bcn_tx_status_event *ev; 4799 4799 int ret; 4800 4800 4801 - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 4801 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4802 4802 if (IS_ERR(tb)) { 4803 4803 ret = PTR_ERR(tb); 4804 4804 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4826 4826 const struct wmi_vdev_stopped_event *ev; 4827 4827 int ret; 4828 4828 4829 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4829 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4830 4830 if (IS_ERR(tb)) { 4831 4831 ret = PTR_ERR(tb); 4832 4832 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 4948 4948 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) 4949 4949 info->flags |= IEEE80211_TX_STAT_ACK; 4950 4950 4951 - ieee80211_tx_status_irqsafe(ar->hw, msdu); 4951 + ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); 4952 4952 4953 4953 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 4954 4954 ··· 4970 4970 const struct wmi_mgmt_tx_compl_event *ev; 4971 4971 int ret; 4972 4972 4973 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 4973 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 4974 4974 if (IS_ERR(tb)) { 4975 4975 ret = PTR_ERR(tb); 4976 4976 ath12k_warn(ab, "failed to 
parse tlv: %d\n", ret); ··· 5076 5076 5077 5077 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) 5078 5078 { 5079 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 5080 + 5079 5081 lockdep_assert_held(&ar->data_lock); 5080 5082 5081 5083 switch (ar->scan.state) { ··· 5089 5087 break; 5090 5088 case ATH12K_SCAN_RUNNING: 5091 5089 case ATH12K_SCAN_ABORTING: 5092 - ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); 5090 + ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq); 5093 5091 break; 5094 5092 } 5095 5093 } ··· 5143 5141 const struct wmi_scan_event *ev; 5144 5142 int ret; 5145 5143 5146 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5144 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5147 5145 if (IS_ERR(tb)) { 5148 5146 ret = PTR_ERR(tb); 5149 5147 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5176 5174 const struct wmi_peer_sta_kickout_event *ev; 5177 5175 int ret; 5178 5176 5179 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5177 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5180 5178 if (IS_ERR(tb)) { 5181 5179 ret = PTR_ERR(tb); 5182 5180 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5203 5201 const struct wmi_roam_event *ev; 5204 5202 int ret; 5205 5203 5206 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5204 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5207 5205 if (IS_ERR(tb)) { 5208 5206 ret = PTR_ERR(tb); 5209 5207 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5228 5226 static int freq_to_idx(struct ath12k *ar, int freq) 5229 5227 { 5230 5228 struct ieee80211_supported_band *sband; 5229 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 5231 5230 int band, ch, idx = 0; 5232 5231 5233 5232 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 5234 5233 if (!ar->mac.sbands[band].channels) 5235 5234 continue; 5236 5235 5237 - sband = ar->hw->wiphy->bands[band]; 5236 + 
sband = hw->wiphy->bands[band]; 5238 5237 if (!sband) 5239 5238 continue; 5240 5239 ··· 5248 5245 return idx; 5249 5246 } 5250 5247 5251 - static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf, 5252 - u32 len, struct wmi_chan_info_event *ch_info_ev) 5248 + static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 5249 + struct wmi_chan_info_event *ch_info_ev) 5253 5250 { 5254 5251 const void **tb; 5255 5252 const struct wmi_chan_info_event *ev; 5256 5253 int ret; 5257 5254 5258 - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 5255 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5259 5256 if (IS_ERR(tb)) { 5260 5257 ret = PTR_ERR(tb); 5261 5258 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5294 5291 const struct wmi_pdev_bss_chan_info_event *ev; 5295 5292 int ret; 5296 5293 5297 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5294 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5298 5295 if (IS_ERR(tb)) { 5299 5296 ret = PTR_ERR(tb); 5300 5297 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5334 5331 const struct wmi_vdev_install_key_compl_event *ev; 5335 5332 int ret; 5336 5333 5337 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5334 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5338 5335 if (IS_ERR(tb)) { 5339 5336 ret = PTR_ERR(tb); 5340 5337 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5365 5362 const struct wmi_peer_assoc_conf_event *ev; 5366 5363 int ret; 5367 5364 5368 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 5365 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5369 5366 if (IS_ERR(tb)) { 5370 5367 ret = PTR_ERR(tb); 5371 5368 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5387 5384 } 5388 5385 5389 5386 static int 5390 - ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf, 5391 - u32 len, const struct wmi_pdev_temperature_event *ev) 5387 + 
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb, 5388 + const struct wmi_pdev_temperature_event *ev) 5392 5389 { 5393 5390 const void **tb; 5394 5391 int ret; 5395 5392 5396 - tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC); 5393 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5397 5394 if (IS_ERR(tb)) { 5398 5395 ret = PTR_ERR(tb); 5399 5396 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 5728 5725 { 5729 5726 u32 vdev_id, tx_status; 5730 5727 5731 - if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, 5732 - &vdev_id, &tx_status) != 0) { 5728 + if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 5733 5729 ath12k_warn(ab, "failed to extract bcn tx status"); 5734 5730 return; 5735 5731 } ··· 5866 5864 status->freq, status->band, status->signal, 5867 5865 status->rate_idx); 5868 5866 5869 - ieee80211_rx_ni(ar->hw, skb); 5867 + ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb); 5870 5868 5871 5869 exit: 5872 5870 rcu_read_unlock(); ··· 6039 6037 goto exit; 6040 6038 } 6041 6039 6042 - sta = ieee80211_find_sta_by_ifaddr(ar->hw, 6040 + sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar), 6043 6041 arg.mac_addr, NULL); 6044 6042 if (!sta) { 6045 6043 ath12k_warn(ab, "Spurious quick kickout for STA %pM\n", ··· 6112 6110 /* HW channel counters frequency value in hertz */ 6113 6111 u32 cc_freq_hz = ab->cc_freq_hz; 6114 6112 6115 - if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { 6113 + if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 6116 6114 ath12k_warn(ab, "failed to extract chan info event"); 6117 6115 return; 6118 6116 } ··· 6397 6395 const struct wmi_pdev_ctl_failsafe_chk_event *ev; 6398 6396 int ret; 6399 6397 6400 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6398 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6401 6399 if (IS_ERR(tb)) { 6402 6400 ret = PTR_ERR(tb); 6403 6401 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 
··· 6462 6460 const u32 *vdev_ids; 6463 6461 int ret; 6464 6462 6465 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6463 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6466 6464 if (IS_ERR(tb)) { 6467 6465 ret = PTR_ERR(tb); 6468 6466 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6496 6494 struct ath12k *ar; 6497 6495 int ret; 6498 6496 6499 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6497 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6500 6498 if (IS_ERR(tb)) { 6501 6499 ret = PTR_ERR(tb); 6502 6500 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6533 6531 if (ar->dfs_block_radar_events) 6534 6532 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); 6535 6533 else 6536 - ieee80211_radar_detected(ar->hw); 6534 + ieee80211_radar_detected(ath12k_ar_to_hw(ar)); 6537 6535 6538 6536 exit: 6539 6537 rcu_read_unlock(); ··· 6548 6546 struct ath12k *ar; 6549 6547 struct wmi_pdev_temperature_event ev = {0}; 6550 6548 6551 - if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) { 6549 + if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) { 6552 6550 ath12k_warn(ab, "failed to extract pdev temperature event"); 6553 6551 return; 6554 6552 } ··· 6575 6573 const struct wmi_fils_discovery_event *ev; 6576 6574 int ret; 6577 6575 6578 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6576 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6579 6577 if (IS_ERR(tb)) { 6580 6578 ret = PTR_ERR(tb); 6581 6579 ath12k_warn(ab, ··· 6605 6603 const struct wmi_probe_resp_tx_status_event *ev; 6606 6604 int ret; 6607 6605 6608 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); 6606 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6609 6607 if (IS_ERR(tb)) { 6610 6608 ret = PTR_ERR(tb); 6611 6609 ath12k_warn(ab, ··· 6637 6635 const void **tb; 6638 6636 int ret; 6639 6637 6640 - tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, 
GFP_ATOMIC); 6638 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6641 6639 if (IS_ERR(tb)) { 6642 6640 ret = PTR_ERR(tb); 6643 6641 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); ··· 6662 6660 6663 6661 queue_work(ab->workqueue, &ab->rfkill_work); 6664 6662 kfree(tb); 6663 + } 6664 + 6665 + static void 6666 + ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb) 6667 + { 6668 + trace_ath12k_wmi_diag(ab, skb->data, skb->len); 6665 6669 } 6666 6670 6667 6671 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) ··· 6779 6771 break; 6780 6772 case WMI_VDEV_DELETE_RESP_EVENTID: 6781 6773 ath12k_vdev_delete_resp_event(ab, skb); 6774 + break; 6775 + case WMI_DIAG_EVENTID: 6776 + ath12k_wmi_diag_event(ab, skb); 6782 6777 break; 6783 6778 /* TODO: Add remaining events */ 6784 6779 default:
+2 -4
drivers/net/wireless/ath/ath9k/ahb.c
··· 144 144 return ret; 145 145 } 146 146 147 - static int ath_ahb_remove(struct platform_device *pdev) 147 + static void ath_ahb_remove(struct platform_device *pdev) 148 148 { 149 149 struct ieee80211_hw *hw = platform_get_drvdata(pdev); 150 150 ··· 155 155 free_irq(sc->irq, sc); 156 156 ieee80211_free_hw(sc->hw); 157 157 } 158 - 159 - return 0; 160 158 } 161 159 162 160 static struct platform_driver ath_ahb_driver = { 163 161 .probe = ath_ahb_probe, 164 - .remove = ath_ahb_remove, 162 + .remove_new = ath_ahb_remove, 165 163 .driver = { 166 164 .name = "ath9k", 167 165 },
+1 -1
drivers/net/wireless/ath/ath9k/antenna.c
··· 643 643 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; 644 644 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2; 645 645 } else if (antcomb->rssi_sub > 646 - antcomb->rssi_lna1) { 646 + antcomb->rssi_lna2) { 647 647 /* set to A-B */ 648 648 conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1; 649 649 conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
-9
drivers/net/wireless/ath/ath9k/ar9003_phy.h
··· 851 851 #define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e 852 852 #define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1 853 853 854 - #define AR_PHY_POWER_TX_RATE1 0x9934 855 - #define AR_PHY_POWER_TX_RATE2 0x9938 856 854 #define AR_PHY_POWER_TX_RATE_MAX 0x993c 857 855 #define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040 858 856 #define PHY_AGC_CLR 0x10000000 ··· 1038 1040 #define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2)) 1039 1041 1040 1042 #define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001 1041 - 1042 - /* 1043 - * AGC 3 Register Map 1044 - */ 1045 - #define AR_AGC3_BASE 0xce00 1046 - 1047 - #define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180) 1048 1043 1049 1044 /* GLB Registers */ 1050 1045 #define AR_GLB_BASE 0x20000
-4
drivers/net/wireless/ath/ath9k/reg_aic.h
··· 17 17 #ifndef REG_AIC_H 18 18 #define REG_AIC_H 19 19 20 - #define AR_SM_BASE 0xa200 21 - #define AR_SM1_BASE 0xb200 22 - #define AR_AGC_BASE 0x9e00 23 - 24 20 #define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0) 25 21 #define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4) 26 22 #define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)