Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-for-davem-2018-10-14' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.20

Third set of patches for 4.20. Most notable is finalising ath10k
wcn3990 support, all components should be implemented now.

Major changes:

ath10k

* support NET_DETECT WoWLAN feature

* wcn3990 basic functionality now working after we got QMI support

mt76

* mt76x0e improvements (should be usable now)

* more mt76x0/mt76x2 unification work

brcmsmac

* fix a problem on AP mode with clients using power save mode

iwlwifi

* support for a new scan type: fast balance
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+6814 -2280
+6
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
··· 56 56 the length can vary between hw versions. 57 57 - <supply-name>-supply: handle to the regulator device tree node 58 58 optional "supply-name" is "vdd-0.8-cx-mx". 59 + - memory-region: 60 + Usage: optional 61 + Value type: <phandle> 62 + Definition: reference to the reserved-memory for the msa region 63 + used by the wifi firmware running in Q6. 59 64 60 65 Example (to supply the calibration data alone): 61 66 ··· 154 149 <0 140 0 /* CE10 */ >, 155 150 <0 141 0 /* CE11 */ >; 156 151 vdd-0.8-cx-mx-supply = <&pm8998_l5>; 152 + memory-region = <&wifi_msa_mem>; 157 153 };
+1
drivers/net/wireless/ath/ath10k/Kconfig
··· 44 44 tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" 45 45 depends on ATH10K 46 46 depends on ARCH_QCOM || COMPILE_TEST 47 + select QCOM_QMI_HELPERS 47 48 ---help--- 48 49 This module adds support for integrated WCN3990 chip connected 49 50 to system NOC(SNOC). Currently work in progress and will not
+3 -1
drivers/net/wireless/ath/ath10k/Makefile
··· 36 36 ath10k_usb-y += usb.o 37 37 38 38 obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o 39 - ath10k_snoc-y += snoc.o 39 + ath10k_snoc-y += qmi.o \ 40 + qmi_wlfw_v01.o \ 41 + snoc.o 40 42 41 43 # for tracing framework to find trace.h 42 44 CFLAGS_trace.o := -I$(src)
+12 -2
drivers/net/wireless/ath/ath10k/core.c
··· 989 989 data, data_len); 990 990 } 991 991 992 - static void ath10k_core_free_board_files(struct ath10k *ar) 992 + void ath10k_core_free_board_files(struct ath10k *ar) 993 993 { 994 994 if (!IS_ERR(ar->normal_mode_fw.board)) 995 995 release_firmware(ar->normal_mode_fw.board); ··· 1004 1004 ar->normal_mode_fw.ext_board_data = NULL; 1005 1005 ar->normal_mode_fw.ext_board_len = 0; 1006 1006 } 1007 + EXPORT_SYMBOL(ath10k_core_free_board_files); 1007 1008 1008 1009 static void ath10k_core_free_firmware_files(struct ath10k *ar) 1009 1010 { ··· 1332 1331 goto out; 1333 1332 } 1334 1333 1334 + if (ar->id.qmi_ids_valid) { 1335 + scnprintf(name, name_len, 1336 + "bus=%s,qmi-board-id=%x", 1337 + ath10k_bus_str(ar->hif.bus), 1338 + ar->id.qmi_board_id); 1339 + goto out; 1340 + } 1341 + 1335 1342 scnprintf(name, name_len, 1336 1343 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s", 1337 1344 ath10k_bus_str(ar->hif.bus), ··· 1368 1359 return -1; 1369 1360 } 1370 1361 1371 - static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) 1362 + int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) 1372 1363 { 1373 1364 char boardname[100], fallback_boardname[100]; 1374 1365 int ret; ··· 1416 1407 ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api); 1417 1408 return 0; 1418 1409 } 1410 + EXPORT_SYMBOL(ath10k_core_fetch_board_file); 1419 1411 1420 1412 static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar) 1421 1413 {
+5
drivers/net/wireless/ath/ath10k/core.h
··· 951 951 /* protected by conf_mutex */ 952 952 u8 ps_state_enable; 953 953 954 + bool nlo_enabled; 954 955 bool p2p; 955 956 956 957 struct { ··· 989 988 u32 subsystem_device; 990 989 991 990 bool bmi_ids_valid; 991 + bool qmi_ids_valid; 992 + u32 qmi_board_id; 992 993 u8 bmi_board_id; 993 994 u8 bmi_eboard_id; 994 995 u8 bmi_chip_id; ··· 1218 1215 int ath10k_core_register(struct ath10k *ar, 1219 1216 const struct ath10k_bus_params *bus_params); 1220 1217 void ath10k_core_unregister(struct ath10k *ar); 1218 + int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type); 1219 + void ath10k_core_free_board_files(struct ath10k *ar); 1221 1220 1222 1221 #endif /* _CORE_H_ */
+1 -1
drivers/net/wireless/ath/ath10k/debug.c
··· 2421 2421 if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable)) 2422 2422 return -EINVAL; 2423 2423 2424 - if (ps_state_enable > 1 || ps_state_enable < 0) 2424 + if (ps_state_enable > 1) 2425 2425 return -EINVAL; 2426 2426 2427 2427 mutex_lock(&ar->conf_mutex);
+1
drivers/net/wireless/ath/ath10k/debug.h
··· 44 44 ATH10K_DBG_USB = 0x00040000, 45 45 ATH10K_DBG_USB_BULK = 0x00080000, 46 46 ATH10K_DBG_SNOC = 0x00100000, 47 + ATH10K_DBG_QMI = 0x00200000, 47 48 ATH10K_DBG_ANY = 0xffffffff, 48 49 }; 49 50
+2 -3
drivers/net/wireless/ath/ath10k/htt_rx.c
··· 2680 2680 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 2681 2681 } else { 2682 2682 mcs = legacy_rate_idx; 2683 - if (mcs < 0) 2684 - return; 2685 2683 2686 2684 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 2687 2685 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; ··· 2751 2753 struct ath10k_per_peer_tx_stats *peer_stats) 2752 2754 { 2753 2755 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 2754 - u8 rate = 0, rate_idx = 0, sgi; 2756 + u8 rate = 0, sgi; 2757 + s8 rate_idx = 0; 2755 2758 struct rate_info txrate; 2756 2759 2757 2760 lockdep_assert_held(&ar->data_lock);
+50 -24
drivers/net/wireless/ath/ath10k/mac.c
··· 164 164 if (ath10k_mac_bitrate_is_cck(bitrate)) 165 165 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; 166 166 167 - for (i = 0; i < sizeof(ath10k_rates); i++) { 167 + for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { 168 168 if (ath10k_rates[i].bitrate == bitrate) 169 169 return hw_value_prefix | ath10k_rates[i].hw_value; 170 170 } ··· 4697 4697 goto err_core_stop; 4698 4698 } 4699 4699 4700 + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { 4701 + ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); 4702 + if (ret) { 4703 + ath10k_err(ar, "failed to set prob req oui: %i\n", ret); 4704 + goto err_core_stop; 4705 + } 4706 + } 4707 + 4700 4708 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 4701 4709 ret = ath10k_wmi_adaptive_qcs(ar, true); 4702 4710 if (ret) { ··· 5690 5682 return; 5691 5683 } 5692 5684 5693 - sband = ar->hw->wiphy->bands[def.chan->band]; 5694 - basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 5695 - bitrate = sband->bitrates[basic_rate_idx].bitrate; 5685 + sband = ar->hw->wiphy->bands[def.chan->band]; 5686 + basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 5687 + bitrate = sband->bitrates[basic_rate_idx].bitrate; 5696 5688 5697 - hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); 5698 - if (hw_rate_code < 0) { 5699 - ath10k_warn(ar, "bitrate not supported %d\n", bitrate); 5700 - mutex_unlock(&ar->conf_mutex); 5701 - return; 5702 - } 5689 + hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); 5690 + if (hw_rate_code < 0) { 5691 + ath10k_warn(ar, "bitrate not supported %d\n", bitrate); 5692 + mutex_unlock(&ar->conf_mutex); 5693 + return; 5694 + } 5703 5695 5704 - vdev_param = ar->wmi.vdev_param->mgmt_rate; 5705 - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5706 - hw_rate_code); 5707 - if (ret) 5708 - ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); 5696 + vdev_param = ar->wmi.vdev_param->mgmt_rate; 5697 + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5698 + 
hw_rate_code); 5699 + if (ret) 5700 + ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); 5709 5701 } 5710 5702 5711 5703 mutex_unlock(&ar->conf_mutex); ··· 6863 6855 u32 queues, bool drop) 6864 6856 { 6865 6857 struct ath10k *ar = hw->priv; 6858 + struct ath10k_vif *arvif; 6859 + u32 bitmap; 6866 6860 6867 - if (drop) 6861 + if (drop) { 6862 + if (vif->type == NL80211_IFTYPE_STATION) { 6863 + bitmap = ~(1 << WMI_MGMT_TID); 6864 + list_for_each_entry(arvif, &ar->arvifs, list) { 6865 + if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6866 + ath10k_wmi_peer_flush(ar, arvif->vdev_id, 6867 + arvif->bssid, bitmap); 6868 + } 6869 + } 6868 6870 return; 6871 + } 6869 6872 6870 6873 mutex_lock(&ar->conf_mutex); 6871 6874 ath10k_mac_wait_tx_complete(ar); ··· 8512 8493 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 8513 8494 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 8514 8495 8496 + if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { 8497 + ar->hw->wiphy->max_sched_scan_reqs = 1; 8498 + ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; 8499 + ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; 8500 + ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; 8501 + ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; 8502 + ar->hw->wiphy->max_sched_scan_plan_interval = 8503 + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; 8504 + ar->hw->wiphy->max_sched_scan_plan_iterations = 8505 + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; 8506 + } 8507 + 8515 8508 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 8516 8509 ar->hw->sta_data_size = sizeof(struct ath10k_sta); 8517 8510 ar->hw->txq_data_size = sizeof(struct ath10k_txq); ··· 8573 8542 wiphy_ext_feature_set(ar->hw->wiphy, 8574 8543 NL80211_EXT_FEATURE_SET_SCAN_DWELL); 8575 8544 8576 - if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 8545 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) || 8546 + test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, 
ar->wmi.svc_map)) 8577 8547 wiphy_ext_feature_set(ar->hw->wiphy, 8578 - NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT); 8548 + NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); 8579 8549 8580 8550 /* 8581 8551 * on LL hardware queues are managed entirely by the FW ··· 8667 8635 } 8668 8636 8669 8637 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { 8670 - ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); 8671 - if (ret) { 8672 - ath10k_err(ar, "failed to set prob req oui: %i\n", ret); 8673 - goto err_dfs_detector_exit; 8674 - } 8675 - 8676 8638 ar->hw->wiphy->features |= 8677 8639 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; 8678 8640 }
+11 -12
drivers/net/wireless/ath/ath10k/pci.c
··· 1071 1071 struct ath10k_ce *ce = ath10k_ce_priv(ar); 1072 1072 int ret = 0; 1073 1073 u32 *buf; 1074 - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 1074 + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; 1075 1075 struct ath10k_ce_pipe *ce_diag; 1076 1076 void *data_buf = NULL; 1077 - u32 ce_data; /* Host buffer address in CE space */ 1078 1077 dma_addr_t ce_data_base = 0; 1079 1078 int i; 1080 1079 ··· 1087 1088 * 1) 4-byte alignment 1088 1089 * 2) Buffer in DMA-able space 1089 1090 */ 1090 - orig_nbytes = nbytes; 1091 + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 1092 + 1091 1093 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 1092 - orig_nbytes, 1094 + alloc_nbytes, 1093 1095 &ce_data_base, 1094 1096 GFP_ATOMIC); 1095 1097 if (!data_buf) { 1096 1098 ret = -ENOMEM; 1097 1099 goto done; 1098 1100 } 1099 - 1100 - /* Copy caller's data to allocated DMA buf */ 1101 - memcpy(data_buf, data, orig_nbytes); 1102 1101 1103 1102 /* 1104 1103 * The address supplied by the caller is in the ··· 1110 1113 */ 1111 1114 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1112 1115 1113 - remaining_bytes = orig_nbytes; 1114 - ce_data = ce_data_base; 1116 + remaining_bytes = nbytes; 1115 1117 while (remaining_bytes) { 1116 1118 /* FIXME: check cast */ 1117 1119 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1120 + 1121 + /* Copy caller's data to allocated DMA buf */ 1122 + memcpy(data_buf, data, nbytes); 1118 1123 1119 1124 /* Set up to receive directly into Target(!) address */ 1120 1125 ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address); ··· 1127 1128 * Request CE to send caller-supplied data that 1128 1129 * was copied to bounce buffer to Target(!) address. 
1129 1130 */ 1130 - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, 1131 + ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, 1131 1132 nbytes, 0, 0); 1132 1133 if (ret != 0) 1133 1134 goto done; ··· 1170 1171 1171 1172 remaining_bytes -= nbytes; 1172 1173 address += nbytes; 1173 - ce_data += nbytes; 1174 + data += nbytes; 1174 1175 } 1175 1176 1176 1177 done: 1177 1178 if (data_buf) { 1178 - dma_free_coherent(ar->dev, orig_nbytes, data_buf, 1179 + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, 1179 1180 ce_data_base); 1180 1181 } 1181 1182
+1019
drivers/net/wireless/ath/ath10k/qmi.c
··· 1 + /* 2 + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 3 + * 4 + * Permission to use, copy, modify, and/or distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + 17 + #include <linux/completion.h> 18 + #include <linux/device.h> 19 + #include <linux/debugfs.h> 20 + #include <linux/idr.h> 21 + #include <linux/kernel.h> 22 + #include <linux/of.h> 23 + #include <linux/of_address.h> 24 + #include <linux/module.h> 25 + #include <linux/net.h> 26 + #include <linux/platform_device.h> 27 + #include <linux/qcom_scm.h> 28 + #include <linux/string.h> 29 + #include <net/sock.h> 30 + 31 + #include "debug.h" 32 + #include "snoc.h" 33 + 34 + #define ATH10K_QMI_CLIENT_ID 0x4b4e454c 35 + #define ATH10K_QMI_TIMEOUT 30 36 + 37 + static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi, 38 + struct ath10k_msa_mem_info *mem_info) 39 + { 40 + struct qcom_scm_vmperm dst_perms[3]; 41 + struct ath10k *ar = qmi->ar; 42 + unsigned int src_perms; 43 + u32 perm_count; 44 + int ret; 45 + 46 + src_perms = BIT(QCOM_SCM_VMID_HLOS); 47 + 48 + dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA; 49 + dst_perms[0].perm = QCOM_SCM_PERM_RW; 50 + dst_perms[1].vmid = QCOM_SCM_VMID_WLAN; 51 + dst_perms[1].perm = QCOM_SCM_PERM_RW; 52 + 53 + if (mem_info->secure) { 54 + perm_count = 2; 55 + } else { 56 + dst_perms[2].vmid 
= QCOM_SCM_VMID_WLAN_CE; 57 + dst_perms[2].perm = QCOM_SCM_PERM_RW; 58 + perm_count = 3; 59 + } 60 + 61 + ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size, 62 + &src_perms, dst_perms, perm_count); 63 + if (ret < 0) 64 + ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret); 65 + 66 + return ret; 67 + } 68 + 69 + static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi, 70 + struct ath10k_msa_mem_info *mem_info) 71 + { 72 + struct qcom_scm_vmperm dst_perms; 73 + struct ath10k *ar = qmi->ar; 74 + unsigned int src_perms; 75 + int ret; 76 + 77 + src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN); 78 + 79 + if (!mem_info->secure) 80 + src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE); 81 + 82 + dst_perms.vmid = QCOM_SCM_VMID_HLOS; 83 + dst_perms.perm = QCOM_SCM_PERM_RW; 84 + 85 + ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size, 86 + &src_perms, &dst_perms, 1); 87 + if (ret < 0) 88 + ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret); 89 + 90 + return ret; 91 + } 92 + 93 + static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi) 94 + { 95 + int ret; 96 + int i; 97 + 98 + for (i = 0; i < qmi->nr_mem_region; i++) { 99 + ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]); 100 + if (ret) 101 + goto err_unmap; 102 + } 103 + 104 + return 0; 105 + 106 + err_unmap: 107 + for (i--; i >= 0; i--) 108 + ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]); 109 + return ret; 110 + } 111 + 112 + static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi) 113 + { 114 + int i; 115 + 116 + for (i = 0; i < qmi->nr_mem_region; i++) 117 + ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]); 118 + } 119 + 120 + static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi) 121 + { 122 + struct wlfw_msa_info_resp_msg_v01 resp = {}; 123 + struct wlfw_msa_info_req_msg_v01 req = {}; 124 + struct ath10k *ar = qmi->ar; 125 + struct qmi_txn txn; 126 + int ret; 127 + int i; 128 + 129 + 
req.msa_addr = qmi->msa_pa; 130 + req.size = qmi->msa_mem_size; 131 + 132 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 133 + wlfw_msa_info_resp_msg_v01_ei, &resp); 134 + if (ret < 0) 135 + goto out; 136 + 137 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 138 + QMI_WLFW_MSA_INFO_REQ_V01, 139 + WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN, 140 + wlfw_msa_info_req_msg_v01_ei, &req); 141 + if (ret < 0) { 142 + qmi_txn_cancel(&txn); 143 + ath10k_err(ar, "failed to send msa mem info req: %d\n", ret); 144 + goto out; 145 + } 146 + 147 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 148 + if (ret < 0) 149 + goto out; 150 + 151 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 152 + ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error); 153 + ret = -EINVAL; 154 + goto out; 155 + } 156 + 157 + if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) { 158 + ath10k_err(ar, "invalid memory region length received: %d\n", 159 + resp.mem_region_info_len); 160 + ret = -EINVAL; 161 + goto out; 162 + } 163 + 164 + qmi->nr_mem_region = resp.mem_region_info_len; 165 + for (i = 0; i < resp.mem_region_info_len; i++) { 166 + qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr; 167 + qmi->mem_region[i].size = resp.mem_region_info[i].size; 168 + qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag; 169 + ath10k_dbg(ar, ATH10K_DBG_QMI, 170 + "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n", 171 + i, &qmi->mem_region[i].addr, 172 + qmi->mem_region[i].size, 173 + qmi->mem_region[i].secure); 174 + } 175 + 176 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n"); 177 + return 0; 178 + 179 + out: 180 + return ret; 181 + } 182 + 183 + static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi) 184 + { 185 + struct wlfw_msa_ready_resp_msg_v01 resp = {}; 186 + struct wlfw_msa_ready_req_msg_v01 req = {}; 187 + struct ath10k *ar = qmi->ar; 188 + struct qmi_txn txn; 189 + int ret; 190 + 191 + ret = qmi_txn_init(&qmi->qmi_hdl, 
&txn, 192 + wlfw_msa_ready_resp_msg_v01_ei, &resp); 193 + if (ret < 0) 194 + goto out; 195 + 196 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 197 + QMI_WLFW_MSA_READY_REQ_V01, 198 + WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN, 199 + wlfw_msa_ready_req_msg_v01_ei, &req); 200 + if (ret < 0) { 201 + qmi_txn_cancel(&txn); 202 + ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret); 203 + goto out; 204 + } 205 + 206 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 207 + if (ret < 0) 208 + goto out; 209 + 210 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 211 + ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error); 212 + ret = -EINVAL; 213 + } 214 + 215 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n"); 216 + return 0; 217 + 218 + out: 219 + return ret; 220 + } 221 + 222 + static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi) 223 + { 224 + struct wlfw_bdf_download_resp_msg_v01 resp = {}; 225 + struct wlfw_bdf_download_req_msg_v01 *req; 226 + struct ath10k *ar = qmi->ar; 227 + unsigned int remaining; 228 + struct qmi_txn txn; 229 + const u8 *temp; 230 + int ret; 231 + 232 + req = kzalloc(sizeof(*req), GFP_KERNEL); 233 + if (!req) 234 + return -ENOMEM; 235 + 236 + temp = ar->normal_mode_fw.board_data; 237 + remaining = ar->normal_mode_fw.board_len; 238 + 239 + while (remaining) { 240 + req->valid = 1; 241 + req->file_id_valid = 1; 242 + req->file_id = 0; 243 + req->total_size_valid = 1; 244 + req->total_size = ar->normal_mode_fw.board_len; 245 + req->seg_id_valid = 1; 246 + req->data_valid = 1; 247 + req->end_valid = 1; 248 + 249 + if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { 250 + req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; 251 + } else { 252 + req->data_len = remaining; 253 + req->end = 1; 254 + } 255 + 256 + memcpy(req->data, temp, req->data_len); 257 + 258 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 259 + wlfw_bdf_download_resp_msg_v01_ei, 260 + &resp); 261 + if (ret < 0) 262 + goto out; 263 + 264 + 
ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 265 + QMI_WLFW_BDF_DOWNLOAD_REQ_V01, 266 + WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, 267 + wlfw_bdf_download_req_msg_v01_ei, req); 268 + if (ret < 0) { 269 + qmi_txn_cancel(&txn); 270 + goto out; 271 + } 272 + 273 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 274 + 275 + if (ret < 0) 276 + goto out; 277 + 278 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 279 + ath10k_err(ar, "failed to download board data file: %d\n", 280 + resp.resp.error); 281 + ret = -EINVAL; 282 + goto out; 283 + } 284 + 285 + remaining -= req->data_len; 286 + temp += req->data_len; 287 + req->seg_id++; 288 + } 289 + 290 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n"); 291 + 292 + kfree(req); 293 + return 0; 294 + 295 + out: 296 + kfree(req); 297 + return ret; 298 + } 299 + 300 + static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi) 301 + { 302 + struct wlfw_cal_report_resp_msg_v01 resp = {}; 303 + struct wlfw_cal_report_req_msg_v01 req = {}; 304 + struct ath10k *ar = qmi->ar; 305 + struct qmi_txn txn; 306 + int i, j = 0; 307 + int ret; 308 + 309 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei, 310 + &resp); 311 + if (ret < 0) 312 + goto out; 313 + 314 + for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) { 315 + if (qmi->cal_data[i].total_size && 316 + qmi->cal_data[i].data) { 317 + req.meta_data[j] = qmi->cal_data[i].cal_id; 318 + j++; 319 + } 320 + } 321 + req.meta_data_len = j; 322 + 323 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 324 + QMI_WLFW_CAL_REPORT_REQ_V01, 325 + WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN, 326 + wlfw_cal_report_req_msg_v01_ei, &req); 327 + if (ret < 0) { 328 + qmi_txn_cancel(&txn); 329 + ath10k_err(ar, "failed to send calibration request: %d\n", ret); 330 + goto out; 331 + } 332 + 333 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 334 + if (ret < 0) 335 + goto out; 336 + 337 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 338 + 
ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error); 339 + ret = -EINVAL; 340 + goto out; 341 + } 342 + 343 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n"); 344 + return 0; 345 + 346 + out: 347 + return ret; 348 + } 349 + 350 + static int 351 + ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode) 352 + { 353 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); 354 + struct ath10k_qmi *qmi = ar_snoc->qmi; 355 + struct wlfw_wlan_mode_resp_msg_v01 resp = {}; 356 + struct wlfw_wlan_mode_req_msg_v01 req = {}; 357 + struct qmi_txn txn; 358 + int ret; 359 + 360 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 361 + wlfw_wlan_mode_resp_msg_v01_ei, 362 + &resp); 363 + if (ret < 0) 364 + goto out; 365 + 366 + req.mode = mode; 367 + req.hw_debug_valid = 1; 368 + req.hw_debug = 0; 369 + 370 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 371 + QMI_WLFW_WLAN_MODE_REQ_V01, 372 + WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN, 373 + wlfw_wlan_mode_req_msg_v01_ei, &req); 374 + if (ret < 0) { 375 + qmi_txn_cancel(&txn); 376 + ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret); 377 + goto out; 378 + } 379 + 380 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 381 + if (ret < 0) 382 + goto out; 383 + 384 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 385 + ath10k_err(ar, "more request rejected: %d\n", resp.resp.error); 386 + ret = -EINVAL; 387 + goto out; 388 + } 389 + 390 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode); 391 + return 0; 392 + 393 + out: 394 + return ret; 395 + } 396 + 397 + static int 398 + ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar, 399 + struct ath10k_qmi_wlan_enable_cfg *config, 400 + const char *version) 401 + { 402 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); 403 + struct ath10k_qmi *qmi = ar_snoc->qmi; 404 + struct wlfw_wlan_cfg_resp_msg_v01 resp = {}; 405 + struct wlfw_wlan_cfg_req_msg_v01 *req; 406 + struct qmi_txn txn; 407 + 
int ret; 408 + u32 i; 409 + 410 + req = kzalloc(sizeof(*req), GFP_KERNEL); 411 + if (!req) 412 + return -ENOMEM; 413 + 414 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 415 + wlfw_wlan_cfg_resp_msg_v01_ei, 416 + &resp); 417 + if (ret < 0) 418 + goto out; 419 + 420 + req->host_version_valid = 0; 421 + 422 + req->tgt_cfg_valid = 1; 423 + if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01) 424 + req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; 425 + else 426 + req->tgt_cfg_len = config->num_ce_tgt_cfg; 427 + for (i = 0; i < req->tgt_cfg_len; i++) { 428 + req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num; 429 + req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir; 430 + req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries; 431 + req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max; 432 + req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags; 433 + } 434 + 435 + req->svc_cfg_valid = 1; 436 + if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01) 437 + req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01; 438 + else 439 + req->svc_cfg_len = config->num_ce_svc_pipe_cfg; 440 + for (i = 0; i < req->svc_cfg_len; i++) { 441 + req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id; 442 + req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir; 443 + req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num; 444 + } 445 + 446 + req->shadow_reg_valid = 1; 447 + if (config->num_shadow_reg_cfg > 448 + QMI_WLFW_MAX_NUM_SHADOW_REG_V01) 449 + req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01; 450 + else 451 + req->shadow_reg_len = config->num_shadow_reg_cfg; 452 + 453 + memcpy(req->shadow_reg, config->shadow_reg_cfg, 454 + sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len); 455 + 456 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 457 + QMI_WLFW_WLAN_CFG_REQ_V01, 458 + WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN, 459 + wlfw_wlan_cfg_req_msg_v01_ei, req); 460 + if (ret < 0) { 461 + qmi_txn_cancel(&txn); 462 + ath10k_err(ar, "failed to send config 
request: %d\n", ret); 463 + goto out; 464 + } 465 + 466 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 467 + if (ret < 0) 468 + goto out; 469 + 470 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 471 + ath10k_err(ar, "config request rejected: %d\n", resp.resp.error); 472 + ret = -EINVAL; 473 + goto out; 474 + } 475 + 476 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n"); 477 + kfree(req); 478 + return 0; 479 + 480 + out: 481 + kfree(req); 482 + return ret; 483 + } 484 + 485 + int ath10k_qmi_wlan_enable(struct ath10k *ar, 486 + struct ath10k_qmi_wlan_enable_cfg *config, 487 + enum wlfw_driver_mode_enum_v01 mode, 488 + const char *version) 489 + { 490 + int ret; 491 + 492 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n", 493 + mode, config); 494 + 495 + ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version); 496 + if (ret) { 497 + ath10k_err(ar, "failed to send qmi config: %d\n", ret); 498 + return ret; 499 + } 500 + 501 + ret = ath10k_qmi_mode_send_sync_msg(ar, mode); 502 + if (ret) { 503 + ath10k_err(ar, "failed to send qmi mode: %d\n", ret); 504 + return ret; 505 + } 506 + 507 + return 0; 508 + } 509 + 510 + int ath10k_qmi_wlan_disable(struct ath10k *ar) 511 + { 512 + return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01); 513 + } 514 + 515 + static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi) 516 + { 517 + struct wlfw_cap_resp_msg_v01 *resp; 518 + struct wlfw_cap_req_msg_v01 req = {}; 519 + struct ath10k *ar = qmi->ar; 520 + struct qmi_txn txn; 521 + int ret; 522 + 523 + resp = kzalloc(sizeof(*resp), GFP_KERNEL); 524 + if (!resp) 525 + return -ENOMEM; 526 + 527 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp); 528 + if (ret < 0) 529 + goto out; 530 + 531 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 532 + QMI_WLFW_CAP_REQ_V01, 533 + WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN, 534 + wlfw_cap_req_msg_v01_ei, &req); 535 + if (ret < 0) { 536 + qmi_txn_cancel(&txn); 537 + ath10k_err(ar, "failed 
to send capability request: %d\n", ret); 538 + goto out; 539 + } 540 + 541 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 542 + if (ret < 0) 543 + goto out; 544 + 545 + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { 546 + ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error); 547 + ret = -EINVAL; 548 + goto out; 549 + } 550 + 551 + if (resp->chip_info_valid) { 552 + qmi->chip_info.chip_id = resp->chip_info.chip_id; 553 + qmi->chip_info.chip_family = resp->chip_info.chip_family; 554 + } 555 + 556 + if (resp->board_info_valid) 557 + qmi->board_info.board_id = resp->board_info.board_id; 558 + else 559 + qmi->board_info.board_id = 0xFF; 560 + 561 + if (resp->soc_info_valid) 562 + qmi->soc_info.soc_id = resp->soc_info.soc_id; 563 + 564 + if (resp->fw_version_info_valid) { 565 + qmi->fw_version = resp->fw_version_info.fw_version; 566 + strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp, 567 + sizeof(qmi->fw_build_timestamp)); 568 + } 569 + 570 + if (resp->fw_build_id_valid) 571 + strlcpy(qmi->fw_build_id, resp->fw_build_id, 572 + MAX_BUILD_ID_LEN + 1); 573 + 574 + ath10k_dbg(ar, ATH10K_DBG_QMI, 575 + "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x", 576 + qmi->chip_info.chip_id, qmi->chip_info.chip_family, 577 + qmi->board_info.board_id, qmi->soc_info.soc_id); 578 + ath10k_dbg(ar, ATH10K_DBG_QMI, 579 + "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s", 580 + qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id); 581 + 582 + kfree(resp); 583 + return 0; 584 + 585 + out: 586 + kfree(resp); 587 + return ret; 588 + } 589 + 590 + static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) 591 + { 592 + struct wlfw_host_cap_resp_msg_v01 resp = {}; 593 + struct wlfw_host_cap_req_msg_v01 req = {}; 594 + struct ath10k *ar = qmi->ar; 595 + struct qmi_txn txn; 596 + int ret; 597 + 598 + req.daemon_support_valid = 1; 599 + req.daemon_support = 0; 600 + 601 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 602 + 
wlfw_host_cap_resp_msg_v01_ei, &resp); 603 + if (ret < 0) 604 + goto out; 605 + 606 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 607 + QMI_WLFW_HOST_CAP_REQ_V01, 608 + WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN, 609 + wlfw_host_cap_req_msg_v01_ei, &req); 610 + if (ret < 0) { 611 + qmi_txn_cancel(&txn); 612 + ath10k_err(ar, "failed to send host capability request: %d\n", ret); 613 + goto out; 614 + } 615 + 616 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 617 + if (ret < 0) 618 + goto out; 619 + 620 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 621 + ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error); 622 + ret = -EINVAL; 623 + goto out; 624 + } 625 + 626 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n"); 627 + return 0; 628 + 629 + out: 630 + return ret; 631 + } 632 + 633 + static int 634 + ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi) 635 + { 636 + struct wlfw_ind_register_resp_msg_v01 resp = {}; 637 + struct wlfw_ind_register_req_msg_v01 req = {}; 638 + struct ath10k *ar = qmi->ar; 639 + struct qmi_txn txn; 640 + int ret; 641 + 642 + req.client_id_valid = 1; 643 + req.client_id = ATH10K_QMI_CLIENT_ID; 644 + req.fw_ready_enable_valid = 1; 645 + req.fw_ready_enable = 1; 646 + req.msa_ready_enable_valid = 1; 647 + req.msa_ready_enable = 1; 648 + 649 + ret = qmi_txn_init(&qmi->qmi_hdl, &txn, 650 + wlfw_ind_register_resp_msg_v01_ei, &resp); 651 + if (ret < 0) 652 + goto out; 653 + 654 + ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn, 655 + QMI_WLFW_IND_REGISTER_REQ_V01, 656 + WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN, 657 + wlfw_ind_register_req_msg_v01_ei, &req); 658 + if (ret < 0) { 659 + qmi_txn_cancel(&txn); 660 + ath10k_err(ar, "failed to send indication registed request: %d\n", ret); 661 + goto out; 662 + } 663 + 664 + ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ); 665 + if (ret < 0) 666 + goto out; 667 + 668 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { 669 + 
ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error); 670 + ret = -EINVAL; 671 + goto out; 672 + } 673 + 674 + if (resp.fw_status_valid) { 675 + if (resp.fw_status & QMI_WLFW_FW_READY_V01) 676 + qmi->fw_ready = true; 677 + } 678 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n"); 679 + return 0; 680 + 681 + out: 682 + return ret; 683 + } 684 + 685 + static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi) 686 + { 687 + struct ath10k *ar = qmi->ar; 688 + int ret; 689 + 690 + ret = ath10k_qmi_ind_register_send_sync_msg(qmi); 691 + if (ret) 692 + return; 693 + 694 + if (qmi->fw_ready) { 695 + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND); 696 + return; 697 + } 698 + 699 + ret = ath10k_qmi_host_cap_send_sync(qmi); 700 + if (ret) 701 + return; 702 + 703 + ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi); 704 + if (ret) 705 + return; 706 + 707 + ret = ath10k_qmi_setup_msa_permissions(qmi); 708 + if (ret) 709 + return; 710 + 711 + ret = ath10k_qmi_msa_ready_send_sync_msg(qmi); 712 + if (ret) 713 + goto err_setup_msa; 714 + 715 + ret = ath10k_qmi_cap_send_sync_msg(qmi); 716 + if (ret) 717 + goto err_setup_msa; 718 + 719 + return; 720 + 721 + err_setup_msa: 722 + ath10k_qmi_remove_msa_permission(qmi); 723 + } 724 + 725 + static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi) 726 + { 727 + struct ath10k *ar = qmi->ar; 728 + 729 + ar->hif.bus = ATH10K_BUS_SNOC; 730 + ar->id.qmi_ids_valid = true; 731 + ar->id.qmi_board_id = qmi->board_info.board_id; 732 + ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR; 733 + 734 + return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD); 735 + } 736 + 737 + static int 738 + ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi, 739 + enum ath10k_qmi_driver_event_type type, 740 + void *data) 741 + { 742 + struct ath10k_qmi_driver_event *event; 743 + 744 + event = kzalloc(sizeof(*event), GFP_ATOMIC); 745 + if (!event) 746 + return -ENOMEM; 747 + 748 + event->type = 
type; 749 + event->data = data; 750 + 751 + spin_lock(&qmi->event_lock); 752 + list_add_tail(&event->list, &qmi->event_list); 753 + spin_unlock(&qmi->event_lock); 754 + 755 + queue_work(qmi->event_wq, &qmi->event_work); 756 + 757 + return 0; 758 + } 759 + 760 + static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi) 761 + { 762 + struct ath10k *ar = qmi->ar; 763 + 764 + ath10k_qmi_remove_msa_permission(qmi); 765 + ath10k_core_free_board_files(ar); 766 + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND); 767 + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n"); 768 + } 769 + 770 + static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi) 771 + { 772 + int ret; 773 + 774 + ret = ath10k_qmi_fetch_board_file(qmi); 775 + if (ret) 776 + goto out; 777 + 778 + ret = ath10k_qmi_bdf_dnld_send_sync(qmi); 779 + if (ret) 780 + goto out; 781 + 782 + ret = ath10k_qmi_send_cal_report_req(qmi); 783 + 784 + out: 785 + return; 786 + } 787 + 788 + static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi) 789 + { 790 + struct ath10k *ar = qmi->ar; 791 + 792 + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n"); 793 + ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND); 794 + 795 + return 0; 796 + } 797 + 798 + static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl, 799 + struct sockaddr_qrtr *sq, 800 + struct qmi_txn *txn, const void *data) 801 + { 802 + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); 803 + 804 + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL); 805 + } 806 + 807 + static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl, 808 + struct sockaddr_qrtr *sq, 809 + struct qmi_txn *txn, const void *data) 810 + { 811 + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); 812 + 813 + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL); 814 + } 815 + 816 + static struct qmi_msg_handler qmi_msg_handler[] 
= { 817 + { 818 + .type = QMI_INDICATION, 819 + .msg_id = QMI_WLFW_FW_READY_IND_V01, 820 + .ei = wlfw_fw_ready_ind_msg_v01_ei, 821 + .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01), 822 + .fn = ath10k_qmi_fw_ready_ind, 823 + }, 824 + { 825 + .type = QMI_INDICATION, 826 + .msg_id = QMI_WLFW_MSA_READY_IND_V01, 827 + .ei = wlfw_msa_ready_ind_msg_v01_ei, 828 + .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01), 829 + .fn = ath10k_qmi_msa_ready_ind, 830 + }, 831 + {} 832 + }; 833 + 834 + static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl, 835 + struct qmi_service *service) 836 + { 837 + struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); 838 + struct sockaddr_qrtr *sq = &qmi->sq; 839 + struct ath10k *ar = qmi->ar; 840 + int ret; 841 + 842 + sq->sq_family = AF_QIPCRTR; 843 + sq->sq_node = service->node; 844 + sq->sq_port = service->port; 845 + 846 + ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n"); 847 + 848 + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq, 849 + sizeof(qmi->sq), 0); 850 + if (ret) { 851 + ath10k_err(ar, "failed to connect to a remote QMI service port\n"); 852 + return ret; 853 + } 854 + 855 + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n"); 856 + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL); 857 + 858 + return ret; 859 + } 860 + 861 + static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl, 862 + struct qmi_service *service) 863 + { 864 + struct ath10k_qmi *qmi = 865 + container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl); 866 + 867 + qmi->fw_ready = false; 868 + ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL); 869 + } 870 + 871 + static struct qmi_ops ath10k_qmi_ops = { 872 + .new_server = ath10k_qmi_new_server, 873 + .del_server = ath10k_qmi_del_server, 874 + }; 875 + 876 + static void ath10k_qmi_driver_event_work(struct work_struct *work) 877 + { 878 + struct ath10k_qmi *qmi = container_of(work, struct 
ath10k_qmi, 879 + event_work); 880 + struct ath10k_qmi_driver_event *event; 881 + struct ath10k *ar = qmi->ar; 882 + 883 + spin_lock(&qmi->event_lock); 884 + while (!list_empty(&qmi->event_list)) { 885 + event = list_first_entry(&qmi->event_list, 886 + struct ath10k_qmi_driver_event, list); 887 + list_del(&event->list); 888 + spin_unlock(&qmi->event_lock); 889 + 890 + switch (event->type) { 891 + case ATH10K_QMI_EVENT_SERVER_ARRIVE: 892 + ath10k_qmi_event_server_arrive(qmi); 893 + break; 894 + case ATH10K_QMI_EVENT_SERVER_EXIT: 895 + ath10k_qmi_event_server_exit(qmi); 896 + break; 897 + case ATH10K_QMI_EVENT_FW_READY_IND: 898 + ath10k_qmi_event_fw_ready_ind(qmi); 899 + break; 900 + case ATH10K_QMI_EVENT_MSA_READY_IND: 901 + ath10k_qmi_event_msa_ready(qmi); 902 + break; 903 + default: 904 + ath10k_warn(ar, "invalid event type: %d", event->type); 905 + break; 906 + } 907 + kfree(event); 908 + spin_lock(&qmi->event_lock); 909 + } 910 + spin_unlock(&qmi->event_lock); 911 + } 912 + 913 + static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size) 914 + { 915 + struct ath10k *ar = qmi->ar; 916 + struct device *dev = ar->dev; 917 + struct device_node *node; 918 + struct resource r; 919 + int ret; 920 + 921 + node = of_parse_phandle(dev->of_node, "memory-region", 0); 922 + if (node) { 923 + ret = of_address_to_resource(node, 0, &r); 924 + if (ret) { 925 + dev_err(dev, "failed to resolve msa fixed region\n"); 926 + return ret; 927 + } 928 + of_node_put(node); 929 + 930 + qmi->msa_pa = r.start; 931 + qmi->msa_mem_size = resource_size(&r); 932 + qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size, 933 + MEMREMAP_WT); 934 + if (!qmi->msa_pa) { 935 + dev_err(dev, "failed to map memory region: %pa\n", &r.start); 936 + return -EBUSY; 937 + } 938 + } else { 939 + qmi->msa_va = dmam_alloc_coherent(dev, msa_size, 940 + &qmi->msa_pa, GFP_KERNEL); 941 + if (!qmi->msa_va) { 942 + ath10k_err(ar, "failed to allocate dma memory for msa region\n"); 943 + 
return -ENOMEM; 944 + } 945 + qmi->msa_mem_size = msa_size; 946 + } 947 + 948 + ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n", 949 + &qmi->msa_pa, 950 + qmi->msa_va); 951 + 952 + return 0; 953 + } 954 + 955 + int ath10k_qmi_init(struct ath10k *ar, u32 msa_size) 956 + { 957 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); 958 + struct ath10k_qmi *qmi; 959 + int ret; 960 + 961 + qmi = kzalloc(sizeof(*qmi), GFP_KERNEL); 962 + if (!qmi) 963 + return -ENOMEM; 964 + 965 + qmi->ar = ar; 966 + ar_snoc->qmi = qmi; 967 + 968 + ret = ath10k_qmi_setup_msa_resources(qmi, msa_size); 969 + if (ret) 970 + goto err; 971 + 972 + ret = qmi_handle_init(&qmi->qmi_hdl, 973 + WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, 974 + &ath10k_qmi_ops, qmi_msg_handler); 975 + if (ret) 976 + goto err; 977 + 978 + qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event", 979 + WQ_UNBOUND, 1); 980 + if (!qmi->event_wq) { 981 + ath10k_err(ar, "failed to allocate workqueue\n"); 982 + ret = -EFAULT; 983 + goto err_release_qmi_handle; 984 + } 985 + 986 + INIT_LIST_HEAD(&qmi->event_list); 987 + spin_lock_init(&qmi->event_lock); 988 + INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work); 989 + 990 + ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01, 991 + WLFW_SERVICE_VERS_V01, 0); 992 + if (ret) 993 + goto err_qmi_lookup; 994 + 995 + return 0; 996 + 997 + err_qmi_lookup: 998 + destroy_workqueue(qmi->event_wq); 999 + 1000 + err_release_qmi_handle: 1001 + qmi_handle_release(&qmi->qmi_hdl); 1002 + 1003 + err: 1004 + kfree(qmi); 1005 + return ret; 1006 + } 1007 + 1008 + int ath10k_qmi_deinit(struct ath10k *ar) 1009 + { 1010 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); 1011 + struct ath10k_qmi *qmi = ar_snoc->qmi; 1012 + 1013 + qmi_handle_release(&qmi->qmi_hdl); 1014 + cancel_work_sync(&qmi->event_work); 1015 + destroy_workqueue(qmi->event_wq); 1016 + ar_snoc->qmi = NULL; 1017 + 1018 + return 0; 1019 + }
+129
drivers/net/wireless/ath/ath10k/qmi.h
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _ATH10K_QMI_H_
#define _ATH10K_QMI_H_

#include <linux/soc/qcom/qmi.h>
#include <linux/qrtr.h>
#include "qmi_wlfw_v01.h"

#define MAX_NUM_MEMORY_REGIONS 2
#define MAX_TIMESTAMP_LEN 32
#define MAX_BUILD_ID_LEN 128
#define MAX_NUM_CAL_V01 5

/* Events posted by QMI callbacks and handled on the qmi event workqueue. */
enum ath10k_qmi_driver_event_type {
	ATH10K_QMI_EVENT_SERVER_ARRIVE,
	ATH10K_QMI_EVENT_SERVER_EXIT,
	ATH10K_QMI_EVENT_FW_READY_IND,
	ATH10K_QMI_EVENT_FW_DOWN_IND,
	ATH10K_QMI_EVENT_MSA_READY_IND,
	ATH10K_QMI_EVENT_MAX,
};

/* One MSA (modem shared assist) memory region reported by firmware. */
struct ath10k_msa_mem_info {
	phys_addr_t addr;
	u32 size;
	bool secure;
};

/* Chip identity reported via the QMI capability response. */
struct ath10k_qmi_chip_info {
	u32 chip_id;
	u32 chip_family;
};

struct ath10k_qmi_board_info {
	u32 board_id;
};

struct ath10k_qmi_soc_info {
	u32 soc_id;
};

/* One calibration blob to be reported to firmware via cal_report. */
struct ath10k_qmi_cal_data {
	u32 cal_id;
	u32 total_size;
	u8 *data;
};

/* Copy engine target pipe configuration (little-endian, wire format). */
struct ath10k_tgt_pipe_cfg {
	__le32 pipe_num;
	__le32 pipe_dir;
	__le32 nentries;
	__le32 nbytes_max;
	__le32 flags;
	__le32 reserved;
};

/* Service-to-pipe mapping entry (little-endian, wire format). */
struct ath10k_svc_pipe_cfg {
	__le32 service_id;
	__le32 pipe_dir;
	__le32 pipe_num;
};

/* Shadow register entry: CE id and register offset. */
struct ath10k_shadow_reg_cfg {
	__le16 ce_id;
	__le16 reg_offset;
};

/* Aggregate configuration passed to ath10k_qmi_wlan_enable(). */
struct ath10k_qmi_wlan_enable_cfg {
	u32 num_ce_tgt_cfg;
	struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
	u32 num_ce_svc_pipe_cfg;
	struct ath10k_svc_pipe_cfg *ce_svc_cfg;
	u32 num_shadow_reg_cfg;
	struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
};

/* Work item queued on ath10k_qmi.event_list. */
struct ath10k_qmi_driver_event {
	struct list_head list;
	enum ath10k_qmi_driver_event_type type;
	void *data;
};

/* Per-device QMI client state (owned by the SNOC layer). */
struct ath10k_qmi {
	struct ath10k *ar;
	struct qmi_handle qmi_hdl;
	struct sockaddr_qrtr sq;		/* wlfw service address */
	struct work_struct event_work;
	struct workqueue_struct *event_wq;
	struct list_head event_list;
	spinlock_t event_lock; /* spinlock for qmi event list */
	u32 nr_mem_region;
	struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
	dma_addr_t msa_pa;			/* MSA physical address */
	u32 msa_mem_size;
	void *msa_va;				/* MSA kernel mapping */
	struct ath10k_qmi_chip_info chip_info;
	struct ath10k_qmi_board_info board_info;
	struct ath10k_qmi_soc_info soc_info;
	char fw_build_id[MAX_BUILD_ID_LEN + 1];
	u32 fw_version;
	bool fw_ready;				/* set from ind_register resp */
	char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
	struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
};

int ath10k_qmi_wlan_enable(struct ath10k *ar,
			   struct ath10k_qmi_wlan_enable_cfg *config,
			   enum wlfw_driver_mode_enum_v01 mode,
			   const char *version);
int ath10k_qmi_wlan_disable(struct ath10k *ar);
int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
int ath10k_qmi_deinit(struct ath10k *ar);

#endif /* _ATH10K_QMI_H_ */
+2072
drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* QMI message encoding/decoding tables for the WLAN firmware (wlfw) service,
 * v01.  These tables mirror the QMI IDL definitions; the tlv_type values and
 * struct offsets define the wire format, so do not reorder or edit entries
 * without a matching IDL change.
 */

#include <linux/soc/qcom/qmi.h>
#include <linux/types.h>
#include "qmi_wlfw_v01.h"

static struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_num),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum wlfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
					   nentries),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
					   nbytes_max),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
					   flags),
	},
	{}
};

static struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
					   service_id),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum wlfw_pipedir_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
					   pipe_dir),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
					   pipe_num),
	},
	{}
};

static struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_2_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u16),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_shadow_reg_cfg_s_v01,
					   id),
	},
	{
		.data_type	= QMI_UNSIGNED_2_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u16),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_shadow_reg_cfg_s_v01,
					   offset),
	},
	{}
};

static struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
					   addr),
	},
	{}
};

static struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u64),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_memory_region_info_s_v01,
					   region_addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_memory_region_info_s_v01,
					   size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_memory_region_info_s_v01,
					   secure_flag),
	},
	{}
};

static struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u64),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_cfg_s_v01,
					   offset),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_cfg_s_v01,
					   size),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_cfg_s_v01,
					   secure_flag),
	},
	{}
};

static struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_s_v01,
					   size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum wlfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_s_v01,
					   type),
	},
	{
		.data_type	= QMI_DATA_LEN,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_s_v01,
					   mem_cfg_len),
	},
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= QMI_WLFW_MAX_NUM_MEM_CFG_V01,
		.elem_size	= sizeof(struct wlfw_mem_cfg_s_v01),
		.array_type	= VAR_LEN_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_s_v01,
					   mem_cfg),
		.ei_array	= wlfw_mem_cfg_s_v01_ei,
	},
	{}
};

static struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u64),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_resp_s_v01,
					   addr),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_resp_s_v01,
					   size),
	},
	{
		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
		.elem_len	= 1,
		.elem_size	= sizeof(enum wlfw_mem_type_enum_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_mem_seg_resp_s_v01,
					   type),
	},
	{}
};

static struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_rf_chip_info_s_v01,
					   chip_id),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_rf_chip_info_s_v01,
					   chip_family),
	},
	{}
};

static struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_rf_board_info_s_v01,
					   board_id),
	},
	{}
};

static struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_soc_info_s_v01,
					   soc_id),
	},
	{}
};

static struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_fw_version_info_s_v01,
					   fw_version),
	},
	{
		.data_type	= QMI_STRING,
		.elem_len	= QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
		.elem_size	= sizeof(char),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0,
		.offset		= offsetof(struct wlfw_fw_version_info_s_v01,
					   fw_build_timestamp),
	},
	{}
};

/* Optional TLVs come in pairs: a QMI_OPT_FLAG "<field>_valid" entry followed
 * by the value entry with the same tlv_type.
 */
struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   fw_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   fw_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x11,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   initiate_cal_download_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x12,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   initiate_cal_update_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   msa_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x13,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   msa_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   pin_connect_result_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x14,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   pin_connect_result_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   client_id_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x15,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   client_id),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   request_mem_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x16,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   request_mem_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   mem_ready_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x17,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   mem_ready_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   fw_init_done_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x18,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   fw_init_done_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   rejuvenate_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x19,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   rejuvenate_enable),
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   xo_cal_enable_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_1_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x1A,
		.offset		= offsetof(struct wlfw_ind_register_req_msg_v01,
					   xo_cal_enable),
	},
	{}
};

struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
	{
		.data_type	= QMI_STRUCT,
		.elem_len	= 1,
		.elem_size	= sizeof(struct qmi_response_type_v01),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x02,
		.offset		= offsetof(struct wlfw_ind_register_resp_msg_v01,
					   resp),
		.ei_array	= qmi_response_type_v01_ei,
	},
	{
		.data_type	= QMI_OPT_FLAG,
		.elem_len	= 1,
		.elem_size	= sizeof(u8),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct wlfw_ind_register_resp_msg_v01,
					   fw_status_valid),
	},
	{
		.data_type	= QMI_UNSIGNED_8_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u64),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x10,
		.offset		= offsetof(struct wlfw_ind_register_resp_msg_v01,
					   fw_status),
	},
	{}
};

/* Indication carries no payload. */
struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
	{}
};

struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {} 580 + }; 581 + 582 + struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { 583 + { 584 + .data_type = QMI_OPT_FLAG, 585 + .elem_len = 1, 586 + .elem_size = sizeof(u8), 587 + .array_type = NO_ARRAY, 588 + .tlv_type = 0x10, 589 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 590 + pwr_pin_result_valid), 591 + }, 592 + { 593 + .data_type = QMI_UNSIGNED_4_BYTE, 594 + .elem_len = 1, 595 + .elem_size = sizeof(u32), 596 + .array_type = NO_ARRAY, 597 + .tlv_type = 0x10, 598 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 599 + pwr_pin_result), 600 + }, 601 + { 602 + .data_type = QMI_OPT_FLAG, 603 + .elem_len = 1, 604 + .elem_size = sizeof(u8), 605 + .array_type = NO_ARRAY, 606 + .tlv_type = 0x11, 607 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 608 + phy_io_pin_result_valid), 609 + }, 610 + { 611 + .data_type = QMI_UNSIGNED_4_BYTE, 612 + .elem_len = 1, 613 + .elem_size = sizeof(u32), 614 + .array_type = NO_ARRAY, 615 + .tlv_type = 0x11, 616 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 617 + phy_io_pin_result), 618 + }, 619 + { 620 + .data_type = QMI_OPT_FLAG, 621 + .elem_len = 1, 622 + .elem_size = sizeof(u8), 623 + .array_type = NO_ARRAY, 624 + .tlv_type = 0x12, 625 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 626 + rf_pin_result_valid), 627 + }, 628 + { 629 + .data_type = QMI_UNSIGNED_4_BYTE, 630 + .elem_len = 1, 631 + .elem_size = sizeof(u32), 632 + .array_type = NO_ARRAY, 633 + .tlv_type = 0x12, 634 + .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01, 635 + rf_pin_result), 636 + }, 637 + {} 638 + }; 639 + 640 + struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { 641 + { 642 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 643 + .elem_len = 1, 644 + .elem_size = sizeof(enum wlfw_driver_mode_enum_v01), 645 + .array_type = NO_ARRAY, 646 + .tlv_type = 0x01, 647 + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, 648 + mode), 649 + }, 650 + { 651 + 
.data_type = QMI_OPT_FLAG, 652 + .elem_len = 1, 653 + .elem_size = sizeof(u8), 654 + .array_type = NO_ARRAY, 655 + .tlv_type = 0x10, 656 + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, 657 + hw_debug_valid), 658 + }, 659 + { 660 + .data_type = QMI_UNSIGNED_1_BYTE, 661 + .elem_len = 1, 662 + .elem_size = sizeof(u8), 663 + .array_type = NO_ARRAY, 664 + .tlv_type = 0x10, 665 + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, 666 + hw_debug), 667 + }, 668 + {} 669 + }; 670 + 671 + struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { 672 + { 673 + .data_type = QMI_STRUCT, 674 + .elem_len = 1, 675 + .elem_size = sizeof(struct qmi_response_type_v01), 676 + .array_type = NO_ARRAY, 677 + .tlv_type = 0x02, 678 + .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01, 679 + resp), 680 + .ei_array = qmi_response_type_v01_ei, 681 + }, 682 + {} 683 + }; 684 + 685 + struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { 686 + { 687 + .data_type = QMI_OPT_FLAG, 688 + .elem_len = 1, 689 + .elem_size = sizeof(u8), 690 + .array_type = NO_ARRAY, 691 + .tlv_type = 0x10, 692 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 693 + host_version_valid), 694 + }, 695 + { 696 + .data_type = QMI_STRING, 697 + .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, 698 + .elem_size = sizeof(char), 699 + .array_type = NO_ARRAY, 700 + .tlv_type = 0x10, 701 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 702 + host_version), 703 + }, 704 + { 705 + .data_type = QMI_OPT_FLAG, 706 + .elem_len = 1, 707 + .elem_size = sizeof(u8), 708 + .array_type = NO_ARRAY, 709 + .tlv_type = 0x11, 710 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 711 + tgt_cfg_valid), 712 + }, 713 + { 714 + .data_type = QMI_DATA_LEN, 715 + .elem_len = 1, 716 + .elem_size = sizeof(u8), 717 + .array_type = NO_ARRAY, 718 + .tlv_type = 0x11, 719 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 720 + tgt_cfg_len), 721 + }, 722 + { 723 + .data_type = QMI_STRUCT, 724 + .elem_len = QMI_WLFW_MAX_NUM_CE_V01, 725 
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01), 726 + .array_type = VAR_LEN_ARRAY, 727 + .tlv_type = 0x11, 728 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 729 + tgt_cfg), 730 + .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei, 731 + }, 732 + { 733 + .data_type = QMI_OPT_FLAG, 734 + .elem_len = 1, 735 + .elem_size = sizeof(u8), 736 + .array_type = NO_ARRAY, 737 + .tlv_type = 0x12, 738 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 739 + svc_cfg_valid), 740 + }, 741 + { 742 + .data_type = QMI_DATA_LEN, 743 + .elem_len = 1, 744 + .elem_size = sizeof(u8), 745 + .array_type = NO_ARRAY, 746 + .tlv_type = 0x12, 747 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 748 + svc_cfg_len), 749 + }, 750 + { 751 + .data_type = QMI_STRUCT, 752 + .elem_len = QMI_WLFW_MAX_NUM_SVC_V01, 753 + .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01), 754 + .array_type = VAR_LEN_ARRAY, 755 + .tlv_type = 0x12, 756 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 757 + svc_cfg), 758 + .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei, 759 + }, 760 + { 761 + .data_type = QMI_OPT_FLAG, 762 + .elem_len = 1, 763 + .elem_size = sizeof(u8), 764 + .array_type = NO_ARRAY, 765 + .tlv_type = 0x13, 766 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 767 + shadow_reg_valid), 768 + }, 769 + { 770 + .data_type = QMI_DATA_LEN, 771 + .elem_len = 1, 772 + .elem_size = sizeof(u8), 773 + .array_type = NO_ARRAY, 774 + .tlv_type = 0x13, 775 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 776 + shadow_reg_len), 777 + }, 778 + { 779 + .data_type = QMI_STRUCT, 780 + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01, 781 + .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01), 782 + .array_type = VAR_LEN_ARRAY, 783 + .tlv_type = 0x13, 784 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 785 + shadow_reg), 786 + .ei_array = wlfw_shadow_reg_cfg_s_v01_ei, 787 + }, 788 + { 789 + .data_type = QMI_OPT_FLAG, 790 + .elem_len = 1, 791 + .elem_size = sizeof(u8), 792 + .array_type = 
NO_ARRAY, 793 + .tlv_type = 0x14, 794 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 795 + shadow_reg_v2_valid), 796 + }, 797 + { 798 + .data_type = QMI_DATA_LEN, 799 + .elem_len = 1, 800 + .elem_size = sizeof(u8), 801 + .array_type = NO_ARRAY, 802 + .tlv_type = 0x14, 803 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 804 + shadow_reg_v2_len), 805 + }, 806 + { 807 + .data_type = QMI_STRUCT, 808 + .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2, 809 + .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01), 810 + .array_type = VAR_LEN_ARRAY, 811 + .tlv_type = 0x14, 812 + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, 813 + shadow_reg_v2), 814 + .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei, 815 + }, 816 + {} 817 + }; 818 + 819 + struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { 820 + { 821 + .data_type = QMI_STRUCT, 822 + .elem_len = 1, 823 + .elem_size = sizeof(struct qmi_response_type_v01), 824 + .array_type = NO_ARRAY, 825 + .tlv_type = 0x02, 826 + .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01, 827 + resp), 828 + .ei_array = qmi_response_type_v01_ei, 829 + }, 830 + {} 831 + }; 832 + 833 + struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = { 834 + {} 835 + }; 836 + 837 + struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = { 838 + { 839 + .data_type = QMI_STRUCT, 840 + .elem_len = 1, 841 + .elem_size = sizeof(struct qmi_response_type_v01), 842 + .array_type = NO_ARRAY, 843 + .tlv_type = 0x02, 844 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 845 + resp), 846 + .ei_array = qmi_response_type_v01_ei, 847 + }, 848 + { 849 + .data_type = QMI_OPT_FLAG, 850 + .elem_len = 1, 851 + .elem_size = sizeof(u8), 852 + .array_type = NO_ARRAY, 853 + .tlv_type = 0x10, 854 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 855 + chip_info_valid), 856 + }, 857 + { 858 + .data_type = QMI_STRUCT, 859 + .elem_len = 1, 860 + .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01), 861 + .array_type = NO_ARRAY, 862 + .tlv_type = 0x10, 863 + .offset = offsetof(struct 
wlfw_cap_resp_msg_v01, 864 + chip_info), 865 + .ei_array = wlfw_rf_chip_info_s_v01_ei, 866 + }, 867 + { 868 + .data_type = QMI_OPT_FLAG, 869 + .elem_len = 1, 870 + .elem_size = sizeof(u8), 871 + .array_type = NO_ARRAY, 872 + .tlv_type = 0x11, 873 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 874 + board_info_valid), 875 + }, 876 + { 877 + .data_type = QMI_STRUCT, 878 + .elem_len = 1, 879 + .elem_size = sizeof(struct wlfw_rf_board_info_s_v01), 880 + .array_type = NO_ARRAY, 881 + .tlv_type = 0x11, 882 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 883 + board_info), 884 + .ei_array = wlfw_rf_board_info_s_v01_ei, 885 + }, 886 + { 887 + .data_type = QMI_OPT_FLAG, 888 + .elem_len = 1, 889 + .elem_size = sizeof(u8), 890 + .array_type = NO_ARRAY, 891 + .tlv_type = 0x12, 892 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 893 + soc_info_valid), 894 + }, 895 + { 896 + .data_type = QMI_STRUCT, 897 + .elem_len = 1, 898 + .elem_size = sizeof(struct wlfw_soc_info_s_v01), 899 + .array_type = NO_ARRAY, 900 + .tlv_type = 0x12, 901 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 902 + soc_info), 903 + .ei_array = wlfw_soc_info_s_v01_ei, 904 + }, 905 + { 906 + .data_type = QMI_OPT_FLAG, 907 + .elem_len = 1, 908 + .elem_size = sizeof(u8), 909 + .array_type = NO_ARRAY, 910 + .tlv_type = 0x13, 911 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 912 + fw_version_info_valid), 913 + }, 914 + { 915 + .data_type = QMI_STRUCT, 916 + .elem_len = 1, 917 + .elem_size = sizeof(struct wlfw_fw_version_info_s_v01), 918 + .array_type = NO_ARRAY, 919 + .tlv_type = 0x13, 920 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 921 + fw_version_info), 922 + .ei_array = wlfw_fw_version_info_s_v01_ei, 923 + }, 924 + { 925 + .data_type = QMI_OPT_FLAG, 926 + .elem_len = 1, 927 + .elem_size = sizeof(u8), 928 + .array_type = NO_ARRAY, 929 + .tlv_type = 0x14, 930 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 931 + fw_build_id_valid), 932 + }, 933 + { 934 + .data_type = QMI_STRING, 935 + 
.elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1, 936 + .elem_size = sizeof(char), 937 + .array_type = NO_ARRAY, 938 + .tlv_type = 0x14, 939 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 940 + fw_build_id), 941 + }, 942 + { 943 + .data_type = QMI_OPT_FLAG, 944 + .elem_len = 1, 945 + .elem_size = sizeof(u8), 946 + .array_type = NO_ARRAY, 947 + .tlv_type = 0x15, 948 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 949 + num_macs_valid), 950 + }, 951 + { 952 + .data_type = QMI_UNSIGNED_1_BYTE, 953 + .elem_len = 1, 954 + .elem_size = sizeof(u8), 955 + .array_type = NO_ARRAY, 956 + .tlv_type = 0x15, 957 + .offset = offsetof(struct wlfw_cap_resp_msg_v01, 958 + num_macs), 959 + }, 960 + {} 961 + }; 962 + 963 + struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = { 964 + { 965 + .data_type = QMI_UNSIGNED_1_BYTE, 966 + .elem_len = 1, 967 + .elem_size = sizeof(u8), 968 + .array_type = NO_ARRAY, 969 + .tlv_type = 0x01, 970 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 971 + valid), 972 + }, 973 + { 974 + .data_type = QMI_OPT_FLAG, 975 + .elem_len = 1, 976 + .elem_size = sizeof(u8), 977 + .array_type = NO_ARRAY, 978 + .tlv_type = 0x10, 979 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 980 + file_id_valid), 981 + }, 982 + { 983 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 984 + .elem_len = 1, 985 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 986 + .array_type = NO_ARRAY, 987 + .tlv_type = 0x10, 988 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 989 + file_id), 990 + }, 991 + { 992 + .data_type = QMI_OPT_FLAG, 993 + .elem_len = 1, 994 + .elem_size = sizeof(u8), 995 + .array_type = NO_ARRAY, 996 + .tlv_type = 0x11, 997 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 998 + total_size_valid), 999 + }, 1000 + { 1001 + .data_type = QMI_UNSIGNED_4_BYTE, 1002 + .elem_len = 1, 1003 + .elem_size = sizeof(u32), 1004 + .array_type = NO_ARRAY, 1005 + .tlv_type = 0x11, 1006 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 
1007 + total_size), 1008 + }, 1009 + { 1010 + .data_type = QMI_OPT_FLAG, 1011 + .elem_len = 1, 1012 + .elem_size = sizeof(u8), 1013 + .array_type = NO_ARRAY, 1014 + .tlv_type = 0x12, 1015 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1016 + seg_id_valid), 1017 + }, 1018 + { 1019 + .data_type = QMI_UNSIGNED_4_BYTE, 1020 + .elem_len = 1, 1021 + .elem_size = sizeof(u32), 1022 + .array_type = NO_ARRAY, 1023 + .tlv_type = 0x12, 1024 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1025 + seg_id), 1026 + }, 1027 + { 1028 + .data_type = QMI_OPT_FLAG, 1029 + .elem_len = 1, 1030 + .elem_size = sizeof(u8), 1031 + .array_type = NO_ARRAY, 1032 + .tlv_type = 0x13, 1033 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1034 + data_valid), 1035 + }, 1036 + { 1037 + .data_type = QMI_DATA_LEN, 1038 + .elem_len = 1, 1039 + .elem_size = sizeof(u16), 1040 + .array_type = NO_ARRAY, 1041 + .tlv_type = 0x13, 1042 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1043 + data_len), 1044 + }, 1045 + { 1046 + .data_type = QMI_UNSIGNED_1_BYTE, 1047 + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, 1048 + .elem_size = sizeof(u8), 1049 + .array_type = VAR_LEN_ARRAY, 1050 + .tlv_type = 0x13, 1051 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1052 + data), 1053 + }, 1054 + { 1055 + .data_type = QMI_OPT_FLAG, 1056 + .elem_len = 1, 1057 + .elem_size = sizeof(u8), 1058 + .array_type = NO_ARRAY, 1059 + .tlv_type = 0x14, 1060 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1061 + end_valid), 1062 + }, 1063 + { 1064 + .data_type = QMI_UNSIGNED_1_BYTE, 1065 + .elem_len = 1, 1066 + .elem_size = sizeof(u8), 1067 + .array_type = NO_ARRAY, 1068 + .tlv_type = 0x14, 1069 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1070 + end), 1071 + }, 1072 + { 1073 + .data_type = QMI_OPT_FLAG, 1074 + .elem_len = 1, 1075 + .elem_size = sizeof(u8), 1076 + .array_type = NO_ARRAY, 1077 + .tlv_type = 0x15, 1078 + .offset = offsetof(struct 
wlfw_bdf_download_req_msg_v01, 1079 + bdf_type_valid), 1080 + }, 1081 + { 1082 + .data_type = QMI_UNSIGNED_1_BYTE, 1083 + .elem_len = 1, 1084 + .elem_size = sizeof(u8), 1085 + .array_type = NO_ARRAY, 1086 + .tlv_type = 0x15, 1087 + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, 1088 + bdf_type), 1089 + }, 1090 + {} 1091 + }; 1092 + 1093 + struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { 1094 + { 1095 + .data_type = QMI_STRUCT, 1096 + .elem_len = 1, 1097 + .elem_size = sizeof(struct qmi_response_type_v01), 1098 + .array_type = NO_ARRAY, 1099 + .tlv_type = 0x02, 1100 + .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01, 1101 + resp), 1102 + .ei_array = qmi_response_type_v01_ei, 1103 + }, 1104 + {} 1105 + }; 1106 + 1107 + struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = { 1108 + { 1109 + .data_type = QMI_DATA_LEN, 1110 + .elem_len = 1, 1111 + .elem_size = sizeof(u8), 1112 + .array_type = NO_ARRAY, 1113 + .tlv_type = 0x01, 1114 + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, 1115 + meta_data_len), 1116 + }, 1117 + { 1118 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1119 + .elem_len = QMI_WLFW_MAX_NUM_CAL_V01, 1120 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1121 + .array_type = VAR_LEN_ARRAY, 1122 + .tlv_type = 0x01, 1123 + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, 1124 + meta_data), 1125 + }, 1126 + { 1127 + .data_type = QMI_OPT_FLAG, 1128 + .elem_len = 1, 1129 + .elem_size = sizeof(u8), 1130 + .array_type = NO_ARRAY, 1131 + .tlv_type = 0x10, 1132 + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, 1133 + xo_cal_data_valid), 1134 + }, 1135 + { 1136 + .data_type = QMI_UNSIGNED_1_BYTE, 1137 + .elem_len = 1, 1138 + .elem_size = sizeof(u8), 1139 + .array_type = NO_ARRAY, 1140 + .tlv_type = 0x10, 1141 + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, 1142 + xo_cal_data), 1143 + }, 1144 + {} 1145 + }; 1146 + 1147 + struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = { 1148 + { 1149 + .data_type = 
QMI_STRUCT, 1150 + .elem_len = 1, 1151 + .elem_size = sizeof(struct qmi_response_type_v01), 1152 + .array_type = NO_ARRAY, 1153 + .tlv_type = 0x02, 1154 + .offset = offsetof(struct wlfw_cal_report_resp_msg_v01, 1155 + resp), 1156 + .ei_array = qmi_response_type_v01_ei, 1157 + }, 1158 + {} 1159 + }; 1160 + 1161 + struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { 1162 + { 1163 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1164 + .elem_len = 1, 1165 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1166 + .array_type = NO_ARRAY, 1167 + .tlv_type = 0x01, 1168 + .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01, 1169 + cal_id), 1170 + }, 1171 + {} 1172 + }; 1173 + 1174 + struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = { 1175 + { 1176 + .data_type = QMI_UNSIGNED_1_BYTE, 1177 + .elem_len = 1, 1178 + .elem_size = sizeof(u8), 1179 + .array_type = NO_ARRAY, 1180 + .tlv_type = 0x01, 1181 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1182 + valid), 1183 + }, 1184 + { 1185 + .data_type = QMI_OPT_FLAG, 1186 + .elem_len = 1, 1187 + .elem_size = sizeof(u8), 1188 + .array_type = NO_ARRAY, 1189 + .tlv_type = 0x10, 1190 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1191 + file_id_valid), 1192 + }, 1193 + { 1194 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1195 + .elem_len = 1, 1196 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1197 + .array_type = NO_ARRAY, 1198 + .tlv_type = 0x10, 1199 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1200 + file_id), 1201 + }, 1202 + { 1203 + .data_type = QMI_OPT_FLAG, 1204 + .elem_len = 1, 1205 + .elem_size = sizeof(u8), 1206 + .array_type = NO_ARRAY, 1207 + .tlv_type = 0x11, 1208 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1209 + total_size_valid), 1210 + }, 1211 + { 1212 + .data_type = QMI_UNSIGNED_4_BYTE, 1213 + .elem_len = 1, 1214 + .elem_size = sizeof(u32), 1215 + .array_type = NO_ARRAY, 1216 + .tlv_type = 0x11, 1217 + .offset = offsetof(struct 
wlfw_cal_download_req_msg_v01, 1218 + total_size), 1219 + }, 1220 + { 1221 + .data_type = QMI_OPT_FLAG, 1222 + .elem_len = 1, 1223 + .elem_size = sizeof(u8), 1224 + .array_type = NO_ARRAY, 1225 + .tlv_type = 0x12, 1226 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1227 + seg_id_valid), 1228 + }, 1229 + { 1230 + .data_type = QMI_UNSIGNED_4_BYTE, 1231 + .elem_len = 1, 1232 + .elem_size = sizeof(u32), 1233 + .array_type = NO_ARRAY, 1234 + .tlv_type = 0x12, 1235 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1236 + seg_id), 1237 + }, 1238 + { 1239 + .data_type = QMI_OPT_FLAG, 1240 + .elem_len = 1, 1241 + .elem_size = sizeof(u8), 1242 + .array_type = NO_ARRAY, 1243 + .tlv_type = 0x13, 1244 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1245 + data_valid), 1246 + }, 1247 + { 1248 + .data_type = QMI_DATA_LEN, 1249 + .elem_len = 1, 1250 + .elem_size = sizeof(u16), 1251 + .array_type = NO_ARRAY, 1252 + .tlv_type = 0x13, 1253 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1254 + data_len), 1255 + }, 1256 + { 1257 + .data_type = QMI_UNSIGNED_1_BYTE, 1258 + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, 1259 + .elem_size = sizeof(u8), 1260 + .array_type = VAR_LEN_ARRAY, 1261 + .tlv_type = 0x13, 1262 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1263 + data), 1264 + }, 1265 + { 1266 + .data_type = QMI_OPT_FLAG, 1267 + .elem_len = 1, 1268 + .elem_size = sizeof(u8), 1269 + .array_type = NO_ARRAY, 1270 + .tlv_type = 0x14, 1271 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1272 + end_valid), 1273 + }, 1274 + { 1275 + .data_type = QMI_UNSIGNED_1_BYTE, 1276 + .elem_len = 1, 1277 + .elem_size = sizeof(u8), 1278 + .array_type = NO_ARRAY, 1279 + .tlv_type = 0x14, 1280 + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, 1281 + end), 1282 + }, 1283 + {} 1284 + }; 1285 + 1286 + struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = { 1287 + { 1288 + .data_type = QMI_STRUCT, 1289 + .elem_len = 1, 1290 + 
.elem_size = sizeof(struct qmi_response_type_v01), 1291 + .array_type = NO_ARRAY, 1292 + .tlv_type = 0x02, 1293 + .offset = offsetof(struct wlfw_cal_download_resp_msg_v01, 1294 + resp), 1295 + .ei_array = qmi_response_type_v01_ei, 1296 + }, 1297 + {} 1298 + }; 1299 + 1300 + struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { 1301 + { 1302 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1303 + .elem_len = 1, 1304 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1305 + .array_type = NO_ARRAY, 1306 + .tlv_type = 0x01, 1307 + .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01, 1308 + cal_id), 1309 + }, 1310 + { 1311 + .data_type = QMI_UNSIGNED_4_BYTE, 1312 + .elem_len = 1, 1313 + .elem_size = sizeof(u32), 1314 + .array_type = NO_ARRAY, 1315 + .tlv_type = 0x02, 1316 + .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01, 1317 + total_size), 1318 + }, 1319 + {} 1320 + }; 1321 + 1322 + struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = { 1323 + { 1324 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1325 + .elem_len = 1, 1326 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1327 + .array_type = NO_ARRAY, 1328 + .tlv_type = 0x01, 1329 + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, 1330 + cal_id), 1331 + }, 1332 + { 1333 + .data_type = QMI_UNSIGNED_4_BYTE, 1334 + .elem_len = 1, 1335 + .elem_size = sizeof(u32), 1336 + .array_type = NO_ARRAY, 1337 + .tlv_type = 0x02, 1338 + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, 1339 + seg_id), 1340 + }, 1341 + {} 1342 + }; 1343 + 1344 + struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = { 1345 + { 1346 + .data_type = QMI_STRUCT, 1347 + .elem_len = 1, 1348 + .elem_size = sizeof(struct qmi_response_type_v01), 1349 + .array_type = NO_ARRAY, 1350 + .tlv_type = 0x02, 1351 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1352 + resp), 1353 + .ei_array = qmi_response_type_v01_ei, 1354 + }, 1355 + { 1356 + .data_type = QMI_OPT_FLAG, 1357 + .elem_len = 1, 1358 + .elem_size = 
sizeof(u8), 1359 + .array_type = NO_ARRAY, 1360 + .tlv_type = 0x10, 1361 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1362 + file_id_valid), 1363 + }, 1364 + { 1365 + .data_type = QMI_SIGNED_4_BYTE_ENUM, 1366 + .elem_len = 1, 1367 + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), 1368 + .array_type = NO_ARRAY, 1369 + .tlv_type = 0x10, 1370 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1371 + file_id), 1372 + }, 1373 + { 1374 + .data_type = QMI_OPT_FLAG, 1375 + .elem_len = 1, 1376 + .elem_size = sizeof(u8), 1377 + .array_type = NO_ARRAY, 1378 + .tlv_type = 0x11, 1379 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1380 + total_size_valid), 1381 + }, 1382 + { 1383 + .data_type = QMI_UNSIGNED_4_BYTE, 1384 + .elem_len = 1, 1385 + .elem_size = sizeof(u32), 1386 + .array_type = NO_ARRAY, 1387 + .tlv_type = 0x11, 1388 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1389 + total_size), 1390 + }, 1391 + { 1392 + .data_type = QMI_OPT_FLAG, 1393 + .elem_len = 1, 1394 + .elem_size = sizeof(u8), 1395 + .array_type = NO_ARRAY, 1396 + .tlv_type = 0x12, 1397 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1398 + seg_id_valid), 1399 + }, 1400 + { 1401 + .data_type = QMI_UNSIGNED_4_BYTE, 1402 + .elem_len = 1, 1403 + .elem_size = sizeof(u32), 1404 + .array_type = NO_ARRAY, 1405 + .tlv_type = 0x12, 1406 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1407 + seg_id), 1408 + }, 1409 + { 1410 + .data_type = QMI_OPT_FLAG, 1411 + .elem_len = 1, 1412 + .elem_size = sizeof(u8), 1413 + .array_type = NO_ARRAY, 1414 + .tlv_type = 0x13, 1415 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1416 + data_valid), 1417 + }, 1418 + { 1419 + .data_type = QMI_DATA_LEN, 1420 + .elem_len = 1, 1421 + .elem_size = sizeof(u16), 1422 + .array_type = NO_ARRAY, 1423 + .tlv_type = 0x13, 1424 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1425 + data_len), 1426 + }, 1427 + { 1428 + .data_type = QMI_UNSIGNED_1_BYTE, 1429 + 
.elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, 1430 + .elem_size = sizeof(u8), 1431 + .array_type = VAR_LEN_ARRAY, 1432 + .tlv_type = 0x13, 1433 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1434 + data), 1435 + }, 1436 + { 1437 + .data_type = QMI_OPT_FLAG, 1438 + .elem_len = 1, 1439 + .elem_size = sizeof(u8), 1440 + .array_type = NO_ARRAY, 1441 + .tlv_type = 0x14, 1442 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1443 + end_valid), 1444 + }, 1445 + { 1446 + .data_type = QMI_UNSIGNED_1_BYTE, 1447 + .elem_len = 1, 1448 + .elem_size = sizeof(u8), 1449 + .array_type = NO_ARRAY, 1450 + .tlv_type = 0x14, 1451 + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, 1452 + end), 1453 + }, 1454 + {} 1455 + }; 1456 + 1457 + struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = { 1458 + { 1459 + .data_type = QMI_UNSIGNED_8_BYTE, 1460 + .elem_len = 1, 1461 + .elem_size = sizeof(u64), 1462 + .array_type = NO_ARRAY, 1463 + .tlv_type = 0x01, 1464 + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, 1465 + msa_addr), 1466 + }, 1467 + { 1468 + .data_type = QMI_UNSIGNED_4_BYTE, 1469 + .elem_len = 1, 1470 + .elem_size = sizeof(u32), 1471 + .array_type = NO_ARRAY, 1472 + .tlv_type = 0x02, 1473 + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, 1474 + size), 1475 + }, 1476 + {} 1477 + }; 1478 + 1479 + struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = { 1480 + { 1481 + .data_type = QMI_STRUCT, 1482 + .elem_len = 1, 1483 + .elem_size = sizeof(struct qmi_response_type_v01), 1484 + .array_type = NO_ARRAY, 1485 + .tlv_type = 0x02, 1486 + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, 1487 + resp), 1488 + .ei_array = qmi_response_type_v01_ei, 1489 + }, 1490 + { 1491 + .data_type = QMI_DATA_LEN, 1492 + .elem_len = 1, 1493 + .elem_size = sizeof(u8), 1494 + .array_type = NO_ARRAY, 1495 + .tlv_type = 0x03, 1496 + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, 1497 + mem_region_info_len), 1498 + }, 1499 + { 1500 + .data_type = QMI_STRUCT, 1501 + 
.elem_len = QMI_WLFW_MAX_MEM_REG_V01, 1502 + .elem_size = sizeof(struct wlfw_memory_region_info_s_v01), 1503 + .array_type = VAR_LEN_ARRAY, 1504 + .tlv_type = 0x03, 1505 + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, 1506 + mem_region_info), 1507 + .ei_array = wlfw_memory_region_info_s_v01_ei, 1508 + }, 1509 + {} 1510 + }; 1511 + 1512 + struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = { 1513 + {} 1514 + }; 1515 + 1516 + struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { 1517 + { 1518 + .data_type = QMI_STRUCT, 1519 + .elem_len = 1, 1520 + .elem_size = sizeof(struct qmi_response_type_v01), 1521 + .array_type = NO_ARRAY, 1522 + .tlv_type = 0x02, 1523 + .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01, 1524 + resp), 1525 + .ei_array = qmi_response_type_v01_ei, 1526 + }, 1527 + {} 1528 + }; 1529 + 1530 + struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = { 1531 + { 1532 + .data_type = QMI_OPT_FLAG, 1533 + .elem_len = 1, 1534 + .elem_size = sizeof(u8), 1535 + .array_type = NO_ARRAY, 1536 + .tlv_type = 0x10, 1537 + .offset = offsetof(struct wlfw_ini_req_msg_v01, 1538 + enablefwlog_valid), 1539 + }, 1540 + { 1541 + .data_type = QMI_UNSIGNED_1_BYTE, 1542 + .elem_len = 1, 1543 + .elem_size = sizeof(u8), 1544 + .array_type = NO_ARRAY, 1545 + .tlv_type = 0x10, 1546 + .offset = offsetof(struct wlfw_ini_req_msg_v01, 1547 + enablefwlog), 1548 + }, 1549 + {} 1550 + }; 1551 + 1552 + struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = { 1553 + { 1554 + .data_type = QMI_STRUCT, 1555 + .elem_len = 1, 1556 + .elem_size = sizeof(struct qmi_response_type_v01), 1557 + .array_type = NO_ARRAY, 1558 + .tlv_type = 0x02, 1559 + .offset = offsetof(struct wlfw_ini_resp_msg_v01, 1560 + resp), 1561 + .ei_array = qmi_response_type_v01_ei, 1562 + }, 1563 + {} 1564 + }; 1565 + 1566 + struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { 1567 + { 1568 + .data_type = QMI_UNSIGNED_4_BYTE, 1569 + .elem_len = 1, 1570 + .elem_size = sizeof(u32), 1571 + .array_type = NO_ARRAY, 
1572 + .tlv_type = 0x01, 1573 + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, 1574 + offset), 1575 + }, 1576 + { 1577 + .data_type = QMI_UNSIGNED_4_BYTE, 1578 + .elem_len = 1, 1579 + .elem_size = sizeof(u32), 1580 + .array_type = NO_ARRAY, 1581 + .tlv_type = 0x02, 1582 + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, 1583 + mem_type), 1584 + }, 1585 + { 1586 + .data_type = QMI_UNSIGNED_4_BYTE, 1587 + .elem_len = 1, 1588 + .elem_size = sizeof(u32), 1589 + .array_type = NO_ARRAY, 1590 + .tlv_type = 0x03, 1591 + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, 1592 + data_len), 1593 + }, 1594 + {} 1595 + }; 1596 + 1597 + struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { 1598 + { 1599 + .data_type = QMI_STRUCT, 1600 + .elem_len = 1, 1601 + .elem_size = sizeof(struct qmi_response_type_v01), 1602 + .array_type = NO_ARRAY, 1603 + .tlv_type = 0x02, 1604 + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, 1605 + resp), 1606 + .ei_array = qmi_response_type_v01_ei, 1607 + }, 1608 + { 1609 + .data_type = QMI_OPT_FLAG, 1610 + .elem_len = 1, 1611 + .elem_size = sizeof(u8), 1612 + .array_type = NO_ARRAY, 1613 + .tlv_type = 0x10, 1614 + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, 1615 + data_valid), 1616 + }, 1617 + { 1618 + .data_type = QMI_DATA_LEN, 1619 + .elem_len = 1, 1620 + .elem_size = sizeof(u16), 1621 + .array_type = NO_ARRAY, 1622 + .tlv_type = 0x10, 1623 + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, 1624 + data_len), 1625 + }, 1626 + { 1627 + .data_type = QMI_UNSIGNED_1_BYTE, 1628 + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, 1629 + .elem_size = sizeof(u8), 1630 + .array_type = VAR_LEN_ARRAY, 1631 + .tlv_type = 0x10, 1632 + .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01, 1633 + data), 1634 + }, 1635 + {} 1636 + }; 1637 + 1638 + struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { 1639 + { 1640 + .data_type = QMI_UNSIGNED_4_BYTE, 1641 + .elem_len = 1, 1642 + .elem_size = 
sizeof(u32), 1643 + .array_type = NO_ARRAY, 1644 + .tlv_type = 0x01, 1645 + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, 1646 + offset), 1647 + }, 1648 + { 1649 + .data_type = QMI_UNSIGNED_4_BYTE, 1650 + .elem_len = 1, 1651 + .elem_size = sizeof(u32), 1652 + .array_type = NO_ARRAY, 1653 + .tlv_type = 0x02, 1654 + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, 1655 + mem_type), 1656 + }, 1657 + { 1658 + .data_type = QMI_DATA_LEN, 1659 + .elem_len = 1, 1660 + .elem_size = sizeof(u16), 1661 + .array_type = NO_ARRAY, 1662 + .tlv_type = 0x03, 1663 + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, 1664 + data_len), 1665 + }, 1666 + { 1667 + .data_type = QMI_UNSIGNED_1_BYTE, 1668 + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, 1669 + .elem_size = sizeof(u8), 1670 + .array_type = VAR_LEN_ARRAY, 1671 + .tlv_type = 0x03, 1672 + .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01, 1673 + data), 1674 + }, 1675 + {} 1676 + }; 1677 + 1678 + struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { 1679 + { 1680 + .data_type = QMI_STRUCT, 1681 + .elem_len = 1, 1682 + .elem_size = sizeof(struct qmi_response_type_v01), 1683 + .array_type = NO_ARRAY, 1684 + .tlv_type = 0x02, 1685 + .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01, 1686 + resp), 1687 + .ei_array = qmi_response_type_v01_ei, 1688 + }, 1689 + {} 1690 + }; 1691 + 1692 + struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = { 1693 + { 1694 + .data_type = QMI_UNSIGNED_8_BYTE, 1695 + .elem_len = 1, 1696 + .elem_size = sizeof(u64), 1697 + .array_type = NO_ARRAY, 1698 + .tlv_type = 0x01, 1699 + .offset = offsetof(struct wlfw_vbatt_req_msg_v01, 1700 + voltage_uv), 1701 + }, 1702 + {} 1703 + }; 1704 + 1705 + struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = { 1706 + { 1707 + .data_type = QMI_STRUCT, 1708 + .elem_len = 1, 1709 + .elem_size = sizeof(struct qmi_response_type_v01), 1710 + .array_type = NO_ARRAY, 1711 + .tlv_type = 0x02, 1712 + .offset = offsetof(struct 
wlfw_vbatt_resp_msg_v01, 1713 + resp), 1714 + .ei_array = qmi_response_type_v01_ei, 1715 + }, 1716 + {} 1717 + }; 1718 + 1719 + struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = { 1720 + { 1721 + .data_type = QMI_OPT_FLAG, 1722 + .elem_len = 1, 1723 + .elem_size = sizeof(u8), 1724 + .array_type = NO_ARRAY, 1725 + .tlv_type = 0x10, 1726 + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, 1727 + mac_addr_valid), 1728 + }, 1729 + { 1730 + .data_type = QMI_UNSIGNED_1_BYTE, 1731 + .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01, 1732 + .elem_size = sizeof(u8), 1733 + .array_type = STATIC_ARRAY, 1734 + .tlv_type = 0x10, 1735 + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, 1736 + mac_addr), 1737 + }, 1738 + {} 1739 + }; 1740 + 1741 + struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { 1742 + { 1743 + .data_type = QMI_STRUCT, 1744 + .elem_len = 1, 1745 + .elem_size = sizeof(struct qmi_response_type_v01), 1746 + .array_type = NO_ARRAY, 1747 + .tlv_type = 0x02, 1748 + .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01, 1749 + resp), 1750 + .ei_array = qmi_response_type_v01_ei, 1751 + }, 1752 + {} 1753 + }; 1754 + 1755 + struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { 1756 + { 1757 + .data_type = QMI_OPT_FLAG, 1758 + .elem_len = 1, 1759 + .elem_size = sizeof(u8), 1760 + .array_type = NO_ARRAY, 1761 + .tlv_type = 0x10, 1762 + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, 1763 + daemon_support_valid), 1764 + }, 1765 + { 1766 + .data_type = QMI_UNSIGNED_1_BYTE, 1767 + .elem_len = 1, 1768 + .elem_size = sizeof(u8), 1769 + .array_type = NO_ARRAY, 1770 + .tlv_type = 0x10, 1771 + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, 1772 + daemon_support), 1773 + }, 1774 + {} 1775 + }; 1776 + 1777 + struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = { 1778 + { 1779 + .data_type = QMI_STRUCT, 1780 + .elem_len = 1, 1781 + .elem_size = sizeof(struct qmi_response_type_v01), 1782 + .array_type = NO_ARRAY, 1783 + .tlv_type = 0x02, 1784 + .offset = 
offsetof(struct wlfw_host_cap_resp_msg_v01, 1785 + resp), 1786 + .ei_array = qmi_response_type_v01_ei, 1787 + }, 1788 + {} 1789 + }; 1790 + 1791 + struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = { 1792 + { 1793 + .data_type = QMI_DATA_LEN, 1794 + .elem_len = 1, 1795 + .elem_size = sizeof(u8), 1796 + .array_type = NO_ARRAY, 1797 + .tlv_type = 0x01, 1798 + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, 1799 + mem_seg_len), 1800 + }, 1801 + { 1802 + .data_type = QMI_STRUCT, 1803 + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, 1804 + .elem_size = sizeof(struct wlfw_mem_seg_s_v01), 1805 + .array_type = VAR_LEN_ARRAY, 1806 + .tlv_type = 0x01, 1807 + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, 1808 + mem_seg), 1809 + .ei_array = wlfw_mem_seg_s_v01_ei, 1810 + }, 1811 + {} 1812 + }; 1813 + 1814 + struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = { 1815 + { 1816 + .data_type = QMI_DATA_LEN, 1817 + .elem_len = 1, 1818 + .elem_size = sizeof(u8), 1819 + .array_type = NO_ARRAY, 1820 + .tlv_type = 0x01, 1821 + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, 1822 + mem_seg_len), 1823 + }, 1824 + { 1825 + .data_type = QMI_STRUCT, 1826 + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, 1827 + .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01), 1828 + .array_type = VAR_LEN_ARRAY, 1829 + .tlv_type = 0x01, 1830 + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, 1831 + mem_seg), 1832 + .ei_array = wlfw_mem_seg_resp_s_v01_ei, 1833 + }, 1834 + {} 1835 + }; 1836 + 1837 + struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { 1838 + { 1839 + .data_type = QMI_STRUCT, 1840 + .elem_len = 1, 1841 + .elem_size = sizeof(struct qmi_response_type_v01), 1842 + .array_type = NO_ARRAY, 1843 + .tlv_type = 0x02, 1844 + .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01, 1845 + resp), 1846 + .ei_array = qmi_response_type_v01_ei, 1847 + }, 1848 + {} 1849 + }; 1850 + 1851 + struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = { 1852 + {} 1853 + }; 1854 
+ 1855 + struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { 1856 + {} 1857 + }; 1858 + 1859 + struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { 1860 + { 1861 + .data_type = QMI_OPT_FLAG, 1862 + .elem_len = 1, 1863 + .elem_size = sizeof(u8), 1864 + .array_type = NO_ARRAY, 1865 + .tlv_type = 0x10, 1866 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1867 + cause_for_rejuvenation_valid), 1868 + }, 1869 + { 1870 + .data_type = QMI_UNSIGNED_1_BYTE, 1871 + .elem_len = 1, 1872 + .elem_size = sizeof(u8), 1873 + .array_type = NO_ARRAY, 1874 + .tlv_type = 0x10, 1875 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1876 + cause_for_rejuvenation), 1877 + }, 1878 + { 1879 + .data_type = QMI_OPT_FLAG, 1880 + .elem_len = 1, 1881 + .elem_size = sizeof(u8), 1882 + .array_type = NO_ARRAY, 1883 + .tlv_type = 0x11, 1884 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1885 + requesting_sub_system_valid), 1886 + }, 1887 + { 1888 + .data_type = QMI_UNSIGNED_1_BYTE, 1889 + .elem_len = 1, 1890 + .elem_size = sizeof(u8), 1891 + .array_type = NO_ARRAY, 1892 + .tlv_type = 0x11, 1893 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1894 + requesting_sub_system), 1895 + }, 1896 + { 1897 + .data_type = QMI_OPT_FLAG, 1898 + .elem_len = 1, 1899 + .elem_size = sizeof(u8), 1900 + .array_type = NO_ARRAY, 1901 + .tlv_type = 0x12, 1902 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1903 + line_number_valid), 1904 + }, 1905 + { 1906 + .data_type = QMI_UNSIGNED_2_BYTE, 1907 + .elem_len = 1, 1908 + .elem_size = sizeof(u16), 1909 + .array_type = NO_ARRAY, 1910 + .tlv_type = 0x12, 1911 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1912 + line_number), 1913 + }, 1914 + { 1915 + .data_type = QMI_OPT_FLAG, 1916 + .elem_len = 1, 1917 + .elem_size = sizeof(u8), 1918 + .array_type = NO_ARRAY, 1919 + .tlv_type = 0x13, 1920 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1921 + function_name_valid), 1922 + }, 1923 + { 1924 + .data_type 
= QMI_STRING, 1925 + .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1, 1926 + .elem_size = sizeof(char), 1927 + .array_type = NO_ARRAY, 1928 + .tlv_type = 0x13, 1929 + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, 1930 + function_name), 1931 + }, 1932 + {} 1933 + }; 1934 + 1935 + struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { 1936 + {} 1937 + }; 1938 + 1939 + struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { 1940 + { 1941 + .data_type = QMI_STRUCT, 1942 + .elem_len = 1, 1943 + .elem_size = sizeof(struct qmi_response_type_v01), 1944 + .array_type = NO_ARRAY, 1945 + .tlv_type = 0x02, 1946 + .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01, 1947 + resp), 1948 + .ei_array = qmi_response_type_v01_ei, 1949 + }, 1950 + {} 1951 + }; 1952 + 1953 + struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { 1954 + { 1955 + .data_type = QMI_OPT_FLAG, 1956 + .elem_len = 1, 1957 + .elem_size = sizeof(u8), 1958 + .array_type = NO_ARRAY, 1959 + .tlv_type = 0x10, 1960 + .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, 1961 + mask_valid), 1962 + }, 1963 + { 1964 + .data_type = QMI_UNSIGNED_8_BYTE, 1965 + .elem_len = 1, 1966 + .elem_size = sizeof(u64), 1967 + .array_type = NO_ARRAY, 1968 + .tlv_type = 0x10, 1969 + .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01, 1970 + mask), 1971 + }, 1972 + {} 1973 + }; 1974 + 1975 + struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { 1976 + { 1977 + .data_type = QMI_STRUCT, 1978 + .elem_len = 1, 1979 + .elem_size = sizeof(struct qmi_response_type_v01), 1980 + .array_type = NO_ARRAY, 1981 + .tlv_type = 0x02, 1982 + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, 1983 + resp), 1984 + .ei_array = qmi_response_type_v01_ei, 1985 + }, 1986 + { 1987 + .data_type = QMI_OPT_FLAG, 1988 + .elem_len = 1, 1989 + .elem_size = sizeof(u8), 1990 + .array_type = NO_ARRAY, 1991 + .tlv_type = 0x10, 1992 + .offset = offsetof(struct 
wlfw_dynamic_feature_mask_resp_msg_v01, 1993 + prev_mask_valid), 1994 + }, 1995 + { 1996 + .data_type = QMI_UNSIGNED_8_BYTE, 1997 + .elem_len = 1, 1998 + .elem_size = sizeof(u64), 1999 + .array_type = NO_ARRAY, 2000 + .tlv_type = 0x10, 2001 + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, 2002 + prev_mask), 2003 + }, 2004 + { 2005 + .data_type = QMI_OPT_FLAG, 2006 + .elem_len = 1, 2007 + .elem_size = sizeof(u8), 2008 + .array_type = NO_ARRAY, 2009 + .tlv_type = 0x11, 2010 + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, 2011 + curr_mask_valid), 2012 + }, 2013 + { 2014 + .data_type = QMI_UNSIGNED_8_BYTE, 2015 + .elem_len = 1, 2016 + .elem_size = sizeof(u64), 2017 + .array_type = NO_ARRAY, 2018 + .tlv_type = 0x11, 2019 + .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01, 2020 + curr_mask), 2021 + }, 2022 + {} 2023 + }; 2024 + 2025 + struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = { 2026 + { 2027 + .data_type = QMI_UNSIGNED_8_BYTE, 2028 + .elem_len = 1, 2029 + .elem_size = sizeof(u64), 2030 + .array_type = NO_ARRAY, 2031 + .tlv_type = 0x01, 2032 + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, 2033 + addr), 2034 + }, 2035 + { 2036 + .data_type = QMI_UNSIGNED_4_BYTE, 2037 + .elem_len = 1, 2038 + .elem_size = sizeof(u32), 2039 + .array_type = NO_ARRAY, 2040 + .tlv_type = 0x02, 2041 + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, 2042 + size), 2043 + }, 2044 + {} 2045 + }; 2046 + 2047 + struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = { 2048 + { 2049 + .data_type = QMI_STRUCT, 2050 + .elem_len = 1, 2051 + .elem_size = sizeof(struct qmi_response_type_v01), 2052 + .array_type = NO_ARRAY, 2053 + .tlv_type = 0x02, 2054 + .offset = offsetof(struct wlfw_m3_info_resp_msg_v01, 2055 + resp), 2056 + .ei_array = qmi_response_type_v01_ei, 2057 + }, 2058 + {} 2059 + }; 2060 + 2061 + struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { 2062 + { 2063 + .data_type = QMI_UNSIGNED_1_BYTE, 2064 + .elem_len = 1, 2065 
+ .elem_size = sizeof(u8), 2066 + .array_type = NO_ARRAY, 2067 + .tlv_type = 0x01, 2068 + .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01, 2069 + xo_cal_data), 2070 + }, 2071 + {} 2072 + };
+677
drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
··· 1 + /* 2 + * Copyright (c) 2018 The Linux Foundation. All rights reserved. 3 + * 4 + * Permission to use, copy, modify, and/or distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + 17 + #ifndef WCN3990_QMI_SVC_V01_H 18 + #define WCN3990_QMI_SVC_V01_H 19 + 20 + #define WLFW_SERVICE_ID_V01 0x45 21 + #define WLFW_SERVICE_VERS_V01 0x01 22 + 23 + #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 24 + #define QMI_WLFW_MEM_READY_IND_V01 0x0037 25 + #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B 26 + #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A 27 + #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 28 + #define QMI_WLFW_M3_INFO_REQ_V01 0x003C 29 + #define QMI_WLFW_CAP_REQ_V01 0x0024 30 + #define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038 31 + #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 32 + #define QMI_WLFW_M3_INFO_RESP_V01 0x003C 33 + #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 34 + #define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 35 + #define QMI_WLFW_XO_CAL_IND_V01 0x003D 36 + #define QMI_WLFW_INI_RESP_V01 0x002F 37 + #define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 38 + #define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 39 + #define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 40 + #define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 41 + #define QMI_WLFW_MSA_READY_IND_V01 0x002B 42 + #define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031 
43 + #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 44 + #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 45 + #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 46 + #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 47 + #define QMI_WLFW_REJUVENATE_IND_V01 0x0039 48 + #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B 49 + #define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 50 + #define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022 51 + #define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036 52 + #define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C 53 + #define QMI_WLFW_FW_READY_IND_V01 0x0021 54 + #define QMI_WLFW_MSA_READY_RESP_V01 0x002E 55 + #define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029 56 + #define QMI_WLFW_INI_REQ_V01 0x002F 57 + #define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025 58 + #define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A 59 + #define QMI_WLFW_MSA_INFO_RESP_V01 0x002D 60 + #define QMI_WLFW_MSA_READY_REQ_V01 0x002E 61 + #define QMI_WLFW_CAP_RESP_V01 0x0024 62 + #define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A 63 + #define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 64 + #define QMI_WLFW_VBATT_REQ_V01 0x0032 65 + #define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 66 + #define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 67 + #define QMI_WLFW_VBATT_RESP_V01 0x0032 68 + #define QMI_WLFW_MSA_INFO_REQ_V01 0x002D 69 + #define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027 70 + #define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030 71 + #define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023 72 + #define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 73 + 74 + #define QMI_WLFW_MAX_MEM_REG_V01 2 75 + #define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16 76 + #define QMI_WLFW_MAX_NUM_CAL_V01 5 77 + #define QMI_WLFW_MAX_DATA_SIZE_V01 6144 78 + #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 79 + #define QMI_WLFW_MAX_NUM_CE_V01 12 80 + #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 81 + #define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144 82 + #define QMI_WLFW_MAX_NUM_GPIO_V01 32 83 + #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 84 + #define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2 85 + #define QMI_WLFW_MAX_STR_LEN_V01 16 86 + 
#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 87 + #define QMI_WLFW_MAC_ADDR_SIZE_V01 6 88 + #define QMI_WLFW_MAX_SHADOW_REG_V2 36 89 + #define QMI_WLFW_MAX_NUM_SVC_V01 24 90 + 91 + enum wlfw_driver_mode_enum_v01 { 92 + QMI_WLFW_MISSION_V01 = 0, 93 + QMI_WLFW_FTM_V01 = 1, 94 + QMI_WLFW_EPPING_V01 = 2, 95 + QMI_WLFW_WALTEST_V01 = 3, 96 + QMI_WLFW_OFF_V01 = 4, 97 + QMI_WLFW_CCPM_V01 = 5, 98 + QMI_WLFW_QVIT_V01 = 6, 99 + QMI_WLFW_CALIBRATION_V01 = 7, 100 + }; 101 + 102 + enum wlfw_cal_temp_id_enum_v01 { 103 + QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0, 104 + QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1, 105 + QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2, 106 + QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3, 107 + QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4, 108 + }; 109 + 110 + enum wlfw_pipedir_enum_v01 { 111 + QMI_WLFW_PIPEDIR_NONE_V01 = 0, 112 + QMI_WLFW_PIPEDIR_IN_V01 = 1, 113 + QMI_WLFW_PIPEDIR_OUT_V01 = 2, 114 + QMI_WLFW_PIPEDIR_INOUT_V01 = 3, 115 + }; 116 + 117 + enum wlfw_mem_type_enum_v01 { 118 + QMI_WLFW_MEM_TYPE_MSA_V01 = 0, 119 + QMI_WLFW_MEM_TYPE_DDR_V01 = 1, 120 + }; 121 + 122 + #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) 123 + #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) 124 + #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) 125 + #define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04) 126 + #define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08) 127 + #define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10) 128 + 129 + #define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL) 130 + #define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL) 131 + #define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL) 132 + #define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL) 133 + #define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL) 134 + 135 + #define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL) 136 + 137 + struct wlfw_ce_tgt_pipe_cfg_s_v01 { 138 + __le32 pipe_num; 139 + __le32 pipe_dir; 140 + __le32 nentries; 141 + __le32 nbytes_max; 142 + __le32 flags; 143 + }; 144 + 145 + struct wlfw_ce_svc_pipe_cfg_s_v01 { 146 + __le32 service_id; 147 + __le32 pipe_dir; 
148 + __le32 pipe_num; 149 + }; 150 + 151 + struct wlfw_shadow_reg_cfg_s_v01 { 152 + u16 id; 153 + u16 offset; 154 + }; 155 + 156 + struct wlfw_shadow_reg_v2_cfg_s_v01 { 157 + u32 addr; 158 + }; 159 + 160 + struct wlfw_memory_region_info_s_v01 { 161 + u64 region_addr; 162 + u32 size; 163 + u8 secure_flag; 164 + }; 165 + 166 + struct wlfw_mem_cfg_s_v01 { 167 + u64 offset; 168 + u32 size; 169 + u8 secure_flag; 170 + }; 171 + 172 + struct wlfw_mem_seg_s_v01 { 173 + u32 size; 174 + enum wlfw_mem_type_enum_v01 type; 175 + u32 mem_cfg_len; 176 + struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01]; 177 + }; 178 + 179 + struct wlfw_mem_seg_resp_s_v01 { 180 + u64 addr; 181 + u32 size; 182 + enum wlfw_mem_type_enum_v01 type; 183 + }; 184 + 185 + struct wlfw_rf_chip_info_s_v01 { 186 + u32 chip_id; 187 + u32 chip_family; 188 + }; 189 + 190 + struct wlfw_rf_board_info_s_v01 { 191 + u32 board_id; 192 + }; 193 + 194 + struct wlfw_soc_info_s_v01 { 195 + u32 soc_id; 196 + }; 197 + 198 + struct wlfw_fw_version_info_s_v01 { 199 + u32 fw_version; 200 + char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1]; 201 + }; 202 + 203 + struct wlfw_ind_register_req_msg_v01 { 204 + u8 fw_ready_enable_valid; 205 + u8 fw_ready_enable; 206 + u8 initiate_cal_download_enable_valid; 207 + u8 initiate_cal_download_enable; 208 + u8 initiate_cal_update_enable_valid; 209 + u8 initiate_cal_update_enable; 210 + u8 msa_ready_enable_valid; 211 + u8 msa_ready_enable; 212 + u8 pin_connect_result_enable_valid; 213 + u8 pin_connect_result_enable; 214 + u8 client_id_valid; 215 + u32 client_id; 216 + u8 request_mem_enable_valid; 217 + u8 request_mem_enable; 218 + u8 mem_ready_enable_valid; 219 + u8 mem_ready_enable; 220 + u8 fw_init_done_enable_valid; 221 + u8 fw_init_done_enable; 222 + u8 rejuvenate_enable_valid; 223 + u32 rejuvenate_enable; 224 + u8 xo_cal_enable_valid; 225 + u8 xo_cal_enable; 226 + }; 227 + 228 + #define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50 229 + extern struct 
qmi_elem_info wlfw_ind_register_req_msg_v01_ei[]; 230 + 231 + struct wlfw_ind_register_resp_msg_v01 { 232 + struct qmi_response_type_v01 resp; 233 + u8 fw_status_valid; 234 + u64 fw_status; 235 + }; 236 + 237 + #define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18 238 + extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[]; 239 + 240 + struct wlfw_fw_ready_ind_msg_v01 { 241 + char placeholder; 242 + }; 243 + 244 + #define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0 245 + extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[]; 246 + 247 + struct wlfw_msa_ready_ind_msg_v01 { 248 + char placeholder; 249 + }; 250 + 251 + #define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0 252 + extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[]; 253 + 254 + struct wlfw_pin_connect_result_ind_msg_v01 { 255 + u8 pwr_pin_result_valid; 256 + u32 pwr_pin_result; 257 + u8 phy_io_pin_result_valid; 258 + u32 phy_io_pin_result; 259 + u8 rf_pin_result_valid; 260 + u32 rf_pin_result; 261 + }; 262 + 263 + #define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21 264 + extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[]; 265 + 266 + struct wlfw_wlan_mode_req_msg_v01 { 267 + enum wlfw_driver_mode_enum_v01 mode; 268 + u8 hw_debug_valid; 269 + u8 hw_debug; 270 + }; 271 + 272 + #define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11 273 + extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[]; 274 + 275 + struct wlfw_wlan_mode_resp_msg_v01 { 276 + struct qmi_response_type_v01 resp; 277 + }; 278 + 279 + #define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7 280 + extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[]; 281 + 282 + struct wlfw_wlan_cfg_req_msg_v01 { 283 + u8 host_version_valid; 284 + char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1]; 285 + u8 tgt_cfg_valid; 286 + u32 tgt_cfg_len; 287 + struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01]; 288 + u8 svc_cfg_valid; 289 + u32 svc_cfg_len; 290 + struct wlfw_ce_svc_pipe_cfg_s_v01 
svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01]; 291 + u8 shadow_reg_valid; 292 + u32 shadow_reg_len; 293 + struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01]; 294 + u8 shadow_reg_v2_valid; 295 + u32 shadow_reg_v2_len; 296 + struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2]; 297 + }; 298 + 299 + #define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803 300 + extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[]; 301 + 302 + struct wlfw_wlan_cfg_resp_msg_v01 { 303 + struct qmi_response_type_v01 resp; 304 + }; 305 + 306 + #define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7 307 + extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[]; 308 + 309 + struct wlfw_cap_req_msg_v01 { 310 + char placeholder; 311 + }; 312 + 313 + #define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0 314 + extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[]; 315 + 316 + struct wlfw_cap_resp_msg_v01 { 317 + struct qmi_response_type_v01 resp; 318 + u8 chip_info_valid; 319 + struct wlfw_rf_chip_info_s_v01 chip_info; 320 + u8 board_info_valid; 321 + struct wlfw_rf_board_info_s_v01 board_info; 322 + u8 soc_info_valid; 323 + struct wlfw_soc_info_s_v01 soc_info; 324 + u8 fw_version_info_valid; 325 + struct wlfw_fw_version_info_s_v01 fw_version_info; 326 + u8 fw_build_id_valid; 327 + char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1]; 328 + u8 num_macs_valid; 329 + u8 num_macs; 330 + }; 331 + 332 + #define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207 333 + extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[]; 334 + 335 + struct wlfw_bdf_download_req_msg_v01 { 336 + u8 valid; 337 + u8 file_id_valid; 338 + enum wlfw_cal_temp_id_enum_v01 file_id; 339 + u8 total_size_valid; 340 + u32 total_size; 341 + u8 seg_id_valid; 342 + u32 seg_id; 343 + u8 data_valid; 344 + u32 data_len; 345 + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; 346 + u8 end_valid; 347 + u8 end; 348 + u8 bdf_type_valid; 349 + u8 bdf_type; 350 + }; 351 + 352 + #define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182 353 + extern 
struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[]; 354 + 355 + struct wlfw_bdf_download_resp_msg_v01 { 356 + struct qmi_response_type_v01 resp; 357 + }; 358 + 359 + #define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 360 + extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[]; 361 + 362 + struct wlfw_cal_report_req_msg_v01 { 363 + u32 meta_data_len; 364 + enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01]; 365 + u8 xo_cal_data_valid; 366 + u8 xo_cal_data; 367 + }; 368 + 369 + #define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28 370 + extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[]; 371 + 372 + struct wlfw_cal_report_resp_msg_v01 { 373 + struct qmi_response_type_v01 resp; 374 + }; 375 + 376 + #define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7 377 + extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[]; 378 + 379 + struct wlfw_initiate_cal_download_ind_msg_v01 { 380 + enum wlfw_cal_temp_id_enum_v01 cal_id; 381 + }; 382 + 383 + #define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7 384 + extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[]; 385 + 386 + struct wlfw_cal_download_req_msg_v01 { 387 + u8 valid; 388 + u8 file_id_valid; 389 + enum wlfw_cal_temp_id_enum_v01 file_id; 390 + u8 total_size_valid; 391 + u32 total_size; 392 + u8 seg_id_valid; 393 + u32 seg_id; 394 + u8 data_valid; 395 + u32 data_len; 396 + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; 397 + u8 end_valid; 398 + u8 end; 399 + }; 400 + 401 + #define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178 402 + extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[]; 403 + 404 + struct wlfw_cal_download_resp_msg_v01 { 405 + struct qmi_response_type_v01 resp; 406 + }; 407 + 408 + #define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 409 + extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[]; 410 + 411 + struct wlfw_initiate_cal_update_ind_msg_v01 { 412 + enum wlfw_cal_temp_id_enum_v01 cal_id; 413 + u32 total_size; 414 + }; 415 + 
416 + #define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14 417 + extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[]; 418 + 419 + struct wlfw_cal_update_req_msg_v01 { 420 + enum wlfw_cal_temp_id_enum_v01 cal_id; 421 + u32 seg_id; 422 + }; 423 + 424 + #define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14 425 + extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[]; 426 + 427 + struct wlfw_cal_update_resp_msg_v01 { 428 + struct qmi_response_type_v01 resp; 429 + u8 file_id_valid; 430 + enum wlfw_cal_temp_id_enum_v01 file_id; 431 + u8 total_size_valid; 432 + u32 total_size; 433 + u8 seg_id_valid; 434 + u32 seg_id; 435 + u8 data_valid; 436 + u32 data_len; 437 + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; 438 + u8 end_valid; 439 + u8 end; 440 + }; 441 + 442 + #define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181 443 + extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[]; 444 + 445 + struct wlfw_msa_info_req_msg_v01 { 446 + u64 msa_addr; 447 + u32 size; 448 + }; 449 + 450 + #define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 451 + extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[]; 452 + 453 + struct wlfw_msa_info_resp_msg_v01 { 454 + struct qmi_response_type_v01 resp; 455 + u32 mem_region_info_len; 456 + struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01]; 457 + }; 458 + 459 + #define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37 460 + extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[]; 461 + 462 + struct wlfw_msa_ready_req_msg_v01 { 463 + char placeholder; 464 + }; 465 + 466 + #define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0 467 + extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[]; 468 + 469 + struct wlfw_msa_ready_resp_msg_v01 { 470 + struct qmi_response_type_v01 resp; 471 + }; 472 + 473 + #define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7 474 + extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[]; 475 + 476 + struct wlfw_ini_req_msg_v01 { 477 + u8 enablefwlog_valid; 478 + u8 
enablefwlog; 479 + }; 480 + 481 + #define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4 482 + extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[]; 483 + 484 + struct wlfw_ini_resp_msg_v01 { 485 + struct qmi_response_type_v01 resp; 486 + }; 487 + 488 + #define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7 489 + extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[]; 490 + 491 + struct wlfw_athdiag_read_req_msg_v01 { 492 + u32 offset; 493 + u32 mem_type; 494 + u32 data_len; 495 + }; 496 + 497 + #define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21 498 + extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[]; 499 + 500 + struct wlfw_athdiag_read_resp_msg_v01 { 501 + struct qmi_response_type_v01 resp; 502 + u8 data_valid; 503 + u32 data_len; 504 + u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; 505 + }; 506 + 507 + #define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156 508 + extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[]; 509 + 510 + struct wlfw_athdiag_write_req_msg_v01 { 511 + u32 offset; 512 + u32 mem_type; 513 + u32 data_len; 514 + u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; 515 + }; 516 + 517 + #define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163 518 + extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[]; 519 + 520 + struct wlfw_athdiag_write_resp_msg_v01 { 521 + struct qmi_response_type_v01 resp; 522 + }; 523 + 524 + #define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7 525 + extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[]; 526 + 527 + struct wlfw_vbatt_req_msg_v01 { 528 + u64 voltage_uv; 529 + }; 530 + 531 + #define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11 532 + extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[]; 533 + 534 + struct wlfw_vbatt_resp_msg_v01 { 535 + struct qmi_response_type_v01 resp; 536 + }; 537 + 538 + #define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7 539 + extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[]; 540 + 541 + struct wlfw_mac_addr_req_msg_v01 { 542 + u8 mac_addr_valid; 543 + u8 
mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01]; 544 + }; 545 + 546 + #define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9 547 + extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[]; 548 + 549 + struct wlfw_mac_addr_resp_msg_v01 { 550 + struct qmi_response_type_v01 resp; 551 + }; 552 + 553 + #define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7 554 + extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[]; 555 + 556 + struct wlfw_host_cap_req_msg_v01 { 557 + u8 daemon_support_valid; 558 + u8 daemon_support; 559 + }; 560 + 561 + #define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4 562 + extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; 563 + 564 + struct wlfw_host_cap_resp_msg_v01 { 565 + struct qmi_response_type_v01 resp; 566 + }; 567 + 568 + #define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7 569 + extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[]; 570 + 571 + struct wlfw_request_mem_ind_msg_v01 { 572 + u32 mem_seg_len; 573 + struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; 574 + }; 575 + 576 + #define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564 577 + extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[]; 578 + 579 + struct wlfw_respond_mem_req_msg_v01 { 580 + u32 mem_seg_len; 581 + struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; 582 + }; 583 + 584 + #define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260 585 + extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[]; 586 + 587 + struct wlfw_respond_mem_resp_msg_v01 { 588 + struct qmi_response_type_v01 resp; 589 + }; 590 + 591 + #define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7 592 + extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[]; 593 + 594 + struct wlfw_mem_ready_ind_msg_v01 { 595 + char placeholder; 596 + }; 597 + 598 + #define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0 599 + extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[]; 600 + 601 + struct wlfw_fw_init_done_ind_msg_v01 { 602 + char placeholder; 603 + }; 604 + 605 + #define 
WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0 606 + extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[]; 607 + 608 + struct wlfw_rejuvenate_ind_msg_v01 { 609 + u8 cause_for_rejuvenation_valid; 610 + u8 cause_for_rejuvenation; 611 + u8 requesting_sub_system_valid; 612 + u8 requesting_sub_system; 613 + u8 line_number_valid; 614 + u16 line_number; 615 + u8 function_name_valid; 616 + char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1]; 617 + }; 618 + 619 + #define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144 620 + extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[]; 621 + 622 + struct wlfw_rejuvenate_ack_req_msg_v01 { 623 + char placeholder; 624 + }; 625 + 626 + #define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0 627 + extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[]; 628 + 629 + struct wlfw_rejuvenate_ack_resp_msg_v01 { 630 + struct qmi_response_type_v01 resp; 631 + }; 632 + 633 + #define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7 634 + extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[]; 635 + 636 + struct wlfw_dynamic_feature_mask_req_msg_v01 { 637 + u8 mask_valid; 638 + u64 mask; 639 + }; 640 + 641 + #define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11 642 + extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[]; 643 + 644 + struct wlfw_dynamic_feature_mask_resp_msg_v01 { 645 + struct qmi_response_type_v01 resp; 646 + u8 prev_mask_valid; 647 + u64 prev_mask; 648 + u8 curr_mask_valid; 649 + u64 curr_mask; 650 + }; 651 + 652 + #define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29 653 + extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[]; 654 + 655 + struct wlfw_m3_info_req_msg_v01 { 656 + u64 addr; 657 + u32 size; 658 + }; 659 + 660 + #define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 661 + extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[]; 662 + 663 + struct wlfw_m3_info_resp_msg_v01 { 664 + struct qmi_response_type_v01 resp; 665 + }; 666 + 667 + #define 
WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 668 + extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[]; 669 + 670 + struct wlfw_xo_cal_ind_msg_v01 { 671 + u8 xo_cal_data; 672 + }; 673 + 674 + #define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4 675 + extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[]; 676 + 677 + #endif
+257 -10
drivers/net/wireless/ath/ath10k/snoc.c
··· 67 67 static const struct ath10k_snoc_drv_priv drv_priv = { 68 68 .hw_rev = ATH10K_HW_WCN3990, 69 69 .dma_mask = DMA_BIT_MASK(37), 70 + .msa_size = 0x100000, 71 + }; 72 + 73 + #define WCN3990_SRC_WR_IDX_OFFSET 0x3C 74 + #define WCN3990_DST_WR_IDX_OFFSET 0x40 75 + 76 + static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { 77 + { 78 + .ce_id = __cpu_to_le16(0), 79 + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), 80 + }, 81 + 82 + { 83 + .ce_id = __cpu_to_le16(3), 84 + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), 85 + }, 86 + 87 + { 88 + .ce_id = __cpu_to_le16(4), 89 + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), 90 + }, 91 + 92 + { 93 + .ce_id = __cpu_to_le16(5), 94 + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), 95 + }, 96 + 97 + { 98 + .ce_id = __cpu_to_le16(7), 99 + .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET), 100 + }, 101 + 102 + { 103 + .ce_id = __cpu_to_le16(1), 104 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 105 + }, 106 + 107 + { 108 + .ce_id = __cpu_to_le16(2), 109 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 110 + }, 111 + 112 + { 113 + .ce_id = __cpu_to_le16(7), 114 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 115 + }, 116 + 117 + { 118 + .ce_id = __cpu_to_le16(8), 119 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 120 + }, 121 + 122 + { 123 + .ce_id = __cpu_to_le16(9), 124 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 125 + }, 126 + 127 + { 128 + .ce_id = __cpu_to_le16(10), 129 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 130 + }, 131 + 132 + { 133 + .ce_id = __cpu_to_le16(11), 134 + .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET), 135 + }, 70 136 }; 71 137 72 138 static struct ce_attr host_ce_config_wlan[] = { ··· 239 173 .src_sz_max = 2048, 240 174 .dest_nentries = 512, 241 175 .recv_cb = ath10k_snoc_pktlog_rx_cb, 176 + }, 177 + }; 178 + 179 + static struct ce_pipe_config target_ce_config_wlan[] = { 180 
+ /* CE0: host->target HTC control and raw streams */ 181 + { 182 + .pipenum = __cpu_to_le32(0), 183 + .pipedir = __cpu_to_le32(PIPEDIR_OUT), 184 + .nentries = __cpu_to_le32(32), 185 + .nbytes_max = __cpu_to_le32(2048), 186 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 187 + .reserved = __cpu_to_le32(0), 188 + }, 189 + 190 + /* CE1: target->host HTT + HTC control */ 191 + { 192 + .pipenum = __cpu_to_le32(1), 193 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 194 + .nentries = __cpu_to_le32(32), 195 + .nbytes_max = __cpu_to_le32(2048), 196 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 197 + .reserved = __cpu_to_le32(0), 198 + }, 199 + 200 + /* CE2: target->host WMI */ 201 + { 202 + .pipenum = __cpu_to_le32(2), 203 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 204 + .nentries = __cpu_to_le32(64), 205 + .nbytes_max = __cpu_to_le32(2048), 206 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 207 + .reserved = __cpu_to_le32(0), 208 + }, 209 + 210 + /* CE3: host->target WMI */ 211 + { 212 + .pipenum = __cpu_to_le32(3), 213 + .pipedir = __cpu_to_le32(PIPEDIR_OUT), 214 + .nentries = __cpu_to_le32(32), 215 + .nbytes_max = __cpu_to_le32(2048), 216 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 217 + .reserved = __cpu_to_le32(0), 218 + }, 219 + 220 + /* CE4: host->target HTT */ 221 + { 222 + .pipenum = __cpu_to_le32(4), 223 + .pipedir = __cpu_to_le32(PIPEDIR_OUT), 224 + .nentries = __cpu_to_le32(256), 225 + .nbytes_max = __cpu_to_le32(256), 226 + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 227 + .reserved = __cpu_to_le32(0), 228 + }, 229 + 230 + /* CE5: target->host HTT (HIF->HTT) */ 231 + { 232 + .pipenum = __cpu_to_le32(5), 233 + .pipedir = __cpu_to_le32(PIPEDIR_OUT), 234 + .nentries = __cpu_to_le32(1024), 235 + .nbytes_max = __cpu_to_le32(64), 236 + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 237 + .reserved = __cpu_to_le32(0), 238 + }, 239 + 240 + /* CE6: Reserved for target autonomous hif_memcpy */ 241 + { 242 + .pipenum = __cpu_to_le32(6), 243 + .pipedir = 
__cpu_to_le32(PIPEDIR_INOUT), 244 + .nentries = __cpu_to_le32(32), 245 + .nbytes_max = __cpu_to_le32(16384), 246 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 247 + .reserved = __cpu_to_le32(0), 248 + }, 249 + 250 + /* CE7 used only by Host */ 251 + { 252 + .pipenum = __cpu_to_le32(7), 253 + .pipedir = __cpu_to_le32(4), 254 + .nentries = __cpu_to_le32(0), 255 + .nbytes_max = __cpu_to_le32(0), 256 + .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 257 + .reserved = __cpu_to_le32(0), 258 + }, 259 + 260 + /* CE8 Target to uMC */ 261 + { 262 + .pipenum = __cpu_to_le32(8), 263 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 264 + .nentries = __cpu_to_le32(32), 265 + .nbytes_max = __cpu_to_le32(2048), 266 + .flags = __cpu_to_le32(0), 267 + .reserved = __cpu_to_le32(0), 268 + }, 269 + 270 + /* CE9 target->host HTT */ 271 + { 272 + .pipenum = __cpu_to_le32(9), 273 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 274 + .nentries = __cpu_to_le32(32), 275 + .nbytes_max = __cpu_to_le32(2048), 276 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 277 + .reserved = __cpu_to_le32(0), 278 + }, 279 + 280 + /* CE10 target->host HTT */ 281 + { 282 + .pipenum = __cpu_to_le32(10), 283 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 284 + .nentries = __cpu_to_le32(32), 285 + .nbytes_max = __cpu_to_le32(2048), 286 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 287 + .reserved = __cpu_to_le32(0), 288 + }, 289 + 290 + /* CE11 target autonomous qcache memcpy */ 291 + { 292 + .pipenum = __cpu_to_le32(11), 293 + .pipedir = __cpu_to_le32(PIPEDIR_IN), 294 + .nentries = __cpu_to_le32(32), 295 + .nbytes_max = __cpu_to_le32(2048), 296 + .flags = __cpu_to_le32(CE_ATTR_FLAGS), 297 + .reserved = __cpu_to_le32(0), 242 298 }, 243 299 }; 244 300 ··· 954 766 955 767 static int ath10k_snoc_wlan_enable(struct ath10k *ar) 956 768 { 957 - return 0; 769 + struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX]; 770 + struct ath10k_qmi_wlan_enable_cfg cfg; 771 + enum wlfw_driver_mode_enum_v01 mode; 772 + int pipe_num; 773 + 774 + for (pipe_num = 0; 
pipe_num < CE_COUNT_MAX; pipe_num++) { 775 + tgt_cfg[pipe_num].pipe_num = 776 + target_ce_config_wlan[pipe_num].pipenum; 777 + tgt_cfg[pipe_num].pipe_dir = 778 + target_ce_config_wlan[pipe_num].pipedir; 779 + tgt_cfg[pipe_num].nentries = 780 + target_ce_config_wlan[pipe_num].nentries; 781 + tgt_cfg[pipe_num].nbytes_max = 782 + target_ce_config_wlan[pipe_num].nbytes_max; 783 + tgt_cfg[pipe_num].flags = 784 + target_ce_config_wlan[pipe_num].flags; 785 + tgt_cfg[pipe_num].reserved = 0; 786 + } 787 + 788 + cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) / 789 + sizeof(struct ath10k_tgt_pipe_cfg); 790 + cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *) 791 + &tgt_cfg; 792 + cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) / 793 + sizeof(struct ath10k_svc_pipe_cfg); 794 + cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *) 795 + &target_service_to_ce_map_wlan; 796 + cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) / 797 + sizeof(struct ath10k_shadow_reg_cfg); 798 + cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *) 799 + &target_shadow_reg_cfg_map; 800 + 801 + mode = QMI_WLFW_MISSION_V01; 802 + 803 + return ath10k_qmi_wlan_enable(ar, &cfg, mode, 804 + NULL); 958 805 } 959 806 960 807 static void ath10k_snoc_wlan_disable(struct ath10k *ar) 961 808 { 809 + ath10k_qmi_wlan_disable(ar); 962 810 } 963 811 964 812 static void ath10k_snoc_hif_power_down(struct ath10k *ar) ··· 1179 955 1180 956 out: 1181 957 return ret; 958 + } 959 + 960 + int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) 961 + { 962 + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); 963 + struct ath10k_bus_params bus_params; 964 + int ret; 965 + 966 + switch (type) { 967 + case ATH10K_QMI_EVENT_FW_READY_IND: 968 + bus_params.dev_type = ATH10K_DEV_TYPE_LL; 969 + bus_params.chip_id = ar_snoc->target_info.soc_version; 970 + ret = ath10k_core_register(ar, &bus_params); 971 + if (ret) { 972 + ath10k_err(ar, "failed to register driver core: %d\n", 973 + ret); 974 + } 975 + break; 
976 + case ATH10K_QMI_EVENT_FW_DOWN_IND: 977 + break; 978 + default: 979 + ath10k_err(ar, "invalid fw indication: %llx\n", type); 980 + return -EINVAL; 981 + } 982 + 983 + return 0; 1182 984 } 1183 985 1184 986 static int ath10k_snoc_setup_resource(struct ath10k *ar) ··· 1531 1281 struct ath10k_snoc *ar_snoc; 1532 1282 struct device *dev; 1533 1283 struct ath10k *ar; 1284 + u32 msa_size; 1534 1285 int ret; 1535 1286 u32 i; 1536 - struct ath10k_bus_params bus_params; 1537 1287 1538 1288 of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev); 1539 1289 if (!of_id) { ··· 1563 1313 ar_snoc->ar = ar; 1564 1314 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; 1565 1315 ar->ce_priv = &ar_snoc->ce; 1316 + msa_size = drv_data->msa_size; 1566 1317 1567 1318 ret = ath10k_snoc_resource_init(ar); 1568 1319 if (ret) { ··· 1602 1351 goto err_free_irq; 1603 1352 } 1604 1353 1605 - bus_params.dev_type = ATH10K_DEV_TYPE_LL; 1606 - bus_params.chip_id = drv_data->hw_rev; 1607 - ret = ath10k_core_register(ar, &bus_params); 1354 + ret = ath10k_qmi_init(ar, msa_size); 1608 1355 if (ret) { 1609 - ath10k_err(ar, "failed to register driver core: %d\n", ret); 1610 - goto err_hw_power_off; 1356 + ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret); 1357 + goto err_core_destroy; 1611 1358 } 1612 1359 1613 1360 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); 1614 1361 ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); 1615 1362 1616 1363 return 0; 1617 - 1618 - err_hw_power_off: 1619 - ath10k_hw_power_off(ar); 1620 1364 1621 1365 err_free_irq: 1622 1366 ath10k_snoc_free_irq(ar); ··· 1634 1388 ath10k_hw_power_off(ar); 1635 1389 ath10k_snoc_free_irq(ar); 1636 1390 ath10k_snoc_release_resource(ar); 1391 + ath10k_qmi_deinit(ar); 1637 1392 ath10k_core_destroy(ar); 1638 1393 1639 1394 return 0;
+4
drivers/net/wireless/ath/ath10k/snoc.h
··· 19 19 20 20 #include "hw.h" 21 21 #include "ce.h" 22 + #include "qmi.h" 22 23 23 24 struct ath10k_snoc_drv_priv { 24 25 enum ath10k_hw_rev hw_rev; 25 26 u64 dma_mask; 27 + u32 msa_size; 26 28 }; 27 29 28 30 struct snoc_state { ··· 83 81 struct timer_list rx_post_retry; 84 82 struct ath10k_wcn3990_vreg_info *vreg; 85 83 struct ath10k_wcn3990_clk_info *clk; 84 + struct ath10k_qmi *qmi; 86 85 }; 87 86 88 87 static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) ··· 93 90 94 91 void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); 95 92 u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); 93 + int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type); 96 94 97 95 #endif /* _SNOC_H_ */
+21
drivers/net/wireless/ath/ath10k/wmi-ops.h
··· 210 210 u32 fw_feature_bitmap); 211 211 int (*get_vdev_subtype)(struct ath10k *ar, 212 212 enum wmi_vdev_subtype subtype); 213 + struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar, 214 + u32 vdev_id, 215 + struct wmi_pno_scan_req *pno_scan); 213 216 struct sk_buff *(*gen_pdev_bss_chan_info_req) 214 217 (struct ath10k *ar, 215 218 enum wmi_bss_survey_req_type type); ··· 1360 1357 return PTR_ERR(skb); 1361 1358 1362 1359 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid; 1360 + return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1361 + } 1362 + 1363 + static inline int 1364 + ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id, 1365 + struct wmi_pno_scan_req *pno_scan) 1366 + { 1367 + struct sk_buff *skb; 1368 + u32 cmd_id; 1369 + 1370 + if (!ar->wmi.ops->gen_wow_config_pno) 1371 + return -EOPNOTSUPP; 1372 + 1373 + skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan); 1374 + if (IS_ERR(skb)) 1375 + return PTR_ERR(skb); 1376 + 1377 + cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid; 1363 1378 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 1364 1379 } 1365 1380
+187
drivers/net/wireless/ath/ath10k/wmi-tlv.c
··· 3441 3441 return skb; 3442 3442 } 3443 3443 3444 + /* Request FW to start PNO operation */ 3445 + static struct sk_buff * 3446 + ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar, 3447 + u32 vdev_id, 3448 + struct wmi_pno_scan_req *pno) 3449 + { 3450 + struct nlo_configured_parameters *nlo_list; 3451 + struct wmi_tlv_wow_nlo_config_cmd *cmd; 3452 + struct wmi_tlv *tlv; 3453 + struct sk_buff *skb; 3454 + __le32 *channel_list; 3455 + u16 tlv_len; 3456 + size_t len; 3457 + void *ptr; 3458 + u32 i; 3459 + 3460 + len = sizeof(*tlv) + sizeof(*cmd) + 3461 + sizeof(*tlv) + 3462 + /* TLV place holder for array of structures 3463 + * nlo_configured_parameters(nlo_list) 3464 + */ 3465 + sizeof(*tlv); 3466 + /* TLV place holder for array of uint32 channel_list */ 3467 + 3468 + len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count, 3469 + WMI_NLO_MAX_CHAN); 3470 + len += sizeof(struct nlo_configured_parameters) * 3471 + min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS); 3472 + 3473 + skb = ath10k_wmi_alloc_skb(ar, len); 3474 + if (!skb) 3475 + return ERR_PTR(-ENOMEM); 3476 + 3477 + ptr = (void *)skb->data; 3478 + tlv = ptr; 3479 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD); 3480 + tlv->len = __cpu_to_le16(sizeof(*cmd)); 3481 + cmd = (void *)tlv->value; 3482 + 3483 + /* wmi_tlv_wow_nlo_config_cmd parameters*/ 3484 + cmd->vdev_id = __cpu_to_le32(pno->vdev_id); 3485 + cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); 3486 + 3487 + /* current FW does not support min-max range for dwell time */ 3488 + cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time); 3489 + cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time); 3490 + 3491 + if (pno->do_passive_scan) 3492 + cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); 3493 + 3494 + /* copy scan interval */ 3495 + cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period); 3496 + cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period); 3497 
+ cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles); 3498 + cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time); 3499 + 3500 + if (pno->enable_pno_scan_randomization) { 3501 + cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | 3502 + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); 3503 + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); 3504 + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); 3505 + } 3506 + 3507 + ptr += sizeof(*tlv); 3508 + ptr += sizeof(*cmd); 3509 + 3510 + /* nlo_configured_parameters(nlo_list) */ 3511 + cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count, 3512 + WMI_NLO_MAX_SSIDS)); 3513 + tlv_len = __le32_to_cpu(cmd->no_of_ssids) * 3514 + sizeof(struct nlo_configured_parameters); 3515 + 3516 + tlv = ptr; 3517 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 3518 + tlv->len = __cpu_to_le16(len); 3519 + 3520 + ptr += sizeof(*tlv); 3521 + nlo_list = ptr; 3522 + for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) { 3523 + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); 3524 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 3525 + tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) - 3526 + sizeof(*tlv)); 3527 + 3528 + /* copy ssid and it's length */ 3529 + nlo_list[i].ssid.valid = __cpu_to_le32(true); 3530 + nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len; 3531 + memcpy(nlo_list[i].ssid.ssid.ssid, 3532 + pno->a_networks[i].ssid.ssid, 3533 + __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); 3534 + 3535 + /* copy rssi threshold */ 3536 + if (pno->a_networks[i].rssi_threshold && 3537 + pno->a_networks[i].rssi_threshold > -300) { 3538 + nlo_list[i].rssi_cond.valid = __cpu_to_le32(true); 3539 + nlo_list[i].rssi_cond.rssi = 3540 + __cpu_to_le32(pno->a_networks[i].rssi_threshold); 3541 + } 3542 + 3543 + nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true); 3544 + nlo_list[i].bcast_nw_type.bcast_nw_type = 3545 + 
__cpu_to_le32(pno->a_networks[i].bcast_nw_type); 3546 + } 3547 + 3548 + ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters); 3549 + 3550 + /* copy channel info */ 3551 + cmd->num_of_channels = __cpu_to_le32(min_t(u8, 3552 + pno->a_networks[0].channel_count, 3553 + WMI_NLO_MAX_CHAN)); 3554 + 3555 + tlv = ptr; 3556 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 3557 + tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) * 3558 + sizeof(u_int32_t)); 3559 + ptr += sizeof(*tlv); 3560 + 3561 + channel_list = (__le32 *)ptr; 3562 + for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++) 3563 + channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]); 3564 + 3565 + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", 3566 + vdev_id); 3567 + 3568 + return skb; 3569 + } 3570 + 3571 + /* Request FW to stop ongoing PNO operation */ 3572 + static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar, 3573 + u32 vdev_id) 3574 + { 3575 + struct wmi_tlv_wow_nlo_config_cmd *cmd; 3576 + struct wmi_tlv *tlv; 3577 + struct sk_buff *skb; 3578 + void *ptr; 3579 + size_t len; 3580 + 3581 + len = sizeof(*tlv) + sizeof(*cmd) + 3582 + sizeof(*tlv) + 3583 + /* TLV place holder for array of structures 3584 + * nlo_configured_parameters(nlo_list) 3585 + */ 3586 + sizeof(*tlv); 3587 + /* TLV place holder for array of uint32 channel_list */ 3588 + skb = ath10k_wmi_alloc_skb(ar, len); 3589 + if (!skb) 3590 + return ERR_PTR(-ENOMEM); 3591 + 3592 + ptr = (void *)skb->data; 3593 + tlv = ptr; 3594 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD); 3595 + tlv->len = __cpu_to_le16(sizeof(*cmd)); 3596 + cmd = (void *)tlv->value; 3597 + 3598 + cmd->vdev_id = __cpu_to_le32(vdev_id); 3599 + cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP); 3600 + 3601 + ptr += sizeof(*tlv); 3602 + ptr += sizeof(*cmd); 3603 + 3604 + /* nlo_configured_parameters(nlo_list) */ 3605 + tlv = ptr; 3606 + tlv->tag = 
__cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 3607 + tlv->len = __cpu_to_le16(0); 3608 + 3609 + ptr += sizeof(*tlv); 3610 + 3611 + /* channel list */ 3612 + tlv = ptr; 3613 + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32); 3614 + tlv->len = __cpu_to_le16(0); 3615 + 3616 + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id); 3617 + return skb; 3618 + } 3619 + 3620 + static struct sk_buff * 3621 + ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id, 3622 + struct wmi_pno_scan_req *pno_scan) 3623 + { 3624 + if (pno_scan->enable) 3625 + return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan); 3626 + else 3627 + return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id); 3628 + } 3629 + 3444 3630 static struct sk_buff * 3445 3631 ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable) 3446 3632 { ··· 4159 3973 .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, 4160 3974 .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern, 4161 3975 .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern, 3976 + .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno, 4162 3977 .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, 4163 3978 .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, 4164 3979 .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+254
drivers/net/wireless/ath/ath10k/wmi-tlv.h
··· 2146 2146 2147 2147 void ath10k_wmi_tlv_attach(struct ath10k *ar); 2148 2148 2149 + enum wmi_nlo_auth_algorithm { 2150 + WMI_NLO_AUTH_ALGO_80211_OPEN = 1, 2151 + WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2, 2152 + WMI_NLO_AUTH_ALGO_WPA = 3, 2153 + WMI_NLO_AUTH_ALGO_WPA_PSK = 4, 2154 + WMI_NLO_AUTH_ALGO_WPA_NONE = 5, 2155 + WMI_NLO_AUTH_ALGO_RSNA = 6, 2156 + WMI_NLO_AUTH_ALGO_RSNA_PSK = 7, 2157 + }; 2158 + 2159 + enum wmi_nlo_cipher_algorithm { 2160 + WMI_NLO_CIPHER_ALGO_NONE = 0x00, 2161 + WMI_NLO_CIPHER_ALGO_WEP40 = 0x01, 2162 + WMI_NLO_CIPHER_ALGO_TKIP = 0x02, 2163 + WMI_NLO_CIPHER_ALGO_CCMP = 0x04, 2164 + WMI_NLO_CIPHER_ALGO_WEP104 = 0x05, 2165 + WMI_NLO_CIPHER_ALGO_BIP = 0x06, 2166 + WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100, 2167 + WMI_NLO_CIPHER_ALGO_WEP = 0x101, 2168 + }; 2169 + 2170 + /* SSID broadcast type passed in NLO params */ 2171 + enum wmi_nlo_ssid_bcastnwtype { 2172 + WMI_NLO_BCAST_UNKNOWN = 0, 2173 + WMI_NLO_BCAST_NORMAL = 1, 2174 + WMI_NLO_BCAST_HIDDEN = 2, 2175 + }; 2176 + 2177 + #define WMI_NLO_MAX_SSIDS 16 2178 + #define WMI_NLO_MAX_CHAN 48 2179 + 2180 + #define WMI_NLO_CONFIG_STOP (0x1 << 0) 2181 + #define WMI_NLO_CONFIG_START (0x1 << 1) 2182 + #define WMI_NLO_CONFIG_RESET (0x1 << 2) 2183 + #define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4) 2184 + #define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5) 2185 + #define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6) 2186 + 2187 + /* This bit is used to indicate if EPNO or supplicant PNO is enabled. 
2188 + * Only one of them can be enabled at a given time 2189 + */ 2190 + #define WMI_NLO_CONFIG_ENLO (0x1 << 7) 2191 + #define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8) 2192 + #define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9) 2193 + #define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10) 2194 + #define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11) 2195 + #define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12) 2196 + #define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13) 2197 + 2198 + /* Whether directed scan needs to be performed (for hidden SSIDs) */ 2199 + #define WMI_ENLO_FLAG_DIRECTED_SCAN 1 2200 + 2201 + /* Whether PNO event shall be triggered if the network is found on A band */ 2202 + #define WMI_ENLO_FLAG_A_BAND 2 2203 + 2204 + /* Whether PNO event shall be triggered if the network is found on G band */ 2205 + #define WMI_ENLO_FLAG_G_BAND 4 2206 + 2207 + /* Whether strict matching is required (i.e. firmware shall not 2208 + * match on the entire SSID) 2209 + */ 2210 + #define WMI_ENLO_FLAG_STRICT_MATCH 8 2211 + 2212 + /* Code for matching the beacon AUTH IE - additional codes TBD */ 2213 + /* open */ 2214 + #define WMI_ENLO_AUTH_CODE_OPEN 1 2215 + 2216 + /* WPA_PSK or WPA2PSK */ 2217 + #define WMI_ENLO_AUTH_CODE_PSK 2 2218 + 2219 + /* any EAPOL */ 2220 + #define WMI_ENLO_AUTH_CODE_EAPOL 4 2221 + 2222 + struct wmi_nlo_ssid_param { 2223 + __le32 valid; 2224 + struct wmi_ssid ssid; 2225 + } __packed; 2226 + 2227 + struct wmi_nlo_enc_param { 2228 + __le32 valid; 2229 + __le32 enc_type; 2230 + } __packed; 2231 + 2232 + struct wmi_nlo_auth_param { 2233 + __le32 valid; 2234 + __le32 auth_type; 2235 + } __packed; 2236 + 2237 + struct wmi_nlo_bcast_nw_param { 2238 + __le32 valid; 2239 + 2240 + /* If WMI_NLO_CONFIG_EPNO is not set. Supplicant PNO is enabled. 2241 + * The value should be true/false. Otherwise EPNO is enabled. 
2242 + * bcast_nw_type would be used as a bit flag contains WMI_ENLO_FLAG_XXX 2243 + */ 2244 + __le32 bcast_nw_type; 2245 + } __packed; 2246 + 2247 + struct wmi_nlo_rssi_param { 2248 + __le32 valid; 2249 + __le32 rssi; 2250 + } __packed; 2251 + 2252 + struct nlo_configured_parameters { 2253 + /* TLV tag and len;*/ 2254 + __le32 tlv_header; 2255 + struct wmi_nlo_ssid_param ssid; 2256 + struct wmi_nlo_enc_param enc_type; 2257 + struct wmi_nlo_auth_param auth_type; 2258 + struct wmi_nlo_rssi_param rssi_cond; 2259 + 2260 + /* indicates if the SSID is hidden or not */ 2261 + struct wmi_nlo_bcast_nw_param bcast_nw_type; 2262 + } __packed; 2263 + 2264 + /* Support channel prediction for PNO scan after scanning top_k_num channels 2265 + * if stationary_threshold is met. 2266 + */ 2267 + struct nlo_channel_prediction_cfg { 2268 + __le32 tlv_header; 2269 + 2270 + /* Enable or disable this feature. */ 2271 + __le32 enable; 2272 + 2273 + /* Top K channels will be scanned before deciding whether to further scan 2274 + * or stop. Minimum value is 3 and maximum is 5. 2275 + */ 2276 + __le32 top_k_num; 2277 + 2278 + /* Preconfigured stationary threshold. 2279 + * Lesser value means more conservative. Bigger value means more aggressive. 2280 + * Maximum is 100 and mininum is 0. 2281 + */ 2282 + __le32 stationary_threshold; 2283 + 2284 + /* Periodic full channel scan in milliseconds unit. 2285 + * After full_scan_period_ms since last full scan, channel prediction 2286 + * scan is suppressed and will do full scan. 2287 + * This is to help detecting sudden AP power-on or -off. Value 0 means no 2288 + * full scan at all (not recommended). 
2289 + */ 2290 + __le32 full_scan_period_ms; 2291 + } __packed; 2292 + 2293 + struct enlo_candidate_score_params_t { 2294 + __le32 tlv_header; /* TLV tag and len; */ 2295 + 2296 + /* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */ 2297 + __le32 min_5ghz_rssi; 2298 + 2299 + /* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */ 2300 + __le32 min_24ghz_rssi; 2301 + 2302 + /* the maximum score that a network can have before bonuses */ 2303 + __le32 initial_score_max; 2304 + 2305 + /* current_connection_bonus: 2306 + * only report when there is a network's score this much higher 2307 + * than the current connection 2308 + */ 2309 + __le32 current_connection_bonus; 2310 + 2311 + /* score bonus for all networks with the same network flag */ 2312 + __le32 same_network_bonus; 2313 + 2314 + /* score bonus for networks that are not open */ 2315 + __le32 secure_bonus; 2316 + 2317 + /* 5GHz RSSI score bonus (applied to all 5GHz networks) */ 2318 + __le32 band_5ghz_bonus; 2319 + } __packed; 2320 + 2321 + struct connected_nlo_bss_band_rssi_pref_t { 2322 + __le32 tlv_header; /* TLV tag and len;*/ 2323 + 2324 + /* band which needs to get preference over other band 2325 + * - see wmi_set_vdev_ie_band enum 2326 + */ 2327 + __le32 band; 2328 + 2329 + /* Amount of RSSI preference (in dB) that can be given to a band */ 2330 + __le32 rssi_pref; 2331 + } __packed; 2332 + 2333 + struct connected_nlo_rssi_params_t { 2334 + __le32 tlv_header; /* TLV tag and len;*/ 2335 + 2336 + /* Relative rssi threshold (in dB) by which new BSS should have 2337 + * better rssi than the current connected BSS. 2338 + */ 2339 + __le32 relative_rssi; 2340 + 2341 + /* The amount of rssi preference (in dB) that can be given 2342 + * to a 5G BSS over 2.4G BSS. 
2343 + */ 2344 + __le32 relative_rssi_5g_pref; 2345 + } __packed; 2346 + 2347 + struct wmi_tlv_wow_nlo_config_cmd { 2348 + __le32 flags; 2349 + __le32 vdev_id; 2350 + __le32 fast_scan_max_cycles; 2351 + __le32 active_dwell_time; 2352 + __le32 passive_dwell_time; /* PDT in msecs */ 2353 + __le32 probe_bundle_size; 2354 + 2355 + /* ART = IRT */ 2356 + __le32 rest_time; 2357 + 2358 + /* Max value that can be reached after SBM */ 2359 + __le32 max_rest_time; 2360 + 2361 + /* SBM */ 2362 + __le32 scan_backoff_multiplier; 2363 + 2364 + /* SCBM */ 2365 + __le32 fast_scan_period; 2366 + 2367 + /* specific to windows */ 2368 + __le32 slow_scan_period; 2369 + 2370 + __le32 no_of_ssids; 2371 + 2372 + __le32 num_of_channels; 2373 + 2374 + /* NLO scan start delay time in milliseconds */ 2375 + __le32 delay_start_time; 2376 + 2377 + /** MAC Address to use in Probe Req as SA **/ 2378 + struct wmi_mac_addr mac_addr; 2379 + 2380 + /** Mask on which MAC has to be randomized **/ 2381 + struct wmi_mac_addr mac_mask; 2382 + 2383 + /** IE bitmap to use in Probe Req **/ 2384 + __le32 ie_bitmap[8]; 2385 + 2386 + /** Number of vendor OUIs. In the TLV vendor_oui[] **/ 2387 + __le32 num_vendor_oui; 2388 + 2389 + /** Number of connected NLO band preferences **/ 2390 + __le32 num_cnlo_band_pref; 2391 + 2392 + /* The TLVs will follow. 2393 + * nlo_configured_parameters nlo_list[]; 2394 + * A_UINT32 channel_list[num_of_channels]; 2395 + * nlo_channel_prediction_cfg ch_prediction_cfg; 2396 + * enlo_candidate_score_params candidate_score_params; 2397 + * wmi_vendor_oui vendor_oui[num_vendor_oui]; 2398 + * connected_nlo_rssi_params cnlo_rssi_params; 2399 + * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref]; 2400 + */ 2401 + } __packed; 2402 + 2149 2403 struct wmi_tlv_mgmt_tx_cmd { 2150 2404 __le32 vdev_id; 2151 2405 __le32 desc_id;
+57
drivers/net/wireless/ath/ath10k/wmi.h
··· 7068 7068 __le32 cca_detect_margin; 7069 7069 } __packed; 7070 7070 7071 + #define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 7072 + #define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 7073 + #define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 7074 + #define WMI_PNO_MAX_NETW_CHANNELS 26 7075 + #define WMI_PNO_MAX_NETW_CHANNELS_EX 60 7076 + #define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID 7077 + #define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN 7078 + 7079 + /*size based of dot11 declaration without extra IEs as we will not carry those for PNO*/ 7080 + #define WMI_PNO_MAX_PB_REQ_SIZE 450 7081 + 7082 + #define WMI_PNO_24G_DEFAULT_CH 1 7083 + #define WMI_PNO_5G_DEFAULT_CH 36 7084 + 7085 + #define WMI_ACTIVE_MAX_CHANNEL_TIME 40 7086 + #define WMI_PASSIVE_MAX_CHANNEL_TIME 110 7087 + 7088 + /* SSID broadcast type */ 7089 + enum wmi_SSID_bcast_type { 7090 + BCAST_UNKNOWN = 0, 7091 + BCAST_NORMAL = 1, 7092 + BCAST_HIDDEN = 2, 7093 + }; 7094 + 7095 + struct wmi_network_type { 7096 + struct wmi_ssid ssid; 7097 + u32 authentication; 7098 + u32 encryption; 7099 + u32 bcast_nw_type; 7100 + u8 channel_count; 7101 + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; 7102 + s32 rssi_threshold; 7103 + } __packed; 7104 + 7105 + struct wmi_pno_scan_req { 7106 + u8 enable; 7107 + u8 vdev_id; 7108 + u8 uc_networks_count; 7109 + struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; 7110 + u32 fast_scan_period; 7111 + u32 slow_scan_period; 7112 + u8 fast_scan_max_cycles; 7113 + 7114 + bool do_passive_scan; 7115 + 7116 + u32 delay_start_time; 7117 + u32 active_min_time; 7118 + u32 active_max_time; 7119 + u32 passive_min_time; 7120 + u32 passive_max_time; 7121 + 7122 + /* mac address randomization attributes */ 7123 + u32 enable_pno_scan_randomization; 7124 + u8 mac_addr[ETH_ALEN]; 7125 + u8 mac_addr_mask[ETH_ALEN]; 7126 + } __packed; 7127 + 7071 7128 enum wmi_host_platform_type { 7072 7129 WMI_HOST_PLATFORM_HIGH_PERF, 7073 7130 WMI_HOST_PLATFORM_LOW_PERF,
+168
drivers/net/wireless/ath/ath10k/wow.c
··· 180 180 } 181 181 } 182 182 183 + static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id, 184 + struct cfg80211_sched_scan_request *nd_config, 185 + struct wmi_pno_scan_req *pno) 186 + { 187 + int i, j, ret = 0; 188 + u8 ssid_len; 189 + 190 + pno->enable = 1; 191 + pno->vdev_id = vdev_id; 192 + pno->uc_networks_count = nd_config->n_match_sets; 193 + 194 + if (!pno->uc_networks_count || 195 + pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS) 196 + return -EINVAL; 197 + 198 + if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX) 199 + return -EINVAL; 200 + 201 + /* Filling per profile params */ 202 + for (i = 0; i < pno->uc_networks_count; i++) { 203 + ssid_len = nd_config->match_sets[i].ssid.ssid_len; 204 + 205 + if (ssid_len == 0 || ssid_len > 32) 206 + return -EINVAL; 207 + 208 + pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len); 209 + 210 + memcpy(pno->a_networks[i].ssid.ssid, 211 + nd_config->match_sets[i].ssid.ssid, 212 + nd_config->match_sets[i].ssid.ssid_len); 213 + pno->a_networks[i].authentication = 0; 214 + pno->a_networks[i].encryption = 0; 215 + pno->a_networks[i].bcast_nw_type = 0; 216 + 217 + /*Copying list of valid channel into request */ 218 + pno->a_networks[i].channel_count = nd_config->n_channels; 219 + pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold; 220 + 221 + for (j = 0; j < nd_config->n_channels; j++) { 222 + pno->a_networks[i].channels[j] = 223 + nd_config->channels[j]->center_freq; 224 + } 225 + } 226 + 227 + /* set scan to passive if no SSIDs are specified in the request */ 228 + if (nd_config->n_ssids == 0) 229 + pno->do_passive_scan = true; 230 + else 231 + pno->do_passive_scan = false; 232 + 233 + for (i = 0; i < nd_config->n_ssids; i++) { 234 + j = 0; 235 + while (j < pno->uc_networks_count) { 236 + if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) == 237 + nd_config->ssids[i].ssid_len && 238 + (memcmp(pno->a_networks[j].ssid.ssid, 239 + nd_config->ssids[i].ssid, 240 + 
__le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) { 241 + pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN; 242 + break; 243 + } 244 + j++; 245 + } 246 + } 247 + 248 + if (nd_config->n_scan_plans == 2) { 249 + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; 250 + pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations; 251 + pno->slow_scan_period = 252 + nd_config->scan_plans[1].interval * MSEC_PER_SEC; 253 + } else if (nd_config->n_scan_plans == 1) { 254 + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; 255 + pno->fast_scan_max_cycles = 1; 256 + pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; 257 + } else { 258 + ath10k_warn(ar, "Invalid number of scan plans %d !!", 259 + nd_config->n_scan_plans); 260 + } 261 + 262 + if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 263 + /* enable mac randomization */ 264 + pno->enable_pno_scan_randomization = 1; 265 + memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN); 266 + memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN); 267 + } 268 + 269 + pno->delay_start_time = nd_config->delay; 270 + 271 + /* Current FW does not support min-max range for dwell time */ 272 + pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME; 273 + pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME; 274 + return ret; 275 + } 276 + 183 277 static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, 184 278 struct cfg80211_wowlan *wowlan) 185 279 { ··· 307 213 308 214 if (wowlan->magic_pkt) 309 215 __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); 216 + 217 + if (wowlan->nd_config) { 218 + struct wmi_pno_scan_req *pno; 219 + int ret; 220 + 221 + pno = kzalloc(sizeof(*pno), GFP_KERNEL); 222 + if (!pno) 223 + return -ENOMEM; 224 + 225 + ar->nlo_enabled = true; 226 + 227 + ret = ath10k_wmi_pno_check(ar, arvif->vdev_id, 228 + wowlan->nd_config, pno); 229 + if (!ret) { 230 + ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); 231 + 
__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask); 232 + } 233 + 234 + kfree(pno); 235 + } 310 236 break; 311 237 default: 312 238 break; ··· 405 291 ret = ath10k_vif_wow_set_wakeups(arvif, wowlan); 406 292 if (ret) { 407 293 ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n", 294 + arvif->vdev_id, ret); 295 + return ret; 296 + } 297 + } 298 + 299 + return 0; 300 + } 301 + 302 + static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif) 303 + { 304 + int ret = 0; 305 + struct ath10k *ar = arvif->ar; 306 + 307 + switch (arvif->vdev_type) { 308 + case WMI_VDEV_TYPE_STA: 309 + if (ar->nlo_enabled) { 310 + struct wmi_pno_scan_req *pno; 311 + 312 + pno = kzalloc(sizeof(*pno), GFP_KERNEL); 313 + if (!pno) 314 + return -ENOMEM; 315 + 316 + pno->enable = 0; 317 + ar->nlo_enabled = false; 318 + ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); 319 + kfree(pno); 320 + } 321 + break; 322 + default: 323 + break; 324 + } 325 + return ret; 326 + } 327 + 328 + static int ath10k_wow_nlo_cleanup(struct ath10k *ar) 329 + { 330 + struct ath10k_vif *arvif; 331 + int ret = 0; 332 + 333 + lockdep_assert_held(&ar->conf_mutex); 334 + 335 + list_for_each_entry(arvif, &ar->arvifs, list) { 336 + ret = ath10k_vif_wow_clean_nlo(arvif); 337 + if (ret) { 338 + ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n", 408 339 arvif->vdev_id, ret); 409 340 return ret; 410 341 } ··· 595 436 if (ret) 596 437 ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); 597 438 439 + ret = ath10k_wow_nlo_cleanup(ar); 440 + if (ret) 441 + ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret); 442 + 598 443 exit: 599 444 if (ret) { 600 445 switch (ar->state) { ··· 636 473 if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { 637 474 ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; 638 475 ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; 476 + } 477 + 478 + if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) { 479 + ar->wow.wowlan_support.flags |= 
WIPHY_WOWLAN_NET_DETECT; 480 + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; 639 481 } 640 482 641 483 ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+4 -4
drivers/net/wireless/ath/ath9k/antenna.c
··· 755 755 } 756 756 757 757 if (main_ant_conf == rx_ant_conf) { 758 - ANT_STAT_INC(ANT_MAIN, recv_cnt); 759 - ANT_LNA_INC(ANT_MAIN, rx_ant_conf); 758 + ANT_STAT_INC(sc, ANT_MAIN, recv_cnt); 759 + ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf); 760 760 } else { 761 - ANT_STAT_INC(ANT_ALT, recv_cnt); 762 - ANT_LNA_INC(ANT_ALT, rx_ant_conf); 761 + ANT_STAT_INC(sc, ANT_ALT, recv_cnt); 762 + ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf); 763 763 } 764 764 765 765 /* Short scan check */
+4 -4
drivers/net/wireless/ath/ath9k/common-spectral.c
··· 624 624 tsf, freq, chan_type); 625 625 626 626 if (ret == 0) 627 - RX_STAT_INC(rx_spectral_sample_good); 627 + RX_STAT_INC(sc, rx_spectral_sample_good); 628 628 else 629 - RX_STAT_INC(rx_spectral_sample_err); 629 + RX_STAT_INC(sc, rx_spectral_sample_err); 630 630 631 631 memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); 632 632 ··· 642 642 tsf, freq, chan_type); 643 643 644 644 if (ret == 0) 645 - RX_STAT_INC(rx_spectral_sample_good); 645 + RX_STAT_INC(sc, rx_spectral_sample_good); 646 646 else 647 - RX_STAT_INC(rx_spectral_sample_err); 647 + RX_STAT_INC(sc, rx_spectral_sample_err); 648 648 649 649 /* Mix the received bins to the /dev/random 650 650 * pool
+12 -12
drivers/net/wireless/ath/ath9k/debug.c
··· 785 785 { 786 786 int qnum = txq->axq_qnum; 787 787 788 - TX_STAT_INC(qnum, tx_pkts_all); 788 + TX_STAT_INC(sc, qnum, tx_pkts_all); 789 789 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; 790 790 791 791 if (bf_isampdu(bf)) { 792 792 if (flags & ATH_TX_ERROR) 793 - TX_STAT_INC(qnum, a_xretries); 793 + TX_STAT_INC(sc, qnum, a_xretries); 794 794 else 795 - TX_STAT_INC(qnum, a_completed); 795 + TX_STAT_INC(sc, qnum, a_completed); 796 796 } else { 797 797 if (ts->ts_status & ATH9K_TXERR_XRETRY) 798 - TX_STAT_INC(qnum, xretries); 798 + TX_STAT_INC(sc, qnum, xretries); 799 799 else 800 - TX_STAT_INC(qnum, completed); 800 + TX_STAT_INC(sc, qnum, completed); 801 801 } 802 802 803 803 if (ts->ts_status & ATH9K_TXERR_FILT) 804 - TX_STAT_INC(qnum, txerr_filtered); 804 + TX_STAT_INC(sc, qnum, txerr_filtered); 805 805 if (ts->ts_status & ATH9K_TXERR_FIFO) 806 - TX_STAT_INC(qnum, fifo_underrun); 806 + TX_STAT_INC(sc, qnum, fifo_underrun); 807 807 if (ts->ts_status & ATH9K_TXERR_XTXOP) 808 - TX_STAT_INC(qnum, xtxop); 808 + TX_STAT_INC(sc, qnum, xtxop); 809 809 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED) 810 - TX_STAT_INC(qnum, timer_exp); 810 + TX_STAT_INC(sc, qnum, timer_exp); 811 811 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR) 812 - TX_STAT_INC(qnum, desc_cfg_err); 812 + TX_STAT_INC(sc, qnum, desc_cfg_err); 813 813 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN) 814 - TX_STAT_INC(qnum, data_underrun); 814 + TX_STAT_INC(sc, qnum, data_underrun); 815 815 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 816 - TX_STAT_INC(qnum, delim_underrun); 816 + TX_STAT_INC(sc, qnum, delim_underrun); 817 817 } 818 818 819 819 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
+10 -10
drivers/net/wireless/ath/ath9k/debug.h
··· 25 25 struct fft_sample_tlv; 26 26 27 27 #ifdef CONFIG_ATH9K_DEBUGFS 28 - #define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++ 29 - #define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++) 30 - #define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++ 31 - #define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++ 32 - #define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++; 28 + #define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0) 29 + #define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0) 30 + #define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0) 31 + #define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0) 32 + #define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0) 33 33 #else 34 - #define TX_STAT_INC(q, c) do { } while (0) 35 - #define RX_STAT_INC(c) 36 - #define RESET_STAT_INC(sc, type) do { } while (0) 37 - #define ANT_STAT_INC(i, c) do { } while (0) 38 - #define ANT_LNA_INC(i, c) do { } while (0) 34 + #define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0) 35 + #define RX_STAT_INC(sc, c) do { (void)(sc); } while (0) 36 + #define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0) 37 + #define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0) 38 + #define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0) 39 39 #endif 40 40 41 41 enum ath_reset_type {
+1 -1
drivers/net/wireless/ath/ath9k/main.c
··· 809 809 810 810 if (ath_tx_start(hw, skb, &txctl) != 0) { 811 811 ath_dbg(common, XMIT, "TX failed\n"); 812 - TX_STAT_INC(txctl.txq->axq_qnum, txfailed); 812 + TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed); 813 813 goto exit; 814 814 } 815 815
+9 -9
drivers/net/wireless/ath/ath9k/recv.c
··· 829 829 * Discard zero-length packets and packets smaller than an ACK 830 830 */ 831 831 if (rx_stats->rs_datalen < 10) { 832 - RX_STAT_INC(rx_len_err); 832 + RX_STAT_INC(sc, rx_len_err); 833 833 goto corrupt; 834 834 } 835 835 ··· 839 839 * those frames. 840 840 */ 841 841 if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { 842 - RX_STAT_INC(rx_len_err); 842 + RX_STAT_INC(sc, rx_len_err); 843 843 goto corrupt; 844 844 } 845 845 ··· 880 880 } else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED && 881 881 ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats, 882 882 rx_status->mactime)) { 883 - RX_STAT_INC(rx_spectral); 883 + RX_STAT_INC(sc, rx_spectral); 884 884 } 885 885 return -EINVAL; 886 886 } ··· 898 898 spin_unlock_bh(&sc->chan_lock); 899 899 900 900 if (ath_is_mybeacon(common, hdr)) { 901 - RX_STAT_INC(rx_beacons); 901 + RX_STAT_INC(sc, rx_beacons); 902 902 rx_stats->is_mybeacon = true; 903 903 } 904 904 ··· 915 915 */ 916 916 ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 917 917 rx_stats->rs_rate); 918 - RX_STAT_INC(rx_rate_err); 918 + RX_STAT_INC(sc, rx_rate_err); 919 919 return -EINVAL; 920 920 } 921 921 ··· 1136 1136 * skb and put it at the tail of the sc->rx.rxbuf list for 1137 1137 * processing. 
*/ 1138 1138 if (!requeue_skb) { 1139 - RX_STAT_INC(rx_oom_err); 1139 + RX_STAT_INC(sc, rx_oom_err); 1140 1140 goto requeue_drop_frag; 1141 1141 } 1142 1142 ··· 1164 1164 rxs, decrypt_error); 1165 1165 1166 1166 if (rs.rs_more) { 1167 - RX_STAT_INC(rx_frags); 1167 + RX_STAT_INC(sc, rx_frags); 1168 1168 /* 1169 1169 * rs_more indicates chained descriptors which can be 1170 1170 * used to link buffers together for a sort of ··· 1174 1174 /* too many fragments - cannot handle frame */ 1175 1175 dev_kfree_skb_any(sc->rx.frag); 1176 1176 dev_kfree_skb_any(skb); 1177 - RX_STAT_INC(rx_too_many_frags_err); 1177 + RX_STAT_INC(sc, rx_too_many_frags_err); 1178 1178 skb = NULL; 1179 1179 } 1180 1180 sc->rx.frag = skb; ··· 1186 1186 1187 1187 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 1188 1188 dev_kfree_skb(skb); 1189 - RX_STAT_INC(rx_oom_err); 1189 + RX_STAT_INC(sc, rx_oom_err); 1190 1190 goto requeue_drop_frag; 1191 1191 } 1192 1192
+9 -9
drivers/net/wireless/ath/ath9k/xmit.c
··· 391 391 struct ieee80211_hdr *hdr; 392 392 int prev = fi->retries; 393 393 394 - TX_STAT_INC(txq->axq_qnum, a_retries); 394 + TX_STAT_INC(sc, txq->axq_qnum, a_retries); 395 395 fi->retries += count; 396 396 397 397 if (prev > 0) ··· 1105 1105 al = get_frame_info(bf->bf_mpdu)->framelen; 1106 1106 bf->bf_state.bf_type = BUF_AMPDU; 1107 1107 } else { 1108 - TX_STAT_INC(txq->axq_qnum, a_aggr); 1108 + TX_STAT_INC(sc, txq->axq_qnum, a_aggr); 1109 1109 } 1110 1110 1111 1111 return al; ··· 1727 1727 bf_tail = bf; 1728 1728 nframes--; 1729 1729 sent++; 1730 - TX_STAT_INC(txq->axq_qnum, a_queued_hw); 1730 + TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw); 1731 1731 1732 1732 if (an->sta && skb_queue_empty(&tid->retry_q)) 1733 1733 ieee80211_sta_set_buffered(an->sta, i, false); ··· 2110 2110 } 2111 2111 2112 2112 if (puttxbuf) { 2113 - TX_STAT_INC(txq->axq_qnum, puttxbuf); 2113 + TX_STAT_INC(sc, txq->axq_qnum, puttxbuf); 2114 2114 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 2115 2115 ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n", 2116 2116 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 2117 2117 } 2118 2118 2119 2119 if (!edma || sc->tx99_state) { 2120 - TX_STAT_INC(txq->axq_qnum, txstart); 2120 + TX_STAT_INC(sc, txq->axq_qnum, txstart); 2121 2121 ath9k_hw_txstart(ah, txq->axq_qnum); 2122 2122 } 2123 2123 ··· 2154 2154 bf->bf_lastbf = bf; 2155 2155 ath_tx_fill_desc(sc, bf, txq, fi->framelen); 2156 2156 ath_tx_txqaddbuf(sc, txq, &bf_head, false); 2157 - TX_STAT_INC(txq->axq_qnum, queued); 2157 + TX_STAT_INC(sc, txq->axq_qnum, queued); 2158 2158 } 2159 2159 2160 2160 static void setup_frame_info(struct ieee80211_hw *hw, ··· 2486 2486 ath_txq_lock(sc, txctl.txq); 2487 2487 ath_tx_fill_desc(sc, bf, txctl.txq, 0); 2488 2488 ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false); 2489 - TX_STAT_INC(txctl.txq->axq_qnum, queued); 2489 + TX_STAT_INC(sc, txctl.txq->axq_qnum, queued); 2490 2490 ath_txq_unlock(sc, txctl.txq); 2491 2491 } 2492 2492 ··· 2699 2699 if (status == 
-EINPROGRESS) 2700 2700 break; 2701 2701 2702 - TX_STAT_INC(txq->axq_qnum, txprocdesc); 2702 + TX_STAT_INC(sc, txq->axq_qnum, txprocdesc); 2703 2703 2704 2704 /* 2705 2705 * Remove ath_buf's of the same transmit unit from txq, ··· 2778 2778 2779 2779 ath_txq_lock(sc, txq); 2780 2780 2781 - TX_STAT_INC(txq->axq_qnum, txprocdesc); 2781 + TX_STAT_INC(sc, txq->axq_qnum, txprocdesc); 2782 2782 2783 2783 fifo_list = &txq->txq_fifo[txq->txq_tailidx]; 2784 2784 if (list_empty(fifo_list)) {
+8 -6
drivers/net/wireless/ath/wil6210/debugfs.c
··· 416 416 return 0; 417 417 } 418 418 419 - DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, 420 - wil_debugfs_iomem_x32_set, "0x%08llx\n"); 419 + DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, 420 + wil_debugfs_iomem_x32_set, "0x%08llx\n"); 421 421 422 422 static struct dentry *wil_debugfs_create_iomem_x32(const char *name, 423 423 umode_t mode, ··· 432 432 data->wil = wil; 433 433 data->offset = value; 434 434 435 - file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32); 435 + file = debugfs_create_file_unsafe(name, mode, parent, data, 436 + &fops_iomem_x32); 436 437 if (!IS_ERR_OR_NULL(file)) 437 438 wil->dbg_data.iomem_data_count++; 438 439 ··· 452 451 return 0; 453 452 } 454 453 455 - DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, 456 - wil_debugfs_ulong_set, "0x%llx\n"); 454 + DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, 455 + wil_debugfs_ulong_set, "0x%llx\n"); 457 456 458 457 static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, 459 458 struct dentry *parent, 460 459 ulong *value) 461 460 { 462 - return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong); 461 + return debugfs_create_file_unsafe(name, mode, parent, value, 462 + &wil_fops_ulong); 463 463 } 464 464 465 465 /**
+26
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
··· 502 502 } 503 503 504 504 spin_lock_bh(&wl->lock); 505 + wl->wlc->vif = vif; 505 506 wl->mute_tx = false; 506 507 brcms_c_mute(wl->wlc, false); 507 508 if (vif->type == NL80211_IFTYPE_STATION) ··· 520 519 static void 521 520 brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 522 521 { 522 + struct brcms_info *wl = hw->priv; 523 + 524 + spin_lock_bh(&wl->lock); 525 + wl->wlc->vif = NULL; 526 + spin_unlock_bh(&wl->lock); 523 527 } 524 528 525 529 static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed) ··· 943 937 spin_unlock_bh(&wl->lock); 944 938 } 945 939 940 + static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw, 941 + struct ieee80211_sta *sta, bool set) 942 + { 943 + struct brcms_info *wl = hw->priv; 944 + struct sk_buff *beacon = NULL; 945 + u16 tim_offset = 0; 946 + 947 + spin_lock_bh(&wl->lock); 948 + if (wl->wlc->vif) 949 + beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif, 950 + &tim_offset, NULL); 951 + if (beacon) 952 + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset, 953 + wl->wlc->vif->bss_conf.dtim_period); 954 + spin_unlock_bh(&wl->lock); 955 + 956 + return 0; 957 + } 958 + 946 959 static const struct ieee80211_ops brcms_ops = { 947 960 .tx = brcms_ops_tx, 948 961 .start = brcms_ops_start, ··· 980 955 .flush = brcms_ops_flush, 981 956 .get_tsf = brcms_ops_get_tsf, 982 957 .set_tsf = brcms_ops_set_tsf, 958 + .set_tim = brcms_ops_beacon_set_tim, 983 959 }; 984 960 985 961 void brcms_dpc(unsigned long data)
+1
drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
··· 563 563 564 564 struct wiphy *wiphy; 565 565 struct scb pri_scb; 566 + struct ieee80211_vif *vif; 566 567 567 568 struct sk_buff *beacon; 568 569 u16 beacon_tim_offset;
+2
drivers/net/wireless/intel/iwlegacy/4965.c
··· 1297 1297 const struct il_rxon_cmd *rxon1 = &il->staging; 1298 1298 const struct il_rxon_cmd *rxon2 = &il->active; 1299 1299 1300 + lockdep_assert_held(&il->mutex); 1301 + 1300 1302 if (rxon1->flags == rxon2->flags && 1301 1303 rxon1->filter_flags == rxon2->filter_flags && 1302 1304 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+20 -7
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
··· 1154 1154 } 1155 1155 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); 1156 1156 1157 - void iwl_fw_error_dump_wk(struct work_struct *work) 1157 + /* this function assumes dump_start was called beforehand and dump_end will be 1158 + * called afterwards 1159 + */ 1160 + void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt) 1158 1161 { 1159 - struct iwl_fw_runtime *fwrt = 1160 - container_of(work, struct iwl_fw_runtime, dump.wk.work); 1161 1162 struct iwl_fw_dbg_params params = {0}; 1162 1163 1163 - if (fwrt->ops && fwrt->ops->dump_start && 1164 - fwrt->ops->dump_start(fwrt->ops_ctx)) 1164 + if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) 1165 1165 return; 1166 1166 1167 1167 if (fwrt->ops && fwrt->ops->fw_running && ··· 1169 1169 IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); 1170 1170 iwl_fw_free_dump_desc(fwrt); 1171 1171 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); 1172 - goto out; 1172 + return; 1173 1173 } 1174 1174 1175 1175 iwl_fw_dbg_stop_recording(fwrt, &params); ··· 1183 1183 udelay(500); 1184 1184 iwl_fw_dbg_restart_recording(fwrt, &params); 1185 1185 } 1186 - out: 1186 + } 1187 + IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync); 1188 + 1189 + void iwl_fw_error_dump_wk(struct work_struct *work) 1190 + { 1191 + struct iwl_fw_runtime *fwrt = 1192 + container_of(work, struct iwl_fw_runtime, dump.wk.work); 1193 + 1194 + if (fwrt->ops && fwrt->ops->dump_start && 1195 + fwrt->ops->dump_start(fwrt->ops_ctx)) 1196 + return; 1197 + 1198 + iwl_fw_dbg_collect_sync(fwrt); 1199 + 1187 1200 if (fwrt->ops && fwrt->ops->dump_end) 1188 1201 fwrt->ops->dump_end(fwrt->ops_ctx); 1189 1202 }
+1
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
··· 367 367 #endif /* CONFIG_IWLWIFI_DEBUGFS */ 368 368 369 369 void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); 370 + void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt); 370 371 #endif /* __iwl_fw_dbg_h__ */
+6 -24
drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
··· 30 30 #undef TRACE_SYSTEM 31 31 #define TRACE_SYSTEM iwlwifi_data 32 32 33 - TRACE_EVENT(iwlwifi_dev_tx_data, 34 - TP_PROTO(const struct device *dev, 35 - struct sk_buff *skb, u8 hdr_len), 36 - TP_ARGS(dev, skb, hdr_len), 33 + TRACE_EVENT(iwlwifi_dev_tx_tb, 34 + TP_PROTO(const struct device *dev, struct sk_buff *skb, 35 + u8 *data_src, size_t data_len), 36 + TP_ARGS(dev, skb, data_src, data_len), 37 37 TP_STRUCT__entry( 38 38 DEV_ENTRY 39 39 40 40 __dynamic_array(u8, data, 41 - iwl_trace_data(skb) ? skb->len - hdr_len : 0) 41 + iwl_trace_data(skb) ? data_len : 0) 42 42 ), 43 43 TP_fast_assign( 44 44 DEV_ASSIGN; 45 45 if (iwl_trace_data(skb)) 46 - skb_copy_bits(skb, hdr_len, 47 - __get_dynamic_array(data), 48 - skb->len - hdr_len); 49 - ), 50 - TP_printk("[%s] TX frame data", __get_str(dev)) 51 - ); 52 - 53 - TRACE_EVENT(iwlwifi_dev_tx_tso_chunk, 54 - TP_PROTO(const struct device *dev, 55 - u8 *data_src, size_t data_len), 56 - TP_ARGS(dev, data_src, data_len), 57 - TP_STRUCT__entry( 58 - DEV_ENTRY 59 - 60 - __dynamic_array(u8, data, data_len) 61 - ), 62 - TP_fast_assign( 63 - DEV_ASSIGN; 64 - memcpy(__get_dynamic_array(data), data_src, data_len); 46 + memcpy(__get_dynamic_array(data), data_src, data_len); 65 47 ), 66 48 TP_printk("[%s] TX frame data", __get_str(dev)) 67 49 );
+38 -26
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 722 722 { 723 723 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {}; 724 724 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {}; 725 + bool unified = fw_has_capa(&mvm->fw->ucode_capa, 726 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 725 727 struct wowlan_key_data key_data = { 726 - .configure_keys = !d0i3, 728 + .configure_keys = !d0i3 && !unified, 727 729 .use_rsc_tsc = false, 728 730 .tkip = &tkip_cmd, 729 731 .use_tkip = false, ··· 1638 1636 } 1639 1637 1640 1638 static struct iwl_wowlan_status * 1641 - iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1639 + iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm) 1642 1640 { 1643 - u32 base = mvm->error_event_table[0]; 1644 - struct error_table_start { 1645 - /* cf. struct iwl_error_event_table */ 1646 - u32 valid; 1647 - u32 error_id; 1648 - } err_info; 1649 1641 int ret; 1650 - 1651 - iwl_trans_read_mem_bytes(mvm->trans, base, 1652 - &err_info, sizeof(err_info)); 1653 - 1654 - if (err_info.valid) { 1655 - IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n", 1656 - err_info.valid, err_info.error_id); 1657 - if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 1658 - struct cfg80211_wowlan_wakeup wakeup = { 1659 - .rfkill_release = true, 1660 - }; 1661 - ieee80211_report_wowlan_wakeup(vif, &wakeup, 1662 - GFP_KERNEL); 1663 - } 1664 - return ERR_PTR(-EIO); 1665 - } 1666 1642 1667 1643 /* only for tracing for now */ 1668 1644 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL); ··· 1660 1680 bool keep; 1661 1681 struct iwl_mvm_sta *mvm_ap_sta; 1662 1682 1663 - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1683 + fw_status = iwl_mvm_get_wakeup_status(mvm); 1664 1684 if (IS_ERR_OR_NULL(fw_status)) 1665 1685 goto out_unlock; 1666 1686 ··· 1785 1805 u32 reasons = 0; 1786 1806 int i, j, n_matches, ret; 1787 1807 1788 - fw_status = iwl_mvm_get_wakeup_status(mvm, vif); 1808 + fw_status = iwl_mvm_get_wakeup_status(mvm); 1789 1809 if (!IS_ERR_OR_NULL(fw_status)) { 1790 1810 
reasons = le32_to_cpu(fw_status->wakeup_reasons); 1791 1811 kfree(fw_status); ··· 1898 1918 ieee80211_resume_disconnect(vif); 1899 1919 } 1900 1920 1921 + static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 1922 + struct ieee80211_vif *vif) 1923 + { 1924 + u32 base = mvm->error_event_table[0]; 1925 + struct error_table_start { 1926 + /* cf. struct iwl_error_event_table */ 1927 + u32 valid; 1928 + u32 error_id; 1929 + } err_info; 1930 + 1931 + iwl_trans_read_mem_bytes(mvm->trans, base, 1932 + &err_info, sizeof(err_info)); 1933 + 1934 + if (err_info.valid && 1935 + err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 1936 + struct cfg80211_wowlan_wakeup wakeup = { 1937 + .rfkill_release = true, 1938 + }; 1939 + ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); 1940 + } 1941 + return err_info.valid; 1942 + } 1943 + 1901 1944 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) 1902 1945 { 1903 1946 struct ieee80211_vif *vif = NULL; ··· 1951 1948 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); 1952 1949 /* query SRAM first in case we want event logging */ 1953 1950 iwl_mvm_read_d3_sram(mvm); 1951 + 1952 + if (iwl_mvm_check_rt_status(mvm, vif)) { 1953 + set_bit(STATUS_FW_ERROR, &mvm->trans->status); 1954 + iwl_mvm_dump_nic_error_log(mvm); 1955 + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, 1956 + NULL, 0); 1957 + ret = 1; 1958 + goto err; 1959 + } 1954 1960 1955 1961 if (d0i3_first) { 1956 1962 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+8 -1
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
··· 364 364 */ 365 365 366 366 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); 367 - mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; 367 + /* 368 + * Set a 'fake' TID for the command queue, since we use the 369 + * hweight() of the tid_bitmap as a refcount now. Not that 370 + * we ever even consider the command queue as one we might 371 + * want to reuse, but be safe nevertheless. 372 + */ 373 + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = 374 + BIT(IWL_MAX_TID_COUNT + 2); 368 375 369 376 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) 370 377 atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
+18 -36
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 512 512 IWL_SCAN_TYPE_WILD, 513 513 IWL_SCAN_TYPE_MILD, 514 514 IWL_SCAN_TYPE_FRAGMENTED, 515 + IWL_SCAN_TYPE_FAST_BALANCE, 515 516 }; 516 517 517 518 enum iwl_mvm_sched_scan_pass_all_states { ··· 754 753 * This is a state in which a single queue serves more than one TID, all of 755 754 * which are not aggregated. Note that the queue is only associated to one 756 755 * RA. 757 - * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it 758 - * This is a state of a queue that has had traffic on it, but during the 759 - * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on 760 - * it. In this state, when a new queue is needed to be allocated but no 761 - * such free queue exists, an inactive queue might be freed and given to 762 - * the new RA/TID. 763 - * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured 764 - * This is the state of a queue that has had traffic pass through it, but 765 - * needs to be reconfigured for some reason, e.g. the queue needs to 766 - * become unshared and aggregations re-enabled on. 
767 756 */ 768 757 enum iwl_mvm_queue_status { 769 758 IWL_MVM_QUEUE_FREE, 770 759 IWL_MVM_QUEUE_RESERVED, 771 760 IWL_MVM_QUEUE_READY, 772 761 IWL_MVM_QUEUE_SHARED, 773 - IWL_MVM_QUEUE_INACTIVE, 774 - IWL_MVM_QUEUE_RECONFIGURING, 775 762 }; 776 763 777 764 #define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) ··· 774 785 775 786 struct iwl_mvm_geo_profile { 776 787 u8 values[ACPI_GEO_TABLE_SIZE]; 788 + }; 789 + 790 + struct iwl_mvm_dqa_txq_info { 791 + u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ 792 + bool reserved; /* Is this the TXQ reserved for a STA */ 793 + u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ 794 + u8 txq_tid; /* The TID "owner" of this queue*/ 795 + u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ 796 + /* Timestamp for inactivation per TID of this queue */ 797 + unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; 798 + enum iwl_mvm_queue_status status; 777 799 }; 778 800 779 801 struct iwl_mvm { ··· 843 843 844 844 u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; 845 845 846 - struct { 847 - u8 hw_queue_refcount; 848 - u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ 849 - bool reserved; /* Is this the TXQ reserved for a STA */ 850 - u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ 851 - u8 txq_tid; /* The TID "owner" of this queue*/ 852 - u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ 853 - /* Timestamp for inactivation per TID of this queue */ 854 - unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; 855 - enum iwl_mvm_queue_status status; 856 - } queue_info[IWL_MAX_HW_QUEUES]; 846 + struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; 857 847 spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ 858 848 struct work_struct add_stream_wk; /* To add streams to queues */ 859 849 ··· 1873 1883 mvmvif->low_latency &= ~cause; 1874 1884 } 1875 1885 1876 - /* hw scheduler queue config */ 1877 - bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int 
mac80211_queue, 1878 - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, 1879 - unsigned int wdg_timeout); 1880 - int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, 1881 - u8 sta_id, u8 tid, unsigned int timeout); 1882 - 1883 - int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 1884 - u8 tid, u8 flags); 1885 - int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); 1886 - 1887 1886 /* Return a bitmask with all the hw supported queues, except for the 1888 1887 * command queue, which can't be flushed. 1889 1888 */ ··· 1884 1905 1885 1906 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) 1886 1907 { 1908 + lockdep_assert_held(&mvm->mutex); 1909 + /* calling this function without using dump_start/end since at this 1910 + * point we already hold the op mode mutex 1911 + */ 1912 + iwl_fw_dbg_collect_sync(&mvm->fwrt); 1887 1913 iwl_fw_cancel_timestamp(&mvm->fwrt); 1888 1914 iwl_free_fw_paging(&mvm->fwrt); 1889 1915 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); ··· 1973 1989 void iwl_mvm_reorder_timer_expired(struct timer_list *t); 1974 1990 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); 1975 1991 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); 1976 - 1977 - void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); 1978 1992 1979 1993 #define MVM_TCM_PERIOD_MSEC 500 1980 1994 #define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+19 -5
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
··· 1239 1239 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 1240 1240 return; 1241 1241 1242 - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); 1242 + if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, 1243 + &tx_resp_rate)) { 1244 + WARN_ON_ONCE(1); 1245 + return; 1246 + } 1243 1247 1244 1248 #ifdef CONFIG_MAC80211_DEBUGFS 1245 1249 /* Disable last tx check if we are debugging with fixed rate but ··· 1294 1290 */ 1295 1291 table = &lq_sta->lq; 1296 1292 lq_hwrate = le32_to_cpu(table->rs_table[0]); 1297 - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1293 + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { 1294 + WARN_ON_ONCE(1); 1295 + return; 1296 + } 1298 1297 1299 1298 /* Here we actually compare this rate to the latest LQ command */ 1300 1299 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { ··· 1399 1392 /* Collect data for each rate used during failed TX attempts */ 1400 1393 for (i = 0; i <= retries; ++i) { 1401 1394 lq_hwrate = le32_to_cpu(table->rs_table[i]); 1402 - rs_rate_from_ucode_rate(lq_hwrate, info->band, 1403 - &lq_rate); 1395 + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, 1396 + &lq_rate)) { 1397 + WARN_ON_ONCE(1); 1398 + return; 1399 + } 1400 + 1404 1401 /* 1405 1402 * Only collect stats if retried rate is in the same RS 1406 1403 * table as active/search. ··· 3271 3260 for (i = 0; i < num_rates; i++) 3272 3261 lq_cmd->rs_table[i] = ucode_rate_le32; 3273 3262 3274 - rs_rate_from_ucode_rate(ucode_rate, band, &rate); 3263 + if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) { 3264 + WARN_ON_ONCE(1); 3265 + return; 3266 + } 3275 3267 3276 3268 if (is_mimo(&rate)) 3277 3269 lq_cmd->mimo_delim = num_rates - 1;
+85 -30
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 110 110 .suspend_time = 95, 111 111 .max_out_time = 44, 112 112 }, 113 + [IWL_SCAN_TYPE_FAST_BALANCE] = { 114 + .suspend_time = 30, 115 + .max_out_time = 37, 116 + }, 113 117 }; 114 118 115 119 struct iwl_mvm_scan_params { ··· 239 235 return mvm->tcm.result.band_load[band]; 240 236 } 241 237 238 + struct iwl_is_dcm_with_go_iterator_data { 239 + struct ieee80211_vif *current_vif; 240 + bool is_dcm_with_p2p_go; 241 + }; 242 + 243 + static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac, 244 + struct ieee80211_vif *vif) 245 + { 246 + struct iwl_is_dcm_with_go_iterator_data *data = _data; 247 + struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); 248 + struct iwl_mvm_vif *curr_mvmvif = 249 + iwl_mvm_vif_from_mac80211(data->current_vif); 250 + 251 + /* exclude the given vif */ 252 + if (vif == data->current_vif) 253 + return; 254 + 255 + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && 256 + other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && 257 + other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) 258 + data->is_dcm_with_p2p_go = true; 259 + } 260 + 242 261 static enum 243 - iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, 262 + iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, 263 + struct ieee80211_vif *vif, 244 264 enum iwl_mvm_traffic_load load, 245 265 bool low_latency) 246 266 { ··· 277 249 if (!global_cnt) 278 250 return IWL_SCAN_TYPE_UNASSOC; 279 251 280 - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && 281 - fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) 282 - return IWL_SCAN_TYPE_FRAGMENTED; 252 + if (fw_has_api(&mvm->fw->ucode_capa, 253 + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { 254 + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && 255 + (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) 256 + return IWL_SCAN_TYPE_FRAGMENTED; 257 + 258 + /* in case of DCM with GO where BSS DTIM interval < 220msec 259 + * set all scan requests as fast-balance 
scan 260 + * */ 261 + if (vif && vif->type == NL80211_IFTYPE_STATION && 262 + vif->bss_conf.dtim_period < 220) { 263 + struct iwl_is_dcm_with_go_iterator_data data = { 264 + .current_vif = vif, 265 + .is_dcm_with_p2p_go = false, 266 + }; 267 + 268 + ieee80211_iterate_active_interfaces_atomic(mvm->hw, 269 + IEEE80211_IFACE_ITER_NORMAL, 270 + iwl_mvm_is_dcm_with_go_iterator, 271 + &data); 272 + if (data.is_dcm_with_p2p_go) 273 + return IWL_SCAN_TYPE_FAST_BALANCE; 274 + } 275 + } 283 276 284 277 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) 285 278 return IWL_SCAN_TYPE_MILD; ··· 309 260 } 310 261 311 262 static enum 312 - iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) 263 + iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, 264 + struct ieee80211_vif *vif) 313 265 { 314 266 enum iwl_mvm_traffic_load load; 315 267 bool low_latency; ··· 318 268 load = iwl_mvm_get_traffic_load(mvm); 319 269 low_latency = iwl_mvm_low_latency(mvm); 320 270 321 - return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); 271 + return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); 322 272 } 323 273 324 274 static enum 325 275 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, 326 - bool p2p_device, 276 + struct ieee80211_vif *vif, 327 277 enum nl80211_band band) 328 278 { 329 279 enum iwl_mvm_traffic_load load; ··· 332 282 load = iwl_mvm_get_traffic_load_band(mvm, band); 333 283 low_latency = iwl_mvm_low_latency_band(mvm, band); 334 284 335 - return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); 285 + return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); 336 286 } 337 287 338 288 static int ··· 910 860 params->scan_plans[0].iterations == 1; 911 861 } 912 862 863 + static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type) 864 + { 865 + return (type == IWL_SCAN_TYPE_FRAGMENTED || 866 + type == IWL_SCAN_TYPE_FAST_BALANCE); 867 + } 868 + 913 869 static int iwl_mvm_scan_lmac_flags(struct 
iwl_mvm *mvm, 914 870 struct iwl_mvm_scan_params *params, 915 871 struct ieee80211_vif *vif) ··· 928 872 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 929 873 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; 930 874 931 - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) 875 + if (iwl_mvm_is_scan_fragmented(params->type)) 932 876 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; 933 877 934 878 if (iwl_mvm_rrm_scan_needed(mvm) && ··· 951 895 952 896 if (iwl_mvm_is_regular_scan(params) && 953 897 vif->type != NL80211_IFTYPE_P2P_DEVICE && 954 - params->type != IWL_SCAN_TYPE_FRAGMENTED) 898 + !iwl_mvm_is_scan_fragmented(params->type)) 955 899 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; 956 900 957 901 return flags; ··· 1100 1044 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, 1101 1045 u32 flags, u8 channel_flags) 1102 1046 { 1103 - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); 1047 + enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); 1104 1048 struct iwl_scan_config_v1 *cfg = config; 1105 1049 1106 1050 cfg->flags = cpu_to_le32(flags); ··· 1133 1077 if (iwl_mvm_is_cdb_supported(mvm)) { 1134 1078 enum iwl_mvm_scan_type lb_type, hb_type; 1135 1079 1136 - lb_type = iwl_mvm_get_scan_type_band(mvm, false, 1080 + lb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1137 1081 NL80211_BAND_2GHZ); 1138 - hb_type = iwl_mvm_get_scan_type_band(mvm, false, 1082 + hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1139 1083 NL80211_BAND_5GHZ); 1140 1084 1141 1085 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = ··· 1149 1093 cpu_to_le32(scan_timing[hb_type].suspend_time); 1150 1094 } else { 1151 1095 enum iwl_mvm_scan_type type = 1152 - iwl_mvm_get_scan_type(mvm, false); 1096 + iwl_mvm_get_scan_type(mvm, NULL); 1153 1097 1154 1098 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = 1155 1099 cpu_to_le32(scan_timing[type].max_out_time); ··· 1186 1130 return -ENOBUFS; 1187 1131 1188 1132 if (iwl_mvm_is_cdb_supported(mvm)) { 1189 - type 
= iwl_mvm_get_scan_type_band(mvm, false, 1133 + type = iwl_mvm_get_scan_type_band(mvm, NULL, 1190 1134 NL80211_BAND_2GHZ); 1191 - hb_type = iwl_mvm_get_scan_type_band(mvm, false, 1135 + hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1192 1136 NL80211_BAND_5GHZ); 1193 1137 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) 1194 1138 return 0; 1195 1139 } else { 1196 - type = iwl_mvm_get_scan_type(mvm, false); 1140 + type = iwl_mvm_get_scan_type(mvm, NULL); 1197 1141 if (type == mvm->scan_type) 1198 1142 return 0; 1199 1143 } ··· 1218 1162 SCAN_CONFIG_FLAG_SET_MAC_ADDR | 1219 1163 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | 1220 1164 SCAN_CONFIG_N_CHANNELS(num_channels) | 1221 - (type == IWL_SCAN_TYPE_FRAGMENTED ? 1165 + (iwl_mvm_is_scan_fragmented(type) ? 1222 1166 SCAN_CONFIG_FLAG_SET_FRAGMENTED : 1223 1167 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); 1224 1168 ··· 1233 1177 */ 1234 1178 if (iwl_mvm_cdb_scan_api(mvm)) { 1235 1179 if (iwl_mvm_is_cdb_supported(mvm)) 1236 - flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? 1180 + flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? 
1237 1181 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : 1238 1182 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; 1239 1183 iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); ··· 1394 1338 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 1395 1339 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; 1396 1340 1397 - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) 1341 + if (iwl_mvm_is_scan_fragmented(params->type)) 1398 1342 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; 1399 1343 1400 1344 if (iwl_mvm_is_cdb_supported(mvm) && 1401 - params->hb_type == IWL_SCAN_TYPE_FRAGMENTED) 1345 + iwl_mvm_is_scan_fragmented(params->hb_type)) 1402 1346 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; 1403 1347 1404 1348 if (iwl_mvm_rrm_scan_needed(mvm) && ··· 1436 1380 */ 1437 1381 if (iwl_mvm_is_regular_scan(params) && 1438 1382 vif->type != NL80211_IFTYPE_P2P_DEVICE && 1439 - params->type != IWL_SCAN_TYPE_FRAGMENTED && 1383 + !iwl_mvm_is_scan_fragmented(params->type) && 1440 1384 !iwl_mvm_is_adaptive_dwell_supported(mvm) && 1441 1385 !iwl_mvm_is_oce_supported(mvm)) 1442 1386 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; ··· 1645 1589 1646 1590 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, 1647 1591 struct iwl_mvm_scan_params *params, 1648 - bool p2p) 1592 + struct ieee80211_vif *vif) 1649 1593 { 1650 1594 if (iwl_mvm_is_cdb_supported(mvm)) { 1651 1595 params->type = 1652 - iwl_mvm_get_scan_type_band(mvm, p2p, 1596 + iwl_mvm_get_scan_type_band(mvm, vif, 1653 1597 NL80211_BAND_2GHZ); 1654 1598 params->hb_type = 1655 - iwl_mvm_get_scan_type_band(mvm, p2p, 1599 + iwl_mvm_get_scan_type_band(mvm, vif, 1656 1600 NL80211_BAND_5GHZ); 1657 1601 } else { 1658 - params->type = iwl_mvm_get_scan_type(mvm, p2p); 1602 + params->type = iwl_mvm_get_scan_type(mvm, vif); 1659 1603 } 1660 1604 } 1605 + 1661 1606 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1662 1607 struct cfg80211_scan_request *req, 1663 1608 struct ieee80211_scan_ies *ies) ··· 1706 1649 
params.scan_plans = &scan_plan; 1707 1650 params.n_scan_plans = 1; 1708 1651 1709 - iwl_mvm_fill_scan_type(mvm, &params, 1710 - vif->type == NL80211_IFTYPE_P2P_DEVICE); 1652 + iwl_mvm_fill_scan_type(mvm, &params, vif); 1711 1653 1712 1654 ret = iwl_mvm_get_measurement_dwell(mvm, req, &params); 1713 1655 if (ret < 0) ··· 1801 1745 params.n_scan_plans = req->n_scan_plans; 1802 1746 params.scan_plans = req->scan_plans; 1803 1747 1804 - iwl_mvm_fill_scan_type(mvm, &params, 1805 - vif->type == NL80211_IFTYPE_P2P_DEVICE); 1748 + iwl_mvm_fill_scan_type(mvm, &params, vif); 1806 1749 1807 1750 /* In theory, LMAC scans can handle a 32-bit delay, but since 1808 1751 * waiting for over 18 hours to start the scan is a bit silly
+640 -269
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 358 358 return ret; 359 359 } 360 360 361 + static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, 362 + int mac80211_queue, u8 tid, u8 flags) 363 + { 364 + struct iwl_scd_txq_cfg_cmd cmd = { 365 + .scd_queue = queue, 366 + .action = SCD_CFG_DISABLE_QUEUE, 367 + }; 368 + bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; 369 + int ret; 370 + 371 + if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) 372 + return -EINVAL; 373 + 374 + if (iwl_mvm_has_new_tx_api(mvm)) { 375 + spin_lock_bh(&mvm->queue_info_lock); 376 + 377 + if (remove_mac_queue) 378 + mvm->hw_queue_to_mac80211[queue] &= 379 + ~BIT(mac80211_queue); 380 + 381 + spin_unlock_bh(&mvm->queue_info_lock); 382 + 383 + iwl_trans_txq_free(mvm->trans, queue); 384 + 385 + return 0; 386 + } 387 + 388 + spin_lock_bh(&mvm->queue_info_lock); 389 + 390 + if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) { 391 + spin_unlock_bh(&mvm->queue_info_lock); 392 + return 0; 393 + } 394 + 395 + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 396 + 397 + /* 398 + * If there is another TID with the same AC - don't remove the MAC queue 399 + * from the mapping 400 + */ 401 + if (tid < IWL_MAX_TID_COUNT) { 402 + unsigned long tid_bitmap = 403 + mvm->queue_info[queue].tid_bitmap; 404 + int ac = tid_to_mac80211_ac[tid]; 405 + int i; 406 + 407 + for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { 408 + if (tid_to_mac80211_ac[i] == ac) 409 + remove_mac_queue = false; 410 + } 411 + } 412 + 413 + if (remove_mac_queue) 414 + mvm->hw_queue_to_mac80211[queue] &= 415 + ~BIT(mac80211_queue); 416 + 417 + cmd.action = mvm->queue_info[queue].tid_bitmap ? 
418 + SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; 419 + if (cmd.action == SCD_CFG_DISABLE_QUEUE) 420 + mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; 421 + 422 + IWL_DEBUG_TX_QUEUES(mvm, 423 + "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", 424 + queue, 425 + mvm->queue_info[queue].tid_bitmap, 426 + mvm->hw_queue_to_mac80211[queue]); 427 + 428 + /* If the queue is still enabled - nothing left to do in this func */ 429 + if (cmd.action == SCD_CFG_ENABLE_QUEUE) { 430 + spin_unlock_bh(&mvm->queue_info_lock); 431 + return 0; 432 + } 433 + 434 + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 435 + cmd.tid = mvm->queue_info[queue].txq_tid; 436 + 437 + /* Make sure queue info is correct even though we overwrite it */ 438 + WARN(mvm->queue_info[queue].tid_bitmap || 439 + mvm->hw_queue_to_mac80211[queue], 440 + "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n", 441 + queue, mvm->hw_queue_to_mac80211[queue], 442 + mvm->queue_info[queue].tid_bitmap); 443 + 444 + /* If we are here - the queue is freed and we can zero out these vals */ 445 + mvm->queue_info[queue].tid_bitmap = 0; 446 + mvm->hw_queue_to_mac80211[queue] = 0; 447 + 448 + /* Regardless if this is a reserved TXQ for a STA - mark it as false */ 449 + mvm->queue_info[queue].reserved = false; 450 + 451 + spin_unlock_bh(&mvm->queue_info_lock); 452 + 453 + iwl_trans_txq_disable(mvm->trans, queue, false); 454 + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, 455 + sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); 456 + 457 + if (ret) 458 + IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", 459 + queue, ret); 460 + return ret; 461 + } 462 + 361 463 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) 362 464 { 363 465 struct ieee80211_sta *sta; ··· 549 447 } 550 448 551 449 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, 552 - bool same_sta) 450 + u8 new_sta_id) 553 451 { 554 452 struct iwl_mvm_sta *mvmsta; 555 453 u8 txq_curr_ac, sta_id, tid; 556 454 unsigned long 
disable_agg_tids = 0; 455 + bool same_sta; 557 456 int ret; 558 457 559 458 lockdep_assert_held(&mvm->mutex); ··· 567 464 sta_id = mvm->queue_info[queue].ra_sta_id; 568 465 tid = mvm->queue_info[queue].txq_tid; 569 466 spin_unlock_bh(&mvm->queue_info_lock); 467 + 468 + same_sta = sta_id == new_sta_id; 570 469 571 470 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); 572 471 if (WARN_ON(!mvmsta)) ··· 584 479 mvmsta->vif->hw_queue[txq_curr_ac], 585 480 tid, 0); 586 481 if (ret) { 587 - /* Re-mark the inactive queue as inactive */ 588 - spin_lock_bh(&mvm->queue_info_lock); 589 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; 590 - spin_unlock_bh(&mvm->queue_info_lock); 591 482 IWL_ERR(mvm, 592 483 "Failed to free inactive queue %d (ret=%d)\n", 593 484 queue, ret); ··· 605 504 u8 ac_to_queue[IEEE80211_NUM_ACS]; 606 505 int i; 607 506 507 + /* 508 + * This protects us against grabbing a queue that's being reconfigured 509 + * by the inactivity checker. 510 + */ 511 + lockdep_assert_held(&mvm->mutex); 608 512 lockdep_assert_held(&mvm->queue_info_lock); 513 + 609 514 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 610 515 return -EINVAL; 611 516 ··· 622 515 /* Only DATA queues can be shared */ 623 516 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && 624 517 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) 625 - continue; 626 - 627 - /* Don't try and take queues being reconfigured */ 628 - if (mvm->queue_info[queue].status == 629 - IWL_MVM_QUEUE_RECONFIGURING) 630 518 continue; 631 519 632 520 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; ··· 664 562 return -ENOSPC; 665 563 } 666 564 667 - /* Make sure the queue isn't in the middle of being reconfigured */ 668 - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) { 669 - IWL_ERR(mvm, 670 - "TXQ %d is in the middle of re-config - try again\n", 671 - queue); 672 - return -EBUSY; 673 - } 674 - 675 565 return queue; 676 566 } 677 567 ··· 673 579 * in such a case, otherwise - if no redirection required - it does nothing, 674 
580 * unless the %force param is true. 675 581 */ 676 - int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 677 - int ac, int ssn, unsigned int wdg_timeout, 678 - bool force) 582 + static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 583 + int ac, int ssn, unsigned int wdg_timeout, 584 + bool force) 679 585 { 680 586 struct iwl_scd_txq_cfg_cmd cmd = { 681 587 .scd_queue = queue, ··· 710 616 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; 711 617 cmd.tid = mvm->queue_info[queue].txq_tid; 712 618 mq = mvm->hw_queue_to_mac80211[queue]; 713 - shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); 619 + shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; 714 620 spin_unlock_bh(&mvm->queue_info_lock); 715 621 716 622 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", ··· 768 674 return ret; 769 675 } 770 676 677 + static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, 678 + u8 minq, u8 maxq) 679 + { 680 + int i; 681 + 682 + lockdep_assert_held(&mvm->queue_info_lock); 683 + 684 + /* This should not be hit with new TX path */ 685 + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 686 + return -ENOSPC; 687 + 688 + /* Start by looking for a free queue */ 689 + for (i = minq; i <= maxq; i++) 690 + if (mvm->queue_info[i].tid_bitmap == 0 && 691 + mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) 692 + return i; 693 + 694 + return -ENOSPC; 695 + } 696 + 697 + static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, 698 + u8 sta_id, u8 tid, unsigned int timeout) 699 + { 700 + int queue, size = IWL_DEFAULT_QUEUE_SIZE; 701 + 702 + if (tid == IWL_MAX_TID_COUNT) { 703 + tid = IWL_MGMT_TID; 704 + size = IWL_MGMT_QUEUE_SIZE; 705 + } 706 + queue = iwl_trans_txq_alloc(mvm->trans, 707 + cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), 708 + sta_id, tid, SCD_QUEUE_CFG, size, timeout); 709 + 710 + if (queue < 0) { 711 + IWL_DEBUG_TX_QUEUES(mvm, 712 + "Failed allocating TXQ 
for sta %d tid %d, ret: %d\n", 713 + sta_id, tid, queue); 714 + return queue; 715 + } 716 + 717 + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", 718 + queue, sta_id, tid); 719 + 720 + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 721 + IWL_DEBUG_TX_QUEUES(mvm, 722 + "Enabling TXQ #%d (mac80211 map:0x%x)\n", 723 + queue, mvm->hw_queue_to_mac80211[queue]); 724 + 725 + return queue; 726 + } 727 + 771 728 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, 772 729 struct ieee80211_sta *sta, u8 ac, 773 730 int tid) ··· 843 698 844 699 spin_lock_bh(&mvmsta->lock); 845 700 mvmsta->tid_data[tid].txq_id = queue; 846 - mvmsta->tid_data[tid].is_tid_active = true; 847 701 spin_unlock_bh(&mvmsta->lock); 848 702 849 703 return 0; 850 704 } 851 705 852 - static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, 853 - struct ieee80211_sta *sta, u8 ac, int tid, 854 - struct ieee80211_hdr *hdr) 706 + static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, 707 + int mac80211_queue, u8 sta_id, u8 tid) 855 708 { 856 - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 857 - struct iwl_trans_txq_scd_cfg cfg = { 858 - .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), 859 - .sta_id = mvmsta->sta_id, 860 - .tid = tid, 861 - .frame_limit = IWL_FRAME_LIMIT, 862 - }; 863 - unsigned int wdg_timeout = 864 - iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); 865 - u8 mac_queue = mvmsta->vif->hw_queue[ac]; 866 - int queue = -1; 867 - bool using_inactive_queue = false, same_sta = false; 868 - unsigned long disable_agg_tids = 0; 869 - enum iwl_mvm_agg_state queue_state; 870 - bool shared_queue = false, inc_ssn; 871 - int ssn; 872 - unsigned long tfd_queue_mask; 873 - int ret; 874 - 875 - lockdep_assert_held(&mvm->mutex); 876 - 877 - if (iwl_mvm_has_new_tx_api(mvm)) 878 - return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 879 - 880 - spin_lock_bh(&mvmsta->lock); 881 - tfd_queue_mask = mvmsta->tfd_queue_msk; 882 - 
spin_unlock_bh(&mvmsta->lock); 709 + bool enable_queue = true; 883 710 884 711 spin_lock_bh(&mvm->queue_info_lock); 885 712 886 - /* 887 - * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one 888 - * exists 889 - */ 890 - if (!ieee80211_is_data_qos(hdr->frame_control) || 891 - ieee80211_is_qos_nullfunc(hdr->frame_control)) { 892 - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 893 - IWL_MVM_DQA_MIN_MGMT_QUEUE, 894 - IWL_MVM_DQA_MAX_MGMT_QUEUE); 895 - if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) 896 - IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", 897 - queue); 898 - 899 - /* If no such queue is found, we'll use a DATA queue instead */ 713 + /* Make sure this TID isn't already enabled */ 714 + if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { 715 + spin_unlock_bh(&mvm->queue_info_lock); 716 + IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", 717 + queue, tid); 718 + return false; 900 719 } 901 720 902 - if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && 903 - (mvm->queue_info[mvmsta->reserved_queue].status == 904 - IWL_MVM_QUEUE_RESERVED || 905 - mvm->queue_info[mvmsta->reserved_queue].status == 906 - IWL_MVM_QUEUE_INACTIVE)) { 907 - queue = mvmsta->reserved_queue; 908 - mvm->queue_info[queue].reserved = true; 909 - IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); 721 + /* Update mappings and refcounts */ 722 + if (mvm->queue_info[queue].tid_bitmap) 723 + enable_queue = false; 724 + 725 + if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { 726 + WARN(mac80211_queue >= 727 + BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), 728 + "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", 729 + mac80211_queue, queue, sta_id, tid); 730 + mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 910 731 } 911 732 912 - if (queue < 0) 913 - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 914 - IWL_MVM_DQA_MIN_DATA_QUEUE, 915 - IWL_MVM_DQA_MAX_DATA_QUEUE); 733 + 
mvm->queue_info[queue].tid_bitmap |= BIT(tid); 734 + mvm->queue_info[queue].ra_sta_id = sta_id; 916 735 917 - /* 918 - * Check if this queue is already allocated but inactive. 919 - * In such a case, we'll need to first free this queue before enabling 920 - * it again, so we'll mark it as reserved to make sure no new traffic 921 - * arrives on it 922 - */ 923 - if (queue > 0 && 924 - mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { 925 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 926 - using_inactive_queue = true; 927 - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; 928 - IWL_DEBUG_TX_QUEUES(mvm, 929 - "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", 930 - queue, mvmsta->sta_id, tid); 931 - } 736 + if (enable_queue) { 737 + if (tid != IWL_MAX_TID_COUNT) 738 + mvm->queue_info[queue].mac80211_ac = 739 + tid_to_mac80211_ac[tid]; 740 + else 741 + mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; 932 742 933 - /* No free queue - we'll have to share */ 934 - if (queue <= 0) { 935 - queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); 936 - if (queue > 0) { 937 - shared_queue = true; 938 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; 939 - } 940 - } 941 - 942 - /* 943 - * Mark TXQ as ready, even though it hasn't been fully configured yet, 944 - * to make sure no one else takes it. 945 - * This will allow avoiding re-acquiring the lock at the end of the 946 - * configuration. On error we'll mark it back as free. 
947 - */ 948 - if ((queue > 0) && !shared_queue) 949 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 950 - 951 - spin_unlock_bh(&mvm->queue_info_lock); 952 - 953 - /* This shouldn't happen - out of queues */ 954 - if (WARN_ON(queue <= 0)) { 955 - IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", 956 - tid, cfg.sta_id); 957 - return queue; 958 - } 959 - 960 - /* 961 - * Actual en/disablement of aggregations is through the ADD_STA HCMD, 962 - * but for configuring the SCD to send A-MPDUs we need to mark the queue 963 - * as aggregatable. 964 - * Mark all DATA queues as allowing to be aggregated at some point 965 - */ 966 - cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 967 - queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 968 - 969 - /* 970 - * If this queue was previously inactive (idle) - we need to free it 971 - * first 972 - */ 973 - if (using_inactive_queue) { 974 - ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta); 975 - if (ret) 976 - return ret; 743 + mvm->queue_info[queue].txq_tid = tid; 977 744 } 978 745 979 746 IWL_DEBUG_TX_QUEUES(mvm, 980 - "Allocating %squeue #%d to sta %d on tid %d\n", 981 - shared_queue ? 
"shared " : "", queue, 982 - mvmsta->sta_id, tid); 747 + "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", 748 + queue, mvm->queue_info[queue].tid_bitmap, 749 + mvm->hw_queue_to_mac80211[queue]); 983 750 984 - if (shared_queue) { 985 - /* Disable any open aggs on this queue */ 986 - disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); 751 + spin_unlock_bh(&mvm->queue_info_lock); 987 752 988 - if (disable_agg_tids) { 989 - IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", 990 - queue); 991 - iwl_mvm_invalidate_sta_queue(mvm, queue, 992 - disable_agg_tids, false); 993 - } 994 - } 995 - 996 - ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 997 - inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, 998 - ssn, &cfg, wdg_timeout); 999 - if (inc_ssn) { 1000 - ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; 1001 - le16_add_cpu(&hdr->seq_ctrl, 0x10); 1002 - } 1003 - 1004 - /* 1005 - * Mark queue as shared in transport if shared 1006 - * Note this has to be done after queue enablement because enablement 1007 - * can also set this value, and there is no indication there to shared 1008 - * queues 1009 - */ 1010 - if (shared_queue) 1011 - iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 1012 - 1013 - spin_lock_bh(&mvmsta->lock); 1014 - /* 1015 - * This looks racy, but it is not. We have only one packet for 1016 - * this ra/tid in our Tx path since we stop the Qdisc when we 1017 - * need to allocate a new TFD queue. 
1018 - */ 1019 - if (inc_ssn) 1020 - mvmsta->tid_data[tid].seq_number += 0x10; 1021 - mvmsta->tid_data[tid].txq_id = queue; 1022 - mvmsta->tid_data[tid].is_tid_active = true; 1023 - mvmsta->tfd_queue_msk |= BIT(queue); 1024 - queue_state = mvmsta->tid_data[tid].state; 1025 - 1026 - if (mvmsta->reserved_queue == queue) 1027 - mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 1028 - spin_unlock_bh(&mvmsta->lock); 1029 - 1030 - if (!shared_queue) { 1031 - ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 1032 - if (ret) 1033 - goto out_err; 1034 - 1035 - /* If we need to re-enable aggregations... */ 1036 - if (queue_state == IWL_AGG_ON) { 1037 - ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 1038 - if (ret) 1039 - goto out_err; 1040 - } 1041 - } else { 1042 - /* Redirect queue, if needed */ 1043 - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, 1044 - wdg_timeout, false); 1045 - if (ret) 1046 - goto out_err; 1047 - } 1048 - 1049 - return 0; 1050 - 1051 - out_err: 1052 - iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); 1053 - 1054 - return ret; 753 + return enable_queue; 1055 754 } 1056 755 1057 - static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue) 756 + static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, 757 + int mac80211_queue, u16 ssn, 758 + const struct iwl_trans_txq_scd_cfg *cfg, 759 + unsigned int wdg_timeout) 760 + { 761 + struct iwl_scd_txq_cfg_cmd cmd = { 762 + .scd_queue = queue, 763 + .action = SCD_CFG_ENABLE_QUEUE, 764 + .window = cfg->frame_limit, 765 + .sta_id = cfg->sta_id, 766 + .ssn = cpu_to_le16(ssn), 767 + .tx_fifo = cfg->fifo, 768 + .aggregate = cfg->aggregate, 769 + .tid = cfg->tid, 770 + }; 771 + bool inc_ssn; 772 + 773 + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 774 + return false; 775 + 776 + /* Send the enabling command if we need to */ 777 + if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, 778 + cfg->sta_id, cfg->tid)) 779 + return false; 780 + 781 + inc_ssn = 
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, 782 + NULL, wdg_timeout); 783 + if (inc_ssn) 784 + le16_add_cpu(&cmd.ssn, 1); 785 + 786 + WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), 787 + "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); 788 + 789 + return inc_ssn; 790 + } 791 + 792 + static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) 1058 793 { 1059 794 struct iwl_scd_txq_cfg_cmd cmd = { 1060 795 .scd_queue = queue, ··· 1057 1032 spin_unlock_bh(&mvm->queue_info_lock); 1058 1033 } 1059 1034 1035 + /* 1036 + * Remove inactive TIDs of a given queue. 1037 + * If all queue TIDs are inactive - mark the queue as inactive 1038 + * If only some the queue TIDs are inactive - unmap them from the queue 1039 + * 1040 + * Returns %true if all TIDs were removed and the queue could be reused. 1041 + */ 1042 + static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, 1043 + struct iwl_mvm_sta *mvmsta, int queue, 1044 + unsigned long tid_bitmap, 1045 + unsigned long *unshare_queues, 1046 + unsigned long *changetid_queues) 1047 + { 1048 + int tid; 1049 + 1050 + lockdep_assert_held(&mvmsta->lock); 1051 + lockdep_assert_held(&mvm->queue_info_lock); 1052 + 1053 + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1054 + return false; 1055 + 1056 + /* Go over all non-active TIDs, incl. 
IWL_MAX_TID_COUNT (for mgmt) */ 1057 + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1058 + /* If some TFDs are still queued - don't mark TID as inactive */ 1059 + if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) 1060 + tid_bitmap &= ~BIT(tid); 1061 + 1062 + /* Don't mark as inactive any TID that has an active BA */ 1063 + if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) 1064 + tid_bitmap &= ~BIT(tid); 1065 + } 1066 + 1067 + /* If all TIDs in the queue are inactive - return it can be reused */ 1068 + if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { 1069 + IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue); 1070 + return true; 1071 + } 1072 + 1073 + /* 1074 + * If we are here, this is a shared queue and not all TIDs timed-out. 1075 + * Remove the ones that did. 1076 + */ 1077 + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1078 + int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; 1079 + u16 tid_bitmap; 1080 + 1081 + mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 1082 + mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); 1083 + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 1084 + 1085 + tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1086 + 1087 + /* 1088 + * We need to take into account a situation in which a TXQ was 1089 + * allocated to TID x, and then turned shared by adding TIDs y 1090 + * and z. If TID x becomes inactive and is removed from the TXQ, 1091 + * ownership must be given to one of the remaining TIDs. 1092 + * This is mainly because if TID x continues - a new queue can't 1093 + * be allocated for it as long as it is an owner of another TXQ. 1094 + * 1095 + * Mark this queue in the right bitmap, we'll send the command 1096 + * to the firmware later. 
1097 + */ 1098 + if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) 1099 + set_bit(queue, changetid_queues); 1100 + 1101 + IWL_DEBUG_TX_QUEUES(mvm, 1102 + "Removing inactive TID %d from shared Q:%d\n", 1103 + tid, queue); 1104 + } 1105 + 1106 + IWL_DEBUG_TX_QUEUES(mvm, 1107 + "TXQ #%d left with tid bitmap 0x%x\n", queue, 1108 + mvm->queue_info[queue].tid_bitmap); 1109 + 1110 + /* 1111 + * There may be different TIDs with the same mac queues, so make 1112 + * sure all TIDs have existing corresponding mac queues enabled 1113 + */ 1114 + tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1115 + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1116 + mvm->hw_queue_to_mac80211[queue] |= 1117 + BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); 1118 + } 1119 + 1120 + /* If the queue is marked as shared - "unshare" it */ 1121 + if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && 1122 + mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { 1123 + IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", 1124 + queue); 1125 + set_bit(queue, unshare_queues); 1126 + } 1127 + 1128 + return false; 1129 + } 1130 + 1131 + /* 1132 + * Check for inactivity - this includes checking if any queue 1133 + * can be unshared and finding one (and only one) that can be 1134 + * reused. 1135 + * This function is also invoked as a sort of clean-up task, 1136 + * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. 1137 + * 1138 + * Returns the queue number, or -ENOSPC. 
1139 + */ 1140 + static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) 1141 + { 1142 + unsigned long now = jiffies; 1143 + unsigned long unshare_queues = 0; 1144 + unsigned long changetid_queues = 0; 1145 + int i, ret, free_queue = -ENOSPC; 1146 + 1147 + lockdep_assert_held(&mvm->mutex); 1148 + 1149 + if (iwl_mvm_has_new_tx_api(mvm)) 1150 + return -ENOSPC; 1151 + 1152 + spin_lock_bh(&mvm->queue_info_lock); 1153 + 1154 + rcu_read_lock(); 1155 + 1156 + /* we skip the CMD queue below by starting at 1 */ 1157 + BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0); 1158 + 1159 + for (i = 1; i < IWL_MAX_HW_QUEUES; i++) { 1160 + struct ieee80211_sta *sta; 1161 + struct iwl_mvm_sta *mvmsta; 1162 + u8 sta_id; 1163 + int tid; 1164 + unsigned long inactive_tid_bitmap = 0; 1165 + unsigned long queue_tid_bitmap; 1166 + 1167 + queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; 1168 + if (!queue_tid_bitmap) 1169 + continue; 1170 + 1171 + /* If TXQ isn't in active use anyway - nothing to do here... */ 1172 + if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && 1173 + mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) 1174 + continue; 1175 + 1176 + /* Check to see if there are inactive TIDs on this queue */ 1177 + for_each_set_bit(tid, &queue_tid_bitmap, 1178 + IWL_MAX_TID_COUNT + 1) { 1179 + if (time_after(mvm->queue_info[i].last_frame_time[tid] + 1180 + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) 1181 + continue; 1182 + 1183 + inactive_tid_bitmap |= BIT(tid); 1184 + } 1185 + 1186 + /* If all TIDs are active - finish check on this queue */ 1187 + if (!inactive_tid_bitmap) 1188 + continue; 1189 + 1190 + /* 1191 + * If we are here - the queue hadn't been served recently and is 1192 + * in use 1193 + */ 1194 + 1195 + sta_id = mvm->queue_info[i].ra_sta_id; 1196 + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 1197 + 1198 + /* 1199 + * If the STA doesn't exist anymore, it isn't an error. 
It could 1200 + * be that it was removed since getting the queues, and in this 1201 + * case it should've inactivated its queues anyway. 1202 + */ 1203 + if (IS_ERR_OR_NULL(sta)) 1204 + continue; 1205 + 1206 + mvmsta = iwl_mvm_sta_from_mac80211(sta); 1207 + 1208 + /* this isn't so nice, but works OK due to the way we loop */ 1209 + spin_unlock(&mvm->queue_info_lock); 1210 + 1211 + /* and we need this locking order */ 1212 + spin_lock(&mvmsta->lock); 1213 + spin_lock(&mvm->queue_info_lock); 1214 + ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, 1215 + inactive_tid_bitmap, 1216 + &unshare_queues, 1217 + &changetid_queues); 1218 + if (ret >= 0 && free_queue < 0) 1219 + free_queue = ret; 1220 + /* only unlock sta lock - we still need the queue info lock */ 1221 + spin_unlock(&mvmsta->lock); 1222 + } 1223 + 1224 + rcu_read_unlock(); 1225 + spin_unlock_bh(&mvm->queue_info_lock); 1226 + 1227 + /* Reconfigure queues requiring reconfiguation */ 1228 + for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) 1229 + iwl_mvm_unshare_queue(mvm, i); 1230 + for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) 1231 + iwl_mvm_change_queue_tid(mvm, i); 1232 + 1233 + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { 1234 + ret = iwl_mvm_free_inactive_queue(mvm, free_queue, 1235 + alloc_for_sta); 1236 + if (ret) 1237 + return ret; 1238 + } 1239 + 1240 + return free_queue; 1241 + } 1242 + 1243 + static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, 1244 + struct ieee80211_sta *sta, u8 ac, int tid, 1245 + struct ieee80211_hdr *hdr) 1246 + { 1247 + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1248 + struct iwl_trans_txq_scd_cfg cfg = { 1249 + .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), 1250 + .sta_id = mvmsta->sta_id, 1251 + .tid = tid, 1252 + .frame_limit = IWL_FRAME_LIMIT, 1253 + }; 1254 + unsigned int wdg_timeout = 1255 + iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); 1256 + u8 mac_queue = mvmsta->vif->hw_queue[ac]; 1257 + int queue = -1; 
1258 + unsigned long disable_agg_tids = 0; 1259 + enum iwl_mvm_agg_state queue_state; 1260 + bool shared_queue = false, inc_ssn; 1261 + int ssn; 1262 + unsigned long tfd_queue_mask; 1263 + int ret; 1264 + 1265 + lockdep_assert_held(&mvm->mutex); 1266 + 1267 + if (iwl_mvm_has_new_tx_api(mvm)) 1268 + return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 1269 + 1270 + spin_lock_bh(&mvmsta->lock); 1271 + tfd_queue_mask = mvmsta->tfd_queue_msk; 1272 + spin_unlock_bh(&mvmsta->lock); 1273 + 1274 + spin_lock_bh(&mvm->queue_info_lock); 1275 + 1276 + /* 1277 + * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one 1278 + * exists 1279 + */ 1280 + if (!ieee80211_is_data_qos(hdr->frame_control) || 1281 + ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1282 + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 1283 + IWL_MVM_DQA_MIN_MGMT_QUEUE, 1284 + IWL_MVM_DQA_MAX_MGMT_QUEUE); 1285 + if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) 1286 + IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", 1287 + queue); 1288 + 1289 + /* If no such queue is found, we'll use a DATA queue instead */ 1290 + } 1291 + 1292 + if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && 1293 + (mvm->queue_info[mvmsta->reserved_queue].status == 1294 + IWL_MVM_QUEUE_RESERVED)) { 1295 + queue = mvmsta->reserved_queue; 1296 + mvm->queue_info[queue].reserved = true; 1297 + IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); 1298 + } 1299 + 1300 + if (queue < 0) 1301 + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 1302 + IWL_MVM_DQA_MIN_DATA_QUEUE, 1303 + IWL_MVM_DQA_MAX_DATA_QUEUE); 1304 + if (queue < 0) { 1305 + spin_unlock_bh(&mvm->queue_info_lock); 1306 + 1307 + /* try harder - perhaps kill an inactive queue */ 1308 + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); 1309 + 1310 + spin_lock_bh(&mvm->queue_info_lock); 1311 + } 1312 + 1313 + /* No free queue - we'll have to share */ 1314 + if (queue <= 0) { 1315 + queue = iwl_mvm_get_shared_queue(mvm, 
tfd_queue_mask, ac); 1316 + if (queue > 0) { 1317 + shared_queue = true; 1318 + mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; 1319 + } 1320 + } 1321 + 1322 + /* 1323 + * Mark TXQ as ready, even though it hasn't been fully configured yet, 1324 + * to make sure no one else takes it. 1325 + * This will allow avoiding re-acquiring the lock at the end of the 1326 + * configuration. On error we'll mark it back as free. 1327 + */ 1328 + if (queue > 0 && !shared_queue) 1329 + mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 1330 + 1331 + spin_unlock_bh(&mvm->queue_info_lock); 1332 + 1333 + /* This shouldn't happen - out of queues */ 1334 + if (WARN_ON(queue <= 0)) { 1335 + IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", 1336 + tid, cfg.sta_id); 1337 + return queue; 1338 + } 1339 + 1340 + /* 1341 + * Actual en/disablement of aggregations is through the ADD_STA HCMD, 1342 + * but for configuring the SCD to send A-MPDUs we need to mark the queue 1343 + * as aggregatable. 1344 + * Mark all DATA queues as allowing to be aggregated at some point 1345 + */ 1346 + cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 1347 + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 1348 + 1349 + IWL_DEBUG_TX_QUEUES(mvm, 1350 + "Allocating %squeue #%d to sta %d on tid %d\n", 1351 + shared_queue ? 
"shared " : "", queue, 1352 + mvmsta->sta_id, tid); 1353 + 1354 + if (shared_queue) { 1355 + /* Disable any open aggs on this queue */ 1356 + disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); 1357 + 1358 + if (disable_agg_tids) { 1359 + IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", 1360 + queue); 1361 + iwl_mvm_invalidate_sta_queue(mvm, queue, 1362 + disable_agg_tids, false); 1363 + } 1364 + } 1365 + 1366 + ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1367 + inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, 1368 + ssn, &cfg, wdg_timeout); 1369 + if (inc_ssn) { 1370 + ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; 1371 + le16_add_cpu(&hdr->seq_ctrl, 0x10); 1372 + } 1373 + 1374 + /* 1375 + * Mark queue as shared in transport if shared 1376 + * Note this has to be done after queue enablement because enablement 1377 + * can also set this value, and there is no indication there to shared 1378 + * queues 1379 + */ 1380 + if (shared_queue) 1381 + iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 1382 + 1383 + spin_lock_bh(&mvmsta->lock); 1384 + /* 1385 + * This looks racy, but it is not. We have only one packet for 1386 + * this ra/tid in our Tx path since we stop the Qdisc when we 1387 + * need to allocate a new TFD queue. 1388 + */ 1389 + if (inc_ssn) 1390 + mvmsta->tid_data[tid].seq_number += 0x10; 1391 + mvmsta->tid_data[tid].txq_id = queue; 1392 + mvmsta->tfd_queue_msk |= BIT(queue); 1393 + queue_state = mvmsta->tid_data[tid].state; 1394 + 1395 + if (mvmsta->reserved_queue == queue) 1396 + mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 1397 + spin_unlock_bh(&mvmsta->lock); 1398 + 1399 + if (!shared_queue) { 1400 + ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 1401 + if (ret) 1402 + goto out_err; 1403 + 1404 + /* If we need to re-enable aggregations... 
*/ 1405 + if (queue_state == IWL_AGG_ON) { 1406 + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 1407 + if (ret) 1408 + goto out_err; 1409 + } 1410 + } else { 1411 + /* Redirect queue, if needed */ 1412 + ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, 1413 + wdg_timeout, false); 1414 + if (ret) 1415 + goto out_err; 1416 + } 1417 + 1418 + return 0; 1419 + 1420 + out_err: 1421 + iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); 1422 + 1423 + return ret; 1424 + } 1425 + 1060 1426 static inline u8 iwl_mvm_tid_to_ac_queue(int tid) 1061 1427 { 1062 1428 if (tid == IWL_MAX_TID_COUNT) ··· 1516 1100 struct ieee80211_sta *sta; 1517 1101 struct iwl_mvm_sta *mvmsta; 1518 1102 unsigned long deferred_tid_traffic; 1519 - int queue, sta_id, tid; 1520 - 1521 - /* Check inactivity of queues */ 1522 - iwl_mvm_inactivity_check(mvm); 1103 + int sta_id, tid; 1523 1104 1524 1105 mutex_lock(&mvm->mutex); 1525 1106 1526 - /* No queue reconfiguration in TVQM mode */ 1527 - if (iwl_mvm_has_new_tx_api(mvm)) 1528 - goto alloc_queues; 1107 + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); 1529 1108 1530 - /* Reconfigure queues requiring reconfiguation */ 1531 - for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) { 1532 - bool reconfig; 1533 - bool change_owner; 1534 - 1535 - spin_lock_bh(&mvm->queue_info_lock); 1536 - reconfig = (mvm->queue_info[queue].status == 1537 - IWL_MVM_QUEUE_RECONFIGURING); 1538 - 1539 - /* 1540 - * We need to take into account a situation in which a TXQ was 1541 - * allocated to TID x, and then turned shared by adding TIDs y 1542 - * and z. If TID x becomes inactive and is removed from the TXQ, 1543 - * ownership must be given to one of the remaining TIDs. 1544 - * This is mainly because if TID x continues - a new queue can't 1545 - * be allocated for it as long as it is an owner of another TXQ. 
1546 - */ 1547 - change_owner = !(mvm->queue_info[queue].tid_bitmap & 1548 - BIT(mvm->queue_info[queue].txq_tid)) && 1549 - (mvm->queue_info[queue].status == 1550 - IWL_MVM_QUEUE_SHARED); 1551 - spin_unlock_bh(&mvm->queue_info_lock); 1552 - 1553 - if (reconfig) 1554 - iwl_mvm_unshare_queue(mvm, queue); 1555 - else if (change_owner) 1556 - iwl_mvm_change_queue_owner(mvm, queue); 1557 - } 1558 - 1559 - alloc_queues: 1560 1109 /* Go over all stations with deferred traffic */ 1561 1110 for_each_set_bit(sta_id, mvm->sta_deferred_frames, 1562 1111 IWL_MVM_STATION_COUNT) { ··· 1548 1167 { 1549 1168 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1550 1169 int queue; 1551 - bool using_inactive_queue = false, same_sta = false; 1552 1170 1553 1171 /* queue reserving is disabled on new TX path */ 1554 1172 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1555 1173 return 0; 1556 1174 1557 - /* 1558 - * Check for inactive queues, so we don't reach a situation where we 1559 - * can't add a STA due to a shortage in queues that doesn't really exist 1560 - */ 1561 - iwl_mvm_inactivity_check(mvm); 1175 + /* run the general cleanup/unsharing of queues */ 1176 + iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); 1562 1177 1563 1178 spin_lock_bh(&mvm->queue_info_lock); 1564 1179 1565 1180 /* Make sure we have free resources for this STA */ 1566 1181 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && 1567 - !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && 1182 + !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && 1568 1183 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == 1569 1184 IWL_MVM_QUEUE_FREE)) 1570 1185 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; ··· 1570 1193 IWL_MVM_DQA_MAX_DATA_QUEUE); 1571 1194 if (queue < 0) { 1572 1195 spin_unlock_bh(&mvm->queue_info_lock); 1573 - IWL_ERR(mvm, "No available queues for new station\n"); 1574 - return -ENOSPC; 1575 - } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { 1576 - /* 1577 
- * If this queue is already allocated but inactive we'll need to 1578 - * first free this queue before enabling it again, we'll mark 1579 - * it as reserved to make sure no new traffic arrives on it 1580 - */ 1581 - using_inactive_queue = true; 1582 - same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id; 1196 + /* try again - this time kick out a queue if needed */ 1197 + queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); 1198 + if (queue < 0) { 1199 + IWL_ERR(mvm, "No available queues for new station\n"); 1200 + return -ENOSPC; 1201 + } 1202 + spin_lock_bh(&mvm->queue_info_lock); 1583 1203 } 1584 1204 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 1585 1205 1586 1206 spin_unlock_bh(&mvm->queue_info_lock); 1587 1207 1588 1208 mvmsta->reserved_queue = queue; 1589 - 1590 - if (using_inactive_queue) 1591 - iwl_mvm_free_inactive_queue(mvm, queue, same_sta); 1592 1209 1593 1210 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", 1594 1211 queue, mvmsta->sta_id);
-8
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
··· 312 312 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 313 313 * we are ready to finish the Tx AGG stop / start flow. 314 314 * @tx_time: medium time consumed by this A-MPDU 315 - * @is_tid_active: has this TID sent traffic in the last 316 - * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this 317 - * field should be ignored. 318 315 * @tpt_meas_start: time of the throughput measurements start, is reset every HZ 319 316 * @tx_count_last: number of frames transmitted during the last second 320 317 * @tx_count: counts the number of frames transmitted since the last reset of ··· 329 332 u16 txq_id; 330 333 u16 ssn; 331 334 u16 tx_time; 332 - bool is_tid_active; 333 335 unsigned long tpt_meas_start; 334 336 u32 tx_count_last; 335 337 u32 tx_count; ··· 567 571 bool disable); 568 572 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 569 573 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); 570 - 571 - int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 572 - int ac, int ssn, unsigned int wdg_timeout, 573 - bool force); 574 574 575 575 #endif /* __sta_h__ */
+9 -25
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 1140 1140 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); 1141 1141 1142 1142 /* Check if TXQ needs to be allocated or re-activated */ 1143 - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || 1144 - !mvmsta->tid_data[tid].is_tid_active)) { 1145 - /* If TXQ needs to be allocated... */ 1146 - if (txq_id == IWL_MVM_INVALID_QUEUE) { 1147 - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); 1143 + if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) { 1144 + iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); 1148 1145 1149 - /* 1150 - * The frame is now deferred, and the worker scheduled 1151 - * will re-allocate it, so we can free it for now. 1152 - */ 1153 - iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 1154 - spin_unlock(&mvmsta->lock); 1155 - return 0; 1156 - } 1157 - 1158 - /* queue should always be active in new TX path */ 1159 - WARN_ON(iwl_mvm_has_new_tx_api(mvm)); 1160 - 1161 - /* If we are here - TXQ exists and needs to be re-activated */ 1162 - spin_lock(&mvm->queue_info_lock); 1163 - mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; 1164 - mvmsta->tid_data[tid].is_tid_active = true; 1165 - spin_unlock(&mvm->queue_info_lock); 1166 - 1167 - IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", 1168 - txq_id); 1146 + /* 1147 + * The frame is now deferred, and the worker scheduled 1148 + * will re-allocate it, so we can free it for now. 1149 + */ 1150 + iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); 1151 + spin_unlock(&mvmsta->lock); 1152 + return 0; 1169 1153 } 1170 1154 1171 1155 if (!iwl_mvm_has_new_tx_api(mvm)) {
+1 -419
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
··· 599 599 iwl_mvm_dump_umac_error_log(mvm); 600 600 } 601 601 602 - int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) 603 - { 604 - int i; 605 - 606 - lockdep_assert_held(&mvm->queue_info_lock); 607 - 608 - /* This should not be hit with new TX path */ 609 - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 610 - return -ENOSPC; 611 - 612 - /* Start by looking for a free queue */ 613 - for (i = minq; i <= maxq; i++) 614 - if (mvm->queue_info[i].hw_queue_refcount == 0 && 615 - mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) 616 - return i; 617 - 618 - /* 619 - * If no free queue found - settle for an inactive one to reconfigure 620 - * Make sure that the inactive queue either already belongs to this STA, 621 - * or that if it belongs to another one - it isn't the reserved queue 622 - */ 623 - for (i = minq; i <= maxq; i++) 624 - if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && 625 - (sta_id == mvm->queue_info[i].ra_sta_id || 626 - !mvm->queue_info[i].reserved)) 627 - return i; 628 - 629 - return -ENOSPC; 630 - } 631 - 632 602 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, 633 603 int tid, int frame_limit, u16 ssn) 634 604 { ··· 619 649 return -EINVAL; 620 650 621 651 spin_lock_bh(&mvm->queue_info_lock); 622 - if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0, 652 + if (WARN(mvm->queue_info[queue].tid_bitmap == 0, 623 653 "Trying to reconfig unallocated queue %d\n", queue)) { 624 654 spin_unlock_bh(&mvm->queue_info_lock); 625 655 return -ENXIO; ··· 632 662 WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", 633 663 queue, fifo, ret); 634 664 635 - return ret; 636 - } 637 - 638 - static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, 639 - int mac80211_queue, u8 sta_id, u8 tid) 640 - { 641 - bool enable_queue = true; 642 - 643 - spin_lock_bh(&mvm->queue_info_lock); 644 - 645 - /* Make sure this TID isn't already enabled */ 646 - if 
(mvm->queue_info[queue].tid_bitmap & BIT(tid)) { 647 - spin_unlock_bh(&mvm->queue_info_lock); 648 - IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", 649 - queue, tid); 650 - return false; 651 - } 652 - 653 - /* Update mappings and refcounts */ 654 - if (mvm->queue_info[queue].hw_queue_refcount > 0) 655 - enable_queue = false; 656 - 657 - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { 658 - WARN(mac80211_queue >= 659 - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), 660 - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", 661 - mac80211_queue, queue, sta_id, tid); 662 - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 663 - } 664 - 665 - mvm->queue_info[queue].hw_queue_refcount++; 666 - mvm->queue_info[queue].tid_bitmap |= BIT(tid); 667 - mvm->queue_info[queue].ra_sta_id = sta_id; 668 - 669 - if (enable_queue) { 670 - if (tid != IWL_MAX_TID_COUNT) 671 - mvm->queue_info[queue].mac80211_ac = 672 - tid_to_mac80211_ac[tid]; 673 - else 674 - mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; 675 - 676 - mvm->queue_info[queue].txq_tid = tid; 677 - } 678 - 679 - IWL_DEBUG_TX_QUEUES(mvm, 680 - "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 681 - queue, mvm->queue_info[queue].hw_queue_refcount, 682 - mvm->hw_queue_to_mac80211[queue]); 683 - 684 - spin_unlock_bh(&mvm->queue_info_lock); 685 - 686 - return enable_queue; 687 - } 688 - 689 - int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, 690 - u8 sta_id, u8 tid, unsigned int timeout) 691 - { 692 - int queue, size = IWL_DEFAULT_QUEUE_SIZE; 693 - 694 - if (tid == IWL_MAX_TID_COUNT) { 695 - tid = IWL_MGMT_TID; 696 - size = IWL_MGMT_QUEUE_SIZE; 697 - } 698 - queue = iwl_trans_txq_alloc(mvm->trans, 699 - cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), 700 - sta_id, tid, SCD_QUEUE_CFG, size, timeout); 701 - 702 - if (queue < 0) { 703 - IWL_DEBUG_TX_QUEUES(mvm, 704 - "Failed allocating TXQ for sta %d tid %d, ret: %d\n", 705 - sta_id, tid, queue); 706 - return queue; 707 
- } 708 - 709 - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", 710 - queue, sta_id, tid); 711 - 712 - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); 713 - IWL_DEBUG_TX_QUEUES(mvm, 714 - "Enabling TXQ #%d (mac80211 map:0x%x)\n", 715 - queue, mvm->hw_queue_to_mac80211[queue]); 716 - 717 - return queue; 718 - } 719 - 720 - bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 721 - u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, 722 - unsigned int wdg_timeout) 723 - { 724 - struct iwl_scd_txq_cfg_cmd cmd = { 725 - .scd_queue = queue, 726 - .action = SCD_CFG_ENABLE_QUEUE, 727 - .window = cfg->frame_limit, 728 - .sta_id = cfg->sta_id, 729 - .ssn = cpu_to_le16(ssn), 730 - .tx_fifo = cfg->fifo, 731 - .aggregate = cfg->aggregate, 732 - .tid = cfg->tid, 733 - }; 734 - bool inc_ssn; 735 - 736 - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 737 - return false; 738 - 739 - /* Send the enabling command if we need to */ 740 - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, 741 - cfg->sta_id, cfg->tid)) 742 - return false; 743 - 744 - inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, 745 - NULL, wdg_timeout); 746 - if (inc_ssn) 747 - le16_add_cpu(&cmd.ssn, 1); 748 - 749 - WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd), 750 - "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo); 751 - 752 - return inc_ssn; 753 - } 754 - 755 - int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, 756 - u8 tid, u8 flags) 757 - { 758 - struct iwl_scd_txq_cfg_cmd cmd = { 759 - .scd_queue = queue, 760 - .action = SCD_CFG_DISABLE_QUEUE, 761 - }; 762 - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; 763 - int ret; 764 - 765 - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) 766 - return -EINVAL; 767 - 768 - if (iwl_mvm_has_new_tx_api(mvm)) { 769 - spin_lock_bh(&mvm->queue_info_lock); 770 - 771 - if (remove_mac_queue) 772 - 
mvm->hw_queue_to_mac80211[queue] &= 773 - ~BIT(mac80211_queue); 774 - 775 - spin_unlock_bh(&mvm->queue_info_lock); 776 - 777 - iwl_trans_txq_free(mvm->trans, queue); 778 - 779 - return 0; 780 - } 781 - 782 - spin_lock_bh(&mvm->queue_info_lock); 783 - 784 - if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) { 785 - spin_unlock_bh(&mvm->queue_info_lock); 786 - return 0; 787 - } 788 - 789 - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 790 - 791 - /* 792 - * If there is another TID with the same AC - don't remove the MAC queue 793 - * from the mapping 794 - */ 795 - if (tid < IWL_MAX_TID_COUNT) { 796 - unsigned long tid_bitmap = 797 - mvm->queue_info[queue].tid_bitmap; 798 - int ac = tid_to_mac80211_ac[tid]; 799 - int i; 800 - 801 - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { 802 - if (tid_to_mac80211_ac[i] == ac) 803 - remove_mac_queue = false; 804 - } 805 - } 806 - 807 - if (remove_mac_queue) 808 - mvm->hw_queue_to_mac80211[queue] &= 809 - ~BIT(mac80211_queue); 810 - mvm->queue_info[queue].hw_queue_refcount--; 811 - 812 - cmd.action = mvm->queue_info[queue].hw_queue_refcount ? 
813 - SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; 814 - if (cmd.action == SCD_CFG_DISABLE_QUEUE) 815 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; 816 - 817 - IWL_DEBUG_TX_QUEUES(mvm, 818 - "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", 819 - queue, 820 - mvm->queue_info[queue].hw_queue_refcount, 821 - mvm->hw_queue_to_mac80211[queue]); 822 - 823 - /* If the queue is still enabled - nothing left to do in this func */ 824 - if (cmd.action == SCD_CFG_ENABLE_QUEUE) { 825 - spin_unlock_bh(&mvm->queue_info_lock); 826 - return 0; 827 - } 828 - 829 - cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 830 - cmd.tid = mvm->queue_info[queue].txq_tid; 831 - 832 - /* Make sure queue info is correct even though we overwrite it */ 833 - WARN(mvm->queue_info[queue].hw_queue_refcount || 834 - mvm->queue_info[queue].tid_bitmap || 835 - mvm->hw_queue_to_mac80211[queue], 836 - "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n", 837 - queue, mvm->queue_info[queue].hw_queue_refcount, 838 - mvm->hw_queue_to_mac80211[queue], 839 - mvm->queue_info[queue].tid_bitmap); 840 - 841 - /* If we are here - the queue is freed and we can zero out these vals */ 842 - mvm->queue_info[queue].hw_queue_refcount = 0; 843 - mvm->queue_info[queue].tid_bitmap = 0; 844 - mvm->hw_queue_to_mac80211[queue] = 0; 845 - 846 - /* Regardless if this is a reserved TXQ for a STA - mark it as false */ 847 - mvm->queue_info[queue].reserved = false; 848 - 849 - spin_unlock_bh(&mvm->queue_info_lock); 850 - 851 - iwl_trans_txq_disable(mvm->trans, queue, false); 852 - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, 853 - sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); 854 - 855 - if (ret) 856 - IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n", 857 - queue, ret); 858 665 return ret; 859 666 } 860 667 ··· 1000 1253 1001 1254 out: 1002 1255 ieee80211_connection_loss(vif); 1003 - } 1004 - 1005 - /* 1006 - * Remove inactive TIDs of a given queue. 
1007 - * If all queue TIDs are inactive - mark the queue as inactive 1008 - * If only some the queue TIDs are inactive - unmap them from the queue 1009 - */ 1010 - static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, 1011 - struct iwl_mvm_sta *mvmsta, int queue, 1012 - unsigned long tid_bitmap) 1013 - { 1014 - int tid; 1015 - 1016 - lockdep_assert_held(&mvmsta->lock); 1017 - lockdep_assert_held(&mvm->queue_info_lock); 1018 - 1019 - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 1020 - return; 1021 - 1022 - /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ 1023 - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1024 - /* If some TFDs are still queued - don't mark TID as inactive */ 1025 - if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid])) 1026 - tid_bitmap &= ~BIT(tid); 1027 - 1028 - /* Don't mark as inactive any TID that has an active BA */ 1029 - if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) 1030 - tid_bitmap &= ~BIT(tid); 1031 - } 1032 - 1033 - /* If all TIDs in the queue are inactive - mark queue as inactive. */ 1034 - if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { 1035 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; 1036 - 1037 - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) 1038 - mvmsta->tid_data[tid].is_tid_active = false; 1039 - 1040 - IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", 1041 - queue); 1042 - return; 1043 - } 1044 - 1045 - /* 1046 - * If we are here, this is a shared queue and not all TIDs timed-out. 1047 - * Remove the ones that did. 
1048 - */ 1049 - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1050 - int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; 1051 - 1052 - mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; 1053 - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); 1054 - mvm->queue_info[queue].hw_queue_refcount--; 1055 - mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); 1056 - mvmsta->tid_data[tid].is_tid_active = false; 1057 - 1058 - IWL_DEBUG_TX_QUEUES(mvm, 1059 - "Removing inactive TID %d from shared Q:%d\n", 1060 - tid, queue); 1061 - } 1062 - 1063 - IWL_DEBUG_TX_QUEUES(mvm, 1064 - "TXQ #%d left with tid bitmap 0x%x\n", queue, 1065 - mvm->queue_info[queue].tid_bitmap); 1066 - 1067 - /* 1068 - * There may be different TIDs with the same mac queues, so make 1069 - * sure all TIDs have existing corresponding mac queues enabled 1070 - */ 1071 - tid_bitmap = mvm->queue_info[queue].tid_bitmap; 1072 - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 1073 - mvm->hw_queue_to_mac80211[queue] |= 1074 - BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); 1075 - } 1076 - 1077 - /* If the queue is marked as shared - "unshare" it */ 1078 - if (mvm->queue_info[queue].hw_queue_refcount == 1 && 1079 - mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) { 1080 - mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING; 1081 - IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n", 1082 - queue); 1083 - } 1084 - } 1085 - 1086 - void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) 1087 - { 1088 - unsigned long timeout_queues_map = 0; 1089 - unsigned long now = jiffies; 1090 - int i; 1091 - 1092 - if (iwl_mvm_has_new_tx_api(mvm)) 1093 - return; 1094 - 1095 - spin_lock_bh(&mvm->queue_info_lock); 1096 - for (i = 0; i < IWL_MAX_HW_QUEUES; i++) 1097 - if (mvm->queue_info[i].hw_queue_refcount > 0) 1098 - timeout_queues_map |= BIT(i); 1099 - spin_unlock_bh(&mvm->queue_info_lock); 1100 - 1101 - rcu_read_lock(); 1102 - 1103 - /* 1104 - * If a queue time outs - mark 
it as INACTIVE (don't remove right away 1105 - * if we don't have to.) This is an optimization in case traffic comes 1106 - * later, and we don't HAVE to use a currently-inactive queue 1107 - */ 1108 - for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { 1109 - struct ieee80211_sta *sta; 1110 - struct iwl_mvm_sta *mvmsta; 1111 - u8 sta_id; 1112 - int tid; 1113 - unsigned long inactive_tid_bitmap = 0; 1114 - unsigned long queue_tid_bitmap; 1115 - 1116 - spin_lock_bh(&mvm->queue_info_lock); 1117 - queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; 1118 - 1119 - /* If TXQ isn't in active use anyway - nothing to do here... */ 1120 - if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && 1121 - mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) { 1122 - spin_unlock_bh(&mvm->queue_info_lock); 1123 - continue; 1124 - } 1125 - 1126 - /* Check to see if there are inactive TIDs on this queue */ 1127 - for_each_set_bit(tid, &queue_tid_bitmap, 1128 - IWL_MAX_TID_COUNT + 1) { 1129 - if (time_after(mvm->queue_info[i].last_frame_time[tid] + 1130 - IWL_MVM_DQA_QUEUE_TIMEOUT, now)) 1131 - continue; 1132 - 1133 - inactive_tid_bitmap |= BIT(tid); 1134 - } 1135 - spin_unlock_bh(&mvm->queue_info_lock); 1136 - 1137 - /* If all TIDs are active - finish check on this queue */ 1138 - if (!inactive_tid_bitmap) 1139 - continue; 1140 - 1141 - /* 1142 - * If we are here - the queue hadn't been served recently and is 1143 - * in use 1144 - */ 1145 - 1146 - sta_id = mvm->queue_info[i].ra_sta_id; 1147 - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 1148 - 1149 - /* 1150 - * If the STA doesn't exist anymore, it isn't an error. It could 1151 - * be that it was removed since getting the queues, and in this 1152 - * case it should've inactivated its queues anyway. 
1153 - */ 1154 - if (IS_ERR_OR_NULL(sta)) 1155 - continue; 1156 - 1157 - mvmsta = iwl_mvm_sta_from_mac80211(sta); 1158 - 1159 - spin_lock_bh(&mvmsta->lock); 1160 - spin_lock(&mvm->queue_info_lock); 1161 - iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, 1162 - inactive_tid_bitmap); 1163 - spin_unlock(&mvm->queue_info_lock); 1164 - spin_unlock_bh(&mvmsta->lock); 1165 - } 1166 - 1167 - rcu_read_unlock(); 1168 1256 } 1169 1257 1170 1258 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+18 -10
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
··· 330 330 goto out_err; 331 331 } 332 332 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); 333 - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len); 333 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len); 334 334 /* add this subframe's headers' length to the tx_cmd */ 335 335 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 336 336 ··· 347 347 goto out_err; 348 348 } 349 349 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); 350 - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, 351 - tb_len); 350 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 351 + tb_len); 352 352 353 353 data_left -= tb_len; 354 354 tso_build_data(skb, &tso, tb_len); ··· 438 438 return -ENOMEM; 439 439 tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, 440 440 skb_frag_size(frag)); 441 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, 442 + skb_frag_address(frag), 443 + skb_frag_size(frag)); 441 444 if (tb_idx < 0) 442 445 return tb_idx; 443 446 ··· 457 454 struct sk_buff *skb, 458 455 struct iwl_cmd_meta *out_meta, 459 456 int hdr_len, 460 - int tx_cmd_len) 457 + int tx_cmd_len, 458 + bool pad) 461 459 { 462 460 int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); 463 461 struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); ··· 482 478 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - 483 479 IWL_FIRST_TB_SIZE; 484 480 485 - tb1_len = ALIGN(len, 4); 481 + if (pad) 482 + tb1_len = ALIGN(len, 4); 483 + else 484 + tb1_len = len; 486 485 487 486 /* map the data for TB1 */ 488 487 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; ··· 493 486 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 494 487 goto out_err; 495 488 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); 489 + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, 490 + IWL_FIRST_TB_SIZE + tb1_len, hdr_len); 496 491 497 492 /* set up TFD's third entry to point to remainder of skb's head */ 498 493 tb2_len = skb_headlen(skb) - hdr_len; ··· 505 496 
if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 506 497 goto out_err; 507 498 iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); 499 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, 500 + skb->data + hdr_len, 501 + tb2_len); 508 502 } 509 503 510 504 if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) 511 505 goto out_err; 512 - 513 - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, 514 - IWL_FIRST_TB_SIZE + tb1_len, hdr_len); 515 - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); 516 506 517 507 return tfd; 518 508 ··· 559 551 out_meta, hdr_len, len); 560 552 561 553 return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, 562 - hdr_len, len); 554 + hdr_len, len, !amsdu); 563 555 } 564 556 565 557 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+17 -12
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 1994 1994 head_tb_len, DMA_TO_DEVICE); 1995 1995 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 1996 1996 return -EINVAL; 1997 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, 1998 + skb->data + hdr_len, 1999 + head_tb_len); 1997 2000 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); 1998 2001 } 1999 2002 ··· 2014 2011 2015 2012 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) 2016 2013 return -EINVAL; 2014 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, 2015 + skb_frag_address(frag), 2016 + skb_frag_size(frag)); 2017 2017 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2018 2018 skb_frag_size(frag), false); 2019 2019 if (tb_idx < 0) ··· 2196 2190 } 2197 2191 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 2198 2192 hdr_tb_len, false); 2199 - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, 2200 - hdr_tb_len); 2193 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 2194 + hdr_tb_len); 2201 2195 /* add this subframe's headers' length to the tx_cmd */ 2202 2196 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); 2203 2197 ··· 2222 2216 2223 2217 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2224 2218 size, false); 2225 - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, 2226 - size); 2219 + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 2220 + size); 2227 2221 2228 2222 data_left -= size; 2229 2223 tso_build_data(skb, &tso, size); ··· 2404 2398 goto out_err; 2405 2399 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2406 2400 2401 + trace_iwlwifi_dev_tx(trans->dev, skb, 2402 + iwl_pcie_get_tfd(trans, txq, 2403 + txq->write_ptr), 2404 + trans_pcie->tfd_size, 2405 + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2406 + hdr_len); 2407 + 2407 2408 /* 2408 2409 * If gso_size wasn't set, don't give the frame "amsdu treatment" 2409 2410 * (adding subframes, etc.). 
··· 2434 2421 out_meta))) 2435 2422 goto out_err; 2436 2423 } 2437 - 2438 - trace_iwlwifi_dev_tx(trans->dev, skb, 2439 - iwl_pcie_get_tfd(trans, txq, 2440 - txq->write_ptr), 2441 - trans_pcie->tfd_size, 2442 - &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2443 - hdr_len); 2444 - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); 2445 2424 } 2446 2425 2447 2426 /* building the A-MSDU might have changed this data, so memcpy it now */
+2 -2
drivers/net/wireless/marvell/libertas/if_cs.c
··· 900 900 901 901 /* Make this card known to the libertas driver */ 902 902 priv = lbs_add_card(card, &p_dev->dev); 903 - if (!priv) { 904 - ret = -ENOMEM; 903 + if (IS_ERR(priv)) { 904 + ret = PTR_ERR(priv); 905 905 goto out2; 906 906 } 907 907
+2 -2
drivers/net/wireless/marvell/libertas/if_sdio.c
··· 1206 1206 1207 1207 1208 1208 priv = lbs_add_card(card, &func->dev); 1209 - if (!priv) { 1210 - ret = -ENOMEM; 1209 + if (IS_ERR(priv)) { 1210 + ret = PTR_ERR(priv); 1211 1211 goto free; 1212 1212 } 1213 1213
+2 -2
drivers/net/wireless/marvell/libertas/if_spi.c
··· 1146 1146 * This will call alloc_etherdev. 1147 1147 */ 1148 1148 priv = lbs_add_card(card, &spi->dev); 1149 - if (!priv) { 1150 - err = -ENOMEM; 1149 + if (IS_ERR(priv)) { 1150 + err = PTR_ERR(priv); 1151 1151 goto free_card; 1152 1152 } 1153 1153 card->priv = priv;
+4 -3
drivers/net/wireless/marvell/libertas/if_usb.c
··· 254 254 goto dealloc; 255 255 } 256 256 257 - if (!(priv = lbs_add_card(cardp, &intf->dev))) 257 + priv = lbs_add_card(cardp, &intf->dev); 258 + if (IS_ERR(priv)) { 259 + r = PTR_ERR(priv); 258 260 goto err_add_card; 261 + } 259 262 260 263 cardp->priv = priv; 261 264 ··· 458 455 skb->data + IPFIELD_ALIGN_OFFSET, 459 456 MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, 460 457 cardp); 461 - 462 - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 463 458 464 459 lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); 465 460 if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
+10 -7
drivers/net/wireless/marvell/libertas/main.c
··· 907 907 struct net_device *dev; 908 908 struct wireless_dev *wdev; 909 909 struct lbs_private *priv = NULL; 910 + int err; 910 911 911 912 /* Allocate an Ethernet device and register it */ 912 913 wdev = lbs_cfg_alloc(dmdev); 913 914 if (IS_ERR(wdev)) { 915 + err = PTR_ERR(wdev); 914 916 pr_err("cfg80211 init failed\n"); 915 - goto done; 917 + goto err_cfg; 916 918 } 917 919 918 920 wdev->iftype = NL80211_IFTYPE_STATION; 919 921 priv = wdev_priv(wdev); 920 922 priv->wdev = wdev; 921 923 922 - if (lbs_init_adapter(priv)) { 924 + err = lbs_init_adapter(priv); 925 + if (err) { 923 926 pr_err("failed to initialize adapter structure\n"); 924 927 goto err_wdev; 925 928 } 926 929 927 930 dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup); 928 931 if (!dev) { 932 + err = -ENOMEM; 929 933 dev_err(dmdev, "no memory for network device instance\n"); 930 934 goto err_adapter; 931 935 } ··· 953 949 init_waitqueue_head(&priv->waitq); 954 950 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); 955 951 if (IS_ERR(priv->main_thread)) { 952 + err = PTR_ERR(priv->main_thread); 956 953 lbs_deb_thread("Error creating main thread.\n"); 957 954 goto err_ndev; 958 955 } ··· 966 961 priv->wol_gap = 20; 967 962 priv->ehs_remove_supported = true; 968 963 969 - goto done; 964 + return priv; 970 965 971 966 err_ndev: 972 967 free_netdev(dev); ··· 977 972 err_wdev: 978 973 lbs_cfg_free(priv); 979 974 980 - priv = NULL; 981 - 982 - done: 983 - return priv; 975 + err_cfg: 976 + return ERR_PTR(err); 984 977 } 985 978 EXPORT_SYMBOL_GPL(lbs_add_card); 986 979
+1
drivers/net/wireless/mediatek/mt76/mmio.c
··· 79 79 .copy = mt76_mmio_copy, 80 80 .wr_rp = mt76_mmio_wr_rp, 81 81 .rd_rp = mt76_mmio_rd_rp, 82 + .type = MT76_BUS_MMIO, 82 83 }; 83 84 84 85 dev->bus = &mt76_mmio_ops;
+9
drivers/net/wireless/mediatek/mt76/mt76.h
··· 38 38 u32 value; 39 39 }; 40 40 41 + enum mt76_bus_type { 42 + MT76_BUS_MMIO, 43 + MT76_BUS_USB, 44 + }; 45 + 41 46 struct mt76_bus_ops { 42 47 u32 (*rr)(struct mt76_dev *dev, u32 offset); 43 48 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); ··· 53 48 const struct mt76_reg_pair *rp, int len); 54 49 int (*rd_rp)(struct mt76_dev *dev, u32 base, 55 50 struct mt76_reg_pair *rp, int len); 51 + enum mt76_bus_type type; 56 52 }; 53 + 54 + #define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB) 55 + #define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO) 57 56 58 57 enum mt76_txq_id { 59 58 MT_TXQ_VO = IEEE80211_AC_VO,
-126
drivers/net/wireless/mediatek/mt76/mt76x0/dma.h
··· 1 - /* 2 - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> 3 - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License version 2 7 - * as published by the Free Software Foundation 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - */ 14 - 15 - #ifndef __MT76X0U_DMA_H 16 - #define __MT76X0U_DMA_H 17 - 18 - #include <asm/unaligned.h> 19 - #include <linux/skbuff.h> 20 - 21 - #define MT_DMA_HDR_LEN 4 22 - #define MT_RX_INFO_LEN 4 23 - #define MT_FCE_INFO_LEN 4 24 - #define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN) 25 - 26 - /* Common Tx DMA descriptor fields */ 27 - #define MT_TXD_INFO_LEN GENMASK(15, 0) 28 - #define MT_TXD_INFO_D_PORT GENMASK(29, 27) 29 - #define MT_TXD_INFO_TYPE GENMASK(31, 30) 30 - 31 - /* Tx DMA MCU command specific flags */ 32 - #define MT_TXD_CMD_SEQ GENMASK(19, 16) 33 - #define MT_TXD_CMD_TYPE GENMASK(26, 20) 34 - 35 - enum mt76_msg_port { 36 - WLAN_PORT, 37 - CPU_RX_PORT, 38 - CPU_TX_PORT, 39 - HOST_PORT, 40 - VIRTUAL_CPU_RX_PORT, 41 - VIRTUAL_CPU_TX_PORT, 42 - DISCARD, 43 - }; 44 - 45 - enum mt76_info_type { 46 - DMA_PACKET, 47 - DMA_COMMAND, 48 - }; 49 - 50 - /* Tx DMA packet specific flags */ 51 - #define MT_TXD_PKT_INFO_NEXT_VLD BIT(16) 52 - #define MT_TXD_PKT_INFO_TX_BURST BIT(17) 53 - #define MT_TXD_PKT_INFO_80211 BIT(19) 54 - #define MT_TXD_PKT_INFO_TSO BIT(20) 55 - #define MT_TXD_PKT_INFO_CSO BIT(21) 56 - #define MT_TXD_PKT_INFO_WIV BIT(24) 57 - #define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25) 58 - 59 - enum mt76_qsel { 60 - MT_QSEL_MGMT, 61 - MT_QSEL_HCCA, 62 - MT_QSEL_EDCA, 63 - MT_QSEL_EDCA_2, 64 - }; 65 - 66 - 67 - static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb, 
68 - enum mt76_msg_port d_port, 69 - enum mt76_info_type type, u32 flags) 70 - { 71 - u32 info; 72 - 73 - /* Buffer layout: 74 - * | 4B | xfer len | pad | 4B | 75 - * | TXINFO | pkt/cmd | zero pad to 4B | zero | 76 - * 77 - * length field of TXINFO should be set to 'xfer len'. 78 - */ 79 - 80 - info = flags | 81 - FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | 82 - FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) | 83 - FIELD_PREP(MT_TXD_INFO_TYPE, type); 84 - 85 - put_unaligned_le32(info, skb_push(skb, sizeof(info))); 86 - return skb_put_padto(skb, round_up(skb->len, 4) + 4); 87 - } 88 - 89 - static inline int 90 - mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags) 91 - { 92 - flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel); 93 - return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags); 94 - } 95 - 96 - /* Common Rx DMA descriptor fields */ 97 - #define MT_RXD_INFO_LEN GENMASK(13, 0) 98 - #define MT_RXD_INFO_PCIE_INTR BIT(24) 99 - #define MT_RXD_INFO_QSEL GENMASK(26, 25) 100 - #define MT_RXD_INFO_PORT GENMASK(29, 27) 101 - #define MT_RXD_INFO_TYPE GENMASK(31, 30) 102 - 103 - /* Rx DMA packet specific flags */ 104 - #define MT_RXD_PKT_INFO_UDP_ERR BIT(16) 105 - #define MT_RXD_PKT_INFO_TCP_ERR BIT(17) 106 - #define MT_RXD_PKT_INFO_IP_ERR BIT(18) 107 - #define MT_RXD_PKT_INFO_PKT_80211 BIT(19) 108 - #define MT_RXD_PKT_INFO_L3L4_DONE BIT(20) 109 - #define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21) 110 - 111 - /* Rx DMA MCU command specific flags */ 112 - #define MT_RXD_CMD_INFO_SELF_GEN BIT(15) 113 - #define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16) 114 - #define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20) 115 - 116 - enum mt76_evt_type { 117 - CMD_DONE, 118 - CMD_ERROR, 119 - CMD_RETRY, 120 - EVENT_PWR_RSP, 121 - EVENT_WOW_RSP, 122 - EVENT_CARRIER_DETECT_RSP, 123 - EVENT_DFS_DETECT_RSP, 124 - }; 125 - 126 - #endif
+26 -29
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
··· 31 31 int ret, i; 32 32 u32 start = 0, end = 0, cnt_free; 33 33 34 - ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START, 35 - data, sizeof(data), MT_EE_PHYSICAL_READ); 34 + ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data, 35 + sizeof(data), MT_EE_PHYSICAL_READ); 36 36 if (ret) 37 37 return ret; 38 38 ··· 55 55 56 56 static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) 57 57 { 58 - u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); 59 - u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); 58 + u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 59 + u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); 60 60 61 - mt76x02_eeprom_parse_hw_cap(&dev->mt76); 61 + mt76x02_eeprom_parse_hw_cap(dev); 62 62 dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", 63 63 dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); 64 64 ··· 86 86 { 87 87 u8 val; 88 88 89 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8; 89 + val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8; 90 90 if (mt76x02_field_valid(val)) 91 91 dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8); 92 92 else ··· 98 98 struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx; 99 99 u8 val; 100 100 101 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET); 101 + val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET); 102 102 if (!mt76x02_field_valid(val)) 103 103 val = 0; 104 104 caldata->freq_offset = val; 105 105 106 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8; 106 + val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8; 107 107 if (!mt76x02_field_valid(val)) 108 108 val = 0; 109 109 ··· 118 118 u16 rssi_offset; 119 119 int i; 120 120 121 - mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset, 122 - &lna_2g, lna_5g); 123 - caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, 124 - lna_5g, chan); 121 + mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g); 122 + caldata->lna_gain = 
mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); 125 123 126 124 for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) { 127 125 val = rssi_offset >> (8 * i); ··· 130 132 } 131 133 } 132 134 133 - static s8 mt76x0_get_delta(struct mt76_dev *dev) 135 + static s8 mt76x0_get_delta(struct mt76x02_dev *dev) 134 136 { 135 - struct cfg80211_chan_def *chandef = &dev->chandef; 137 + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 136 138 u8 val; 137 139 138 - if (mt76x02_tssi_enabled(dev)) 140 + if (mt76x0_tssi_enabled(dev)) 139 141 return 0; 140 142 141 143 if (chandef->width == NL80211_CHAN_WIDTH_80) { ··· 160 162 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 161 163 bool is_2ghz = chan->band == NL80211_BAND_2GHZ; 162 164 struct mt76_rate_power *t = &dev->mt76.rate_power; 163 - s8 delta = mt76x0_get_delta(&dev->mt76); 165 + s8 delta = mt76x0_get_delta(dev); 164 166 u16 val, addr; 165 167 166 168 memset(t, 0, sizeof(*t)); 167 169 168 170 /* cck 1M, 2M, 5.5M, 11M */ 169 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE); 171 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE); 170 172 t->cck[0] = t->cck[1] = s6_to_s8(val); 171 173 t->cck[2] = t->cck[3] = s6_to_s8(val >> 8); 172 174 173 175 /* ofdm 6M, 9M, 12M, 18M */ 174 176 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120; 175 - val = mt76x02_eeprom_get(&dev->mt76, addr); 177 + val = mt76x02_eeprom_get(dev, addr); 176 178 t->ofdm[0] = t->ofdm[1] = s6_to_s8(val); 177 179 t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8); 178 180 179 181 /* ofdm 24M, 36M, 48M, 54M */ 180 182 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122; 181 - val = mt76x02_eeprom_get(&dev->mt76, addr); 183 + val = mt76x02_eeprom_get(dev, addr); 182 184 t->ofdm[4] = t->ofdm[5] = s6_to_s8(val); 183 185 t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8); 184 186 185 187 /* ht-vht mcs 1ss 0, 1, 2, 3 */ 186 188 addr = is_2ghz ? 
MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124; 187 - val = mt76x02_eeprom_get(&dev->mt76, addr); 189 + val = mt76x02_eeprom_get(dev, addr); 188 190 t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val); 189 191 t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8); 190 192 191 193 /* ht-vht mcs 1ss 4, 5, 6 */ 192 194 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126; 193 - val = mt76x02_eeprom_get(&dev->mt76, addr); 195 + val = mt76x02_eeprom_get(dev, addr); 194 196 t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val); 195 197 t->ht[6] = t->vht[6] = s6_to_s8(val >> 8); 196 198 197 199 /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */ 198 200 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec; 199 - val = mt76x02_eeprom_get(&dev->mt76, addr); 201 + val = mt76x02_eeprom_get(dev, addr); 200 202 t->stbc[0] = t->stbc[1] = s6_to_s8(val); 201 203 t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8); 202 204 203 205 /* ht-vht mcs 1ss 4, 5, 6 stbc */ 204 206 addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee; 205 - val = mt76x02_eeprom_get(&dev->mt76, addr); 207 + val = mt76x02_eeprom_get(dev, addr); 206 208 t->stbc[4] = t->stbc[5] = s6_to_s8(val); 207 209 t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8); 208 210 209 211 /* vht mcs 8, 9 5GHz */ 210 - val = mt76x02_eeprom_get(&dev->mt76, 0x132); 212 + val = mt76x02_eeprom_get(dev, 0x132); 211 213 t->vht[7] = s6_to_s8(val); 212 214 t->vht[8] = s6_to_s8(val >> 8); 213 215 ··· 264 266 addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset; 265 267 } 266 268 267 - data = mt76x02_eeprom_get(&dev->mt76, addr); 269 + data = mt76x02_eeprom_get(dev, addr); 268 270 269 271 info[0] = data; 270 272 if (!info[0] || info[0] > 0x3f) ··· 310 312 if (found < 0) 311 313 return found; 312 314 313 - return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data, 315 + return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data, 314 316 MT76X0_EEPROM_SIZE, MT_EE_READ); 315 317 } 316 318 ··· 324 326 if (err < 0) 325 327 return err; 326 
328 327 - data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION); 329 + data = mt76x02_eeprom_get(dev, MT_EE_VERSION); 328 330 version = data >> 8; 329 331 fae = data; 330 332 ··· 335 337 dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", 336 338 version, fae); 337 339 338 - mt76x02_mac_setaddr(&dev->mt76, 339 - dev->mt76.eeprom.data + MT_EE_MAC_ADDR); 340 + mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); 340 341 mt76x0_set_chip_cap(dev); 341 342 mt76x0_set_freq_offset(dev); 342 343 mt76x0_set_temp_offset(dev);
+6
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
··· 37 37 return ret; 38 38 } 39 39 40 + static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev) 41 + { 42 + return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & 43 + MT_EE_NIC_CONF_1_TX_ALC_EN); 44 + } 45 + 40 46 #endif
+6 -3
drivers/net/wireless/mediatek/mt76/mt76x0/init.c
··· 138 138 139 139 RANDOM_WRITE(dev, common_mac_reg_table); 140 140 141 - mt76x02_set_beacon_offsets(&dev->mt76); 141 + mt76x02_set_beacon_offsets(dev); 142 142 143 143 /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ 144 144 RANDOM_WRITE(dev, mt76x0_mac_reg_table); ··· 280 280 return -ETIMEDOUT; 281 281 282 282 mt76x0_reset_csr_bbp(dev); 283 - ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false); 283 + ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); 284 284 if (ret) 285 285 return ret; 286 286 ··· 368 368 hw->max_rates = 1; 369 369 hw->max_report_rates = 7; 370 370 hw->max_rate_tries = 1; 371 - hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2; 371 + hw->extra_tx_headroom = 2; 372 + if (mt76_is_usb(dev)) 373 + hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + 374 + MT_DMA_HDR_LEN; 372 375 373 376 hw->sta_data_size = sizeof(struct mt76x02_sta); 374 377 hw->vif_data_size = sizeof(struct mt76x02_vif);
+15 -7
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
··· 16 16 #include <linux/etherdevice.h> 17 17 #include "mt76x0.h" 18 18 19 + static int 20 + mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) 21 + { 22 + int ret; 23 + 24 + cancel_delayed_work_sync(&dev->cal_work); 25 + 26 + mt76_set_channel(&dev->mt76); 27 + ret = mt76x0_phy_set_channel(dev, chandef); 28 + mt76_txq_schedule_all(&dev->mt76); 29 + 30 + return ret; 31 + } 32 + 19 33 int mt76x0_config(struct ieee80211_hw *hw, u32 changed) 20 34 { 21 35 struct mt76x02_dev *dev = hw->priv; ··· 39 25 40 26 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 41 27 ieee80211_stop_queues(hw); 42 - ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef); 28 + ret = mt76x0_set_channel(dev, &hw->conf.chandef); 43 29 ieee80211_wake_queues(hw); 44 30 } 45 31 ··· 128 114 { 129 115 struct mt76x02_dev *dev = hw->priv; 130 116 131 - cancel_delayed_work_sync(&dev->cal_work); 132 - mt76x0_agc_save(dev); 133 117 set_bit(MT76_SCANNING, &dev->mt76.state); 134 118 } 135 119 EXPORT_SYMBOL_GPL(mt76x0_sw_scan); ··· 137 125 { 138 126 struct mt76x02_dev *dev = hw->priv; 139 127 140 - mt76x0_agc_restore(dev); 141 128 clear_bit(MT76_SCANNING, &dev->mt76.state); 142 - 143 - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 144 - MT_CALIBRATE_INTERVAL); 145 129 } 146 130 EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); 147 131
+3
drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h
··· 39 39 MCU_CAL_TXDCOC, 40 40 MCU_CAL_RX_GROUP_DELAY, 41 41 MCU_CAL_TX_GROUP_DELAY, 42 + MCU_CAL_VCO, 43 + MCU_CAL_NO_SIGNAL = 0xfe, 44 + MCU_CAL_FULL = 0xff, 42 45 }; 43 46 44 47 int mt76x0e_mcu_init(struct mt76x02_dev *dev);
+1 -2
drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
··· 66 66 /* PHY */ 67 67 void mt76x0_phy_init(struct mt76x02_dev *dev); 68 68 int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev); 69 - void mt76x0_agc_save(struct mt76x02_dev *dev); 70 - void mt76x0_agc_restore(struct mt76x02_dev *dev); 71 69 int mt76x0_phy_set_channel(struct mt76x02_dev *dev, 72 70 struct cfg80211_chan_def *chandef); 73 71 void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev); 74 72 void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); 73 + void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on); 75 74 76 75 /* MAC */ 77 76 void mt76x0_mac_work(struct work_struct *work);
+34 -13
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
··· 28 28 mutex_lock(&dev->mt76.mutex); 29 29 30 30 mt76x02_mac_start(dev); 31 + mt76x0_phy_calibrate(dev, true); 31 32 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 32 33 MT_CALIBRATE_INTERVAL); 33 34 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, ··· 72 71 .tx = mt76x02_tx, 73 72 .start = mt76x0e_start, 74 73 .stop = mt76x0e_stop, 75 - .config = mt76x0_config, 76 74 .add_interface = mt76x02_add_interface, 77 75 .remove_interface = mt76x02_remove_interface, 76 + .config = mt76x0_config, 78 77 .configure_filter = mt76x02_configure_filter, 78 + .sta_add = mt76x02_sta_add, 79 + .sta_remove = mt76x02_sta_remove, 80 + .set_key = mt76x02_set_key, 81 + .conf_tx = mt76x02_conf_tx, 82 + .sw_scan_start = mt76x0_sw_scan, 83 + .sw_scan_complete = mt76x0_sw_scan_complete, 84 + .ampdu_action = mt76x02_ampdu_action, 85 + .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, 86 + .wake_tx_queue = mt76_wake_tx_queue, 79 87 }; 80 88 81 89 static int mt76x0e_register_device(struct mt76x02_dev *dev) ··· 112 102 u16 val; 113 103 114 104 mt76_clear(dev, MT_COEXCFG0, BIT(0)); 115 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0); 116 - if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) { 117 - u32 data; 118 105 119 - /* set external external PA I/O 120 - * current to 16mA 121 - */ 122 - data = mt76_rr(dev, 0x11c); 123 - val |= 0xc03; 124 - mt76_wr(dev, 0x11c, val); 125 - } 106 + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 107 + if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT)) 108 + mt76_set(dev, MT_XO_CTRL7, 0xc03); 126 109 } 127 110 128 111 mt76_clear(dev, 0x110, BIT(9)); 129 112 mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); 113 + 114 + err = mt76x0_register_device(dev); 115 + if (err < 0) 116 + return err; 117 + 118 + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); 130 119 131 120 return 0; 132 121 } ··· 133 124 static int 134 125 mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) 135 126 { 127 + static const struct mt76_driver_ops drv_ops = { 128 + 
.txwi_size = sizeof(struct mt76x02_txwi), 129 + .tx_prepare_skb = mt76x02_tx_prepare_skb, 130 + .tx_complete_skb = mt76x02_tx_complete_skb, 131 + .rx_skb = mt76x02_queue_rx_skb, 132 + .rx_poll_complete = mt76x02_rx_poll_complete, 133 + }; 136 134 struct mt76x02_dev *dev; 137 135 int ret; 138 136 ··· 157 141 if (ret) 158 142 return ret; 159 143 160 - dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops); 144 + dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops); 161 145 if (!dev) 162 146 return -ENOMEM; 163 147 ··· 165 149 166 150 dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); 167 151 dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); 152 + 153 + ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler, 154 + IRQF_SHARED, KBUILD_MODNAME, dev); 155 + if (ret) 156 + goto error; 168 157 169 158 ret = mt76x0e_register_device(dev); 170 159 if (ret < 0) ··· 188 167 mt76x0_chip_onoff(dev, false, false); 189 168 mt76x0e_stop_hw(dev); 190 169 mt76x02_dma_cleanup(dev); 191 - mt76x02_mcu_cleanup(&dev->mt76); 170 + mt76x02_mcu_cleanup(dev); 192 171 } 193 172 194 173 static void
+1
drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
··· 116 116 goto out; 117 117 } 118 118 119 + mt76x02_set_ethtool_fwver(dev, hdr); 119 120 dev_dbg(dev->mt76.dev, "Firmware running!\n"); 120 121 121 122 out:
+175 -134
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
··· 14 14 * GNU General Public License for more details. 15 15 */ 16 16 17 + #include <linux/kernel.h> 18 + #include <linux/etherdevice.h> 19 + 17 20 #include "mt76x0.h" 18 21 #include "mcu.h" 19 22 #include "eeprom.h" ··· 25 22 #include "initvals.h" 26 23 #include "initvals_phy.h" 27 24 #include "../mt76x02_phy.h" 28 - 29 - #include <linux/etherdevice.h> 30 25 31 26 static int 32 27 mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value) ··· 38 37 bank = MT_RF_BANK(offset); 39 38 reg = MT_RF_REG(offset); 40 39 41 - if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) 40 + if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8)) 42 41 return -EINVAL; 43 42 44 43 mutex_lock(&dev->phy_mutex); ··· 77 76 bank = MT_RF_BANK(offset); 78 77 reg = MT_RF_REG(offset); 79 78 80 - if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8) 79 + if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8)) 81 80 return -EINVAL; 82 81 83 82 mutex_lock(&dev->phy_mutex); ··· 112 111 static int 113 112 rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) 114 113 { 115 - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { 114 + if (mt76_is_usb(dev)) { 116 115 struct mt76_reg_pair pair = { 117 116 .reg = offset, 118 117 .value = val, 119 118 }; 120 119 120 + WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, 121 + &dev->mt76.state)); 121 122 return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); 122 123 } else { 123 - WARN_ON_ONCE(1); 124 124 return mt76x0_rf_csr_wr(dev, offset, val); 125 125 } 126 126 } ··· 132 130 int ret; 133 131 u32 val; 134 132 135 - if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) { 133 + if (mt76_is_usb(dev)) { 136 134 struct mt76_reg_pair pair = { 137 135 .reg = offset, 138 136 }; 139 137 138 + WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING, 139 + &dev->mt76.state)); 140 140 ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1); 141 141 val = pair.value; 142 142 } else { 143 - WARN_ON_ONCE(1); 144 143 ret = val = mt76x0_rf_csr_rr(dev, offset); 145 144 } 146 145 ··· 
178 175 } 179 176 #endif 180 177 181 - #define RF_RANDOM_WRITE(dev, tab) \ 182 - mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \ 183 - tab, ARRAY_SIZE(tab)) 178 + static void 179 + mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data, 180 + int n) 181 + { 182 + while (n-- > 0) { 183 + mt76x0_rf_csr_wr(dev, data->reg, data->value); 184 + data++; 185 + } 186 + } 187 + 188 + #define RF_RANDOM_WRITE(dev, tab) do { \ 189 + if (mt76_is_mmio(dev)) \ 190 + mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \ 191 + else \ 192 + mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\ 193 + } while (0) 184 194 185 195 int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) 186 196 { ··· 202 186 203 187 do { 204 188 val = mt76_rr(dev, MT_BBP(CORE, 0)); 205 - printk("BBP version %08x\n", val); 206 189 if (val && ~val) 207 190 break; 208 191 } while (--i); ··· 211 196 return -EIO; 212 197 } 213 198 199 + dev_dbg(dev->mt76.dev, "BBP version %08x\n", val); 214 200 return 0; 215 - } 216 - 217 - static void 218 - mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width, 219 - u8 ctrl) 220 - { 221 - int core_val, agc_val; 222 - 223 - switch (width) { 224 - case NL80211_CHAN_WIDTH_80: 225 - core_val = 3; 226 - agc_val = 7; 227 - break; 228 - case NL80211_CHAN_WIDTH_40: 229 - core_val = 2; 230 - agc_val = 3; 231 - break; 232 - default: 233 - core_val = 0; 234 - agc_val = 1; 235 - break; 236 - } 237 - 238 - mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); 239 - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); 240 - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); 241 - mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); 242 201 } 243 202 244 203 static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) ··· 272 283 } 273 284 274 285 static void 275 - mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper) 276 - { 277 - mt76_rmw_field(dev, MT_TX_BAND_CFG, 
MT_TX_BAND_CFG_UPPER_40M, 278 - primary_upper); 279 - } 280 - 281 - static void 282 286 mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) 283 287 { 284 288 switch (band) { ··· 281 299 rf_wr(dev, MT_RF(5, 0), 0x45); 282 300 rf_wr(dev, MT_RF(6, 0), 0x44); 283 301 284 - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 285 - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 286 - 287 302 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007); 288 303 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002); 289 304 break; ··· 289 310 290 311 rf_wr(dev, MT_RF(5, 0), 0x44); 291 312 rf_wr(dev, MT_RF(6, 0), 0x45); 292 - 293 - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 294 - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 295 313 296 314 mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005); 297 315 mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102); ··· 451 475 mt76_wr(dev, MT_RF_MISC, mac_reg); 452 476 453 477 band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; 454 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 478 + if (mt76x02_ext_pa_enabled(dev, band)) { 455 479 /* 456 480 MT_RF_MISC (offset: 0x0518) 457 481 [2]1'b1: enable external A band PA, 1'b0: disable external A band PA ··· 490 514 } 491 515 492 516 static void 493 - mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) 517 + mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band) 494 518 { 495 519 int i; 496 520 ··· 563 587 return ; 564 588 } 565 589 566 - mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false); 590 + mt76x02_mcu_function_select(dev, BW_SETTING, bw, false); 567 591 } 568 592 569 593 void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) ··· 579 603 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 580 604 mt76x02_add_rate_power_offset(t, -info[0]); 581 605 582 - mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]); 606 + mt76x02_phy_set_txpower(dev, info[0], info[1]); 583 607 } 608 + 609 + void mt76x0_phy_calibrate(struct 
mt76x02_dev *dev, bool power_on) 610 + { 611 + struct ieee80211_channel *chan = dev->mt76.chandef.chan; 612 + u32 val, tx_alc, reg_val; 613 + 614 + if (power_on) { 615 + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); 616 + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value, 617 + false); 618 + usleep_range(10, 20); 619 + /* XXX: tssi */ 620 + } 621 + 622 + tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); 623 + mt76_wr(dev, MT_TX_ALC_CFG_0, 0); 624 + usleep_range(500, 700); 625 + 626 + reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); 627 + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); 628 + 629 + if (chan->band == NL80211_BAND_5GHZ) { 630 + if (chan->hw_value < 100) 631 + val = 0x701; 632 + else if (chan->hw_value < 140) 633 + val = 0x801; 634 + else 635 + val = 0x901; 636 + } else { 637 + val = 0x600; 638 + } 639 + 640 + mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false); 641 + msleep(350); 642 + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false); 643 + usleep_range(15000, 20000); 644 + 645 + mt76_wr(dev, MT_BBP(IBI, 9), reg_val); 646 + mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); 647 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); 648 + } 649 + EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate); 584 650 585 651 int mt76x0_phy_set_channel(struct mt76x02_dev *dev, 586 652 struct cfg80211_chan_def *chandef) ··· 683 665 break; 684 666 } 685 667 686 - mt76x0_bbp_set_bw(dev, chandef->width); 687 - mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index); 688 - mt76x0_mac_set_ctrlch(dev, ch_group_index & 1); 668 + if (mt76_is_usb(dev)) { 669 + mt76x0_bbp_set_bw(dev, chandef->width); 670 + } else { 671 + if (chandef->width == NL80211_CHAN_WIDTH_80 || 672 + chandef->width == NL80211_CHAN_WIDTH_40) 673 + val = 0x201; 674 + else 675 + val = 0x601; 676 + mt76_wr(dev, MT_TX_SW_CFG0, val); 677 + } 678 + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); 679 + mt76x02_phy_set_band(dev, chandef->chan->band, 680 + ch_group_index & 1); 689 681 mt76x0_ant_select(dev); 690 682 691 683 mt76_rmw(dev, 
MT_EXT_CCA_CFG, ··· 708 680 709 681 mt76x0_phy_set_band(dev, chandef->chan->band); 710 682 mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); 711 - mt76x0_read_rx_gain(dev); 712 683 713 684 /* set Japan Tx filter at channel 14 */ 714 685 val = mt76_rr(dev, MT_BBP(CORE, 1)); ··· 717 690 val &= ~0x20; 718 691 mt76_wr(dev, MT_BBP(CORE, 1), val); 719 692 720 - mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band); 693 + mt76x0_read_rx_gain(dev); 694 + mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band); 695 + mt76x02_init_agc_gain(dev); 721 696 722 - /* Vendor driver don't do it */ 723 - /* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */ 697 + if (mt76_is_usb(dev)) { 698 + mt76x0_vco_cal(dev, channel); 699 + } else { 700 + /* enable vco */ 701 + rf_set(dev, MT_RF(0, 4), BIT(7)); 702 + } 724 703 725 - mt76x0_vco_cal(dev, channel); 726 704 if (scan) 727 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); 705 + return 0; 728 706 707 + if (mt76_is_mmio(dev)) 708 + mt76x0_phy_calibrate(dev, false); 729 709 mt76x0_phy_set_txpower(dev); 710 + 711 + ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 712 + MT_CALIBRATE_INTERVAL); 730 713 731 714 return 0; 732 715 } ··· 747 710 u8 channel = dev->mt76.chandef.chan->hw_value; 748 711 int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 
1 : 0; 749 712 750 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false); 713 + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); 751 714 752 715 mt76x0_vco_cal(dev, channel); 753 716 ··· 755 718 mt76_wr(dev, MT_TX_ALC_CFG_0, 0); 756 719 usleep_range(500, 700); 757 720 758 - reg_val = mt76_rr(dev, 0x2124); 759 - reg_val &= 0xffffff7e; 760 - mt76_wr(dev, 0x2124, reg_val); 721 + reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); 722 + mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); 761 723 762 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false); 724 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false); 763 725 764 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false); 765 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false); 766 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); 767 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY, 768 - is_5ghz, false); 769 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false); 770 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY, 771 - is_5ghz, false); 726 + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false); 727 + mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false); 728 + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); 729 + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false); 730 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false); 731 + mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false); 772 732 773 - mt76_wr(dev, 0x2124, reg_val); 733 + mt76_wr(dev, MT_BBP(IBI, 9), reg_val); 774 734 mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); 775 735 msleep(100); 776 736 777 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false); 778 - } 779 - 780 - void mt76x0_agc_save(struct mt76x02_dev *dev) 781 - { 782 - /* Only one RX path */ 783 - dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8))); 784 - } 785 - 786 - void mt76x0_agc_restore(struct mt76x02_dev *dev) 787 - { 788 - mt76_rmw_field(dev, 
MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save); 737 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); 789 738 } 790 739 791 740 static void mt76x0_temp_sensor(struct mt76x02_dev *dev) 792 741 { 793 742 u8 rf_b7_73, rf_b0_66, rf_b0_67; 794 - int cycle, temp; 795 - u32 val; 796 - s32 sval; 743 + s8 val; 797 744 798 745 rf_b7_73 = rf_rr(dev, MT_RF(7, 73)); 799 746 rf_b0_66 = rf_rr(dev, MT_RF(0, 66)); 800 - rf_b0_67 = rf_rr(dev, MT_RF(0, 73)); 747 + rf_b0_67 = rf_rr(dev, MT_RF(0, 67)); 801 748 802 749 rf_wr(dev, MT_RF(7, 73), 0x02); 803 750 rf_wr(dev, MT_RF(0, 66), 0x23); 804 - rf_wr(dev, MT_RF(0, 73), 0x01); 751 + rf_wr(dev, MT_RF(0, 67), 0x01); 805 752 806 753 mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055); 807 754 808 - for (cycle = 0; cycle < 2000; cycle++) { 809 - val = mt76_rr(dev, MT_BBP(CORE, 34)); 810 - if (!(val & 0x10)) 811 - break; 812 - udelay(3); 813 - } 814 - 815 - if (cycle >= 2000) { 816 - val &= 0x10; 817 - mt76_wr(dev, MT_BBP(CORE, 34), val); 755 + if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) { 756 + mt76_clear(dev, MT_BBP(CORE, 34), BIT(4)); 818 757 goto done; 819 758 } 820 759 821 - sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; 822 - if (!(sval & 0x80)) 823 - sval &= 0x7f; /* Positive */ 824 - else 825 - sval |= 0xffffff00; /* Negative */ 760 + val = mt76_rr(dev, MT_BBP(CORE, 35)); 761 + val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25; 826 762 827 - temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25; 763 + if (abs(val - dev->cal.temp_vco) > 20) { 764 + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, 765 + dev->mt76.chandef.chan->hw_value, 766 + false); 767 + dev->cal.temp_vco = val; 768 + } 769 + if (abs(val - dev->cal.temp) > 30) { 770 + mt76x0_phy_calibrate(dev, false); 771 + dev->cal.temp = val; 772 + } 828 773 829 774 done: 830 775 rf_wr(dev, MT_RF(7, 73), rf_b7_73); 831 776 rf_wr(dev, MT_RF(0, 66), rf_b0_66); 832 - rf_wr(dev, MT_RF(0, 73), rf_b0_67); 777 + rf_wr(dev, MT_RF(0, 67), rf_b0_67); 833 778 } 834 779 835 - static 
void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev) 780 + static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev) 836 781 { 837 - struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 838 - u32 val, init_vga; 839 - int avg_rssi; 782 + u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; 783 + u32 val = 0x122c << 16 | 0xf2; 840 784 841 - init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E; 842 - avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76); 843 - if (avg_rssi > -60) 844 - init_vga -= 0x20; 845 - else if (avg_rssi > -70) 846 - init_vga -= 0x10; 847 - 848 - val = mt76_rr(dev, MT_BBP(AGC, 8)); 849 - val &= 0xFFFF80FF; 850 - val |= init_vga << 8; 851 - mt76_wr(dev, MT_BBP(AGC,8), val); 785 + mt76_wr(dev, MT_BBP(AGC, 8), 786 + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain)); 852 787 } 853 788 854 - static void mt76x0_phy_calibrate(struct work_struct *work) 789 + static void 790 + mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) 791 + { 792 + bool gain_change; 793 + u8 gain_delta; 794 + int low_gain; 795 + 796 + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); 797 + 798 + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + 799 + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); 800 + 801 + gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); 802 + dev->cal.low_gain = low_gain; 803 + 804 + if (!gain_change) { 805 + if (mt76x02_phy_adjust_vga_gain(dev)) 806 + mt76x0_phy_set_gain_val(dev); 807 + return; 808 + } 809 + 810 + dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10; 811 + gain_delta = (low_gain == 2) ? 
10 : 0; 812 + 813 + dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta; 814 + mt76x0_phy_set_gain_val(dev); 815 + 816 + /* clear false CCA counters */ 817 + mt76_rr(dev, MT_RX_STAT_1); 818 + } 819 + 820 + static void mt76x0_phy_calibration_work(struct work_struct *work) 855 821 { 856 822 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, 857 823 cal_work.work); 858 824 859 - mt76x0_dynamic_vga_tuning(dev); 860 - mt76x0_temp_sensor(dev); 825 + mt76x0_phy_update_channel_gain(dev); 826 + if (!mt76x0_tssi_enabled(dev)) 827 + mt76x0_temp_sensor(dev); 861 828 862 829 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 863 830 MT_CALIBRATE_INTERVAL); ··· 922 881 923 882 void mt76x0_phy_init(struct mt76x02_dev *dev) 924 883 { 925 - INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate); 884 + INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work); 926 885 927 886 mt76x0_rf_init(dev); 928 - mt76x02_phy_set_rxpath(&dev->mt76); 929 - mt76x02_phy_set_txdac(&dev->mt76); 887 + mt76x02_phy_set_rxpath(dev); 888 + mt76x02_phy_set_txdac(dev); 930 889 }
+3 -4
drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
··· 40 40 ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE; 41 41 dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n", 42 42 ilm_len, MT_MCU_IVB_SIZE); 43 - err = mt76x02u_mcu_fw_send_data(&dev->mt76, 44 - fw_payload + MT_MCU_IVB_SIZE, 43 + err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE, 45 44 ilm_len, MCU_FW_URB_MAX_PAYLOAD, 46 45 MT_MCU_IVB_SIZE); 47 46 if (err) ··· 48 49 49 50 dlm_len = le32_to_cpu(hdr->dlm_len); 50 51 dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len); 51 - err = mt76x02u_mcu_fw_send_data(&dev->mt76, 52 + err = mt76x02u_mcu_fw_send_data(dev, 52 53 fw_payload + le32_to_cpu(hdr->ilm_len), 53 54 dlm_len, MCU_FW_URB_MAX_PAYLOAD, 54 55 MT_MCU_DLM_OFFSET); ··· 120 121 mt76_set(dev, MT_USB_DMA_CFG, 121 122 (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) | 122 123 FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); 123 - mt76x02u_mcu_fw_reset(&dev->mt76); 124 + mt76x02u_mcu_fw_reset(dev); 124 125 usleep_range(5000, 6000); 125 126 /* 126 127 mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+16 -9
drivers/net/wireless/mediatek/mt76/mt76x02.h
··· 55 55 s8 agc_gain_adjust; 56 56 s8 low_gain; 57 57 58 - u8 temp; 58 + s8 temp_vco; 59 + s8 temp; 59 60 60 61 bool init_cal_done; 61 62 bool tssi_cal_done; ··· 102 101 103 102 bool no_2ghz; 104 103 105 - u8 agc_save; 106 - 107 104 u8 coverage_class; 108 105 u8 slottime; 109 106 ··· 118 119 int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 119 120 struct ieee80211_sta *sta); 120 121 121 - void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, 122 - unsigned int idx); 122 + void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, 123 + unsigned int idx); 123 124 int mt76x02_add_interface(struct ieee80211_hw *hw, 124 125 struct ieee80211_vif *vif); 125 126 void mt76x02_remove_interface(struct ieee80211_hw *hw, ··· 135 136 void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, 136 137 struct ieee80211_vif *vif, 137 138 struct ieee80211_sta *sta); 138 - s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, 139 + s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, 139 140 const struct ieee80211_tx_rate *rate); 140 - s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj); 141 + s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, 142 + s8 max_txpwr_adj); 141 143 void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); 142 144 int mt76x02_insert_hdr_pad(struct sk_buff *skb); 143 145 void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); 144 146 void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); 145 - bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update); 147 + bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); 146 148 void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 147 149 struct sk_buff *skb); 148 150 void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); ··· 156 156 u32 *tx_info); 157 157 158 158 extern const u16 mt76x02_beacon_offsets[16]; 159 - void 
mt76x02_set_beacon_offsets(struct mt76_dev *dev); 159 + void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev); 160 160 void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set); 161 161 void mt76x02_mac_start(struct mt76x02_dev *dev); 162 + 163 + static inline bool is_mt76x2(struct mt76x02_dev *dev) 164 + { 165 + return mt76_chip(&dev->mt76) == 0x7612 || 166 + mt76_chip(&dev->mt76) == 0x7662 || 167 + mt76_chip(&dev->mt76) == 0x7602; 168 + } 162 169 163 170 static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask) 164 171 {
+15 -18
drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
··· 17 17 18 18 #include <asm/unaligned.h> 19 19 20 - #include "mt76.h" 21 20 #include "mt76x02_eeprom.h" 22 - #include "mt76x02_regs.h" 23 21 24 22 static int 25 - mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data, 23 + mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data, 26 24 enum mt76x02_eeprom_modes mode) 27 25 { 28 26 u32 val; 29 27 int i; 30 28 31 - val = __mt76_rr(dev, MT_EFUSE_CTRL); 29 + val = mt76_rr(dev, MT_EFUSE_CTRL); 32 30 val &= ~(MT_EFUSE_CTRL_AIN | 33 31 MT_EFUSE_CTRL_MODE); 34 32 val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); 35 33 val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode); 36 34 val |= MT_EFUSE_CTRL_KICK; 37 - __mt76_wr(dev, MT_EFUSE_CTRL, val); 35 + mt76_wr(dev, MT_EFUSE_CTRL, val); 38 36 39 - if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 40 - 0, 1000)) 37 + if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) 41 38 return -ETIMEDOUT; 42 39 43 40 udelay(2); 44 41 45 - val = __mt76_rr(dev, MT_EFUSE_CTRL); 42 + val = mt76_rr(dev, MT_EFUSE_CTRL); 46 43 if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { 47 44 memset(data, 0xff, 16); 48 45 return 0; 49 46 } 50 47 51 48 for (i = 0; i < 4; i++) { 52 - val = __mt76_rr(dev, MT_EFUSE_DATA(i)); 49 + val = mt76_rr(dev, MT_EFUSE_DATA(i)); 53 50 put_unaligned_le32(val, data + 4 * i); 54 51 } 55 52 56 53 return 0; 57 54 } 58 55 59 - int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, 56 + int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, 60 57 int len, enum mt76x02_eeprom_modes mode) 61 58 { 62 59 int ret, i; ··· 68 71 } 69 72 EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data); 70 73 71 - void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev) 74 + void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev) 72 75 { 73 76 u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 74 77 75 78 switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { 76 79 case BOARD_TYPE_5GHZ: 77 - dev->cap.has_5ghz = true; 80 + 
dev->mt76.cap.has_5ghz = true; 78 81 break; 79 82 case BOARD_TYPE_2GHZ: 80 - dev->cap.has_2ghz = true; 83 + dev->mt76.cap.has_2ghz = true; 81 84 break; 82 85 default: 83 - dev->cap.has_2ghz = true; 84 - dev->cap.has_5ghz = true; 86 + dev->mt76.cap.has_2ghz = true; 87 + dev->mt76.cap.has_5ghz = true; 85 88 break; 86 89 } 87 90 } 88 91 EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap); 89 92 90 - bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band) 93 + bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band) 91 94 { 92 95 u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0); 93 96 ··· 98 101 } 99 102 EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled); 100 103 101 - void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, 104 + void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band, 102 105 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g) 103 106 { 104 107 u16 val; ··· 126 129 } 127 130 EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain); 128 131 129 - u8 mt76x02_get_lna_gain(struct mt76_dev *dev, 132 + u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, 130 133 s8 *lna_2g, s8 *lna_5g, 131 134 struct ieee80211_channel *chan) 132 135 {
+9 -28
drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
··· 18 18 #ifndef __MT76x02_EEPROM_H 19 19 #define __MT76x02_EEPROM_H 20 20 21 + #include "mt76x02.h" 22 + 21 23 enum mt76x02_eeprom_field { 22 24 MT_EE_CHIP_ID = 0x000, 23 25 MT_EE_VERSION = 0x002, ··· 170 168 } 171 169 172 170 static inline int 173 - mt76x02_eeprom_get(struct mt76_dev *dev, 171 + mt76x02_eeprom_get(struct mt76x02_dev *dev, 174 172 enum mt76x02_eeprom_field field) 175 173 { 176 174 if ((field & 1) || field >= __MT_EE_MAX) 177 175 return -1; 178 176 179 - return get_unaligned_le16(dev->eeprom.data + field); 177 + return get_unaligned_le16(dev->mt76.eeprom.data + field); 180 178 } 181 179 182 - static inline bool 183 - mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev) 184 - { 185 - u16 val; 186 - 187 - val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); 188 - if (!(val & BIT(15))) 189 - return false; 190 - 191 - return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & 192 - MT_EE_NIC_CONF_1_TEMP_TX_ALC; 193 - } 194 - 195 - static inline bool 196 - mt76x02_tssi_enabled(struct mt76_dev *dev) 197 - { 198 - return !mt76x02_temp_tx_alc_enabled(dev) && 199 - (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & 200 - MT_EE_NIC_CONF_1_TX_ALC_EN); 201 - } 202 - 203 - bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band); 204 - int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf, 180 + bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band); 181 + int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, 205 182 int len, enum mt76x02_eeprom_modes mode); 206 - void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band, 183 + void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band, 207 184 u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g); 208 - u8 mt76x02_get_lna_gain(struct mt76_dev *dev, 185 + u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, 209 186 s8 *lna_2g, s8 *lna_5g, 210 187 struct ieee80211_channel *chan); 211 - void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev); 
188 + void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev); 212 189 213 190 #endif /* __MT76x02_EEPROM_H */
+99 -107
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
··· 45 45 } 46 46 EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info); 47 47 48 - int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, 49 - struct ieee80211_key_conf *key) 48 + int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, 49 + u8 key_idx, struct ieee80211_key_conf *key) 50 50 { 51 51 enum mt76x02_cipher_type cipher; 52 52 u8 key_data[32]; ··· 56 56 if (cipher == MT_CIPHER_NONE && key) 57 57 return -EOPNOTSUPP; 58 58 59 - val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx)); 59 + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); 60 60 val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); 61 61 val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); 62 - __mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); 62 + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); 63 63 64 - __mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, 65 - sizeof(key_data)); 64 + mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, 65 + sizeof(key_data)); 66 66 67 67 return 0; 68 68 } 69 69 EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); 70 70 71 - int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, 72 - struct ieee80211_key_conf *key) 71 + int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 72 + struct ieee80211_key_conf *key) 73 73 { 74 74 enum mt76x02_cipher_type cipher; 75 75 u8 key_data[32]; ··· 79 79 if (cipher == MT_CIPHER_NONE && key) 80 80 return -EOPNOTSUPP; 81 81 82 - __mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); 83 - __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); 82 + mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); 83 + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher); 84 84 85 85 memset(iv_data, 0, sizeof(iv_data)); 86 86 if (key) { 87 - __mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, 88 - !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); 87 + mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, 88 + !!(key->flags & 
IEEE80211_KEY_FLAG_PAIRWISE)); 89 89 iv_data[3] = key->keyidx << 6; 90 90 if (cipher >= MT_CIPHER_TKIP) 91 91 iv_data[3] |= 0x20; 92 92 } 93 93 94 - __mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); 94 + mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); 95 95 96 96 return 0; 97 97 } 98 98 EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key); 99 99 100 - void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac) 100 + void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, 101 + u8 vif_idx, u8 *mac) 101 102 { 102 103 struct mt76_wcid_addr addr = {}; 103 104 u32 attr; ··· 106 105 attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | 107 106 FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); 108 107 109 - __mt76_wr(dev, MT_WCID_ATTR(idx), attr); 108 + mt76_wr(dev, MT_WCID_ATTR(idx), attr); 110 109 111 - __mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); 112 - __mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); 110 + mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); 111 + mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); 113 112 114 113 if (idx >= 128) 115 114 return; ··· 117 116 if (mac) 118 117 memcpy(addr.macaddr, mac, ETH_ALEN); 119 118 120 - __mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); 119 + mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr)); 121 120 } 122 121 EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup); 123 122 124 - void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop) 123 + void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop) 125 124 { 126 - u32 val = __mt76_rr(dev, MT_WCID_DROP(idx)); 125 + u32 val = mt76_rr(dev, MT_WCID_DROP(idx)); 127 126 u32 bit = MT_WCID_DROP_MASK(idx); 128 127 129 128 /* prevent unnecessary writes */ 130 129 if ((val & bit) != (bit * drop)) 131 - __mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); 130 + mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); 132 131 } 133 132 EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop); 134 133 135 - void 
mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq) 134 + void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq) 136 135 { 137 136 struct mt76_txq *mtxq; 138 137 ··· 152 151 mtxq->wcid = &mvif->group_wcid; 153 152 } 154 153 155 - mt76_txq_init(dev, txq); 154 + mt76_txq_init(&dev->mt76, txq); 156 155 } 157 156 EXPORT_SYMBOL_GPL(mt76x02_txq_init); 158 157 159 - static void 160 - mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb, 161 - struct ieee80211_sta *sta, int len, u8 nss) 162 - { 163 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 164 - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 165 - u16 txwi_flags = 0; 166 - 167 - if (info->flags & IEEE80211_TX_CTL_LDPC) 168 - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); 169 - if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) 170 - txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); 171 - if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) 172 - txwi_flags |= MT_TXWI_FLAGS_MMPS; 173 - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 174 - txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; 175 - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 176 - txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; 177 - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 178 - txwi->pktid |= MT_TXWI_PKTID_PROBE; 179 - if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { 180 - u8 ba_size = IEEE80211_MIN_AMPDU_BUF; 181 - 182 - ba_size <<= sta->ht_cap.ampdu_factor; 183 - ba_size = min_t(int, 63, ba_size - 1); 184 - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 185 - ba_size = 0; 186 - txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); 187 - 188 - txwi_flags |= MT_TXWI_FLAGS_AMPDU | 189 - FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, 190 - sta->ht_cap.ampdu_density); 191 - } 192 - 193 - if (ieee80211_is_probe_resp(hdr->frame_control) || 194 - ieee80211_is_beacon(hdr->frame_control)) 195 - txwi_flags |= MT_TXWI_FLAGS_TS; 196 - 197 - txwi->flags |= 
cpu_to_le16(txwi_flags); 198 - txwi->len_ctl = cpu_to_le16(len); 199 - } 200 - 201 158 static __le16 202 - mt76x02_mac_tx_rate_val(struct mt76_dev *dev, 203 - const struct ieee80211_tx_rate *rate, u8 *nss_val) 159 + mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, 160 + const struct ieee80211_tx_rate *rate, u8 *nss_val) 204 161 { 205 162 u16 rateval; 206 163 u8 phy, rate_idx; ··· 183 224 bw = 1; 184 225 } else { 185 226 const struct ieee80211_rate *r; 186 - int band = dev->chandef.chan->band; 227 + int band = dev->mt76.chandef.chan->band; 187 228 u16 val; 188 229 189 - r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; 230 + r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx]; 190 231 if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 191 232 val = r->hw_value_short; 192 233 else ··· 207 248 return cpu_to_le16(rateval); 208 249 } 209 250 210 - void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, 211 - const struct ieee80211_tx_rate *rate) 251 + void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, 252 + const struct ieee80211_tx_rate *rate) 212 253 { 213 - spin_lock_bh(&dev->lock); 254 + spin_lock_bh(&dev->mt76.lock); 214 255 wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); 215 256 wcid->tx_rate_set = true; 216 - spin_unlock_bh(&dev->lock); 257 + spin_unlock_bh(&dev->mt76.lock); 217 258 } 218 259 219 - bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, 220 - struct mt76x02_tx_status *stat) 260 + bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, 261 + struct mt76x02_tx_status *stat) 221 262 { 222 263 u32 stat1, stat2; 223 264 224 - stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT); 225 - stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO); 265 + stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT); 266 + stat1 = mt76_rr(dev, MT_TX_STAT_FIFO); 226 267 227 268 stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID); 228 269 if (!stat->valid) ··· 298 339 return 0; 299 340 } 300 341 301 - void 
mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, 342 + void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, 302 343 struct sk_buff *skb, struct mt76_wcid *wcid, 303 344 struct ieee80211_sta *sta, int len) 304 345 { 346 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 305 347 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 306 348 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 307 349 struct ieee80211_key_conf *key = info->control.hw_key; 308 350 u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2)); 351 + u16 txwi_flags = 0; 309 352 u8 nss; 310 353 s8 txpwr_adj, max_txpwr_adj; 311 - u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf; 354 + u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf; 312 355 313 356 memset(txwi, 0, sizeof(*txwi)); 314 357 ··· 335 374 txwi->eiv = *((__le32 *)&ccmp_pn[1]); 336 375 } 337 376 338 - spin_lock_bh(&dev->lock); 377 + spin_lock_bh(&dev->mt76.lock); 339 378 if (wcid && (rate->idx < 0 || !rate->count)) { 340 379 txwi->rate = wcid->tx_rate; 341 380 max_txpwr_adj = wcid->max_txpwr_adj; ··· 344 383 txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss); 345 384 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate); 346 385 } 347 - spin_unlock_bh(&dev->lock); 386 + spin_unlock_bh(&dev->mt76.lock); 348 387 349 - txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf, 388 + txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf, 350 389 max_txpwr_adj); 351 390 txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj); 352 391 353 - if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4) 392 + if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4) 354 393 txwi->txstream = 0x13; 355 - else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 && 394 + else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 && 356 395 !(txwi->rate & cpu_to_le16(rate_ht_mask))) 357 396 txwi->txstream = 0x93; 358 397 359 - mt76x02_mac_fill_txwi(txwi, skb, sta, 
len, nss); 398 + if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC)) 399 + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC); 400 + if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1) 401 + txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC); 402 + if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC) 403 + txwi_flags |= MT_TXWI_FLAGS_MMPS; 404 + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 405 + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; 406 + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 407 + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; 408 + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 409 + txwi->pktid |= MT_TXWI_PKTID_PROBE; 410 + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { 411 + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; 412 + 413 + ba_size <<= sta->ht_cap.ampdu_factor; 414 + ba_size = min_t(int, 63, ba_size - 1); 415 + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 416 + ba_size = 0; 417 + txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); 418 + 419 + txwi_flags |= MT_TXWI_FLAGS_AMPDU | 420 + FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, 421 + sta->ht_cap.ampdu_density); 422 + } 423 + 424 + if (ieee80211_is_probe_resp(hdr->frame_control) || 425 + ieee80211_is_beacon(hdr->frame_control)) 426 + txwi_flags |= MT_TXWI_FLAGS_TS; 427 + 428 + txwi->flags |= cpu_to_le16(txwi_flags); 429 + txwi->len_ctl = cpu_to_le16(len); 360 430 } 361 431 EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi); 362 432 363 433 static void 364 - mt76x02_mac_fill_tx_status(struct mt76_dev *dev, 365 - struct ieee80211_tx_info *info, 366 - struct mt76x02_tx_status *st, int n_frames) 434 + mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, 435 + struct ieee80211_tx_info *info, 436 + struct mt76x02_tx_status *st, int n_frames) 367 437 { 368 438 struct ieee80211_tx_rate *rate = info->status.rates; 369 439 int cur_idx, last_rate; ··· 405 413 406 414 last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); 407 415 mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate, 408 
- dev->chandef.chan->band); 416 + dev->mt76.chandef.chan->band); 409 417 if (last_rate < IEEE80211_TX_MAX_RATES - 1) 410 418 rate[last_rate + 1].idx = -1; 411 419 ··· 433 441 info->flags |= IEEE80211_TX_STAT_ACK; 434 442 } 435 443 436 - void mt76x02_send_tx_status(struct mt76_dev *dev, 437 - struct mt76x02_tx_status *stat, u8 *update) 444 + void mt76x02_send_tx_status(struct mt76x02_dev *dev, 445 + struct mt76x02_tx_status *stat, u8 *update) 438 446 { 439 447 struct ieee80211_tx_info info = {}; 440 448 struct ieee80211_sta *sta = NULL; ··· 442 450 struct mt76x02_sta *msta = NULL; 443 451 444 452 rcu_read_lock(); 445 - if (stat->wcid < ARRAY_SIZE(dev->wcid)) 446 - wcid = rcu_dereference(dev->wcid[stat->wcid]); 453 + if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) 454 + wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); 447 455 448 456 if (wcid) { 449 457 void *priv; ··· 468 476 } 469 477 470 478 mt76x02_mac_fill_tx_status(dev, &info, &msta->status, 471 - msta->n_frames); 479 + msta->n_frames); 472 480 473 481 msta->status = *stat; 474 482 msta->n_frames = 1; ··· 478 486 *update = 1; 479 487 } 480 488 481 - ieee80211_tx_status_noskb(dev->hw, sta, &info); 489 + ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); 482 490 483 491 out: 484 492 rcu_read_unlock(); ··· 553 561 } 554 562 EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate); 555 563 556 - void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr) 564 + void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) 557 565 { 558 - ether_addr_copy(dev->macaddr, addr); 566 + ether_addr_copy(dev->mt76.macaddr, addr); 559 567 560 - if (!is_valid_ether_addr(dev->macaddr)) { 561 - eth_random_addr(dev->macaddr); 562 - dev_info(dev->dev, 568 + if (!is_valid_ether_addr(dev->mt76.macaddr)) { 569 + eth_random_addr(dev->mt76.macaddr); 570 + dev_info(dev->mt76.dev, 563 571 "Invalid MAC address, using random address %pM\n", 564 - dev->macaddr); 572 + dev->mt76.macaddr); 565 573 } 566 574 567 - __mt76_wr(dev, MT_MAC_ADDR_DW0, 
get_unaligned_le32(dev->macaddr)); 568 - __mt76_wr(dev, MT_MAC_ADDR_DW1, 569 - get_unaligned_le16(dev->macaddr + 4) | 570 - FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); 575 + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr)); 576 + mt76_wr(dev, MT_MAC_ADDR_DW1, 577 + get_unaligned_le16(dev->mt76.macaddr + 4) | 578 + FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); 571 579 } 572 580 EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr); 573 581 ··· 689 697 690 698 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) { 691 699 spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); 692 - ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat); 700 + ret = mt76x02_mac_load_tx_status(dev, &stat); 693 701 spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); 694 702 695 703 if (!ret) ··· 698 706 trace_mac_txstat_fetch(dev, &stat); 699 707 700 708 if (!irq) { 701 - mt76x02_send_tx_status(&dev->mt76, &stat, &update); 709 + mt76x02_send_tx_status(dev, &stat, &update); 702 710 continue; 703 711 } 704 712
+16 -15
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
··· 198 198 return (void *)info->status.status_driver_data; 199 199 } 200 200 201 - void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq); 201 + void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq); 202 202 enum mt76x02_cipher_type 203 203 mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data); 204 204 205 - int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx, 206 - struct ieee80211_key_conf *key); 207 - int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx, 208 - struct ieee80211_key_conf *key); 209 - void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac); 210 - void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop); 211 - void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid, 212 - const struct ieee80211_tx_rate *rate); 213 - bool mt76x02_mac_load_tx_status(struct mt76_dev *dev, 214 - struct mt76x02_tx_status *stat); 215 - void mt76x02_send_tx_status(struct mt76_dev *dev, 216 - struct mt76x02_tx_status *stat, u8 *update); 205 + int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, 206 + u8 key_idx, struct ieee80211_key_conf *key); 207 + int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 208 + struct ieee80211_key_conf *key); 209 + void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, 210 + u8 *mac); 211 + void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); 212 + void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, 213 + const struct ieee80211_tx_rate *rate); 214 + bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, 215 + struct mt76x02_tx_status *stat); 216 + void mt76x02_send_tx_status(struct mt76x02_dev *dev, 217 + struct mt76x02_tx_status *stat, u8 *update); 217 218 int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, 218 219 void *rxi); 219 220 int 220 221 mt76x02_mac_process_rate(struct 
mt76_rx_status *status, u16 rate); 221 - void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr); 222 - void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi, 222 + void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); 223 + void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, 223 224 struct sk_buff *skb, struct mt76_wcid *wcid, 224 225 struct ieee80211_sta *sta, int len); 225 226 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
+36 -38
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
··· 19 19 #include <linux/firmware.h> 20 20 #include <linux/delay.h> 21 21 22 - #include "mt76.h" 23 22 #include "mt76x02_mcu.h" 24 - #include "mt76x02_dma.h" 25 23 26 24 struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) 27 25 { ··· 35 37 EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc); 36 38 37 39 static struct sk_buff * 38 - mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires) 40 + mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires) 39 41 { 40 42 unsigned long timeout; 41 43 ··· 43 45 return NULL; 44 46 45 47 timeout = expires - jiffies; 46 - wait_event_timeout(dev->mmio.mcu.wait, 47 - !skb_queue_empty(&dev->mmio.mcu.res_q), 48 + wait_event_timeout(dev->mt76.mmio.mcu.wait, 49 + !skb_queue_empty(&dev->mt76.mmio.mcu.res_q), 48 50 timeout); 49 - return skb_dequeue(&dev->mmio.mcu.res_q); 51 + return skb_dequeue(&dev->mt76.mmio.mcu.res_q); 50 52 } 51 53 52 54 static int 53 - mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid, 55 + mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid, 54 56 struct sk_buff *skb, int cmd, int seq) 55 57 { 56 - struct mt76_queue *q = &dev->q_tx[qid]; 58 + struct mt76_queue *q = &dev->mt76.q_tx[qid]; 57 59 struct mt76_queue_buf buf; 58 60 dma_addr_t addr; 59 61 u32 tx_info; ··· 64 66 FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | 65 67 FIELD_PREP(MT_MCU_MSG_LEN, skb->len); 66 68 67 - addr = dma_map_single(dev->dev, skb->data, skb->len, 69 + addr = dma_map_single(dev->mt76.dev, skb->data, skb->len, 68 70 DMA_TO_DEVICE); 69 - if (dma_mapping_error(dev->dev, addr)) 71 + if (dma_mapping_error(dev->mt76.dev, addr)) 70 72 return -ENOMEM; 71 73 72 74 buf.addr = addr; 73 75 buf.len = skb->len; 76 + 74 77 spin_lock_bh(&q->lock); 75 - dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL); 76 - dev->queue_ops->kick(dev, q); 78 + mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); 79 + mt76_queue_kick(dev, q); 77 80 spin_unlock_bh(&q->lock); 78 81 79 82 return 0; 80 83 } 
81 84 82 - int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, 85 + int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, 83 86 int cmd, bool wait_resp) 84 87 { 88 + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 85 89 unsigned long expires = jiffies + HZ; 86 90 int ret; 87 91 u8 seq; ··· 91 91 if (!skb) 92 92 return -EINVAL; 93 93 94 - mutex_lock(&dev->mmio.mcu.mutex); 94 + mutex_lock(&mdev->mmio.mcu.mutex); 95 95 96 - seq = ++dev->mmio.mcu.msg_seq & 0xf; 96 + seq = ++mdev->mmio.mcu.msg_seq & 0xf; 97 97 if (!seq) 98 - seq = ++dev->mmio.mcu.msg_seq & 0xf; 98 + seq = ++mdev->mmio.mcu.msg_seq & 0xf; 99 99 100 100 ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); 101 101 if (ret) ··· 107 107 108 108 skb = mt76x02_mcu_get_response(dev, expires); 109 109 if (!skb) { 110 - dev_err(dev->dev, 110 + dev_err(mdev->dev, 111 111 "MCU message %d (seq %d) timed out\n", cmd, 112 112 seq); 113 113 ret = -ETIMEDOUT; ··· 125 125 } 126 126 127 127 out: 128 - mutex_unlock(&dev->mmio.mcu.mutex); 128 + mutex_unlock(&mdev->mmio.mcu.mutex); 129 129 130 130 return ret; 131 131 } 132 132 EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); 133 133 134 - int mt76x02_mcu_function_select(struct mt76_dev *dev, 134 + int mt76x02_mcu_function_select(struct mt76x02_dev *dev, 135 135 enum mcu_function func, 136 136 u32 val, bool wait_resp) 137 137 { ··· 144 144 .value = cpu_to_le32(val), 145 145 }; 146 146 147 - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 148 - return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP, 149 - wait_resp); 147 + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); 148 + return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp); 150 149 } 151 150 EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); 152 151 153 - int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, 152 + int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, 154 153 bool wait_resp) 155 154 { 156 155 struct sk_buff *skb; 
··· 161 162 .level = cpu_to_le32(0), 162 163 }; 163 164 164 - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 165 - return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, 166 - wait_resp); 165 + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); 166 + return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp); 167 167 } 168 168 EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); 169 169 170 - int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, 170 + int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, 171 171 u32 param, bool wait) 172 172 { 173 173 struct sk_buff *skb; ··· 180 182 int ret; 181 183 182 184 if (wait) 183 - dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); 185 + mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); 184 186 185 - skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg)); 186 - ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); 187 + skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); 188 + ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); 187 189 if (ret) 188 190 return ret; 189 191 190 192 if (wait && 191 - WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0, 192 - BIT(31), BIT(31), 100))) 193 + WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, 194 + BIT(31), BIT(31), 100))) 193 195 return -ETIMEDOUT; 194 196 195 197 return 0; 196 198 } 197 199 EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate); 198 200 199 - int mt76x02_mcu_cleanup(struct mt76_dev *dev) 201 + int mt76x02_mcu_cleanup(struct mt76x02_dev *dev) 200 202 { 201 203 struct sk_buff *skb; 202 204 203 - dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1); 205 + mt76_wr(dev, MT_MCU_INT_LEVEL, 1); 204 206 usleep_range(20000, 30000); 205 207 206 - while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL) 208 + while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL) 207 209 dev_kfree_skb(skb); 208 210 209 211 return 0; 210 212 } 211 213 EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup); 212 214 213 - void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, 215 + void 
mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, 214 216 const struct mt76x02_fw_header *h) 215 217 { 216 218 u16 bld = le16_to_cpu(h->build_ver); 217 219 u16 ver = le16_to_cpu(h->fw_ver); 218 220 219 - snprintf(dev->hw->wiphy->fw_version, 220 - sizeof(dev->hw->wiphy->fw_version), 221 + snprintf(dev->mt76.hw->wiphy->fw_version, 222 + sizeof(dev->mt76.hw->wiphy->fw_version), 221 223 "%d.%d.%02d-b%x", 222 224 (ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld); 223 225 }
+8 -6
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
··· 17 17 #ifndef __MT76x02_MCU_H 18 18 #define __MT76x02_MCU_H 19 19 20 + #include "mt76x02.h" 21 + 20 22 #define MT_MCU_RESET_CTL 0x070C 21 23 #define MT_MCU_INT_LEVEL 0x0718 22 24 #define MT_MCU_COM_REG0 0x0730 ··· 96 94 u8 pad[2]; 97 95 }; 98 96 99 - int mt76x02_mcu_cleanup(struct mt76_dev *dev); 100 - int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type, 97 + int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); 98 + int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, 101 99 u32 param, bool wait); 102 100 struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); 103 - int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb, 101 + int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, 104 102 int cmd, bool wait_resp); 105 - int mt76x02_mcu_function_select(struct mt76_dev *dev, 103 + int mt76x02_mcu_function_select(struct mt76x02_dev *dev, 106 104 enum mcu_function func, 107 105 u32 val, bool wait_resp); 108 - int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on, 106 + int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, 109 107 bool wait_resp); 110 - void mt76x02_set_ethtool_fwver(struct mt76_dev *dev, 108 + void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, 111 109 const struct mt76x02_fw_header *h); 112 110 113 111 #endif /* __MT76x02_MCU_H */
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 65 65 u8 update = 1; 66 66 67 67 while (kfifo_get(&dev->txstatus_fifo, &stat)) 68 - mt76x02_send_tx_status(&dev->mt76, &stat, &update); 68 + mt76x02_send_tx_status(dev, &stat, &update); 69 69 } 70 70 71 71 static void mt76x02_tx_tasklet(unsigned long data)
+119 -44
drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
··· 17 17 18 18 #include <linux/kernel.h> 19 19 20 - #include "mt76.h" 20 + #include "mt76x02.h" 21 21 #include "mt76x02_phy.h" 22 - #include "mt76x02_mac.h" 23 22 24 - void mt76x02_phy_set_rxpath(struct mt76_dev *dev) 23 + void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev) 25 24 { 26 25 u32 val; 27 26 28 - val = __mt76_rr(dev, MT_BBP(AGC, 0)); 27 + val = mt76_rr(dev, MT_BBP(AGC, 0)); 29 28 val &= ~BIT(4); 30 29 31 - switch (dev->chainmask & 0xf) { 30 + switch (dev->mt76.chainmask & 0xf) { 32 31 case 2: 33 32 val |= BIT(3); 34 33 break; ··· 36 37 break; 37 38 } 38 39 39 - __mt76_wr(dev, MT_BBP(AGC, 0), val); 40 + mt76_wr(dev, MT_BBP(AGC, 0), val); 40 41 mb(); 41 - val = __mt76_rr(dev, MT_BBP(AGC, 0)); 42 + val = mt76_rr(dev, MT_BBP(AGC, 0)); 42 43 } 43 44 EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath); 44 45 45 - void mt76x02_phy_set_txdac(struct mt76_dev *dev) 46 + void mt76x02_phy_set_txdac(struct mt76x02_dev *dev) 46 47 { 47 48 int txpath; 48 49 49 - txpath = (dev->chainmask >> 8) & 0xf; 50 + txpath = (dev->mt76.chainmask >> 8) & 0xf; 50 51 switch (txpath) { 51 52 case 2: 52 - __mt76_set(dev, MT_BBP(TXBE, 5), 0x3); 53 + mt76_set(dev, MT_BBP(TXBE, 5), 0x3); 53 54 break; 54 55 default: 55 - __mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); 56 + mt76_clear(dev, MT_BBP(TXBE, 5), 0x3); 56 57 break; 57 58 } 58 59 } ··· 101 102 } 102 103 EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset); 103 104 104 - void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1) 105 + void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1) 105 106 { 106 - struct mt76_rate_power *t = &dev->rate_power; 107 + struct mt76_rate_power *t = &dev->mt76.rate_power; 107 108 108 - __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, 109 - txp_0); 110 - __mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, 111 - txp_1); 109 + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); 110 + mt76_rmw_field(dev, MT_TX_ALC_CFG_0, 
MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); 112 111 113 - __mt76_wr(dev, MT_TX_PWR_CFG_0, 114 - mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], 115 - t->ofdm[2])); 116 - __mt76_wr(dev, MT_TX_PWR_CFG_1, 117 - mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], 118 - t->ht[2])); 119 - __mt76_wr(dev, MT_TX_PWR_CFG_2, 120 - mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], 121 - t->ht[10])); 122 - __mt76_wr(dev, MT_TX_PWR_CFG_3, 123 - mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], 124 - t->stbc[2])); 125 - __mt76_wr(dev, MT_TX_PWR_CFG_4, 126 - mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); 127 - __mt76_wr(dev, MT_TX_PWR_CFG_7, 128 - mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], 129 - t->vht[9])); 130 - __mt76_wr(dev, MT_TX_PWR_CFG_8, 131 - mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); 132 - __mt76_wr(dev, MT_TX_PWR_CFG_9, 133 - mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9])); 112 + mt76_wr(dev, MT_TX_PWR_CFG_0, 113 + mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0], 114 + t->ofdm[2])); 115 + mt76_wr(dev, MT_TX_PWR_CFG_1, 116 + mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0], 117 + t->ht[2])); 118 + mt76_wr(dev, MT_TX_PWR_CFG_2, 119 + mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8], 120 + t->ht[10])); 121 + mt76_wr(dev, MT_TX_PWR_CFG_3, 122 + mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0], 123 + t->stbc[2])); 124 + mt76_wr(dev, MT_TX_PWR_CFG_4, 125 + mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0)); 126 + mt76_wr(dev, MT_TX_PWR_CFG_7, 127 + mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7], 128 + t->vht[9])); 129 + mt76_wr(dev, MT_TX_PWR_CFG_8, 130 + mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9])); 131 + mt76_wr(dev, MT_TX_PWR_CFG_9, 132 + mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9])); 134 133 } 135 134 EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); 136 135 137 - int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev) 136 + int mt76x02_phy_get_min_avg_rssi(struct 
mt76x02_dev *dev) 138 137 { 139 138 struct mt76x02_sta *sta; 140 139 struct mt76_wcid *wcid; ··· 142 145 local_bh_disable(); 143 146 rcu_read_lock(); 144 147 145 - for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { 146 - unsigned long mask = dev->wcid_mask[i]; 148 + for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) { 149 + unsigned long mask = dev->mt76.wcid_mask[i]; 147 150 148 151 if (!mask) 149 152 continue; ··· 152 155 if (!(mask & 1)) 153 156 continue; 154 157 155 - wcid = rcu_dereference(dev->wcid[j]); 158 + wcid = rcu_dereference(dev->mt76.wcid[j]); 156 159 if (!wcid) 157 160 continue; 158 161 159 162 sta = container_of(wcid, struct mt76x02_sta, wcid); 160 - spin_lock(&dev->rx_lock); 163 + spin_lock(&dev->mt76.rx_lock); 161 164 if (sta->inactive_count++ < 5) 162 165 cur_rssi = ewma_signal_read(&sta->rssi); 163 166 else 164 167 cur_rssi = 0; 165 - spin_unlock(&dev->rx_lock); 168 + spin_unlock(&dev->mt76.rx_lock); 166 169 167 170 if (cur_rssi < min_rssi) 168 171 min_rssi = cur_rssi; ··· 178 181 return min_rssi; 179 182 } 180 183 EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); 184 + 185 + void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) 186 + { 187 + int core_val, agc_val; 188 + 189 + switch (width) { 190 + case NL80211_CHAN_WIDTH_80: 191 + core_val = 3; 192 + agc_val = 7; 193 + break; 194 + case NL80211_CHAN_WIDTH_40: 195 + core_val = 2; 196 + agc_val = 3; 197 + break; 198 + default: 199 + core_val = 0; 200 + agc_val = 1; 201 + break; 202 + } 203 + 204 + mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); 205 + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); 206 + mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); 207 + mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); 208 + } 209 + EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw); 210 + 211 + void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, 212 + bool primary_upper) 213 + { 214 + switch (band) { 215 + case 
NL80211_BAND_2GHZ: 216 + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 217 + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 218 + break; 219 + case NL80211_BAND_5GHZ: 220 + mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 221 + mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 222 + break; 223 + } 224 + 225 + mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, 226 + primary_upper); 227 + } 228 + EXPORT_SYMBOL_GPL(mt76x02_phy_set_band); 229 + 230 + bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev) 231 + { 232 + u8 limit = dev->cal.low_gain > 0 ? 16 : 4; 233 + bool ret = false; 234 + u32 false_cca; 235 + 236 + false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); 237 + dev->cal.false_cca = false_cca; 238 + if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) { 239 + dev->cal.agc_gain_adjust += 2; 240 + ret = true; 241 + } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || 242 + (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) { 243 + dev->cal.agc_gain_adjust -= 2; 244 + ret = true; 245 + } 246 + 247 + return ret; 248 + } 249 + EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); 250 + 251 + void mt76x02_init_agc_gain(struct mt76x02_dev *dev) 252 + { 253 + dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8), 254 + MT_BBP_AGC_GAIN); 255 + dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9), 256 + MT_BBP_AGC_GAIN); 257 + memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, 258 + sizeof(dev->cal.agc_gain_cur)); 259 + dev->cal.low_gain = -1; 260 + } 261 + EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
+35 -4
drivers/net/wireless/mediatek/mt76/mt76x02_phy.h
··· 19 19 20 20 #include "mt76x02_regs.h" 21 21 22 + static inline int 23 + mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev) 24 + { 25 + switch (dev->mt76.chandef.width) { 26 + case NL80211_CHAN_WIDTH_80: 27 + return -62; 28 + case NL80211_CHAN_WIDTH_40: 29 + return -65; 30 + default: 31 + return -68; 32 + } 33 + } 34 + 35 + static inline int 36 + mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev) 37 + { 38 + switch (dev->mt76.chandef.width) { 39 + case NL80211_CHAN_WIDTH_80: 40 + return -76; 41 + case NL80211_CHAN_WIDTH_40: 42 + return -79; 43 + default: 44 + return -82; 45 + } 46 + } 47 + 22 48 void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset); 23 - void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2); 49 + void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2); 24 50 void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); 25 51 int mt76x02_get_max_rate_power(struct mt76_rate_power *r); 26 - void mt76x02_phy_set_rxpath(struct mt76_dev *dev); 27 - void mt76x02_phy_set_txdac(struct mt76_dev *dev); 28 - int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev); 52 + void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev); 53 + void mt76x02_phy_set_txdac(struct mt76x02_dev *dev); 54 + int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev); 55 + void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); 56 + void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, 57 + bool primary_upper); 58 + bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev); 59 + void mt76x02_init_agc_gain(struct mt76x02_dev *dev); 29 60 30 61 #endif /* __MT76x02_PHY_H */
+2 -2
drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
··· 205 205 #define MT_TXQ_STA 0x0434 206 206 #define MT_RF_CSR_CFG 0x0500 207 207 #define MT_RF_CSR_CFG_DATA GENMASK(7, 0) 208 - #define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) 209 - #define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) 208 + #define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8) 209 + #define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15) 210 210 #define MT_RF_CSR_CFG_WR BIT(30) 211 211 #define MT_RF_CSR_CFG_KICK BIT(31) 212 212
+14 -15
drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
··· 71 71 } 72 72 EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb); 73 73 74 - s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev, 74 + s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, 75 75 const struct ieee80211_tx_rate *rate) 76 76 { 77 77 s8 max_txpwr; ··· 80 80 u8 mcs = ieee80211_rate_get_vht_mcs(rate); 81 81 82 82 if (mcs == 8 || mcs == 9) { 83 - max_txpwr = dev->rate_power.vht[8]; 83 + max_txpwr = dev->mt76.rate_power.vht[8]; 84 84 } else { 85 85 u8 nss, idx; 86 86 87 87 nss = ieee80211_rate_get_vht_nss(rate); 88 88 idx = ((nss - 1) << 3) + mcs; 89 - max_txpwr = dev->rate_power.ht[idx & 0xf]; 89 + max_txpwr = dev->mt76.rate_power.ht[idx & 0xf]; 90 90 } 91 91 } else if (rate->flags & IEEE80211_TX_RC_MCS) { 92 - max_txpwr = dev->rate_power.ht[rate->idx & 0xf]; 92 + max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf]; 93 93 } else { 94 - enum nl80211_band band = dev->chandef.chan->band; 94 + enum nl80211_band band = dev->mt76.chandef.chan->band; 95 95 96 96 if (band == NL80211_BAND_2GHZ) { 97 97 const struct ieee80211_rate *r; 98 - struct wiphy *wiphy = dev->hw->wiphy; 99 - struct mt76_rate_power *rp = &dev->rate_power; 98 + struct wiphy *wiphy = dev->mt76.hw->wiphy; 99 + struct mt76_rate_power *rp = &dev->mt76.rate_power; 100 100 101 101 r = &wiphy->bands[band]->bitrates[rate->idx]; 102 102 if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE) ··· 104 104 else 105 105 max_txpwr = rp->ofdm[r->hw_value & 0x7]; 106 106 } else { 107 - max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7]; 107 + max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7]; 108 108 } 109 109 } 110 110 ··· 112 112 } 113 113 EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj); 114 114 115 - s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj) 115 + s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj) 116 116 { 117 - struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 118 - 119 117 txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf); 
120 118 txpwr -= (dev->target_power + dev->target_power_delta[0]); 121 119 txpwr = min_t(s8, txpwr, max_txpwr_adj); ··· 131 133 { 132 134 s8 txpwr_adj; 133 135 134 - txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr, 136 + txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr, 135 137 dev->mt76.rate_power.ofdm[4]); 136 138 mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG, 137 139 MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj); ··· 155 157 } 156 158 EXPORT_SYMBOL_GPL(mt76x02_tx_complete); 157 159 158 - bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update) 160 + bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update) 159 161 { 162 + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 160 163 struct mt76x02_tx_status stat; 161 164 162 165 if (!mt76x02_mac_load_tx_status(dev, &stat)) ··· 180 181 int ret; 181 182 182 183 if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) 183 - mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false); 184 + mt76x02_mac_wcid_set_drop(dev, wcid->idx, false); 184 185 185 - mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len); 186 + mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); 186 187 187 188 ret = mt76x02_insert_hdr_pad(skb); 188 189 if (ret < 0)
+4 -4
drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
··· 17 17 #ifndef __MT76x02_USB_H 18 18 #define __MT76x02_USB_H 19 19 20 - #include "mt76.h" 20 + #include "mt76x02.h" 21 21 22 22 void mt76x02u_init_mcu(struct mt76_dev *dev); 23 - void mt76x02u_mcu_fw_reset(struct mt76_dev *dev); 24 - int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, 23 + void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev); 24 + int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, 25 25 int data_len, u32 max_payload, u32 offset); 26 26 27 27 int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags); 28 - int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, 28 + int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, 29 29 struct sk_buff *skb, struct mt76_queue *q, 30 30 struct mt76_wcid *wcid, struct ieee80211_sta *sta, 31 31 u32 *tx_info);
+3 -17
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
··· 34 34 } 35 35 EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); 36 36 37 - static int mt76x02u_check_skb_rooms(struct sk_buff *skb) 38 - { 39 - int hdr_len = ieee80211_get_hdrlen_from_skb(skb); 40 - u32 need_head; 41 - 42 - need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN; 43 - if (hdr_len % 4) 44 - need_head += 2; 45 - return skb_cow(skb, need_head); 46 - } 47 - 48 37 int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) 49 38 { 50 39 struct sk_buff *iter, *last = skb; ··· 88 99 return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); 89 100 } 90 101 91 - int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data, 102 + int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, 92 103 struct sk_buff *skb, struct mt76_queue *q, 93 104 struct mt76_wcid *wcid, struct ieee80211_sta *sta, 94 105 u32 *tx_info) 95 106 { 107 + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 96 108 struct mt76x02_txwi *txwi; 97 - int err, len = skb->len; 98 - 99 - err = mt76x02u_check_skb_rooms(skb); 100 - if (err < 0) 101 - return -ENOMEM; 109 + int len = skb->len; 102 110 103 111 mt76x02_insert_hdr_pad(skb); 104 112
+13 -14
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/firmware.h> 19 19 20 - #include "mt76.h" 21 - #include "mt76x02_dma.h" 20 + #include "mt76x02.h" 22 21 #include "mt76x02_mcu.h" 23 22 #include "mt76x02_usb.h" 24 23 ··· 254 255 return ret; 255 256 } 256 257 257 - void mt76x02u_mcu_fw_reset(struct mt76_dev *dev) 258 + void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev) 258 259 { 259 - mt76u_vendor_request(dev, MT_VEND_DEV_MODE, 260 + mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, 260 261 USB_DIR_OUT | USB_TYPE_VENDOR, 261 262 0x1, 0, NULL, 0); 262 263 } 263 264 EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset); 264 265 265 266 static int 266 - __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf, 267 + __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf, 267 268 const void *fw_data, int len, u32 dst_addr) 268 269 { 269 270 u8 *data = sg_virt(&buf->urb->sg[0]); ··· 280 281 memcpy(data + sizeof(info), fw_data, len); 281 282 memset(data + sizeof(info) + len, 0, 4); 282 283 283 - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, 284 + mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE, 284 285 MT_FCE_DMA_ADDR, dst_addr); 285 286 len = roundup(len, 4); 286 - mt76u_single_wr(dev, MT_VEND_WRITE_FCE, 287 + mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE, 287 288 MT_FCE_DMA_LEN, len << 16); 288 289 289 290 buf->len = MT_CMD_HDR_LEN + len + sizeof(info); 290 - err = mt76u_submit_buf(dev, USB_DIR_OUT, 291 + err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT, 291 292 MT_EP_OUT_INBAND_CMD, 292 293 buf, GFP_KERNEL, 293 294 mt76u_mcu_complete_urb, &cmpl); ··· 296 297 297 298 if (!wait_for_completion_timeout(&cmpl, 298 299 msecs_to_jiffies(1000))) { 299 - dev_err(dev->dev, "firmware upload timed out\n"); 300 + dev_err(dev->mt76.dev, "firmware upload timed out\n"); 300 301 usb_kill_urb(buf->urb); 301 302 return -ETIMEDOUT; 302 303 } 303 304 304 305 if (mt76u_urb_error(buf->urb)) { 305 - dev_err(dev->dev, "firmware upload failed: %d\n", 306 + 
dev_err(dev->mt76.dev, "firmware upload failed: %d\n", 306 307 buf->urb->status); 307 308 return buf->urb->status; 308 309 } 309 310 310 - val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); 311 + val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); 311 312 val++; 312 - mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); 313 + mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); 313 314 314 315 return 0; 315 316 } 316 317 317 - int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data, 318 + int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, 318 319 int data_len, u32 max_payload, u32 offset) 319 320 { 320 321 int err, len, pos = 0, max_len = max_payload - 8; 321 322 struct mt76u_buf buf; 322 323 323 - err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload, 324 + err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload, 324 325 GFP_KERNEL); 325 326 if (err < 0) 326 327 return err;
+61 -59
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
··· 48 48 EXPORT_SYMBOL_GPL(mt76x02_rates); 49 49 50 50 void mt76x02_configure_filter(struct ieee80211_hw *hw, 51 - unsigned int changed_flags, 52 - unsigned int *total_flags, u64 multicast) 51 + unsigned int changed_flags, 52 + unsigned int *total_flags, u64 multicast) 53 53 { 54 - struct mt76_dev *dev = hw->priv; 54 + struct mt76x02_dev *dev = hw->priv; 55 55 u32 flags = 0; 56 56 57 57 #define MT76_FILTER(_flag, _hw) do { \ 58 58 flags |= *total_flags & FIF_##_flag; \ 59 - dev->rxfilter &= ~(_hw); \ 60 - dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ 59 + dev->mt76.rxfilter &= ~(_hw); \ 60 + dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \ 61 61 } while (0) 62 62 63 - mutex_lock(&dev->mutex); 63 + mutex_lock(&dev->mt76.mutex); 64 64 65 - dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; 65 + dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; 66 66 67 67 MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); 68 68 MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); ··· 75 75 MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); 76 76 77 77 *total_flags = flags; 78 - dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); 78 + mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter); 79 79 80 - mutex_unlock(&dev->mutex); 80 + mutex_unlock(&dev->mt76.mutex); 81 81 } 82 82 EXPORT_SYMBOL_GPL(mt76x02_configure_filter); 83 83 84 84 int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 85 - struct ieee80211_sta *sta) 85 + struct ieee80211_sta *sta) 86 86 { 87 - struct mt76_dev *dev = hw->priv; 88 - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 89 - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 87 + struct mt76x02_dev *dev = hw->priv; 88 + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; 89 + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 90 90 int ret = 0; 91 91 int idx = 0; 92 92 int i; 93 93 94 - mutex_lock(&dev->mutex); 94 + mutex_lock(&dev->mt76.mutex); 95 95 96 - idx = mt76_wcid_alloc(dev->wcid_mask, 
ARRAY_SIZE(dev->wcid)); 96 + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); 97 97 if (idx < 0) { 98 98 ret = -ENOSPC; 99 99 goto out; ··· 113 113 114 114 ewma_signal_init(&msta->rssi); 115 115 116 - rcu_assign_pointer(dev->wcid[idx], &msta->wcid); 116 + rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid); 117 117 118 118 out: 119 - mutex_unlock(&dev->mutex); 119 + mutex_unlock(&dev->mt76.mutex); 120 120 121 121 return ret; 122 122 } 123 123 EXPORT_SYMBOL_GPL(mt76x02_sta_add); 124 124 125 125 int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 126 - struct ieee80211_sta *sta) 126 + struct ieee80211_sta *sta) 127 127 { 128 - struct mt76_dev *dev = hw->priv; 129 - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 128 + struct mt76x02_dev *dev = hw->priv; 129 + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; 130 130 int idx = msta->wcid.idx; 131 131 int i; 132 132 133 - mutex_lock(&dev->mutex); 134 - rcu_assign_pointer(dev->wcid[idx], NULL); 133 + mutex_lock(&dev->mt76.mutex); 134 + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 135 135 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 136 - mt76_txq_remove(dev, sta->txq[i]); 136 + mt76_txq_remove(&dev->mt76, sta->txq[i]); 137 137 mt76x02_mac_wcid_set_drop(dev, idx, true); 138 - mt76_wcid_free(dev->wcid_mask, idx); 138 + mt76_wcid_free(dev->mt76.wcid_mask, idx); 139 139 mt76x02_mac_wcid_setup(dev, idx, 0, NULL); 140 - mutex_unlock(&dev->mutex); 140 + mutex_unlock(&dev->mt76.mutex); 141 141 142 142 return 0; 143 143 } 144 144 EXPORT_SYMBOL_GPL(mt76x02_sta_remove); 145 145 146 - void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif, 147 - unsigned int idx) 146 + void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, 147 + unsigned int idx) 148 148 { 149 - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 149 + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 150 150 151 151 mvif->idx = 
idx; 152 152 mvif->group_wcid.idx = MT_VIF_WCID(idx); ··· 158 158 int 159 159 mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 160 160 { 161 - struct mt76_dev *dev = hw->priv; 161 + struct mt76x02_dev *dev = hw->priv; 162 162 unsigned int idx = 0; 163 163 164 164 if (vif->addr[0] & BIT(1)) 165 - idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7); 165 + idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); 166 166 167 167 /* 168 168 * Client mode typically only has one configurable BSSID register, ··· 186 186 EXPORT_SYMBOL_GPL(mt76x02_add_interface); 187 187 188 188 void mt76x02_remove_interface(struct ieee80211_hw *hw, 189 - struct ieee80211_vif *vif) 189 + struct ieee80211_vif *vif) 190 190 { 191 - struct mt76_dev *dev = hw->priv; 191 + struct mt76x02_dev *dev = hw->priv; 192 192 193 - mt76_txq_remove(dev, vif->txq); 193 + mt76_txq_remove(&dev->mt76, vif->txq); 194 194 } 195 195 EXPORT_SYMBOL_GPL(mt76x02_remove_interface); 196 196 197 197 int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 198 - struct ieee80211_ampdu_params *params) 198 + struct ieee80211_ampdu_params *params) 199 199 { 200 200 enum ieee80211_ampdu_mlme_action action = params->action; 201 201 struct ieee80211_sta *sta = params->sta; 202 - struct mt76_dev *dev = hw->priv; 202 + struct mt76x02_dev *dev = hw->priv; 203 203 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 204 204 struct ieee80211_txq *txq = sta->txq[params->tid]; 205 205 u16 tid = params->tid; ··· 213 213 214 214 switch (action) { 215 215 case IEEE80211_AMPDU_RX_START: 216 - mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size); 217 - __mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); 216 + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, 217 + *ssn, params->buf_size); 218 + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); 218 219 break; 219 220 case IEEE80211_AMPDU_RX_STOP: 220 - mt76_rx_aggr_stop(dev, 
&msta->wcid, tid); 221 - __mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); 221 + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); 222 + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, 223 + BIT(16 + tid)); 222 224 break; 223 225 case IEEE80211_AMPDU_TX_OPERATIONAL: 224 226 mtxq->aggr = true; ··· 247 245 EXPORT_SYMBOL_GPL(mt76x02_ampdu_action); 248 246 249 247 int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 250 - struct ieee80211_vif *vif, struct ieee80211_sta *sta, 251 - struct ieee80211_key_conf *key) 248 + struct ieee80211_vif *vif, struct ieee80211_sta *sta, 249 + struct ieee80211_key_conf *key) 252 250 { 253 - struct mt76_dev *dev = hw->priv; 254 - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; 251 + struct mt76x02_dev *dev = hw->priv; 252 + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 255 253 struct mt76x02_sta *msta; 256 254 struct mt76_wcid *wcid; 257 255 int idx = key->keyidx; ··· 297 295 298 296 key = NULL; 299 297 } 300 - mt76_wcid_key_setup(dev, wcid, key); 298 + mt76_wcid_key_setup(&dev->mt76, wcid, key); 301 299 302 300 if (!msta) { 303 301 if (key || wcid->hw_key_idx == idx) { ··· 314 312 EXPORT_SYMBOL_GPL(mt76x02_set_key); 315 313 316 314 int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 317 - u16 queue, const struct ieee80211_tx_queue_params *params) 315 + u16 queue, const struct ieee80211_tx_queue_params *params) 318 316 { 319 - struct mt76_dev *dev = hw->priv; 317 + struct mt76x02_dev *dev = hw->priv; 320 318 u8 cw_min = 5, cw_max = 10, qid; 321 319 u32 val; 322 320 323 - qid = dev->q_tx[queue].hw_idx; 321 + qid = dev->mt76.q_tx[queue].hw_idx; 324 322 325 323 if (params->cw_min) 326 324 cw_min = fls(params->cw_min); ··· 331 329 FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | 332 330 FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | 333 331 FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); 334 - __mt76_wr(dev, MT_EDCA_CFG_AC(qid), val); 332 + mt76_wr(dev, 
MT_EDCA_CFG_AC(qid), val); 335 333 336 - val = __mt76_rr(dev, MT_WMM_TXOP(qid)); 334 + val = mt76_rr(dev, MT_WMM_TXOP(qid)); 337 335 val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid)); 338 336 val |= params->txop << MT_WMM_TXOP_SHIFT(qid); 339 - __mt76_wr(dev, MT_WMM_TXOP(qid), val); 337 + mt76_wr(dev, MT_WMM_TXOP(qid), val); 340 338 341 - val = __mt76_rr(dev, MT_WMM_AIFSN); 339 + val = mt76_rr(dev, MT_WMM_AIFSN); 342 340 val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid)); 343 341 val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid); 344 - __mt76_wr(dev, MT_WMM_AIFSN, val); 342 + mt76_wr(dev, MT_WMM_AIFSN, val); 345 343 346 - val = __mt76_rr(dev, MT_WMM_CWMIN); 344 + val = mt76_rr(dev, MT_WMM_CWMIN); 347 345 val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid)); 348 346 val |= cw_min << MT_WMM_CWMIN_SHIFT(qid); 349 - __mt76_wr(dev, MT_WMM_CWMIN, val); 347 + mt76_wr(dev, MT_WMM_CWMIN, val); 350 348 351 - val = __mt76_rr(dev, MT_WMM_CWMAX); 349 + val = mt76_rr(dev, MT_WMM_CWMAX); 352 350 val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid)); 353 351 val |= cw_max << MT_WMM_CWMAX_SHIFT(qid); 354 - __mt76_wr(dev, MT_WMM_CWMAX, val); 352 + mt76_wr(dev, MT_WMM_CWMAX, val); 355 353 356 354 return 0; 357 355 } ··· 361 359 struct ieee80211_vif *vif, 362 360 struct ieee80211_sta *sta) 363 361 { 364 - struct mt76_dev *dev = hw->priv; 362 + struct mt76x02_dev *dev = hw->priv; 365 363 struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; 366 364 struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates); 367 365 struct ieee80211_tx_rate rate = {}; ··· 427 425 }; 428 426 EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets); 429 427 430 - void mt76x02_set_beacon_offsets(struct mt76_dev *dev) 428 + void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) 431 429 { 432 430 u16 val, base = MT_BEACON_BASE; 433 431 u32 regs[4] = {}; ··· 439 437 } 440 438 441 439 for (i = 0; i < 4; i++) 442 - __mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); 440 + mt76_wr(dev, MT_BCN_OFFSET(i), 
regs[i]); 443 441 } 444 442 EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets); 445 443
+36 -44
drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
··· 177 177 178 178 efuse = dev->mt76.otp.data; 179 179 180 - if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse, 181 - MT7662_EEPROM_SIZE, MT_EE_READ)) 180 + if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE, 181 + MT_EE_READ)) 182 182 goto out; 183 183 184 184 if (found) { ··· 248 248 group = mt76x2_get_cal_channel_group(channel); 249 249 switch (group) { 250 250 case MT_CH_5G_JAPAN: 251 - return mt76x02_eeprom_get(&dev->mt76, 251 + return mt76x02_eeprom_get(dev, 252 252 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN); 253 253 case MT_CH_5G_UNII_1: 254 - return mt76x02_eeprom_get(&dev->mt76, 254 + return mt76x02_eeprom_get(dev, 255 255 MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8; 256 256 case MT_CH_5G_UNII_2: 257 - return mt76x02_eeprom_get(&dev->mt76, 257 + return mt76x02_eeprom_get(dev, 258 258 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN); 259 259 case MT_CH_5G_UNII_2E_1: 260 - return mt76x02_eeprom_get(&dev->mt76, 260 + return mt76x02_eeprom_get(dev, 261 261 MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8; 262 262 case MT_CH_5G_UNII_2E_2: 263 - return mt76x02_eeprom_get(&dev->mt76, 263 + return mt76x02_eeprom_get(dev, 264 264 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN); 265 265 default: 266 - return mt76x02_eeprom_get(&dev->mt76, 266 + return mt76x02_eeprom_get(dev, 267 267 MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8; 268 268 } 269 269 } ··· 277 277 u16 val; 278 278 279 279 if (chan->band == NL80211_BAND_2GHZ) 280 - val = mt76x02_eeprom_get(&dev->mt76, 281 - MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; 280 + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8; 282 281 else 283 282 val = mt76x2_get_5g_rx_gain(dev, channel); 284 283 285 284 mt76x2_set_rx_gain_group(dev, val); 286 285 287 - mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g); 286 + mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g); 288 287 mt76x2_set_rssi_offset(dev, 0, val); 289 288 mt76x2_set_rssi_offset(dev, 1, val >> 8); 290 289 ··· 292 293 dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16; 293 294 dev->cal.rx.mcu_gain |= 
(lna_5g[2] & 0xff) << 24; 294 295 295 - lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan); 296 + lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan); 296 297 dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8); 297 298 } 298 299 EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain); ··· 307 308 308 309 memset(t, 0, sizeof(*t)); 309 310 310 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK); 311 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK); 311 312 t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val); 312 313 t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8); 313 314 314 315 if (is_5ghz) 315 - val = mt76x02_eeprom_get(&dev->mt76, 316 - MT_EE_TX_POWER_OFDM_5G_6M); 316 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M); 317 317 else 318 - val = mt76x02_eeprom_get(&dev->mt76, 319 - MT_EE_TX_POWER_OFDM_2G_6M); 318 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M); 320 319 t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val); 321 320 t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8); 322 321 323 322 if (is_5ghz) 324 - val = mt76x02_eeprom_get(&dev->mt76, 325 - MT_EE_TX_POWER_OFDM_5G_24M); 323 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M); 326 324 else 327 - val = mt76x02_eeprom_get(&dev->mt76, 328 - MT_EE_TX_POWER_OFDM_2G_24M); 325 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M); 329 326 t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val); 330 327 t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8); 331 328 332 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0); 329 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0); 333 330 t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val); 334 331 t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8); 335 332 336 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4); 333 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4); 337 334 t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val); 338 335 t->ht[6] = t->ht[7] = 
mt76x02_rate_power_val(val >> 8); 339 336 340 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8); 337 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8); 341 338 t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val); 342 339 t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8); 343 340 344 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12); 341 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12); 345 342 t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val); 346 343 t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8); 347 344 348 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0); 345 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0); 349 346 t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val); 350 347 t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8); 351 348 352 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4); 349 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4); 353 350 t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val); 354 351 t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8); 355 352 356 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8); 353 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8); 357 354 if (!is_5ghz) 358 355 val >>= 8; 359 356 t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8); ··· 385 390 t->chain[chain].target_power = data[2]; 386 391 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); 387 392 388 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER); 393 + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER); 389 394 t->target_power = val >> 8; 390 395 } 391 396 ··· 436 441 t->chain[chain].target_power = data[2]; 437 442 t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7); 438 443 439 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN); 444 + val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN); 440 445 t->target_power = val & 0xff; 441 446 } 
442 447 ··· 448 453 449 454 memset(t, 0, sizeof(*t)); 450 455 451 - bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40); 452 - bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80); 456 + bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40); 457 + bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80); 453 458 454 459 if (chan->band == NL80211_BAND_5GHZ) { 455 460 bw40 >>= 8; ··· 464 469 MT_EE_TX_POWER_1_START_2G); 465 470 } 466 471 467 - if (mt76x02_tssi_enabled(&dev->mt76) || 472 + if (mt76x2_tssi_enabled(dev) || 468 473 !mt76x02_field_valid(t->target_power)) 469 474 t->target_power = t->chain[0].target_power; 470 475 ··· 481 486 482 487 memset(t, 0, sizeof(*t)); 483 488 484 - if (!mt76x02_temp_tx_alc_enabled(&dev->mt76)) 489 + if (!mt76x2_temp_tx_alc_enabled(dev)) 485 490 return -EINVAL; 486 491 487 - if (!mt76x02_ext_pa_enabled(&dev->mt76, band)) 492 + if (!mt76x02_ext_pa_enabled(dev, band)) 488 493 return -EINVAL; 489 494 490 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8; 495 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; 491 496 t->temp_25_ref = val & 0x7f; 492 497 if (band == NL80211_BAND_5GHZ) { 493 - slope = mt76x02_eeprom_get(&dev->mt76, 494 - MT_EE_RF_TEMP_COMP_SLOPE_5G); 495 - bounds = mt76x02_eeprom_get(&dev->mt76, 496 - MT_EE_TX_POWER_EXT_PA_5G); 498 + slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); 499 + bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); 497 500 } else { 498 - slope = mt76x02_eeprom_get(&dev->mt76, 499 - MT_EE_RF_TEMP_COMP_SLOPE_2G); 500 - bounds = mt76x02_eeprom_get(&dev->mt76, 501 + slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G); 502 + bounds = mt76x02_eeprom_get(dev, 501 503 MT_EE_TX_POWER_DELTA_BW80) >> 8; 502 504 } 503 505 ··· 515 523 if (ret) 516 524 return ret; 517 525 518 - mt76x02_eeprom_parse_hw_cap(&dev->mt76); 526 + mt76x02_eeprom_parse_hw_cap(dev); 519 527 mt76x2_eeprom_get_macaddr(dev); 520 528 
mt76_eeprom_override(&dev->mt76); 521 529 dev->mt76.macaddr[0] &= ~BIT(1);
+22 -1
drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h
··· 62 62 static inline bool 63 63 mt76x2_has_ext_lna(struct mt76x02_dev *dev) 64 64 { 65 - u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1); 65 + u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1); 66 66 67 67 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) 68 68 return val & MT_EE_NIC_CONF_1_LNA_EXT_2G; 69 69 else 70 70 return val & MT_EE_NIC_CONF_1_LNA_EXT_5G; 71 + } 72 + 73 + static inline bool 74 + mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev) 75 + { 76 + u16 val; 77 + 78 + val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); 79 + if (!(val & BIT(15))) 80 + return false; 81 + 82 + return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & 83 + MT_EE_NIC_CONF_1_TEMP_TX_ALC; 84 + } 85 + 86 + static inline bool 87 + mt76x2_tssi_enabled(struct mt76x02_dev *dev) 88 + { 89 + return !mt76x2_temp_tx_alc_enabled(dev) && 90 + (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) & 91 + MT_EE_NIC_CONF_1_TX_ALC_EN); 71 92 } 72 93 73 94 #endif
+3
drivers/net/wireless/mediatek/mt76/mt76x2/init.c
··· 167 167 hw->max_report_rates = 7; 168 168 hw->max_rate_tries = 1; 169 169 hw->extra_tx_headroom = 2; 170 + if (mt76_is_usb(dev)) 171 + hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + 172 + MT_DMA_HDR_LEN; 170 173 171 174 hw->sta_data_size = sizeof(struct mt76x02_sta); 172 175 hw->vif_data_size = sizeof(struct mt76x02_vif);
+2 -3
drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
··· 59 59 int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, 60 60 u8 channel) 61 61 { 62 - struct mt76_dev *mdev = &dev->mt76; 63 62 struct sk_buff *skb; 64 63 struct { 65 64 u8 cr_mode; ··· 75 76 u32 val; 76 77 77 78 val = BIT(31); 78 - val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; 79 - val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00; 79 + val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff; 80 + val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00; 80 81 msg.cfg = cpu_to_le32(val); 81 82 82 83 /* first set the channel without the extension channel info */
-2
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
··· 100 100 enum nl80211_band band); 101 101 void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, 102 102 enum nl80211_band band, u8 bw); 103 - void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); 104 - void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper); 105 103 void mt76x2_apply_gain_adj(struct mt76x02_dev *dev); 106 104 107 105 #endif
+9 -9
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
··· 43 43 u16 eep_val; 44 44 s8 offset = 0; 45 45 46 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); 46 + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2); 47 47 48 48 offset = eep_val & 0x7f; 49 49 if ((eep_val & 0xff) == 0xff) ··· 53 53 54 54 eep_val >>= 8; 55 55 if (eep_val == 0x00 || eep_val == 0xff) { 56 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); 56 + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1); 57 57 eep_val &= 0xff; 58 58 59 59 if (eep_val == 0x00 || eep_val == 0xff) ··· 64 64 mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset); 65 65 mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL); 66 66 67 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); 67 + eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); 68 68 switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { 69 69 case 0: 70 70 mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80); ··· 143 143 mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0); 144 144 145 145 for (i = 0; i < 256; i++) 146 - mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL); 146 + mt76x02_mac_wcid_setup(dev, i, 0, NULL); 147 147 148 148 for (i = 0; i < MT_MAX_VIFS; i++) 149 - mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL); 149 + mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); 150 150 151 151 for (i = 0; i < 16; i++) 152 152 for (k = 0; k < 4; k++) 153 - mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL); 153 + mt76x02_mac_shared_key_setup(dev, i, k, NULL); 154 154 155 155 for (i = 0; i < 8; i++) { 156 156 mt76x2_mac_set_bssid(dev, i, null_addr); ··· 168 168 MT_CH_TIME_CFG_EIFS_AS_BUSY | 169 169 FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); 170 170 171 - mt76x02_set_beacon_offsets(&dev->mt76); 171 + mt76x02_set_beacon_offsets(dev); 172 172 173 173 mt76x2_set_tx_ackto(dev); 174 174 ··· 337 337 { 338 338 cancel_delayed_work_sync(&dev->cal_work); 339 339 cancel_delayed_work_sync(&dev->mac_work); 340 - mt76x02_mcu_set_radio_state(&dev->mt76, false, true); 340 + 
mt76x02_mcu_set_radio_state(dev, false, true); 341 341 mt76x2_mac_stop(dev, false); 342 342 } 343 343 ··· 347 347 tasklet_disable(&dev->pre_tbtt_tasklet); 348 348 mt76x2_stop_hardware(dev); 349 349 mt76x02_dma_cleanup(dev); 350 - mt76x02_mcu_cleanup(&dev->mt76); 350 + mt76x02_mcu_cleanup(dev); 351 351 } 352 352 353 353 struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
··· 36 36 if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi))) 37 37 return -ENOSPC; 38 38 39 - mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len); 39 + mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); 40 40 41 41 mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); 42 42 offset += sizeof(txwi);
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
··· 172 172 int idx = msta->wcid.idx; 173 173 174 174 mt76_stop_tx_queues(&dev->mt76, sta, true); 175 - mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps); 175 + mt76x02_mac_wcid_set_drop(dev, idx, ps); 176 176 } 177 177 178 178 static void
+3 -3
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
··· 140 140 141 141 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0); 142 142 143 - val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); 143 + val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); 144 144 if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1) 145 145 mt76_set(dev, MT_MCU_COM_REG0, BIT(30)); 146 146 ··· 152 152 return -ETIMEDOUT; 153 153 } 154 154 155 + mt76x02_set_ethtool_fwver(dev, hdr); 155 156 dev_info(dev->mt76.dev, "Firmware running!\n"); 156 - mt76x02_set_ethtool_fwver(&dev->mt76, hdr); 157 157 158 158 release_firmware(fw); 159 159 ··· 183 183 if (ret) 184 184 return ret; 185 185 186 - mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true); 186 + mt76x02_mcu_function_select(dev, Q_SELECT, 1, true); 187 187 return 0; 188 188 }
+23 -77
drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
··· 26 26 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 27 27 u32 flag = 0; 28 28 29 - if (!mt76x02_tssi_enabled(&dev->mt76)) 29 + if (!mt76x2_tssi_enabled(dev)) 30 30 return false; 31 31 32 32 if (mt76x2_channel_silent(dev)) ··· 35 35 if (chan->band == NL80211_BAND_5GHZ) 36 36 flag |= BIT(0); 37 37 38 - if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) 38 + if (mt76x02_ext_pa_enabled(dev, chan->band)) 39 39 flag |= BIT(8); 40 40 41 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true); 41 + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true); 42 42 dev->cal.tssi_cal_done = true; 43 43 return true; 44 44 } ··· 62 62 mt76x2_mac_stop(dev, false); 63 63 64 64 if (is_5ghz) 65 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true); 65 + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true); 66 66 67 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true); 68 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true); 69 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true); 70 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true); 71 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true); 67 + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true); 68 + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true); 69 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true); 70 + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true); 71 + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true); 72 72 73 73 if (!mac_stopped) 74 74 mt76x2_mac_resume(dev); ··· 125 125 } 126 126 127 127 static void 128 - mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest) 129 - { 130 - dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN); 131 - dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN); 132 - } 133 - 134 - static int 135 - mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev) 136 - { 137 - switch (dev->mt76.chandef.width) { 138 - case NL80211_CHAN_WIDTH_80: 139 - return -62; 140 - 
case NL80211_CHAN_WIDTH_40: 141 - return -65; 142 - default: 143 - return -68; 144 - } 145 - } 146 - 147 - static int 148 - mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev) 149 - { 150 - switch (dev->mt76.chandef.width) { 151 - case NL80211_CHAN_WIDTH_80: 152 - return -76; 153 - case NL80211_CHAN_WIDTH_40: 154 - return -79; 155 - default: 156 - return -82; 157 - } 158 - } 159 - 160 - static void 161 128 mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) 162 129 { 163 130 u32 val; ··· 150 183 } 151 184 152 185 static void 153 - mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev) 154 - { 155 - u32 false_cca; 156 - u8 limit = dev->cal.low_gain > 0 ? 16 : 4; 157 - 158 - false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1)); 159 - dev->cal.false_cca = false_cca; 160 - if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) 161 - dev->cal.agc_gain_adjust += 2; 162 - else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) || 163 - (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) 164 - dev->cal.agc_gain_adjust -= 2; 165 - else 166 - return; 167 - 168 - mt76x2_phy_set_gain_val(dev); 169 - } 170 - 171 - static void 172 186 mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) 173 187 { 174 188 u8 *gain = dev->cal.agc_gain_init; ··· 158 210 int low_gain; 159 211 u32 val; 160 212 161 - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76); 213 + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); 162 214 163 - low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) + 164 - (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev)); 215 + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + 216 + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); 165 217 166 218 gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); 167 219 dev->cal.low_gain = low_gain; 168 220 169 221 if (!gain_change) { 170 - mt76x2_phy_adjust_vga_gain(dev); 222 + if (mt76x02_phy_adjust_vga_gain(dev)) 
223 + mt76x2_phy_set_gain_val(dev); 171 224 return; 172 225 } 173 226 ··· 286 337 mt76x2_configure_tx_delay(dev, band, bw); 287 338 mt76x2_phy_set_txpower(dev); 288 339 289 - mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); 290 - mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); 340 + mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1); 341 + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); 291 342 292 343 mt76_rmw(dev, MT_EXT_CCA_CFG, 293 344 (MT_EXT_CCA_CFG_CCA0 | ··· 310 361 mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); 311 362 312 363 if (!dev->cal.init_cal_done) { 313 - u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); 364 + u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); 314 365 315 366 if (val != 0xff) 316 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true); 367 + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true); 317 368 } 318 369 319 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true); 370 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true); 320 371 321 372 /* Rx LPF calibration */ 322 373 if (!dev->cal.init_cal_done) 323 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true); 374 + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true); 324 375 325 376 dev->cal.init_cal_done = true; 326 377 ··· 333 384 if (scan) 334 385 return 0; 335 386 336 - dev->cal.low_gain = -1; 337 387 mt76x2_phy_channel_calibrate(dev, true); 338 - mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init); 339 - memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, 340 - sizeof(dev->cal.agc_gain_cur)); 388 + mt76x02_init_agc_gain(dev); 341 389 342 390 /* init default values for temp compensation */ 343 - if (mt76x02_tssi_enabled(&dev->mt76)) { 391 + if (mt76x2_tssi_enabled(dev)) { 344 392 mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 345 393 0x38); 346 394 mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, ··· 395 449 { 396 450 int ret; 397 451 398 - ret = mt76x02_mcu_set_radio_state(&dev->mt76, 
true, true); 452 + ret = mt76x02_mcu_set_radio_state(dev, true, true); 399 453 if (ret) 400 454 return ret; 401 455
+8 -53
drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
··· 65 65 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00); 66 66 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06); 67 67 68 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 68 + if (mt76x02_ext_pa_enabled(dev, band)) { 69 69 mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00); 70 70 mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00); 71 71 } else { ··· 76 76 pa_mode[0] = 0x0000ffff; 77 77 pa_mode[1] = 0x00ff00ff; 78 78 79 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 79 + if (mt76x02_ext_pa_enabled(dev, band)) { 80 80 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400); 81 81 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476); 82 82 } else { ··· 84 84 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); 85 85 } 86 86 87 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) 87 + if (mt76x02_ext_pa_enabled(dev, band)) 88 88 pa_mode_adj = 0x04000000; 89 89 else 90 90 pa_mode_adj = 0; ··· 98 98 mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]); 99 99 mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]); 100 100 101 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 101 + if (mt76x02_ext_pa_enabled(dev, band)) { 102 102 u32 val; 103 103 104 104 if (band == NL80211_BAND_2GHZ) ··· 187 187 dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; 188 188 dev->mt76.rate_power = t; 189 189 190 - mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1); 190 + mt76x02_phy_set_txpower(dev, txp_0, txp_1); 191 191 } 192 192 EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower); 193 193 ··· 196 196 { 197 197 u32 cfg0, cfg1; 198 198 199 - if (mt76x02_ext_pa_enabled(&dev->mt76, band)) { 199 + if (mt76x02_ext_pa_enabled(dev, band)) { 200 200 cfg0 = bw ? 
0x000b0c01 : 0x00101101; 201 201 cfg1 = 0x00011414; 202 202 } else { ··· 209 209 mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, 15); 210 210 } 211 211 EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay); 212 - 213 - void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) 214 - { 215 - int core_val, agc_val; 216 - 217 - switch (width) { 218 - case NL80211_CHAN_WIDTH_80: 219 - core_val = 3; 220 - agc_val = 7; 221 - break; 222 - case NL80211_CHAN_WIDTH_40: 223 - core_val = 2; 224 - agc_val = 3; 225 - break; 226 - default: 227 - core_val = 0; 228 - agc_val = 1; 229 - break; 230 - } 231 - 232 - mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val); 233 - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val); 234 - mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl); 235 - mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl); 236 - } 237 - EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw); 238 - 239 - void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper) 240 - { 241 - switch (band) { 242 - case NL80211_BAND_2GHZ: 243 - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 244 - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 245 - break; 246 - case NL80211_BAND_5GHZ: 247 - mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G); 248 - mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G); 249 - break; 250 - } 251 - 252 - mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M, 253 - primary_upper); 254 - } 255 - EXPORT_SYMBOL_GPL(mt76x2_phy_set_band); 256 212 257 213 void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait) 258 214 { ··· 231 275 dev->cal.tssi_comp_pending = false; 232 276 mt76x2_get_power_info(dev, &txp, chan); 233 277 234 - if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) 278 + if (mt76x02_ext_pa_enabled(dev, chan->band)) 235 279 t.pa_mode = 1; 236 280 237 281 t.cal_mode = BIT(1); ··· 245 289 return; 246 290 247 291 usleep_range(10000, 20000); 248 - 
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD, 249 - chan->hw_value, wait); 292 + mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait); 250 293 dev->cal.dpd_cal_done = true; 251 294 } 252 295 }
+5 -6
drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
··· 130 130 put_unaligned_le32(val, dev->mt76.eeprom.data + i); 131 131 } 132 132 133 - mt76x02_eeprom_parse_hw_cap(&dev->mt76); 133 + mt76x02_eeprom_parse_hw_cap(dev); 134 134 return 0; 135 135 } 136 136 ··· 204 204 if (err < 0) 205 205 return err; 206 206 207 - mt76x02_mac_setaddr(&dev->mt76, 208 - dev->mt76.eeprom.data + MT_EE_MAC_ADDR); 207 + mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); 209 208 dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); 210 209 211 210 mt76x2u_init_beacon_offsets(dev); ··· 236 237 if (err < 0) 237 238 return err; 238 239 239 - mt76x02_phy_set_rxpath(&dev->mt76); 240 - mt76x02_phy_set_txdac(&dev->mt76); 240 + mt76x02_phy_set_rxpath(dev); 241 + mt76x02_phy_set_txdac(dev); 241 242 242 243 return mt76x2u_mac_stop(dev); 243 244 } ··· 302 303 303 304 void mt76x2u_cleanup(struct mt76x02_dev *dev) 304 305 { 305 - mt76x02_mcu_set_radio_state(&dev->mt76, false, false); 306 + mt76x02_mcu_set_radio_state(dev, false, false); 306 307 mt76x2u_stop_hw(dev); 307 308 mt76u_queues_deinit(&dev->mt76); 308 309 mt76u_mcu_deinit(&dev->mt76);
+3 -3
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
··· 32 32 s8 offset = 0; 33 33 u16 eep_val; 34 34 35 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2); 35 + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2); 36 36 37 37 offset = eep_val & 0x7f; 38 38 if ((eep_val & 0xff) == 0xff) ··· 42 42 43 43 eep_val >>= 8; 44 44 if (eep_val == 0x00 || eep_val == 0xff) { 45 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1); 45 + eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1); 46 46 eep_val &= 0xff; 47 47 48 48 if (eep_val == 0x00 || eep_val == 0xff) ··· 67 67 /* init fce */ 68 68 mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); 69 69 70 - eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2); 70 + eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); 71 71 switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) { 72 72 case 0: 73 73 mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+2 -2
drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
··· 50 50 struct mt76x02_dev *dev = hw->priv; 51 51 52 52 if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) 53 - mt76x02_mac_setaddr(&dev->mt76, vif->addr); 53 + mt76x02_mac_setaddr(dev, vif->addr); 54 54 55 - mt76x02_vif_init(&dev->mt76, vif, 0); 55 + mt76x02_vif_init(dev, vif, 0); 56 56 return 0; 57 57 } 58 58
+8 -10
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
··· 137 137 mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val); 138 138 139 139 /* vendor reset */ 140 - mt76x02u_mcu_fw_reset(&dev->mt76); 140 + mt76x02u_mcu_fw_reset(dev); 141 141 usleep_range(5000, 10000); 142 142 143 143 /* enable FCE to send in-band cmd */ ··· 151 151 /* FCE skip_fs_en */ 152 152 mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); 153 153 154 - err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), 154 + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr), 155 155 fw->size - sizeof(*hdr), 156 156 MCU_ROM_PATCH_MAX_PAYLOAD, 157 157 MT76U_MCU_ROM_PATCH_OFFSET); ··· 210 210 dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time); 211 211 212 212 /* vendor reset */ 213 - mt76x02u_mcu_fw_reset(&dev->mt76); 213 + mt76x02u_mcu_fw_reset(dev); 214 214 usleep_range(5000, 10000); 215 215 216 216 /* enable USB_DMA_CFG */ ··· 230 230 mt76_wr(dev, MT_FCE_SKIP_FS, 0x3); 231 231 232 232 /* load ILM */ 233 - err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr), 233 + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr), 234 234 ilm_len, MCU_FW_URB_MAX_PAYLOAD, 235 235 MT76U_MCU_ILM_OFFSET); 236 236 if (err < 0) { ··· 241 241 /* load DLM */ 242 242 if (mt76xx_rev(dev) >= MT76XX_REV_E3) 243 243 dlm_offset += 0x800; 244 - err = mt76x02u_mcu_fw_send_data(&dev->mt76, 245 - fw->data + sizeof(*hdr) + ilm_len, 244 + err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len, 246 245 dlm_len, MCU_FW_URB_MAX_PAYLOAD, 247 246 dlm_offset); 248 247 if (err < 0) { ··· 259 260 mt76_set(dev, MT_MCU_COM_REG0, BIT(1)); 260 261 /* enable FCE to send in-band cmd */ 261 262 mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1); 263 + mt76x02_set_ethtool_fwver(dev, hdr); 262 264 dev_dbg(dev->mt76.dev, "firmware running\n"); 263 - mt76x02_set_ethtool_fwver(&dev->mt76, hdr); 264 265 265 266 out: 266 267 release_firmware(fw); ··· 282 283 { 283 284 int err; 284 285 285 - err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 286 - 1, false); 286 + err 
= mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); 287 287 if (err < 0) 288 288 return err; 289 289 290 - return mt76x02_mcu_set_radio_state(&dev->mt76, true, false); 290 + return mt76x02_mcu_set_radio_state(dev, true, false); 291 291 }
+15 -17
drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
··· 29 29 mt76x2u_mac_stop(dev); 30 30 31 31 if (is_5ghz) 32 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false); 32 + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false); 33 33 34 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false); 35 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false); 36 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false); 37 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false); 34 + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false); 35 + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); 36 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false); 37 + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false); 38 38 39 39 mt76x2u_mac_resume(dev); 40 40 } ··· 69 69 break; 70 70 } 71 71 72 - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76); 72 + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); 73 73 false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, 74 74 mt76_rr(dev, MT_RX_STAT_1)); 75 75 ··· 155 155 mt76x2_configure_tx_delay(dev, chan->band, bw); 156 156 mt76x2_phy_set_txpower(dev); 157 157 158 - mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1); 159 - mt76x2_phy_set_bw(dev, chandef->width, ch_group_index); 158 + mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1); 159 + mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); 160 160 161 161 mt76_rmw(dev, MT_EXT_CCA_CFG, 162 162 (MT_EXT_CCA_CFG_CCA0 | ··· 177 177 mt76_set(dev, MT_BBP(RXO, 13), BIT(10)); 178 178 179 179 if (!dev->cal.init_cal_done) { 180 - u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT); 180 + u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); 181 181 182 182 if (val != 0xff) 183 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 184 - 0, false); 183 + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); 185 184 } 186 185 187 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false); 186 + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 
channel, false); 188 187 189 188 /* Rx LPF calibration */ 190 189 if (!dev->cal.init_cal_done) 191 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false); 190 + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false); 192 191 dev->cal.init_cal_done = true; 193 192 194 193 mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2); ··· 202 203 if (scan) 203 204 return 0; 204 205 205 - if (mt76x02_tssi_enabled(&dev->mt76)) { 206 + if (mt76x2_tssi_enabled(dev)) { 206 207 /* init default values for temp compensation */ 207 208 mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, 208 209 0x38); ··· 217 218 chan = dev->mt76.chandef.chan; 218 219 if (chan->band == NL80211_BAND_5GHZ) 219 220 flag |= BIT(0); 220 - if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band)) 221 + if (mt76x02_ext_pa_enabled(dev, chan->band)) 221 222 flag |= BIT(8); 222 - mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, 223 - flag, false); 223 + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false); 224 224 dev->cal.tssi_cal_done = true; 225 225 } 226 226 }
+2 -1
drivers/net/wireless/mediatek/mt76/tx.c
··· 96 96 { 97 97 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 98 98 99 - if (!ieee80211_is_data_qos(hdr->frame_control)) 99 + if (!ieee80211_is_data_qos(hdr->frame_control) || 100 + !ieee80211_is_data_present(hdr->frame_control)) 100 101 return; 101 102 102 103 mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+1
drivers/net/wireless/mediatek/mt76/usb.c
··· 862 862 .copy = mt76u_copy, 863 863 .wr_rp = mt76u_wr_rp, 864 864 .rd_rp = mt76u_rd_rp, 865 + .type = MT76_BUS_USB, 865 866 }; 866 867 struct mt76_usb *usb = &dev->usb; 867 868
+1 -1
drivers/net/wireless/quantenna/Kconfig
··· 1 1 config WLAN_VENDOR_QUANTENNA 2 2 bool "Quantenna wireless cards support" 3 3 default y 4 - ---help--- 4 + help 5 5 If you have a wireless card belonging to this class, say Y. 6 6 7 7 Note that the answer to this question doesn't directly affect the
+1 -1
drivers/net/wireless/quantenna/qtnfmac/Kconfig
··· 11 11 select QTNFMAC 12 12 select FW_LOADER 13 13 select CRC32 14 - ---help--- 14 + help 15 15 This option adds support for wireless adapters based on Quantenna 16 16 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe. 17 17
+2 -15
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
··· 1 - /* 2 - * Copyright (c) 2015-2016 Quantenna Communications, Inc. 3 - * All rights reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License 7 - * as published by the Free Software Foundation; either version 2 8 - * of the License, or (at your option) any later version. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - */ 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (c) 2018 Quantenna Communications */ 16 3 17 4 #include <linux/kernel.h> 18 5 #include <linux/module.h>
+2 -20
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h
··· 1 - /* 2 - * Copyright (c) 2015-2016 Quantenna Communications, Inc. 3 - * All rights reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License 7 - * as published by the Free Software Foundation; either version 2 8 - * of the License, or (at your option) any later version. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - */ 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* Copyright (c) 2015-2016 Quantenna Communications */ 16 3 17 4 #ifndef _QTN_FMAC_PCIE_IPC_H_ 18 5 #define _QTN_FMAC_PCIE_IPC_H_ ··· 71 84 #define QTN_PCIE_TX_DESC_TQE_BIT BIT(24) 72 85 73 86 #define QTN_EP_LHOST_TQE_PORT 4 74 - 75 - enum qtnf_pcie_bda_ipc_flags { 76 - QTN_PCIE_IPC_FLAG_HBM_MAGIC = BIT(0), 77 - QTN_PCIE_IPC_FLAG_SHM_PIO = BIT(1), 78 - }; 79 87 80 88 enum qtnf_fw_loadtype { 81 89 QTN_FW_DBEGIN,
+5 -240
drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h
··· 1 - /* 2 - * Copyright (c) 2015 Quantenna Communications, Inc. 3 - * All rights reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License 7 - * as published by the Free Software Foundation; either version 2 8 - * of the License, or (at your option) any later version. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - */ 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* Copyright (c) 2015 Quantenna Communications */ 16 3 17 4 #ifndef __PEARL_PCIE_H 18 5 #define __PEARL_PCIE_H 19 6 20 - #define PCIE_GEN2_BASE (0xe9000000) 21 - #define PCIE_GEN3_BASE (0xe7000000) 22 - 23 - #define PEARL_CUR_PCIE_BASE (PCIE_GEN2_BASE) 24 - #define PCIE_HDP_OFFSET (0x2000) 25 - 7 + /* Pearl PCIe HDP registers */ 26 8 #define PCIE_HDP_CTRL(base) ((base) + 0x2c00) 27 9 #define PCIE_HDP_AXI_CTRL(base) ((base) + 0x2c04) 28 10 #define PCIE_HDP_HOST_WR_DESC0(base) ((base) + 0x2c10) ··· 68 86 #define PCIE_HDP_TX_HOST_Q_RD_PTR(base) ((base) + 0x2d3c) 69 87 #define PCIE_HDP_TX_HOST_Q_STS(base) ((base) + 0x2d40) 70 88 71 - /* Host HBM pool registers */ 89 + /* Pearl PCIe HBM pool registers */ 72 90 #define PCIE_HHBM_CSR_REG(base) ((base) + 0x2e00) 73 91 #define PCIE_HHBM_Q_BASE_REG(base) ((base) + 0x2e04) 74 92 #define PCIE_HHBM_Q_LIMIT_REG(base) ((base) + 0x2e08) ··· 86 104 #define HBM_INT_STATUS(base) ((base) + 0x2f9c) 87 105 #define PCIE_HHBM_POOL_CNFIG(base) ((base) + 0x2f9c) 88 106 89 - /* host HBM bit field definition */ 107 + /* Pearl PCIe HBM bit field definitions */ 90 108 #define HHBM_CONFIG_SOFT_RESET (BIT(8)) 91 109 #define HHBM_WR_REQ (BIT(0)) 92 110 #define HHBM_RD_REQ (BIT(1)) 93 111 #define HHBM_DONE (BIT(31)) 94 112 #define HHBM_64BIT (BIT(10)) 95 - 96 - /* 
offsets for dual PCIE */ 97 - #define PCIE_PORT_LINK_CTL(base) ((base) + 0x0710) 98 - #define PCIE_GEN2_CTL(base) ((base) + 0x080C) 99 - #define PCIE_GEN3_OFF(base) ((base) + 0x0890) 100 - #define PCIE_ATU_CTRL1(base) ((base) + 0x0904) 101 - #define PCIE_ATU_CTRL2(base) ((base) + 0x0908) 102 - #define PCIE_ATU_BASE_LOW(base) ((base) + 0x090C) 103 - #define PCIE_ATU_BASE_HIGH(base) ((base) + 0x0910) 104 - #define PCIE_ATU_BASE_LIMIT(base) ((base) + 0x0914) 105 - #define PCIE_ATU_TGT_LOW(base) ((base) + 0x0918) 106 - #define PCIE_ATU_TGT_HIGH(base) ((base) + 0x091C) 107 - #define PCIE_DMA_WR_ENABLE(base) ((base) + 0x097C) 108 - #define PCIE_DMA_WR_CHWTLOW(base) ((base) + 0x0988) 109 - #define PCIE_DMA_WR_CHWTHIG(base) ((base) + 0x098C) 110 - #define PCIE_DMA_WR_INTSTS(base) ((base) + 0x09BC) 111 - #define PCIE_DMA_WR_INTMASK(base) ((base) + 0x09C4) 112 - #define PCIE_DMA_WR_INTCLER(base) ((base) + 0x09C8) 113 - #define PCIE_DMA_WR_DONE_IMWR_ADDR_L(base) ((base) + 0x09D0) 114 - #define PCIE_DMA_WR_DONE_IMWR_ADDR_H(base) ((base) + 0x09D4) 115 - #define PCIE_DMA_WR_ABORT_IMWR_ADDR_L(base) ((base) + 0x09D8) 116 - #define PCIE_DMA_WR_ABORT_IMWR_ADDR_H(base) ((base) + 0x09DC) 117 - #define PCIE_DMA_WR_IMWR_DATA(base) ((base) + 0x09E0) 118 - #define PCIE_DMA_WR_LL_ERR_EN(base) ((base) + 0x0A00) 119 - #define PCIE_DMA_WR_DOORBELL(base) ((base) + 0x0980) 120 - #define PCIE_DMA_RD_ENABLE(base) ((base) + 0x099C) 121 - #define PCIE_DMA_RD_DOORBELL(base) ((base) + 0x09A0) 122 - #define PCIE_DMA_RD_CHWTLOW(base) ((base) + 0x09A8) 123 - #define PCIE_DMA_RD_CHWTHIG(base) ((base) + 0x09AC) 124 - #define PCIE_DMA_RD_INTSTS(base) ((base) + 0x0A10) 125 - #define PCIE_DMA_RD_INTMASK(base) ((base) + 0x0A18) 126 - #define PCIE_DMA_RD_INTCLER(base) ((base) + 0x0A1C) 127 - #define PCIE_DMA_RD_ERR_STS_L(base) ((base) + 0x0A24) 128 - #define PCIE_DMA_RD_ERR_STS_H(base) ((base) + 0x0A28) 129 - #define PCIE_DMA_RD_LL_ERR_EN(base) ((base) + 0x0A34) 130 - #define PCIE_DMA_RD_DONE_IMWR_ADDR_L(base) 
((base) + 0x0A3C) 131 - #define PCIE_DMA_RD_DONE_IMWR_ADDR_H(base) ((base) + 0x0A40) 132 - #define PCIE_DMA_RD_ABORT_IMWR_ADDR_L(base) ((base) + 0x0A44) 133 - #define PCIE_DMA_RD_ABORT_IMWR_ADDR_H(base) ((base) + 0x0A48) 134 - #define PCIE_DMA_RD_IMWR_DATA(base) ((base) + 0x0A4C) 135 - #define PCIE_DMA_CHNL_CONTEXT(base) ((base) + 0x0A6C) 136 - #define PCIE_DMA_CHNL_CNTRL(base) ((base) + 0x0A70) 137 - #define PCIE_DMA_XFR_SIZE(base) ((base) + 0x0A78) 138 - #define PCIE_DMA_SAR_LOW(base) ((base) + 0x0A7C) 139 - #define PCIE_DMA_SAR_HIGH(base) ((base) + 0x0A80) 140 - #define PCIE_DMA_DAR_LOW(base) ((base) + 0x0A84) 141 - #define PCIE_DMA_DAR_HIGH(base) ((base) + 0x0A88) 142 - #define PCIE_DMA_LLPTR_LOW(base) ((base) + 0x0A8C) 143 - #define PCIE_DMA_LLPTR_HIGH(base) ((base) + 0x0A90) 144 - #define PCIE_DMA_WRLL_ERR_ENB(base) ((base) + 0x0A00) 145 - #define PCIE_DMA_RDLL_ERR_ENB(base) ((base) + 0x0A34) 146 - #define PCIE_DMABD_CHNL_CNTRL(base) ((base) + 0x8000) 147 - #define PCIE_DMABD_XFR_SIZE(base) ((base) + 0x8004) 148 - #define PCIE_DMABD_SAR_LOW(base) ((base) + 0x8008) 149 - #define PCIE_DMABD_SAR_HIGH(base) ((base) + 0x800c) 150 - #define PCIE_DMABD_DAR_LOW(base) ((base) + 0x8010) 151 - #define PCIE_DMABD_DAR_HIGH(base) ((base) + 0x8014) 152 - #define PCIE_DMABD_LLPTR_LOW(base) ((base) + 0x8018) 153 - #define PCIE_DMABD_LLPTR_HIGH(base) ((base) + 0x801c) 154 - #define PCIE_WRDMA0_CHNL_CNTRL(base) ((base) + 0x8000) 155 - #define PCIE_WRDMA0_XFR_SIZE(base) ((base) + 0x8004) 156 - #define PCIE_WRDMA0_SAR_LOW(base) ((base) + 0x8008) 157 - #define PCIE_WRDMA0_SAR_HIGH(base) ((base) + 0x800c) 158 - #define PCIE_WRDMA0_DAR_LOW(base) ((base) + 0x8010) 159 - #define PCIE_WRDMA0_DAR_HIGH(base) ((base) + 0x8014) 160 - #define PCIE_WRDMA0_LLPTR_LOW(base) ((base) + 0x8018) 161 - #define PCIE_WRDMA0_LLPTR_HIGH(base) ((base) + 0x801c) 162 - #define PCIE_WRDMA1_CHNL_CNTRL(base) ((base) + 0x8020) 163 - #define PCIE_WRDMA1_XFR_SIZE(base) ((base) + 0x8024) 164 - #define 
PCIE_WRDMA1_SAR_LOW(base) ((base) + 0x8028) 165 - #define PCIE_WRDMA1_SAR_HIGH(base) ((base) + 0x802c) 166 - #define PCIE_WRDMA1_DAR_LOW(base) ((base) + 0x8030) 167 - #define PCIE_WRDMA1_DAR_HIGH(base) ((base) + 0x8034) 168 - #define PCIE_WRDMA1_LLPTR_LOW(base) ((base) + 0x8038) 169 - #define PCIE_WRDMA1_LLPTR_HIGH(base) ((base) + 0x803c) 170 - #define PCIE_RDDMA0_CHNL_CNTRL(base) ((base) + 0x8040) 171 - #define PCIE_RDDMA0_XFR_SIZE(base) ((base) + 0x8044) 172 - #define PCIE_RDDMA0_SAR_LOW(base) ((base) + 0x8048) 173 - #define PCIE_RDDMA0_SAR_HIGH(base) ((base) + 0x804c) 174 - #define PCIE_RDDMA0_DAR_LOW(base) ((base) + 0x8050) 175 - #define PCIE_RDDMA0_DAR_HIGH(base) ((base) + 0x8054) 176 - #define PCIE_RDDMA0_LLPTR_LOW(base) ((base) + 0x8058) 177 - #define PCIE_RDDMA0_LLPTR_HIGH(base) ((base) + 0x805c) 178 - #define PCIE_RDDMA1_CHNL_CNTRL(base) ((base) + 0x8060) 179 - #define PCIE_RDDMA1_XFR_SIZE(base) ((base) + 0x8064) 180 - #define PCIE_RDDMA1_SAR_LOW(base) ((base) + 0x8068) 181 - #define PCIE_RDDMA1_SAR_HIGH(base) ((base) + 0x806c) 182 - #define PCIE_RDDMA1_DAR_LOW(base) ((base) + 0x8070) 183 - #define PCIE_RDDMA1_DAR_HIGH(base) ((base) + 0x8074) 184 - #define PCIE_RDDMA1_LLPTR_LOW(base) ((base) + 0x8078) 185 - #define PCIE_RDDMA1_LLPTR_HIGH(base) ((base) + 0x807c) 186 - 187 - #define PCIE_ID(base) ((base) + 0x0000) 188 - #define PCIE_CMD(base) ((base) + 0x0004) 189 - #define PCIE_BAR(base, n) ((base) + 0x0010 + ((n) << 2)) 190 - #define PCIE_CAP_PTR(base) ((base) + 0x0034) 191 - #define PCIE_MSI_LBAR(base) ((base) + 0x0054) 192 - #define PCIE_MSI_CTRL(base) ((base) + 0x0050) 193 - #define PCIE_MSI_ADDR_L(base) ((base) + 0x0054) 194 - #define PCIE_MSI_ADDR_H(base) ((base) + 0x0058) 195 - #define PCIE_MSI_DATA(base) ((base) + 0x005C) 196 - #define PCIE_MSI_MASK_BIT(base) ((base) + 0x0060) 197 - #define PCIE_MSI_PEND_BIT(base) ((base) + 0x0064) 198 - #define PCIE_DEVCAP(base) ((base) + 0x0074) 199 - #define PCIE_DEVCTLSTS(base) ((base) + 0x0078) 200 - 201 - 
#define PCIE_CMDSTS(base) ((base) + 0x0004) 202 - #define PCIE_LINK_STAT(base) ((base) + 0x80) 203 - #define PCIE_LINK_CTL2(base) ((base) + 0xa0) 204 - #define PCIE_ASPM_L1_CTRL(base) ((base) + 0x70c) 205 - #define PCIE_ASPM_LINK_CTRL(base) (PCIE_LINK_STAT) 206 - #define PCIE_ASPM_L1_SUBSTATE_TIMING(base) ((base) + 0xB44) 207 - #define PCIE_L1SUB_CTRL1(base) ((base) + 0x150) 208 - #define PCIE_PMCSR(base) ((base) + 0x44) 209 - #define PCIE_CFG_SPACE_LIMIT(base) ((base) + 0x100) 210 - 211 - /* PCIe link defines */ 212 - #define PEARL_PCIE_LINKUP (0x7) 213 - #define PEARL_PCIE_DATA_LINK (BIT(0)) 214 - #define PEARL_PCIE_PHY_LINK (BIT(1)) 215 - #define PEARL_PCIE_LINK_RST (BIT(3)) 216 - #define PEARL_PCIE_FATAL_ERR (BIT(5)) 217 - #define PEARL_PCIE_NONFATAL_ERR (BIT(6)) 218 - 219 - /* PCIe Lane defines */ 220 - #define PCIE_G2_LANE_X1 ((BIT(0)) << 16) 221 - #define PCIE_G2_LANE_X2 ((BIT(0) | BIT(1)) << 16) 222 - 223 - /* PCIe DLL link enable */ 224 - #define PCIE_DLL_LINK_EN ((BIT(0)) << 5) 225 - 226 - #define PCIE_LINK_GEN1 (BIT(0)) 227 - #define PCIE_LINK_GEN2 (BIT(1)) 228 - #define PCIE_LINK_GEN3 (BIT(2)) 229 - #define PCIE_LINK_MODE(x) (((x) >> 16) & 0x7) 230 - 231 - #define MSI_EN (BIT(0)) 232 - #define MSI_64_EN (BIT(7)) 233 - #define PCIE_MSI_ADDR_OFFSET(a) ((a) & 0xFFFF) 234 - #define PCIE_MSI_ADDR_ALIGN(a) ((a) & (~0xFFFF)) 235 - 236 - #define PCIE_BAR_MASK(base, n) ((base) + 0x1010 + ((n) << 2)) 237 - #define PCIE_MAX_BAR (6) 238 - 239 - #define PCIE_ATU_VIEW(base) ((base) + 0x0900) 240 - #define PCIE_ATU_CTL1(base) ((base) + 0x0904) 241 - #define PCIE_ATU_CTL2(base) ((base) + 0x0908) 242 - #define PCIE_ATU_LBAR(base) ((base) + 0x090c) 243 - #define PCIE_ATU_UBAR(base) ((base) + 0x0910) 244 - #define PCIE_ATU_LAR(base) ((base) + 0x0914) 245 - #define PCIE_ATU_LTAR(base) ((base) + 0x0918) 246 - #define PCIE_ATU_UTAR(base) ((base) + 0x091c) 247 - 248 - #define PCIE_MSI_ADDR_LOWER(base) ((base) + 0x0820) 249 - #define PCIE_MSI_ADDR_UPPER(base) ((base) + 0x0824) 
250 - #define PCIE_MSI_ENABLE(base) ((base) + 0x0828) 251 - #define PCIE_MSI_MASK_RC(base) ((base) + 0x082c) 252 - #define PCIE_MSI_STATUS(base) ((base) + 0x0830) 253 - #define PEARL_PCIE_MSI_REGION (0xce000000) 254 - #define PEARL_PCIE_MSI_DATA (0) 255 - #define PCIE_MSI_GPIO(base) ((base) + 0x0888) 256 - 257 - #define PCIE_HDP_HOST_QUEUE_FULL (BIT(17)) 258 - #define USE_BAR_MATCH_MODE 259 - #define PCIE_ATU_OB_REGION (BIT(0)) 260 - #define PCIE_ATU_EN_REGION (BIT(31)) 261 - #define PCIE_ATU_EN_MATCH (BIT(30)) 262 - #define PCIE_BASE_REGION (0xb0000000) 263 - #define PCIE_MEM_MAP_SIZE (512 * 1024) 264 - 265 - #define PCIE_OB_REG_REGION (0xcf000000) 266 - #define PCIE_CONFIG_REGION (0xcf000000) 267 - #define PCIE_CONFIG_SIZE (4096) 268 - #define PCIE_CONFIG_CH (1) 269 - 270 - /* inbound mapping */ 271 - #define PCIE_IB_BAR0 (0x00000000) /* ddr */ 272 - #define PCIE_IB_BAR0_CH (0) 273 - #define PCIE_IB_BAR3 (0xe0000000) /* sys_reg */ 274 - #define PCIE_IB_BAR3_CH (1) 275 - 276 - /* outbound mapping */ 277 - #define PCIE_MEM_CH (0) 278 - #define PCIE_REG_CH (1) 279 - #define PCIE_MEM_REGION (0xc0000000) 280 - #define PCIE_MEM_SIZE (0x000fffff) 281 - #define PCIE_MEM_TAR (0x80000000) 282 - 283 - #define PCIE_MSI_REGION (0xce000000) 284 - #define PCIE_MSI_SIZE (KBYTE(4) - 1) 285 - #define PCIE_MSI_CH (1) 286 - 287 - /* size of config region */ 288 - #define PCIE_CFG_SIZE (0x0000ffff) 289 - 290 - #define PCIE_ATU_DIR_IB (BIT(31)) 291 - #define PCIE_ATU_DIR_OB (0) 292 - #define PCIE_ATU_DIR_CFG (2) 293 - #define PCIE_ATU_DIR_MATCH_IB (BIT(31) | BIT(30)) 294 - 295 - #define PCIE_DMA_WR_0 (0) 296 - #define PCIE_DMA_WR_1 (1) 297 - #define PCIE_DMA_RD_0 (2) 298 - #define PCIE_DMA_RD_1 (3) 299 - 300 - #define PCIE_DMA_CHNL_CNTRL_CB (BIT(0)) 301 - #define PCIE_DMA_CHNL_CNTRL_TCB (BIT(1)) 302 - #define PCIE_DMA_CHNL_CNTRL_LLP (BIT(2)) 303 - #define PCIE_DMA_CHNL_CNTRL_LIE (BIT(3)) 304 - #define PCIE_DMA_CHNL_CNTRL_RIE (BIT(4)) 305 - #define PCIE_DMA_CHNL_CNTRL_CSS (BIT(8)) 306 
- #define PCIE_DMA_CHNL_CNTRL_LLE (BIT(9)) 307 - #define PCIE_DMA_CHNL_CNTRL_TLP (BIT(26)) 308 - 309 - #define PCIE_DMA_CHNL_CONTEXT_RD (BIT(31)) 310 - #define PCIE_DMA_CHNL_CONTEXT_WR (0) 311 - #define PCIE_MAX_BAR (6) 312 113 313 114 /* PCIe HDP interrupt status definition */ 314 115 #define PCIE_HDP_INT_EP_RXDMA (BIT(0))
+1 -4
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
··· 4918 4918 struct device *dev = &priv->udev->dev; 4919 4919 u32 queue, rts_rate; 4920 4920 u16 pktlen = skb->len; 4921 - u16 seq_number; 4922 4921 u16 rate_flag = tx_info->control.rates[0].flags; 4923 4922 int tx_desc_size = priv->fops->tx_desc_size; 4924 4923 int ret; 4925 - bool usedesc40, ampdu_enable, sgi = false, short_preamble = false; 4924 + bool ampdu_enable, sgi = false, short_preamble = false; 4926 4925 4927 4926 if (skb_headroom(skb) < tx_desc_size) { 4928 4927 dev_warn(dev, ··· 4945 4946 if (ieee80211_is_action(hdr->frame_control)) 4946 4947 rtl8xxxu_dump_action(dev, hdr); 4947 4948 4948 - usedesc40 = (tx_desc_size == 40); 4949 4949 tx_info->rate_driver_data[0] = hw; 4950 4950 4951 4951 if (control && control->sta) ··· 5011 5013 else 5012 5014 rts_rate = 0; 5013 5015 5014 - seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 5015 5016 5016 5017 priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble, 5017 5018 ampdu_enable, rts_rate);
+1 -70
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
··· 3404 3404 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); 3405 3405 } 3406 3406 3407 - static u8 _rtl8821ae_mrate_idx_to_arfr_id( 3408 - struct ieee80211_hw *hw, u8 rate_index, 3409 - enum wireless_mode wirelessmode) 3410 - { 3411 - struct rtl_priv *rtlpriv = rtl_priv(hw); 3412 - struct rtl_phy *rtlphy = &rtlpriv->phy; 3413 - u8 ret = 0; 3414 - switch (rate_index) { 3415 - case RATR_INX_WIRELESS_NGB: 3416 - if (rtlphy->rf_type == RF_1T1R) 3417 - ret = 1; 3418 - else 3419 - ret = 0; 3420 - ; break; 3421 - case RATR_INX_WIRELESS_N: 3422 - case RATR_INX_WIRELESS_NG: 3423 - if (rtlphy->rf_type == RF_1T1R) 3424 - ret = 5; 3425 - else 3426 - ret = 4; 3427 - ; break; 3428 - case RATR_INX_WIRELESS_NB: 3429 - if (rtlphy->rf_type == RF_1T1R) 3430 - ret = 3; 3431 - else 3432 - ret = 2; 3433 - ; break; 3434 - case RATR_INX_WIRELESS_GB: 3435 - ret = 6; 3436 - break; 3437 - case RATR_INX_WIRELESS_G: 3438 - ret = 7; 3439 - break; 3440 - case RATR_INX_WIRELESS_B: 3441 - ret = 8; 3442 - break; 3443 - case RATR_INX_WIRELESS_MC: 3444 - if ((wirelessmode == WIRELESS_MODE_B) 3445 - || (wirelessmode == WIRELESS_MODE_G) 3446 - || (wirelessmode == WIRELESS_MODE_N_24G) 3447 - || (wirelessmode == WIRELESS_MODE_AC_24G)) 3448 - ret = 6; 3449 - else 3450 - ret = 7; 3451 - case RATR_INX_WIRELESS_AC_5N: 3452 - if (rtlphy->rf_type == RF_1T1R) 3453 - ret = 10; 3454 - else 3455 - ret = 9; 3456 - break; 3457 - case RATR_INX_WIRELESS_AC_24N: 3458 - if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { 3459 - if (rtlphy->rf_type == RF_1T1R) 3460 - ret = 10; 3461 - else 3462 - ret = 9; 3463 - } else { 3464 - if (rtlphy->rf_type == RF_1T1R) 3465 - ret = 11; 3466 - else 3467 - ret = 12; 3468 - } 3469 - break; 3470 - default: 3471 - ret = 0; break; 3472 - } 3473 - return ret; 3474 - } 3475 - 3476 3407 static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate) 3477 3408 { 3478 3409 u8 i, j, tmp_rate; ··· 3692 3761 break; 3693 3762 } 3694 3763 3695 - ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, 
ratr_index, wirelessmode); 3764 + ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode); 3696 3765 sta_entry->ratr_index = ratr_index; 3697 3766 ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode, 3698 3767 ratr_bitmap);
+3 -1
include/linux/qcom_scm.h
··· 1 - /* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. 1 + /* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved. 2 2 * Copyright (C) 2015 Linaro Ltd. 3 3 * 4 4 * This program is free software; you can redistribute it and/or modify ··· 33 33 34 34 #define QCOM_SCM_VMID_HLOS 0x3 35 35 #define QCOM_SCM_VMID_MSS_MSA 0xF 36 + #define QCOM_SCM_VMID_WLAN 0x18 37 + #define QCOM_SCM_VMID_WLAN_CE 0x19 36 38 #define QCOM_SCM_PERM_READ 0x4 37 39 #define QCOM_SCM_PERM_WRITE 0x2 38 40 #define QCOM_SCM_PERM_EXEC 0x1