Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ath-next-20250114' of git://git.kernel.org/pub/scm/linux/kernel/git/ath/ath

ath.git patches for v6.14

This development cycle again featured multiple patchsets for ath12k to
support the new 802.11be MLO feature, this time including the device
grouping infrastructure and the advertisement of MLO support to the
wireless core. However, the MLO feature is still considered to be
incomplete.

In addition, there was the usual set of bug fixes and cleanups, mostly
in ath12k, but also in ath9k.

+2723 -547
+204
Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + # Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 3 + %YAML 1.2 4 + --- 5 + $id: http://devicetree.org/schemas/net/wireless/qcom,ath12k-wsi.yaml# 6 + $schema: http://devicetree.org/meta-schemas/core.yaml# 7 + 8 + title: Qualcomm Technologies ath12k wireless devices (PCIe) with WSI interface 9 + 10 + maintainers: 11 + - Jeff Johnson <jjohnson@kernel.org> 12 + - Kalle Valo <kvalo@kernel.org> 13 + 14 + description: | 15 + Qualcomm Technologies IEEE 802.11be PCIe devices with WSI interface. 16 + 17 + The ath12k devices (QCN9274) feature WSI support. WSI stands for 18 + WLAN Serial Interface. It is used for the exchange of specific 19 + control information across radios based on the doorbell mechanism. 20 + This WSI connection is essential to exchange control information 21 + among these devices. 22 + 23 + The WSI interface includes TX and RX ports, which are used to connect 24 + multiple WSI-supported devices together, forming a WSI group. 25 + 26 + Diagram to represent one WSI connection (one WSI group) among 27 + three devices. 28 + 29 + +-------+ +-------+ +-------+ 30 + | pcie1 | | pcie2 | | pcie3 | 31 + | | | | | | 32 + +----->| wsi |------->| wsi |------->| wsi |-----+ 33 + | | grp 0 | | grp 0 | | grp 0 | | 34 + | +-------+ +-------+ +-------+ | 35 + +------------------------------------------------------+ 36 + 37 + Diagram to represent two WSI connections (two separate WSI groups) 38 + among four devices. 
39 + 40 + +-------+ +-------+ +-------+ +-------+ 41 + | pcie0 | | pcie1 | | pcie2 | | pcie3 | 42 + | | | | | | | | 43 + +-->| wsi |--->| wsi |--+ +-->| wsi |--->| wsi |--+ 44 + | | grp 0 | | grp 0 | | | | grp 1 | | grp 1 | | 45 + | +-------+ +-------+ | | +-------+ +-------+ | 46 + +---------------------------+ +---------------------------+ 47 + 48 + properties: 49 + compatible: 50 + enum: 51 + - pci17cb,1109 # QCN9274 52 + 53 + reg: 54 + maxItems: 1 55 + 56 + qcom,ath12k-calibration-variant: 57 + $ref: /schemas/types.yaml#/definitions/string 58 + description: 59 + String to uniquely identify variant of the calibration data for designs 60 + with colliding bus and device ids 61 + 62 + qcom,wsi-controller: 63 + $ref: /schemas/types.yaml#/definitions/flag 64 + description: 65 + The WSI controller device in the WSI group aids (is capable) to 66 + synchronize the Timing Synchronization Function (TSF) clock across 67 + all devices in the WSI group. 68 + 69 + ports: 70 + $ref: /schemas/graph.yaml#/properties/ports 71 + properties: 72 + port@0: 73 + $ref: /schemas/graph.yaml#/properties/port 74 + description: 75 + This is the TX port of WSI interface. It is attached to the RX 76 + port of the next device in the WSI connection. 77 + 78 + port@1: 79 + $ref: /schemas/graph.yaml#/properties/port 80 + description: 81 + This is the RX port of WSI interface. It is attached to the TX 82 + port of the previous device in the WSI connection. 
83 + 84 + required: 85 + - compatible 86 + - reg 87 + 88 + additionalProperties: false 89 + 90 + examples: 91 + - | 92 + pcie { 93 + #address-cells = <3>; 94 + #size-cells = <2>; 95 + 96 + pcie@0 { 97 + device_type = "pci"; 98 + reg = <0x0 0x0 0x0 0x0 0x0>; 99 + #address-cells = <3>; 100 + #size-cells = <2>; 101 + ranges; 102 + 103 + wifi@0 { 104 + compatible = "pci17cb,1109"; 105 + reg = <0x0 0x0 0x0 0x0 0x0>; 106 + 107 + qcom,ath12k-calibration-variant = "RDP433_1"; 108 + 109 + ports { 110 + #address-cells = <1>; 111 + #size-cells = <0>; 112 + 113 + port@0 { 114 + reg = <0>; 115 + 116 + wifi1_wsi_tx: endpoint { 117 + remote-endpoint = <&wifi2_wsi_rx>; 118 + }; 119 + }; 120 + 121 + port@1 { 122 + reg = <1>; 123 + 124 + wifi1_wsi_rx: endpoint { 125 + remote-endpoint = <&wifi3_wsi_tx>; 126 + }; 127 + }; 128 + }; 129 + }; 130 + }; 131 + 132 + pcie@1 { 133 + device_type = "pci"; 134 + reg = <0x0 0x0 0x1 0x0 0x0>; 135 + #address-cells = <3>; 136 + #size-cells = <2>; 137 + ranges; 138 + 139 + wifi@0 { 140 + compatible = "pci17cb,1109"; 141 + reg = <0x0 0x0 0x0 0x0 0x0>; 142 + 143 + qcom,ath12k-calibration-variant = "RDP433_2"; 144 + qcom,wsi-controller; 145 + 146 + ports { 147 + #address-cells = <1>; 148 + #size-cells = <0>; 149 + 150 + port@0 { 151 + reg = <0>; 152 + 153 + wifi2_wsi_tx: endpoint { 154 + remote-endpoint = <&wifi3_wsi_rx>; 155 + }; 156 + }; 157 + 158 + port@1 { 159 + reg = <1>; 160 + 161 + wifi2_wsi_rx: endpoint { 162 + remote-endpoint = <&wifi1_wsi_tx>; 163 + }; 164 + }; 165 + }; 166 + }; 167 + }; 168 + 169 + pcie@2 { 170 + device_type = "pci"; 171 + reg = <0x0 0x0 0x2 0x0 0x0>; 172 + #address-cells = <3>; 173 + #size-cells = <2>; 174 + ranges; 175 + 176 + wifi@0 { 177 + compatible = "pci17cb,1109"; 178 + reg = <0x0 0x0 0x0 0x0 0x0>; 179 + 180 + qcom,ath12k-calibration-variant = "RDP433_3"; 181 + 182 + ports { 183 + #address-cells = <1>; 184 + #size-cells = <0>; 185 + 186 + port@0 { 187 + reg = <0>; 188 + 189 + wifi3_wsi_tx: endpoint { 190 + 
remote-endpoint = <&wifi1_wsi_rx>; 191 + }; 192 + }; 193 + 194 + port@1 { 195 + reg = <1>; 196 + 197 + wifi3_wsi_rx: endpoint { 198 + remote-endpoint = <&wifi2_wsi_tx>; 199 + }; 200 + }; 201 + }; 202 + }; 203 + }; 204 + };
+289 -26
drivers/net/wireless/ath/ath12k/core.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <linux/module.h> ··· 9 9 #include <linux/remoteproc.h> 10 10 #include <linux/firmware.h> 11 11 #include <linux/of.h> 12 + #include <linux/of_graph.h> 12 13 #include "core.h" 13 14 #include "dp_tx.h" 14 15 #include "dp_rx.h" ··· 887 886 ath12k_mac_destroy(ag); 888 887 } 889 888 889 + static int __ath12k_mac_mlo_ready(struct ath12k *ar) 890 + { 891 + int ret; 892 + 893 + ret = ath12k_wmi_mlo_ready(ar); 894 + if (ret) { 895 + ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n", 896 + ar->pdev_idx, ret); 897 + return ret; 898 + } 899 + 900 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n", 901 + ar->pdev_idx); 902 + 903 + return 0; 904 + } 905 + 906 + int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag) 907 + { 908 + struct ath12k_hw *ah; 909 + struct ath12k *ar; 910 + int ret; 911 + int i, j; 912 + 913 + for (i = 0; i < ag->num_hw; i++) { 914 + ah = ag->ah[i]; 915 + if (!ah) 916 + continue; 917 + 918 + for_each_ar(ah, ar, j) { 919 + ar = &ah->radio[j]; 920 + ret = __ath12k_mac_mlo_ready(ar); 921 + if (ret) 922 + goto out; 923 + } 924 + } 925 + 926 + out: 927 + return ret; 928 + } 929 + 930 + static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag) 931 + { 932 + int ret, i; 933 + 934 + if (!ag->mlo_capable || ag->num_devices == 1) 935 + return 0; 936 + 937 + ret = ath12k_mac_mlo_setup(ag); 938 + if (ret) 939 + return ret; 940 + 941 + for (i = 0; i < ag->num_devices; i++) 942 + ath12k_dp_partner_cc_init(ag->ab[i]); 943 + 944 + ret = ath12k_mac_mlo_ready(ag); 945 + if (ret) 946 + goto err_mlo_teardown; 947 + 948 + return 0; 949 + 950 + err_mlo_teardown: 951 + ath12k_mac_mlo_teardown(ag); 952 + 953 + return ret; 954 + } 955 + 
890 956 static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag) 891 957 { 892 958 struct ath12k_base *ab; ··· 968 900 if (WARN_ON(ret)) 969 901 return ret; 970 902 971 - ret = ath12k_mac_register(ag); 903 + ret = ath12k_core_mlo_setup(ag); 972 904 if (WARN_ON(ret)) 973 905 goto err_mac_destroy; 906 + 907 + ret = ath12k_mac_register(ag); 908 + if (WARN_ON(ret)) 909 + goto err_mlo_teardown; 974 910 975 911 set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags); 976 912 ··· 1009 937 err: 1010 938 ath12k_core_hw_group_stop(ag); 1011 939 return ret; 940 + 941 + err_mlo_teardown: 942 + ath12k_mac_mlo_teardown(ag); 1012 943 1013 944 err_mac_destroy: 1014 945 ath12k_mac_destroy(ag); ··· 1173 1098 static void ath12k_rfkill_work(struct work_struct *work) 1174 1099 { 1175 1100 struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work); 1101 + struct ath12k_hw_group *ag = ab->ag; 1176 1102 struct ath12k *ar; 1177 1103 struct ath12k_hw *ah; 1178 1104 struct ieee80211_hw *hw; ··· 1184 1108 rfkill_radio_on = ab->rfkill_radio_on; 1185 1109 spin_unlock_bh(&ab->base_lock); 1186 1110 1187 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1188 - ah = ath12k_ab_to_ah(ab, i); 1111 + for (i = 0; i < ag->num_hw; i++) { 1112 + ah = ath12k_ag_to_ah(ag, i); 1189 1113 if (!ah) 1190 1114 continue; 1191 1115 ··· 1225 1149 1226 1150 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) 1227 1151 { 1152 + struct ath12k_hw_group *ag = ab->ag; 1228 1153 struct ath12k *ar; 1229 1154 struct ath12k_hw *ah; 1230 1155 int i, j; ··· 1237 1160 if (ab->is_reset) 1238 1161 set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); 1239 1162 1240 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1241 - ah = ath12k_ab_to_ah(ab, i); 1163 + for (i = 0; i < ag->num_hw; i++) { 1164 + ah = ath12k_ag_to_ah(ag, i); 1242 1165 if (!ah || ah->state == ATH12K_HW_STATE_OFF) 1243 1166 continue; 1244 1167 ··· 1272 1195 1273 1196 static void ath12k_core_post_reconfigure_recovery(struct 
ath12k_base *ab) 1274 1197 { 1198 + struct ath12k_hw_group *ag = ab->ag; 1275 1199 struct ath12k_hw *ah; 1276 1200 struct ath12k *ar; 1277 1201 int i, j; 1278 1202 1279 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1280 - ah = ath12k_ab_to_ah(ab, i); 1203 + for (i = 0; i < ag->num_hw; i++) { 1204 + ah = ath12k_ag_to_ah(ag, i); 1281 1205 if (!ah || ah->state == ATH12K_HW_STATE_OFF) 1282 1206 continue; 1283 1207 ··· 1321 1243 static void ath12k_core_restart(struct work_struct *work) 1322 1244 { 1323 1245 struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work); 1246 + struct ath12k_hw_group *ag = ab->ag; 1324 1247 struct ath12k_hw *ah; 1325 1248 int ret, i; 1326 1249 ··· 1340 1261 ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); 1341 1262 } 1342 1263 1343 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 1344 - ah = ath12k_ab_to_ah(ab, i); 1264 + for (i = 0; i < ag->num_hw; i++) { 1265 + ah = ath12k_ag_to_ah(ab->ag, i); 1345 1266 ieee80211_restart_hw(ah->hw); 1346 1267 } 1347 1268 } ··· 1462 1383 return (ag->num_probed == ag->num_devices); 1463 1384 } 1464 1385 1465 - static struct ath12k_hw_group *ath12k_core_hw_group_alloc(u8 id, u8 max_devices) 1386 + static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab) 1466 1387 { 1467 1388 struct ath12k_hw_group *ag; 1389 + int count = 0; 1468 1390 1469 1391 lockdep_assert_held(&ath12k_hw_group_mutex); 1392 + 1393 + list_for_each_entry(ag, &ath12k_hw_group_list, list) 1394 + count++; 1470 1395 1471 1396 ag = kzalloc(sizeof(*ag), GFP_KERNEL); 1472 1397 if (!ag) 1473 1398 return NULL; 1474 1399 1475 - ag->id = id; 1476 - ag->num_devices = max_devices; 1400 + ag->id = count; 1477 1401 list_add(&ag->list, &ath12k_hw_group_list); 1478 1402 mutex_init(&ag->mutex); 1403 + ag->mlo_capable = false; 1479 1404 1480 1405 return ag; 1481 1406 } ··· 1494 1411 mutex_unlock(&ath12k_hw_group_mutex); 1495 1412 } 1496 1413 1414 + static struct ath12k_hw_group 
*ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab) 1415 + { 1416 + struct ath12k_hw_group *ag; 1417 + int i; 1418 + 1419 + if (!ab->dev->of_node) 1420 + return NULL; 1421 + 1422 + list_for_each_entry(ag, &ath12k_hw_group_list, list) 1423 + for (i = 0; i < ag->num_devices; i++) 1424 + if (ag->wsi_node[i] == ab->dev->of_node) 1425 + return ag; 1426 + 1427 + return NULL; 1428 + } 1429 + 1430 + static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag, 1431 + struct ath12k_base *ab) 1432 + { 1433 + struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev; 1434 + struct device_node *tx_endpoint, *next_rx_endpoint; 1435 + int device_count = 0; 1436 + 1437 + next_wsi_dev = wsi_dev; 1438 + 1439 + if (!next_wsi_dev) 1440 + return -ENODEV; 1441 + 1442 + do { 1443 + ag->wsi_node[device_count] = next_wsi_dev; 1444 + 1445 + tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1); 1446 + if (!tx_endpoint) { 1447 + of_node_put(next_wsi_dev); 1448 + return -ENODEV; 1449 + } 1450 + 1451 + next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint); 1452 + if (!next_rx_endpoint) { 1453 + of_node_put(next_wsi_dev); 1454 + of_node_put(tx_endpoint); 1455 + return -ENODEV; 1456 + } 1457 + 1458 + of_node_put(tx_endpoint); 1459 + of_node_put(next_wsi_dev); 1460 + 1461 + next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint); 1462 + if (!next_wsi_dev) { 1463 + of_node_put(next_rx_endpoint); 1464 + return -ENODEV; 1465 + } 1466 + 1467 + of_node_put(next_rx_endpoint); 1468 + 1469 + device_count++; 1470 + if (device_count > ATH12K_MAX_SOCS) { 1471 + ath12k_warn(ab, "device count in DT %d is more than limit %d\n", 1472 + device_count, ATH12K_MAX_SOCS); 1473 + of_node_put(next_wsi_dev); 1474 + return -EINVAL; 1475 + } 1476 + } while (wsi_dev != next_wsi_dev); 1477 + 1478 + of_node_put(next_wsi_dev); 1479 + ag->num_devices = device_count; 1480 + 1481 + return 0; 1482 + } 1483 + 1484 + static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag, 1485 + struct 
ath12k_base *ab) 1486 + { 1487 + int i, wsi_controller_index = -1, node_index = -1; 1488 + bool control; 1489 + 1490 + for (i = 0; i < ag->num_devices; i++) { 1491 + control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller"); 1492 + if (control) 1493 + wsi_controller_index = i; 1494 + 1495 + if (ag->wsi_node[i] == ab->dev->of_node) 1496 + node_index = i; 1497 + } 1498 + 1499 + if (wsi_controller_index == -1) { 1500 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt"); 1501 + return -EINVAL; 1502 + } 1503 + 1504 + if (node_index == -1) { 1505 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index"); 1506 + return -EINVAL; 1507 + } 1508 + 1509 + ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) % 1510 + ag->num_devices; 1511 + 1512 + return 0; 1513 + } 1514 + 1497 1515 static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab) 1498 1516 { 1499 - u32 group_id = ATH12K_INVALID_GROUP_ID; 1517 + struct ath12k_wsi_info *wsi = &ab->wsi_info; 1500 1518 struct ath12k_hw_group *ag; 1501 1519 1502 1520 lockdep_assert_held(&ath12k_hw_group_mutex); 1503 1521 1504 1522 /* The grouping of multiple devices will be done based on device tree file. 1505 - * TODO: device tree file parsing to know about the devices involved in group. 1523 + * The platforms that do not have any valid group information would have 1524 + * each device to be part of its own invalid group. 1506 1525 * 1507 - * The platforms that do not have any valid group information would have each 1508 - * device to be part of its own invalid group. 1509 - * 1510 - * Currently, we are not parsing any device tree information and hence, grouping 1511 - * of multiple devices is not involved. Thus, single device is added to device 1512 - * group. 
1526 + * We use group id ATH12K_INVALID_GROUP_ID for single device group 1527 + * which didn't have dt entry or wrong dt entry, there could be many 1528 + * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So 1529 + * default group id of ATH12K_INVALID_GROUP_ID combined with 1530 + * num devices in ath12k_hw_group determines if the group is 1531 + * multi device or single device group 1513 1532 */ 1514 - ag = ath12k_core_hw_group_alloc(group_id, 1); 1533 + 1534 + ag = ath12k_core_hw_group_find_by_dt(ab); 1535 + if (!ag) { 1536 + ag = ath12k_core_hw_group_alloc(ab); 1537 + if (!ag) { 1538 + ath12k_warn(ab, "unable to create new hw group\n"); 1539 + return NULL; 1540 + } 1541 + 1542 + if (ath12k_core_get_wsi_info(ag, ab) || 1543 + ath12k_core_get_wsi_index(ag, ab)) { 1544 + ath12k_dbg(ab, ATH12K_DBG_BOOT, 1545 + "unable to get wsi info from dt, grouping single device"); 1546 + ag->id = ATH12K_INVALID_GROUP_ID; 1547 + ag->num_devices = 1; 1548 + memset(ag->wsi_node, 0, sizeof(ag->wsi_node)); 1549 + wsi->index = 0; 1550 + } 1551 + 1552 + goto exit; 1553 + } else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) { 1554 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n", 1555 + ag->id); 1556 + goto invalid_group; 1557 + } else { 1558 + if (ath12k_core_get_wsi_index(ag, ab)) 1559 + goto invalid_group; 1560 + goto exit; 1561 + } 1562 + 1563 + invalid_group: 1564 + ag = ath12k_core_hw_group_alloc(ab); 1515 1565 if (!ag) { 1516 1566 ath12k_warn(ab, "unable to create new hw group\n"); 1517 1567 return NULL; 1518 1568 } 1519 1569 1570 + ag->id = ATH12K_INVALID_GROUP_ID; 1571 + ag->num_devices = 1; 1572 + wsi->index = 0; 1573 + 1520 1574 ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n"); 1575 + 1576 + exit: 1577 + if (ag->num_probed >= ag->num_devices) { 1578 + ath12k_warn(ab, "unable to add new device to group, max limit reached\n"); 1579 + goto invalid_group; 1580 + } 1521 1581 1522 1582 ab->device_id = 
ag->num_probed++; 1523 1583 ag->ab[ab->device_id] = ab; 1524 1584 ab->ag = ag; 1525 - ag->mlo_capable = false; 1585 + 1586 + ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d", 1587 + ag->id, ag->num_devices, wsi->index); 1526 1588 1527 1589 return ag; 1528 1590 } ··· 1735 1507 1736 1508 mutex_lock(&ag->mutex); 1737 1509 1510 + if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) { 1511 + mutex_unlock(&ag->mutex); 1512 + return; 1513 + } 1514 + 1515 + set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags); 1516 + 1738 1517 ath12k_core_hw_group_stop(ag); 1739 1518 1740 1519 for (i = 0; i < ag->num_devices; i++) { ··· 1786 1551 1787 1552 void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag) 1788 1553 { 1554 + struct ath12k_base *ab; 1555 + int i; 1556 + 1789 1557 lockdep_assert_held(&ag->mutex); 1790 1558 1791 1559 /* If more than one devices are grouped, then inter MLO ··· 1797 1559 * Only when there is one device, then it depends whether the 1798 1560 * device can support intra chip MLO or not 1799 1561 */ 1800 - if (ag->num_devices > 1) 1562 + if (ag->num_devices > 1) { 1801 1563 ag->mlo_capable = true; 1802 - else 1803 - ag->mlo_capable = ag->ab[0]->single_chip_mlo_supp; 1564 + } else { 1565 + ab = ag->ab[0]; 1566 + ag->mlo_capable = ab->single_chip_mlo_supp; 1567 + 1568 + /* WCN chipsets does not advertise in firmware features 1569 + * hence skip checking 1570 + */ 1571 + if (ab->hw_params->def_num_link) 1572 + return; 1573 + } 1574 + 1575 + if (!ag->mlo_capable) 1576 + return; 1577 + 1578 + for (i = 0; i < ag->num_devices; i++) { 1579 + ab = ag->ab[i]; 1580 + if (!ab) 1581 + continue; 1582 + 1583 + /* even if 1 device's firmware feature indicates MLO 1584 + * unsupported, make MLO unsupported for the whole group 1585 + */ 1586 + if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) { 1587 + ag->mlo_capable = false; 1588 + return; 1589 + } 1590 + } 1804 1591 } 1805 1592 1806 1593 int ath12k_core_init(struct ath12k_base 
*ab)
+44 -19
drivers/net/wireless/ath/ath12k/core.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */ 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef ATH12K_CORE_H ··· 136 136 struct hal_rx_desc *rx_desc; 137 137 u8 err_rel_src; 138 138 u8 err_code; 139 - u8 mac_id; 139 + u8 hw_link_id; 140 140 u8 unmapped; 141 141 u8 is_frag; 142 142 u8 tid; ··· 219 219 220 220 enum ath12k_hw_group_flags { 221 221 ATH12K_GROUP_FLAG_REGISTERED, 222 + ATH12K_GROUP_FLAG_UNREGISTER, 222 223 }; 223 224 224 225 enum ath12k_dev_flags { 225 - ATH12K_CAC_RUNNING, 226 + ATH12K_FLAG_CAC_RUNNING, 226 227 ATH12K_FLAG_CRASH_FLUSH, 227 228 ATH12K_FLAG_RAW_MODE, 228 229 ATH12K_FLAG_HW_CRYPTO_DISABLED, ··· 381 380 u64 non_ampdu_msdu_count; 382 381 u64 stbc_count; 383 382 u64 beamformed_count; 384 - u64 mcs_count[HAL_RX_MAX_MCS + 1]; 385 - u64 nss_count[HAL_RX_MAX_NSS]; 386 - u64 bw_count[HAL_RX_BW_MAX]; 387 - u64 gi_count[HAL_RX_GI_MAX]; 388 383 u64 coding_count[HAL_RX_SU_MU_CODING_MAX]; 389 384 u64 tid_count[IEEE80211_NUM_TIDS + 1]; 390 385 u64 pream_cnt[HAL_RX_PREAMBLE_MAX]; ··· 599 602 struct delayed_work timeout; 600 603 enum ath12k_scan_state state; 601 604 bool is_roc; 602 - int vdev_id; 603 605 int roc_freq; 604 606 bool roc_notify; 607 + struct wiphy_work vdev_clean_wk; 608 + struct ath12k_link_vif *arvif; 605 609 } scan; 606 610 607 611 struct { ··· 708 710 bool monitor_started; 709 711 int monitor_vdev_id; 710 712 711 - u32 freq_low; 712 - u32 freq_high; 713 + struct wiphy_radio_freq_range freq_range; 713 714 714 715 bool nlo_enabled; 716 + 717 + struct completion mlo_setup_done; 718 + u32 mlo_setup_status; 715 719 }; 716 720 717 721 struct ath12k_hw { ··· 769 769 u32 tx_chain_mask_shift; 770 770 u32 rx_chain_mask_shift; 771 771 struct ath12k_band_cap band[NUM_NL80211_BANDS]; 772 + u32 eml_cap; 773 + u32 
mld_cap; 772 774 }; 773 775 774 776 struct mlo_timestamp { ··· 823 821 struct ath12k_soc_dp_tx_err_stats tx_err; 824 822 }; 825 823 824 + struct ath12k_mlo_memory { 825 + struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01]; 826 + int mlo_mem_size; 827 + bool init_done; 828 + }; 829 + 830 + struct ath12k_hw_link { 831 + u8 device_id; 832 + u8 pdev_idx; 833 + }; 834 + 826 835 /* Holds info on the group of devices that are registered as a single 827 836 * wiphy, protected with struct ath12k_hw_group::mutex. 828 837 */ ··· 858 845 struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO]; 859 846 u8 num_hw; 860 847 bool mlo_capable; 848 + struct device_node *wsi_node[ATH12K_MAX_SOCS]; 849 + struct ath12k_mlo_memory mlo_mem; 850 + struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO]; 851 + bool hw_link_id_init_done; 852 + }; 853 + 854 + /* Holds WSI info specific to each device, excluding WSI group info */ 855 + struct ath12k_wsi_info { 856 + u32 index; 857 + u32 hw_link_id_base; 861 858 }; 862 859 863 860 /* Master structure to hold the hw data which may be used in core module */ ··· 1051 1028 struct notifier_block panic_nb; 1052 1029 1053 1030 struct ath12k_hw_group *ag; 1031 + struct ath12k_wsi_info wsi_info; 1054 1032 1055 1033 /* must be last */ 1056 1034 u8 drv_priv[] __aligned(sizeof(void *)); ··· 1194 1170 for ((index) = 0; ((index) < (ah)->num_radio && \ 1195 1171 ((ar) = &(ah)->radio[(index)])); (index)++) 1196 1172 1197 - static inline struct ath12k_hw *ath12k_ab_to_ah(struct ath12k_base *ab, int idx) 1173 + static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx) 1198 1174 { 1199 - return ab->ag->ah[idx]; 1175 + return ag->ah[idx]; 1200 1176 } 1201 1177 1202 - static inline void ath12k_ab_set_ah(struct ath12k_base *ab, int idx, 1178 + static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx, 1203 1179 struct ath12k_hw *ah) 1204 1180 { 1205 - ab->ag->ah[idx] = ah; 1206 - } 1207 - 1208 - static inline int 
ath12k_get_num_hw(struct ath12k_base *ab) 1209 - { 1210 - return ab->ag->num_hw; 1181 + ag->ah[idx] = ah; 1211 1182 } 1212 1183 1213 1184 static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab) ··· 1222 1203 lockdep_assert_held(&ab->ag->mutex); 1223 1204 1224 1205 ab->ag->num_started--; 1206 + } 1207 + 1208 + static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag, 1209 + u8 device_id) 1210 + { 1211 + return ag->ab[device_id]; 1225 1212 } 1226 1213 1227 1214 #endif /* _CORE_H_ */
+3
drivers/net/wireless/ath/ath12k/coredump.c
··· 27 27 case CALDB_MEM_REGION_TYPE: 28 28 dump_type = FW_CRASH_DUMP_NONE; 29 29 break; 30 + case MLO_GLOBAL_MEM_REGION_TYPE: 31 + dump_type = FW_CRASH_DUMP_MLO_GLOBAL_DATA; 32 + break; 30 33 default: 31 34 dump_type = FW_CRASH_DUMP_TYPE_MAX; 32 35 break;
+1
drivers/net/wireless/ath/ath12k/coredump.h
··· 15 15 FW_CRASH_DUMP_PAGEABLE_DATA, 16 16 FW_CRASH_DUMP_M3_DUMP, 17 17 FW_CRASH_DUMP_NONE, 18 + FW_CRASH_DUMP_MLO_GLOBAL_DATA, 18 19 19 20 /* keep last */ 20 21 FW_CRASH_DUMP_TYPE_MAX,
+502
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
··· 2576 2576 stats_req->buf_len = len; 2577 2577 } 2578 2578 2579 + static void 2580 + ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len, 2581 + struct debug_htt_stats_req *stats_req) 2582 + { 2583 + const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf; 2584 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2585 + u32 len = stats_req->buf_len; 2586 + u8 *buf = stats_req->buf; 2587 + u8 i; 2588 + 2589 + if (tag_len < sizeof(*htt_stats_buf)) 2590 + return; 2591 + 2592 + len += scnprintf(buf + len, buf_len - len, 2593 + "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n"); 2594 + len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ", 2595 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0])); 2596 + len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ", 2597 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]), 2598 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2])); 2599 + len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n", 2600 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3])); 2601 + len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ", 2602 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]), 2603 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5])); 2604 + len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n", 2605 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]), 2606 + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7])); 2607 + 2608 + len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs, 2609 + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); 2610 + len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs, 2611 + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); 2612 + len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs, 2613 + ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n"); 2614 + len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1, 2615 + 
htt_stats_buf->tx_su_ol_nss, 2616 + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, 2617 + "\n"); 2618 + len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1, 2619 + htt_stats_buf->tx_su_ibf_nss, 2620 + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, 2621 + "\n"); 2622 + len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1, 2623 + htt_stats_buf->tx_su_txbf_nss, 2624 + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, 2625 + "\n"); 2626 + len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw, 2627 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); 2628 + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) 2629 + len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" : 2630 + "half_tx_ol_bw", 2631 + htt_stats_buf->ol[i], 2632 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, 2633 + "\n"); 2634 + 2635 + len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw, 2636 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); 2637 + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) 2638 + len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" : 2639 + "half_tx_ibf_bw", 2640 + htt_stats_buf->ibf[i], 2641 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, 2642 + "\n"); 2643 + 2644 + len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw, 2645 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n"); 2646 + for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++) 2647 + len += print_array_to_buf(buf, len, i ? 
"quarter_tx_txbf_bw" : 2648 + "half_tx_txbf_bw", 2649 + htt_stats_buf->txbf[i], 2650 + ATH12K_HTT_TXBF_NUM_BW_CNTRS, 2651 + "\n"); 2652 + len += scnprintf(buf + len, buf_len - len, "\n"); 2653 + 2654 + len += scnprintf(buf + len, buf_len - len, 2655 + "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n"); 2656 + len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,", 2657 + le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode), 2658 + le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status)); 2659 + len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ", 2660 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode), 2661 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access), 2662 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160), 2663 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma)); 2664 + len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n", 2665 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val), 2666 + le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status)); 2667 + 2668 + stats_req->buf_len = len; 2669 + } 2670 + 2671 + static void 2672 + ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len, 2673 + struct debug_htt_stats_req *stats_req) 2674 + { 2675 + const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf; 2676 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2677 + u32 len = stats_req->buf_len; 2678 + u8 *buf = stats_req->buf; 2679 + u32 num_elements; 2680 + u8 i; 2681 + 2682 + if (tag_len < sizeof(*stats_buf)) 2683 + return; 2684 + 2685 + num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr); 2686 + 2687 + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n"); 2688 + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued ="); 2689 + for (i = 0; i < num_elements; i++) 2690 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2691 + 
le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued)); 2692 + len--; 2693 + *(buf + len) = '\0'; 2694 + 2695 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried ="); 2696 + for (i = 0; i < num_elements; i++) 2697 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2698 + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried)); 2699 + len--; 2700 + *(buf + len) = '\0'; 2701 + 2702 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed ="); 2703 + for (i = 0; i < num_elements; i++) 2704 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2705 + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush)); 2706 + len--; 2707 + *(buf + len) = '\0'; 2708 + 2709 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err ="); 2710 + for (i = 0; i < num_elements; i++) 2711 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2712 + le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err)); 2713 + len--; 2714 + *(buf + len) = '\0'; 2715 + 2716 + len += scnprintf(buf + len, buf_len - len, "\n\n"); 2717 + 2718 + stats_req->buf_len = len; 2719 + } 2720 + 2721 + static void 2722 + ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len, 2723 + struct debug_htt_stats_req *stats_req) 2724 + { 2725 + const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf; 2726 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2727 + u32 len = stats_req->buf_len; 2728 + u8 *buf = stats_req->buf; 2729 + u32 num_elements; 2730 + u8 i; 2731 + 2732 + if (tag_len < sizeof(*stats_buf)) 2733 + return; 2734 + 2735 + num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr); 2736 + 2737 + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n"); 2738 + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued ="); 2739 + for (i = 0; i < num_elements; i++) 2740 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2741 + 
le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued)); 2742 + len--; 2743 + *(buf + len) = '\0'; 2744 + 2745 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried ="); 2746 + for (i = 0; i < num_elements; i++) 2747 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2748 + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried)); 2749 + len--; 2750 + *(buf + len) = '\0'; 2751 + 2752 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed ="); 2753 + for (i = 0; i < num_elements; i++) 2754 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2755 + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush)); 2756 + len--; 2757 + *(buf + len) = '\0'; 2758 + 2759 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err ="); 2760 + for (i = 0; i < num_elements; i++) 2761 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2762 + le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err)); 2763 + len--; 2764 + *(buf + len) = '\0'; 2765 + 2766 + len += scnprintf(buf + len, buf_len - len, "\n\n"); 2767 + 2768 + stats_req->buf_len = len; 2769 + } 2770 + 2771 + static void 2772 + ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len, 2773 + struct debug_htt_stats_req *stats_req) 2774 + { 2775 + const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf; 2776 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2777 + u32 len = stats_req->buf_len; 2778 + u8 *buf = stats_req->buf; 2779 + u32 num_elements; 2780 + u8 i; 2781 + 2782 + if (tag_len < sizeof(*stats_buf)) 2783 + return; 2784 + 2785 + num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr); 2786 + 2787 + len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n"); 2788 + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued ="); 2789 + for (i = 0; i < num_elements; i++) 2790 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2791 + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued)); 2792 
+ len--; 2793 + *(buf + len) = '\0'; 2794 + 2795 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tied ="); 2796 + for (i = 0; i < num_elements; i++) 2797 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2798 + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried)); 2799 + len--; 2800 + *(buf + len) = '\0'; 2801 + 2802 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed ="); 2803 + for (i = 0; i < num_elements; i++) 2804 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2805 + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed)); 2806 + len--; 2807 + *(buf + len) = '\0'; 2808 + 2809 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err ="); 2810 + for (i = 0; i < num_elements; i++) 2811 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2812 + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err)); 2813 + len--; 2814 + *(buf + len) = '\0'; 2815 + 2816 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd ="); 2817 + for (i = 0; i < num_elements; i++) 2818 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2819 + le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd)); 2820 + len--; 2821 + *(buf + len) = '\0'; 2822 + 2823 + len += scnprintf(buf + len, buf_len - len, "\n\n"); 2824 + 2825 + stats_req->buf_len = len; 2826 + } 2827 + 2828 + static void 2829 + ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len, 2830 + struct debug_htt_stats_req *stats_req) 2831 + { 2832 + const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf; 2833 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2834 + u32 len = stats_req->buf_len; 2835 + u8 *buf = stats_req->buf; 2836 + u32 num_elements; 2837 + u8 i; 2838 + 2839 + if (tag_len < sizeof(*stats_buf)) 2840 + return; 2841 + 2842 + num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr); 2843 + 2844 + len += scnprintf(buf + len, buf_len - len, 2845 + 
"HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n"); 2846 + len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer ="); 2847 + for (i = 0; i < num_elements; i++) 2848 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2849 + le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer)); 2850 + len--; 2851 + *(buf + len) = '\0'; 2852 + 2853 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch ="); 2854 + for (i = 0; i < num_elements; i++) 2855 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2856 + le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch)); 2857 + len--; 2858 + *(buf + len) = '\0'; 2859 + 2860 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound ="); 2861 + for (i = 0; i < num_elements; i++) 2862 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2863 + le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound)); 2864 + len--; 2865 + *(buf + len) = '\0'; 2866 + 2867 + len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound ="); 2868 + for (i = 0; i < num_elements; i++) 2869 + len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1, 2870 + le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound)); 2871 + len--; 2872 + *(buf + len) = '\0'; 2873 + 2874 + len += scnprintf(buf + len, buf_len - len, "\n\n"); 2875 + 2876 + stats_req->buf_len = len; 2877 + } 2878 + 2879 + static void 2880 + ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, 2881 + struct debug_htt_stats_req *stats_req) 2882 + { 2883 + const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf; 2884 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 2885 + u32 len = stats_req->buf_len; 2886 + u8 *buf = stats_req->buf; 2887 + 2888 + if (tag_len < sizeof(*stats_buf)) 2889 + return; 2890 + 2891 + len += scnprintf(buf + len, buf_len - len, 2892 + "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n"); 2893 + len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = 
%u\n", 2894 + le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried)); 2895 + len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n", 2896 + le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed)); 2897 + len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n", 2898 + le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried)); 2899 + len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n", 2900 + le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed)); 2901 + 2902 + stats_req->buf_len = len; 2903 + } 2904 + 2579 2905 static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info, 2580 2906 int idx, char *str_buf) 2581 2907 { ··· 3470 3144 } 3471 3145 3472 3146 static void 3147 + ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len, 3148 + struct debug_htt_stats_req *stats_req) 3149 + { 3150 + const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf; 3151 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 3152 + u32 len = stats_req->buf_len; 3153 + u8 *buf = stats_req->buf; 3154 + u32 mac_addr_l32; 3155 + u32 mac_addr_h16; 3156 + u32 ast_info; 3157 + 3158 + if (tag_len < sizeof(*htt_stats_buf)) 3159 + return; 3160 + 3161 + mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32); 3162 + mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16); 3163 + ast_info = le32_to_cpu(htt_stats_buf->info); 3164 + 3165 + len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n"); 3166 + len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n", 3167 + le32_to_cpu(htt_stats_buf->ast_index)); 3168 + len += scnprintf(buf + len, buf_len - len, 3169 + "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 3170 + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0), 3171 + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1), 3172 + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2), 3173 + u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3), 3174 + 
u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0), 3175 + u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1)); 3176 + 3177 + len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n", 3178 + le32_to_cpu(htt_stats_buf->sw_peer_id)); 3179 + len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n", 3180 + u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO)); 3181 + len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n", 3182 + u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO)); 3183 + len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n", 3184 + u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO)); 3185 + len += scnprintf(buf + len, buf_len - len, "mcast = %u\n", 3186 + u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO)); 3187 + len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n", 3188 + u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO)); 3189 + len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n", 3190 + u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO)); 3191 + len += scnprintf(buf + len, buf_len - len, "mec = %u\n", 3192 + u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO)); 3193 + len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n", 3194 + u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO)); 3195 + 3196 + stats_req->buf_len = len; 3197 + } 3198 + 3199 + static const char* 3200 + ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction) 3201 + { 3202 + switch (direction) { 3203 + case ATH12K_HTT_STATS_DIRECTION_TX: 3204 + return "tx"; 3205 + case ATH12K_HTT_STATS_DIRECTION_RX: 3206 + return "rx"; 3207 + default: 3208 + return "unknown"; 3209 + } 3210 + } 3211 + 3212 + static const char* 3213 + ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type) 3214 + { 3215 + switch (ppdu_type) { 3216 + case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU: 3217 + return "su"; 3218 + case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO: 3219 + return "dl_mu_mimo"; 3220 + case 
ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO: 3221 + return "ul_mu_mimo"; 3222 + case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA: 3223 + return "dl_mu_ofdma"; 3224 + case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA: 3225 + return "ul_mu_ofdma"; 3226 + default: 3227 + return "unknown"; 3228 + } 3229 + } 3230 + 3231 + static const char* 3232 + ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type) 3233 + { 3234 + switch (pream_type) { 3235 + case ATH12K_HTT_STATS_PREAM_OFDM: 3236 + return "ofdm"; 3237 + case ATH12K_HTT_STATS_PREAM_CCK: 3238 + return "cck"; 3239 + case ATH12K_HTT_STATS_PREAM_HT: 3240 + return "ht"; 3241 + case ATH12K_HTT_STATS_PREAM_VHT: 3242 + return "ac"; 3243 + case ATH12K_HTT_STATS_PREAM_HE: 3244 + return "ax"; 3245 + case ATH12K_HTT_STATS_PREAM_EHT: 3246 + return "be"; 3247 + default: 3248 + return "unknown"; 3249 + } 3250 + } 3251 + 3252 + static void 3253 + ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len, 3254 + struct debug_htt_stats_req *stats_req) 3255 + { 3256 + const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf; 3257 + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; 3258 + u32 len = stats_req->buf_len; 3259 + u8 *buf = stats_req->buf; 3260 + const char *direction; 3261 + const char *ppdu_type; 3262 + const char *preamble; 3263 + u32 mac_id__word; 3264 + u32 subband_limit; 3265 + u8 i; 3266 + 3267 + if (tag_len < sizeof(*stats_buf)) 3268 + return; 3269 + 3270 + mac_id__word = le32_to_cpu(stats_buf->mac_id__word); 3271 + subband_limit = min(le32_to_cpu(stats_buf->subband_cnt), 3272 + ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT); 3273 + 3274 + direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction)); 3275 + ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type)); 3276 + preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble)); 3277 + 3278 + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n"); 3279 + len += 
scnprintf(buf + len, buf_len - len, "mac_id = %u\n", 3280 + u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID)); 3281 + len += scnprintf(buf + len, buf_len - len, 3282 + "%s_%s_%s_last_used_pattern_mask = 0x%08x\n", 3283 + direction, preamble, ppdu_type, 3284 + le32_to_cpu(stats_buf->last_used_pattern_mask)); 3285 + 3286 + for (i = 0; i < subband_limit; i++) { 3287 + len += scnprintf(buf + len, buf_len - len, 3288 + "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n", 3289 + direction, preamble, ppdu_type, i + 1, 3290 + le32_to_cpu(stats_buf->num_subbands_used_cnt[i])); 3291 + } 3292 + len += scnprintf(buf + len, buf_len - len, "\n"); 3293 + 3294 + stats_req->buf_len = len; 3295 + } 3296 + 3297 + static void 3473 3298 ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len, 3474 3299 struct debug_htt_stats_req *stats_req) 3475 3300 { ··· 3985 3508 case HTT_STATS_PDEV_OBSS_PD_TAG: 3986 3509 ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); 3987 3510 break; 3511 + case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: 3512 + ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req); 3513 + break; 3514 + case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG: 3515 + ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req); 3516 + break; 3517 + case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG: 3518 + ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req); 3519 + break; 3520 + case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG: 3521 + ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req); 3522 + break; 3523 + case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG: 3524 + ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req); 3525 + break; 3526 + case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG: 3527 + ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len, 3528 + stats_req); 3529 + break; 3988 3530 case HTT_STATS_DLPAGER_STATS_TAG: 3989 3531 ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req); 3990 
3532 break; ··· 4027 3531 break; 4028 3532 case HTT_STATS_PER_RATE_STATS_TAG: 4029 3533 ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req); 3534 + break; 3535 + case HTT_STATS_AST_ENTRY_TAG: 3536 + ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req); 3537 + break; 3538 + case HTT_STATS_PDEV_PUNCTURE_STATS_TAG: 3539 + ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req); 4030 3540 break; 4031 3541 case HTT_STATS_DMAC_RESET_STATS_TAG: 4032 3542 ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
+155
drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
··· 135 135 ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, 136 136 ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, 137 137 ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, 138 + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, 139 + ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, 138 140 ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, 139 141 ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, 140 142 ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, 141 143 ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, 144 + ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, 142 145 ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, 146 + ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, 143 147 ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, 144 148 ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, 145 149 ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, ··· 201 197 HTT_STATS_HW_WAR_TAG = 89, 202 198 HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, 203 199 HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, 200 + HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108, 204 201 HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, 205 202 HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, 206 203 HTT_STATS_DLPAGER_STATS_TAG = 120, ··· 213 208 HTT_STATS_PER_RATE_STATS_TAG = 128, 214 209 HTT_STATS_MU_PPDU_DIST_TAG = 129, 215 210 HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130, 211 + HTT_STATS_AST_ENTRY_TAG = 132, 216 212 HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135, 217 213 HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137, 218 214 HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138, 219 215 HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139, 216 + HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG = 147, 217 + HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG = 148, 218 + HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG = 149, 219 + HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG = 150, 220 220 HTT_STATS_DMAC_RESET_STATS_TAG = 155, 221 221 HTT_STATS_PHY_TPC_STATS_TAG = 157, 222 + HTT_STATS_PDEV_PUNCTURE_STATS_TAG = 158, 222 223 HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165, 224 + 
HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG = 172, 223 225 HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG = 176, 224 226 225 227 HTT_STATS_MAX_TAG, ··· 1080 1068 __le32 num_sr_ppdu_abort_flush_cnt; 1081 1069 } __packed; 1082 1070 1071 + #define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14 1072 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 1073 + #define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 1074 + #define ATH12K_HTT_TXBF_NUM_BW_CNTRS 5 1075 + #define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES 2 1076 + 1077 + struct ath12k_htt_pdev_txrate_txbf_stats_tlv { 1078 + __le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS]; 1079 + __le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS]; 1080 + __le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS]; 1081 + __le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 1082 + __le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 1083 + __le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; 1084 + __le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1085 + __le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1086 + __le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1087 + __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; 1088 + __le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1089 + __le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1090 + __le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS]; 1091 + __le32 txbf_flag_set_mu_mode; 1092 + __le32 txbf_flag_set_final_status; 1093 + __le32 txbf_flag_not_set_verified_txbf_mode; 1094 + __le32 txbf_flag_not_set_disable_p2p_access; 1095 + __le32 txbf_flag_not_set_max_nss_in_he160; 1096 + __le32 txbf_flag_not_set_disable_uldlofdma; 1097 + __le32 txbf_flag_not_set_mcs_threshold_val; 1098 + __le32 txbf_flag_not_set_final_status; 1099 + } __packed; 1100 + 1101 + struct 
ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t { 1102 + __le32 ax_ofdma_ndpa_queued; 1103 + __le32 ax_ofdma_ndpa_tried; 1104 + __le32 ax_ofdma_ndpa_flush; 1105 + __le32 ax_ofdma_ndpa_err; 1106 + } __packed; 1107 + 1108 + struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv { 1109 + __le32 num_elems_ax_ndpa_arr; 1110 + __le32 arr_elem_size_ax_ndpa; 1111 + DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa); 1112 + } __packed; 1113 + 1114 + struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t { 1115 + __le32 ax_ofdma_ndp_queued; 1116 + __le32 ax_ofdma_ndp_tried; 1117 + __le32 ax_ofdma_ndp_flush; 1118 + __le32 ax_ofdma_ndp_err; 1119 + } __packed; 1120 + 1121 + struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv { 1122 + __le32 num_elems_ax_ndp_arr; 1123 + __le32 arr_elem_size_ax_ndp; 1124 + DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp); 1125 + } __packed; 1126 + 1127 + struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t { 1128 + __le32 ax_ofdma_brp_queued; 1129 + __le32 ax_ofdma_brp_tried; 1130 + __le32 ax_ofdma_brp_flushed; 1131 + __le32 ax_ofdma_brp_err; 1132 + __le32 ax_ofdma_num_cbf_rcvd; 1133 + } __packed; 1134 + 1135 + struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv { 1136 + __le32 num_elems_ax_brp_arr; 1137 + __le32 arr_elem_size_ax_brp; 1138 + DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp); 1139 + } __packed; 1140 + 1141 + struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t { 1142 + __le32 num_ppdu_steer; 1143 + __le32 num_ppdu_ol; 1144 + __le32 num_usr_prefetch; 1145 + __le32 num_usr_sound; 1146 + __le32 num_usr_force_sound; 1147 + } __packed; 1148 + 1149 + struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv { 1150 + __le32 num_elems_ax_steer_arr; 1151 + __le32 arr_elem_size_ax_steer; 1152 + DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer); 1153 + } __packed; 1154 + 1155 + struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv { 1156 + __le32 
ax_ofdma_rbo_steer_mpdus_tried; 1157 + __le32 ax_ofdma_rbo_steer_mpdus_failed; 1158 + __le32 ax_ofdma_sifs_steer_mpdus_tried; 1159 + __le32 ax_ofdma_sifs_steer_mpdus_failed; 1160 + } __packed; 1161 + 1083 1162 enum ath12k_htt_stats_page_lock_state { 1084 1163 ATH12K_HTT_STATS_PAGE_LOCKED = 0, 1085 1164 ATH12K_HTT_STATS_PAGE_UNLOCKED = 1, ··· 1295 1192 struct ath12k_htt_t2h_soc_txrx_stats_common_tlv { 1296 1193 __le32 inv_peers_msdu_drop_count_hi; 1297 1194 __le32 inv_peers_msdu_drop_count_lo; 1195 + } __packed; 1196 + 1197 + #define ATH12K_HTT_AST_PDEV_ID_INFO GENMASK(1, 0) 1198 + #define ATH12K_HTT_AST_VDEV_ID_INFO GENMASK(9, 2) 1199 + #define ATH12K_HTT_AST_NEXT_HOP_INFO BIT(10) 1200 + #define ATH12K_HTT_AST_MCAST_INFO BIT(11) 1201 + #define ATH12K_HTT_AST_MONITOR_DIRECT_INFO BIT(12) 1202 + #define ATH12K_HTT_AST_MESH_STA_INFO BIT(13) 1203 + #define ATH12K_HTT_AST_MEC_INFO BIT(14) 1204 + #define ATH12K_HTT_AST_INTRA_BSS_INFO BIT(15) 1205 + 1206 + struct ath12k_htt_ast_entry_tlv { 1207 + __le32 sw_peer_id; 1208 + __le32 ast_index; 1209 + struct htt_mac_addr mac_addr; 1210 + __le32 info; 1211 + } __packed; 1212 + 1213 + enum ath12k_htt_stats_direction { 1214 + ATH12K_HTT_STATS_DIRECTION_TX, 1215 + ATH12K_HTT_STATS_DIRECTION_RX 1216 + }; 1217 + 1218 + enum ath12k_htt_stats_ppdu_type { 1219 + ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU, 1220 + ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO, 1221 + ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO, 1222 + ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA, 1223 + ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA 1224 + }; 1225 + 1226 + enum ath12k_htt_stats_param_type { 1227 + ATH12K_HTT_STATS_PREAM_OFDM, 1228 + ATH12K_HTT_STATS_PREAM_CCK, 1229 + ATH12K_HTT_STATS_PREAM_HT, 1230 + ATH12K_HTT_STATS_PREAM_VHT, 1231 + ATH12K_HTT_STATS_PREAM_HE, 1232 + ATH12K_HTT_STATS_PREAM_EHT, 1233 + ATH12K_HTT_STATS_PREAM_RSVD1, 1234 + ATH12K_HTT_STATS_PREAM_COUNT, 1235 + }; 1236 + 1237 + #define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT 32 1238 + 1239 + struct 
ath12k_htt_pdev_puncture_stats_tlv { 1240 + __le32 mac_id__word; 1241 + __le32 direction; 1242 + __le32 preamble; 1243 + __le32 ppdu_type; 1244 + __le32 subband_cnt; 1245 + __le32 last_used_pattern_mask; 1246 + __le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT]; 1298 1247 } __packed; 1299 1248 1300 1249 struct ath12k_htt_dmac_reset_stats_tlv {
+14
drivers/net/wireless/ath/ath12k/dp.c
··· 1445 1445 for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { 1446 1446 rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j); 1447 1447 rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; 1448 + rx_descs[j].device_id = ab->device_id; 1448 1449 list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list); 1449 1450 1450 1451 /* Update descriptor VA in SPT */ ··· 1520 1519 dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET); 1521 1520 1522 1521 return 0; 1522 + } 1523 + 1524 + void ath12k_dp_partner_cc_init(struct ath12k_base *ab) 1525 + { 1526 + struct ath12k_hw_group *ag = ab->ag; 1527 + int i; 1528 + 1529 + for (i = 0; i < ag->num_devices; i++) { 1530 + if (ag->ab[i] == ab) 1531 + continue; 1532 + 1533 + ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC); 1534 + } 1523 1535 } 1524 1536 1525 1537 static int ath12k_dp_cc_init(struct ath12k_base *ab)
+25 -5
drivers/net/wireless/ath/ath12k/dp.h
··· 287 287 u32 cookie; 288 288 u32 magic; 289 289 u8 in_use : 1, 290 - reserved : 7; 290 + device_id : 3, 291 + reserved : 4; 291 292 }; 292 293 293 294 struct ath12k_tx_desc_info { ··· 696 695 * 697 696 * The message would appear as follows: 698 697 * 699 - * |31 26|25|24|23 16|15 8|7 0| 700 - * |-----------------+----------------+----------------+---------------| 701 - * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type | 698 + * |31 29|28|27|26|25|24|23 16|15 8|7 0| 699 + * |-------+--+--+--+--+--+-----------+----------------+---------------| 700 + * | rsvd1 |ED|DT|OV|PS|SS| ring_id | pdev_id | msg_type | 702 701 * |-------------------------------------------------------------------| 703 702 * | rsvd2 | ring_buffer_size | 704 703 * |-------------------------------------------------------------------| ··· 725 724 * More details can be got from enum htt_srng_ring_id 726 725 * b'24 - status_swap: 1 is to swap status TLV 727 726 * b'25 - pkt_swap: 1 is to swap packet TLV 728 - * b'26:31 - rsvd1: reserved for future use 727 + * b'26 - rx_offset_valid (OV): flag to indicate rx offsets 728 + * configuration fields are valid 729 + * b'27 - drop_thresh_valid (DT): flag to indicate if the 730 + * rx_drop_threshold field is valid 731 + * b'28 - rx_mon_global_en: Enable/Disable global register 732 + * configuration in Rx monitor module. 733 + * b'29:31 - rsvd1: reserved for future use 729 734 * dword1 - b'0:16 - ring_buffer_size: size of buffers referenced by rx ring, 730 735 * in byte units. 
731 736 * Valid only for HW_TO_SW_RING and SW_TO_HW_RING ··· 1798 1791 ATH12K_STATS_TIMER_DUR_2SEC = 3, 1799 1792 }; 1800 1793 1794 + #define ATH12K_HTT_MAC_ADDR_L32_0 GENMASK(7, 0) 1795 + #define ATH12K_HTT_MAC_ADDR_L32_1 GENMASK(15, 8) 1796 + #define ATH12K_HTT_MAC_ADDR_L32_2 GENMASK(23, 16) 1797 + #define ATH12K_HTT_MAC_ADDR_L32_3 GENMASK(31, 24) 1798 + #define ATH12K_HTT_MAC_ADDR_H16_0 GENMASK(7, 0) 1799 + #define ATH12K_HTT_MAC_ADDR_H16_1 GENMASK(15, 8) 1800 + 1801 + struct htt_mac_addr { 1802 + __le32 mac_addr_l32; 1803 + __le32 mac_addr_h16; 1804 + } __packed; 1805 + 1801 1806 static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr) 1802 1807 { 1803 1808 memcpy(addr, &addr_l32, 4); ··· 1824 1805 void ath12k_dp_free(struct ath12k_base *ab); 1825 1806 int ath12k_dp_alloc(struct ath12k_base *ab); 1826 1807 void ath12k_dp_cc_config(struct ath12k_base *ab); 1808 + void ath12k_dp_partner_cc_init(struct ath12k_base *ab); 1827 1809 int ath12k_dp_pdev_alloc(struct ath12k_base *ab); 1828 1810 void ath12k_dp_pdev_pre_alloc(struct ath12k *ar); 1829 1811 void ath12k_dp_pdev_free(struct ath12k_base *ab);
+65 -91
drivers/net/wireless/ath/ath12k/dp_mon.c
··· 10 10 #include "dp_tx.h" 11 11 #include "peer.h" 12 12 13 - static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv, 14 - struct hal_rx_user_status *rx_user_status) 13 + static void 14 + ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, 15 + struct hal_rx_user_status *rx_user_status) 15 16 { 16 - struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv; 17 - 18 17 rx_user_status->ul_ofdma_user_v0_word0 = 19 18 __le32_to_cpu(ppdu_end_user->usr_resp_ref); 20 19 rx_user_status->ul_ofdma_user_v0_word1 = ··· 34 35 } 35 36 36 37 static void 37 - ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv, 38 + ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv, 38 39 struct hal_rx_mon_ppdu_info *ppdu_info, 39 40 struct hal_rx_user_status *rx_user_status) 40 41 { ··· 72 73 ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status); 73 74 } 74 75 75 - static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data, 76 + static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, 76 77 struct hal_rx_mon_ppdu_info *ppdu_info) 77 78 { 78 - struct hal_rx_vht_sig_a_info *vht_sig = 79 - (struct hal_rx_vht_sig_a_info *)tlv_data; 80 79 u32 nsts, group_id, info0, info1; 81 80 u8 gi_setting; 82 81 ··· 116 119 u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING); 117 120 } 118 121 119 - static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data, 122 + static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, 120 123 struct hal_rx_mon_ppdu_info *ppdu_info) 121 124 { 122 - struct hal_rx_ht_sig_info *ht_sig = 123 - (struct hal_rx_ht_sig_info *)tlv_data; 124 125 u32 info0 = __le32_to_cpu(ht_sig->info0); 125 126 u32 info1 = __le32_to_cpu(ht_sig->info1); 126 127 ··· 131 136 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 132 137 } 133 138 134 - static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data, 139 + static void ath12k_dp_mon_parse_l_sig_b(const 
struct hal_rx_lsig_b_info *lsigb, 135 140 struct hal_rx_mon_ppdu_info *ppdu_info) 136 141 { 137 - struct hal_rx_lsig_b_info *lsigb = 138 - (struct hal_rx_lsig_b_info *)tlv_data; 139 142 u32 info0 = __le32_to_cpu(lsigb->info0); 140 143 u8 rate; 141 144 ··· 163 170 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 164 171 } 165 172 166 - static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data, 173 + static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, 167 174 struct hal_rx_mon_ppdu_info *ppdu_info) 168 175 { 169 - struct hal_rx_lsig_a_info *lsiga = 170 - (struct hal_rx_lsig_a_info *)tlv_data; 171 176 u32 info0 = __le32_to_cpu(lsiga->info0); 172 177 u8 rate; 173 178 ··· 203 212 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; 204 213 } 205 214 206 - static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data, 207 - struct hal_rx_mon_ppdu_info *ppdu_info) 215 + static void 216 + ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma, 217 + struct hal_rx_mon_ppdu_info *ppdu_info) 208 218 { 209 - struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma = 210 - (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data; 211 219 u32 info0, value; 212 220 213 - info0 = __le32_to_cpu(he_sig_b2_ofdma->info0); 221 + info0 = __le32_to_cpu(ofdma->info0); 214 222 215 223 ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN; 216 224 ··· 240 250 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; 241 251 } 242 252 243 - static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data, 244 - struct hal_rx_mon_ppdu_info *ppdu_info) 253 + static void 254 + ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu, 255 + struct hal_rx_mon_ppdu_info *ppdu_info) 245 256 { 246 - struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu = 247 - (struct hal_rx_he_sig_b2_mu_info *)tlv_data; 248 257 u32 info0, value; 249 258 250 259 info0 = __le32_to_cpu(he_sig_b2_mu->info0); ··· 266 277 ppdu_info->nss = 
u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS); 267 278 } 268 279 269 - static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data, 270 - struct hal_rx_mon_ppdu_info *ppdu_info) 280 + static void 281 + ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu, 282 + struct hal_rx_mon_ppdu_info *ppdu_info) 271 283 { 272 - struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu = 273 - (struct hal_rx_he_sig_b1_mu_info *)tlv_data; 274 284 u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0); 275 285 u16 ru_tones; 276 286 ··· 280 292 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 281 293 } 282 294 283 - static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data, 284 - struct hal_rx_mon_ppdu_info *ppdu_info) 295 + static void 296 + ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl, 297 + struct hal_rx_mon_ppdu_info *ppdu_info) 285 298 { 286 - struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl = 287 - (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data; 288 299 u32 info0, info1, value; 289 300 u16 he_gi = 0, he_ltf = 0; 290 301 ··· 414 427 ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; 415 428 } 416 429 417 - static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data, 430 + static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, 418 431 struct hal_rx_mon_ppdu_info *ppdu_info) 419 432 { 420 - struct hal_rx_he_sig_a_su_info *he_sig_a = 421 - (struct hal_rx_he_sig_a_su_info *)tlv_data; 422 433 u32 info0, info1, value; 423 434 u32 dcm; 424 435 u8 he_dcm = 0, he_stbc = 0; ··· 565 580 static enum hal_rx_mon_status 566 581 ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, 567 582 struct ath12k_mon_data *pmon, 568 - u32 tlv_tag, u8 *tlv_data, u32 userid) 583 + u32 tlv_tag, const void *tlv_data, 584 + u32 userid) 569 585 { 570 586 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; 571 587 u32 info[7]; 572 588 573 589 switch (tlv_tag) { 574 590 case 
HAL_RX_PPDU_START: { 575 - struct hal_rx_ppdu_start *ppdu_start = 576 - (struct hal_rx_ppdu_start *)tlv_data; 591 + const struct hal_rx_ppdu_start *ppdu_start = tlv_data; 577 592 578 593 u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32, 579 594 ppdu_start->ppdu_start_ts_31_0); ··· 600 615 break; 601 616 } 602 617 case HAL_RX_PPDU_END_USER_STATS: { 603 - struct hal_rx_ppdu_end_user_stats *eu_stats = 604 - (struct hal_rx_ppdu_end_user_stats *)tlv_data; 618 + const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data; 619 + u32 tid_bitmap; 605 620 606 621 info[0] = __le32_to_cpu(eu_stats->info0); 607 622 info[1] = __le32_to_cpu(eu_stats->info1); ··· 614 629 u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX); 615 630 ppdu_info->fc_valid = 616 631 u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID); 617 - ppdu_info->tid = 618 - ffs(u32_get_bits(info[6], 619 - HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP) 620 - - 1); 632 + tid_bitmap = u32_get_bits(info[6], 633 + HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP); 634 + ppdu_info->tid = ffs(tid_bitmap) - 1; 621 635 ppdu_info->tcp_msdu_count = 622 636 u32_get_bits(info[4], 623 637 HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT); ··· 657 673 &ppdu_info->userstats[userid]; 658 674 ppdu_info->num_users += 1; 659 675 660 - ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats); 661 - ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info, 676 + ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); 677 + ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info, 662 678 rxuser_stats); 663 679 } 664 680 ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]); ··· 666 682 break; 667 683 } 668 684 case HAL_RX_PPDU_END_USER_STATS_EXT: { 669 - struct hal_rx_ppdu_end_user_stats_ext *eu_stats = 670 - (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data; 685 + const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data; 686 + 671 687 
ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1); 672 688 ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2); 673 689 ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3); ··· 713 729 break; 714 730 715 731 case HAL_PHYRX_RSSI_LEGACY: { 716 - struct hal_rx_phyrx_rssi_legacy_info *rssi = 717 - (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; 732 + const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data; 718 733 719 734 info[0] = __le32_to_cpu(rssi->info0); 720 735 info[1] = __le32_to_cpu(rssi->info1); ··· 731 748 break; 732 749 } 733 750 case HAL_RXPCU_PPDU_END_INFO: { 734 - struct hal_rx_ppdu_end_duration *ppdu_rx_duration = 735 - (struct hal_rx_ppdu_end_duration *)tlv_data; 751 + const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; 736 752 737 753 info[0] = __le32_to_cpu(ppdu_rx_duration->info0); 738 754 ppdu_info->rx_duration = ··· 742 760 break; 743 761 } 744 762 case HAL_RX_MPDU_START: { 745 - struct hal_rx_mpdu_start *mpdu_start = 746 - (struct hal_rx_mpdu_start *)tlv_data; 763 + const struct hal_rx_mpdu_start *mpdu_start = tlv_data; 747 764 struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; 748 765 u16 peer_id; 749 766 ··· 771 790 break; 772 791 case HAL_MON_BUF_ADDR: { 773 792 struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; 774 - struct dp_mon_packet_info *packet_info = 775 - (struct dp_mon_packet_info *)tlv_data; 793 + const struct dp_mon_packet_info *packet_info = tlv_data; 776 794 int buf_id = u32_get_bits(packet_info->cookie, 777 795 DP_RXDMA_BUF_COOKIE_BUF_ID); 778 796 struct sk_buff *msdu; ··· 803 823 break; 804 824 } 805 825 case HAL_RX_MSDU_END: { 806 - struct rx_msdu_end_qcn9274 *msdu_end = 807 - (struct rx_msdu_end_qcn9274 *)tlv_data; 826 + const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data; 808 827 bool is_first_msdu_in_mpdu; 809 828 u16 msdu_end_info; 810 829 ··· 1072 1093 decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc); 1073 1094 
spin_lock_bh(&ar->ab->base_lock); 1074 1095 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu); 1075 - if (peer && peer->sta) 1096 + if (peer && peer->sta) { 1076 1097 pubsta = peer->sta; 1098 + if (pubsta->valid_links) { 1099 + status->link_valid = 1; 1100 + status->link_id = peer->link_id; 1101 + } 1102 + } 1103 + 1077 1104 spin_unlock_bh(&ar->ab->base_lock); 1078 1105 1079 1106 ath12k_dbg(ar->ab, ATH12K_DBG_DATA, ··· 1184 1199 struct sk_buff *skb) 1185 1200 { 1186 1201 struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; 1187 - struct hal_tlv_hdr *tlv; 1202 + struct hal_tlv_64_hdr *tlv; 1188 1203 enum hal_rx_mon_status hal_status; 1189 - u32 tlv_userid = 0; 1204 + u32 tlv_userid; 1190 1205 u16 tlv_tag, tlv_len; 1191 1206 u8 *ptr = skb->data; 1192 1207 1193 1208 memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); 1194 1209 1195 1210 do { 1196 - tlv = (struct hal_tlv_hdr *)ptr; 1197 - tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG); 1198 - tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN); 1199 - tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID); 1211 + tlv = (struct hal_tlv_64_hdr *)ptr; 1212 + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); 1213 + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); 1214 + tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); 1200 1215 ptr += sizeof(*tlv); 1201 1216 1202 1217 /* The actual length of PPDU_END is the combined length of many PHY ··· 1211 1226 hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, 1212 1227 tlv_tag, ptr, tlv_userid); 1213 1228 ptr += tlv_len; 1214 - ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN); 1229 + ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); 1215 1230 1216 1231 if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) 1217 1232 break; ··· 1588 1603 static enum dp_mon_tx_tlv_status 1589 1604 ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, 1590 1605 struct ath12k_mon_data *pmon, 1591 - u16 tlv_tag, u8 *tlv_data, u32 userid) 1606 + u16 tlv_tag, const void *tlv_data, u32 userid) 1592 1607 { 
1593 1608 struct dp_mon_tx_ppdu_info *tx_ppdu_info; 1594 1609 enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE; ··· 1598 1613 1599 1614 switch (tlv_tag) { 1600 1615 case HAL_TX_FES_SETUP: { 1601 - struct hal_tx_fes_setup *tx_fes_setup = 1602 - (struct hal_tx_fes_setup *)tlv_data; 1616 + const struct hal_tx_fes_setup *tx_fes_setup = tlv_data; 1603 1617 1604 1618 info[0] = __le32_to_cpu(tx_fes_setup->info0); 1605 1619 tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id); ··· 1609 1625 } 1610 1626 1611 1627 case HAL_TX_FES_STATUS_END: { 1612 - struct hal_tx_fes_status_end *tx_fes_status_end = 1613 - (struct hal_tx_fes_status_end *)tlv_data; 1628 + const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data; 1614 1629 u32 tst_15_0, tst_31_16; 1615 1630 1616 1631 info[0] = __le32_to_cpu(tx_fes_status_end->info0); ··· 1626 1643 } 1627 1644 1628 1645 case HAL_RX_RESPONSE_REQUIRED_INFO: { 1629 - struct hal_rx_resp_req_info *rx_resp_req_info = 1630 - (struct hal_rx_resp_req_info *)tlv_data; 1646 + const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data; 1631 1647 u32 addr_32; 1632 1648 u16 addr_16; 1633 1649 ··· 1671 1689 } 1672 1690 1673 1691 case HAL_PCU_PPDU_SETUP_INIT: { 1674 - struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = 1675 - (struct hal_tx_pcu_ppdu_setup_init *)tlv_data; 1692 + const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data; 1676 1693 u32 addr_32; 1677 1694 u16 addr_16; 1678 1695 ··· 1717 1736 } 1718 1737 1719 1738 case HAL_TX_QUEUE_EXTENSION: { 1720 - struct hal_tx_queue_exten *tx_q_exten = 1721 - (struct hal_tx_queue_exten *)tlv_data; 1739 + const struct hal_tx_queue_exten *tx_q_exten = tlv_data; 1722 1740 1723 1741 info[0] = __le32_to_cpu(tx_q_exten->info0); 1724 1742 ··· 1729 1749 } 1730 1750 1731 1751 case HAL_TX_FES_STATUS_START: { 1732 - struct hal_tx_fes_status_start *tx_fes_start = 1733 - (struct hal_tx_fes_status_start *)tlv_data; 1752 + const struct hal_tx_fes_status_start *tx_fes_start = 
tlv_data; 1734 1753 1735 1754 info[0] = __le32_to_cpu(tx_fes_start->info0); 1736 1755 ··· 1740 1761 } 1741 1762 1742 1763 case HAL_TX_FES_STATUS_PROT: { 1743 - struct hal_tx_fes_status_prot *tx_fes_status = 1744 - (struct hal_tx_fes_status_prot *)tlv_data; 1764 + const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data; 1745 1765 u32 start_timestamp; 1746 1766 u32 end_timestamp; 1747 1767 ··· 1767 1789 1768 1790 case HAL_TX_FES_STATUS_START_PPDU: 1769 1791 case HAL_TX_FES_STATUS_START_PROT: { 1770 - struct hal_tx_fes_status_start_prot *tx_fes_stat_start = 1771 - (struct hal_tx_fes_status_start_prot *)tlv_data; 1792 + const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data; 1772 1793 u64 ppdu_ts; 1773 1794 1774 1795 info[0] = __le32_to_cpu(tx_fes_stat_start->info0); ··· 1782 1805 } 1783 1806 1784 1807 case HAL_TX_FES_STATUS_USER_PPDU: { 1785 - struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = 1786 - (struct hal_tx_fes_status_user_ppdu *)tlv_data; 1808 + const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data; 1787 1809 1788 1810 info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0); 1789 1811 ··· 1825 1849 break; 1826 1850 1827 1851 case HAL_RX_FRAME_BITMAP_ACK: { 1828 - struct hal_rx_frame_bitmap_ack *fbm_ack = 1829 - (struct hal_rx_frame_bitmap_ack *)tlv_data; 1852 + const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data; 1830 1853 u32 addr_32; 1831 1854 u16 addr_16; 1832 1855 ··· 1843 1868 } 1844 1869 1845 1870 case HAL_MACTX_PHY_DESC: { 1846 - struct hal_tx_phy_desc *tx_phy_desc = 1847 - (struct hal_tx_phy_desc *)tlv_data; 1871 + const struct hal_tx_phy_desc *tx_phy_desc = tlv_data; 1848 1872 1849 1873 info[0] = __le32_to_cpu(tx_phy_desc->info0); 1850 1874 info[1] = __le32_to_cpu(tx_phy_desc->info1);
+168 -65
drivers/net/wireless/ath/ath12k/dp_rx.c
··· 1697 1697 rcu_read_lock(); 1698 1698 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 1699 1699 if (!ar) { 1700 - ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id); 1700 + /* It is possible that the ar is not yet active (started). 1701 + * The above function will only look for the active pdev 1702 + * and hence %NULL return is possible. Just silently 1703 + * discard this message 1704 + */ 1701 1705 goto exit; 1702 1706 } 1703 1707 ··· 2478 2474 2479 2475 pubsta = peer ? peer->sta : NULL; 2480 2476 2477 + if (pubsta && pubsta->valid_links) { 2478 + status->link_valid = 1; 2479 + status->link_id = peer->link_id; 2480 + } 2481 + 2481 2482 spin_unlock_bh(&ab->base_lock); 2482 2483 2483 2484 ath12k_dbg(ab, ATH12K_DBG_DATA, ··· 2604 2595 struct sk_buff_head *msdu_list, 2605 2596 int ring_id) 2606 2597 { 2598 + struct ath12k_hw_group *ag = ab->ag; 2607 2599 struct ieee80211_rx_status rx_status = {0}; 2608 2600 struct ath12k_skb_rxcb *rxcb; 2609 2601 struct sk_buff *msdu; 2610 2602 struct ath12k *ar; 2611 - u8 mac_id, pdev_id; 2603 + struct ath12k_hw_link *hw_links = ag->hw_links; 2604 + struct ath12k_base *partner_ab; 2605 + u8 hw_link_id, pdev_id; 2612 2606 int ret; 2613 2607 2614 2608 if (skb_queue_empty(msdu_list)) ··· 2621 2609 2622 2610 while ((msdu = __skb_dequeue(msdu_list))) { 2623 2611 rxcb = ATH12K_SKB_RXCB(msdu); 2624 - mac_id = rxcb->mac_id; 2625 - pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); 2626 - ar = ab->pdevs[pdev_id].ar; 2627 - if (!rcu_dereference(ab->pdevs_active[pdev_id])) { 2612 + hw_link_id = rxcb->hw_link_id; 2613 + partner_ab = ath12k_ag_to_ab(ag, 2614 + hw_links[hw_link_id].device_id); 2615 + pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 2616 + hw_links[hw_link_id].pdev_idx); 2617 + ar = partner_ab->pdevs[pdev_id].ar; 2618 + if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) { 2628 2619 dev_kfree_skb_any(msdu); 2629 2620 continue; 2630 2621 } 2631 2622 2632 - if 
(test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { 2623 + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 2633 2624 dev_kfree_skb_any(msdu); 2634 2625 continue; 2635 2626 } ··· 2677 2662 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, 2678 2663 struct napi_struct *napi, int budget) 2679 2664 { 2680 - LIST_HEAD(rx_desc_used_list); 2665 + struct ath12k_hw_group *ag = ab->ag; 2666 + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; 2667 + struct ath12k_hw_link *hw_links = ag->hw_links; 2668 + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; 2681 2669 struct ath12k_rx_desc_info *desc_info; 2682 2670 struct ath12k_dp *dp = &ab->dp; 2683 2671 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; 2684 2672 struct hal_reo_dest_ring *desc; 2685 - int num_buffs_reaped = 0; 2673 + struct ath12k_base *partner_ab; 2686 2674 struct sk_buff_head msdu_list; 2687 2675 struct ath12k_skb_rxcb *rxcb; 2688 2676 int total_msdu_reaped = 0; 2677 + u8 hw_link_id, device_id; 2689 2678 struct hal_srng *srng; 2690 2679 struct sk_buff *msdu; 2691 2680 bool done = false; 2692 - int mac_id; 2693 2681 u64 desc_va; 2694 2682 2695 2683 __skb_queue_head_init(&msdu_list); 2684 + 2685 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) 2686 + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 2696 2687 2697 2688 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id]; 2698 2689 ··· 2716 2695 cookie = le32_get_bits(desc->buf_addr_info.info1, 2717 2696 BUFFER_ADDR_INFO1_SW_COOKIE); 2718 2697 2719 - mac_id = le32_get_bits(desc->info0, 2720 - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2698 + hw_link_id = le32_get_bits(desc->info0, 2699 + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 2721 2700 2722 2701 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 | 2723 2702 le32_to_cpu(desc->buf_va_lo)); 2724 2703 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va); 2725 2704 2705 + device_id = hw_links[hw_link_id].device_id; 2706 + partner_ab = ath12k_ag_to_ab(ag, device_id); 
2707 + if (unlikely(!partner_ab)) { 2708 + if (desc_info->skb) { 2709 + dev_kfree_skb_any(desc_info->skb); 2710 + desc_info->skb = NULL; 2711 + } 2712 + 2713 + continue; 2714 + } 2715 + 2726 2716 /* retry manual desc retrieval */ 2727 2717 if (!desc_info) { 2728 - desc_info = ath12k_dp_get_rx_desc(ab, cookie); 2718 + desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie); 2729 2719 if (!desc_info) { 2730 - ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2720 + ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", 2731 2721 cookie); 2732 2722 continue; 2733 2723 } ··· 2750 2718 msdu = desc_info->skb; 2751 2719 desc_info->skb = NULL; 2752 2720 2753 - list_add_tail(&desc_info->list, &rx_desc_used_list); 2721 + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 2754 2722 2755 2723 rxcb = ATH12K_SKB_RXCB(msdu); 2756 - dma_unmap_single(ab->dev, rxcb->paddr, 2724 + dma_unmap_single(partner_ab->dev, rxcb->paddr, 2757 2725 msdu->len + skb_tailroom(msdu), 2758 2726 DMA_FROM_DEVICE); 2759 2727 2760 - num_buffs_reaped++; 2728 + num_buffs_reaped[device_id]++; 2761 2729 2762 2730 push_reason = le32_get_bits(desc->info0, 2763 2731 HAL_REO_DEST_RING_INFO0_PUSH_REASON); ··· 2777 2745 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); 2778 2746 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & 2779 2747 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); 2780 - rxcb->mac_id = mac_id; 2748 + rxcb->hw_link_id = hw_link_id; 2781 2749 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, 2782 2750 mpdu_info->peer_meta_data); 2783 2751 rxcb->tid = le32_get_bits(mpdu_info->info0, ··· 2814 2782 if (!total_msdu_reaped) 2815 2783 goto exit; 2816 2784 2817 - ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, 2818 - num_buffs_reaped); 2785 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { 2786 + if (!num_buffs_reaped[device_id]) 2787 + continue; 2788 + 2789 + partner_ab = ath12k_ag_to_ab(ag, device_id); 
2790 + rx_ring = &partner_ab->dp.rx_refill_buf_ring; 2791 + 2792 + ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 2793 + &rx_desc_used_list[device_id], 2794 + num_buffs_reaped[device_id]); 2795 + } 2819 2796 2820 2797 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list, 2821 2798 ring_id); ··· 2871 2830 2872 2831 if (!peer->primary_link) { 2873 2832 spin_unlock_bh(&ab->base_lock); 2833 + crypto_free_shash(tfm); 2874 2834 return 0; 2875 2835 } 2876 2836 ··· 3484 3442 goto exit; 3485 3443 } 3486 3444 3487 - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { 3445 + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 3488 3446 dev_kfree_skb_any(msdu); 3489 3447 goto exit; 3490 3448 } ··· 3514 3472 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, 3515 3473 int budget) 3516 3474 { 3475 + struct ath12k_hw_group *ag = ab->ag; 3476 + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; 3517 3477 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC]; 3478 + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; 3518 3479 struct dp_link_desc_bank *link_desc_banks; 3519 3480 enum hal_rx_buf_return_buf_manager rbm; 3520 3481 struct hal_rx_msdu_link *link_desc_va; ··· 3525 3480 struct hal_reo_dest_ring *reo_desc; 3526 3481 struct dp_rxdma_ring *rx_ring; 3527 3482 struct dp_srng *reo_except; 3528 - LIST_HEAD(rx_desc_used_list); 3483 + struct ath12k_hw_link *hw_links = ag->hw_links; 3484 + struct ath12k_base *partner_ab; 3485 + u8 hw_link_id, device_id; 3529 3486 u32 desc_bank, num_msdus; 3530 3487 struct hal_srng *srng; 3531 - struct ath12k_dp *dp; 3532 - int mac_id; 3533 3488 struct ath12k *ar; 3534 3489 dma_addr_t paddr; 3535 3490 bool is_frag; ··· 3539 3494 tot_n_bufs_reaped = 0; 3540 3495 quota = budget; 3541 3496 3542 - dp = &ab->dp; 3543 - reo_except = &dp->reo_except_ring; 3544 - link_desc_banks = dp->link_desc_banks; 3497 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) 3498 + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3499 
+ 3500 + reo_except = &ab->dp.reo_except_ring; 3545 3501 3546 3502 srng = &ab->hal.srng_list[reo_except->ring_id]; 3547 3503 ··· 3562 3516 ret); 3563 3517 continue; 3564 3518 } 3519 + 3520 + hw_link_id = le32_get_bits(reo_desc->info0, 3521 + HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3522 + device_id = hw_links[hw_link_id].device_id; 3523 + partner_ab = ath12k_ag_to_ab(ag, device_id); 3524 + 3525 + pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3526 + hw_links[hw_link_id].pdev_idx); 3527 + ar = partner_ab->pdevs[pdev_id].ar; 3528 + 3529 + link_desc_banks = partner_ab->dp.link_desc_banks; 3565 3530 link_desc_va = link_desc_banks[desc_bank].vaddr + 3566 3531 (paddr - link_desc_banks[desc_bank].paddr); 3567 3532 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, 3568 3533 &rbm); 3569 - if (rbm != dp->idle_link_rbm && 3534 + if (rbm != partner_ab->dp.idle_link_rbm && 3570 3535 rbm != HAL_RX_BUF_RBM_SW3_BM && 3571 - rbm != ab->hw_params->hal_params->rx_buf_rbm) { 3536 + rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) { 3572 3537 ab->soc_stats.invalid_rbm++; 3573 3538 ath12k_warn(ab, "invalid return buffer manager %d\n", rbm); 3574 - ath12k_dp_rx_link_desc_return(ab, reo_desc, 3539 + ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, 3575 3540 HAL_WBM_REL_BM_ACT_REL_MSDU); 3576 3541 continue; 3577 3542 } ··· 3592 3535 3593 3536 /* Process only rx fragments with one msdu per link desc below, and drop 3594 3537 * msdu's indicated due to error reasons. 3538 + * Dynamic fragmentation not supported in Multi-link client, so drop the 3539 + * partner device buffers. 
3595 3540 */ 3596 - if (!is_frag || num_msdus > 1) { 3541 + if (!is_frag || num_msdus > 1 || 3542 + partner_ab->device_id != ab->device_id) { 3597 3543 drop = true; 3544 + 3598 3545 /* Return the link desc back to wbm idle list */ 3599 - ath12k_dp_rx_link_desc_return(ab, reo_desc, 3546 + ath12k_dp_rx_link_desc_return(partner_ab, reo_desc, 3600 3547 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE); 3601 3548 } 3602 3549 3603 3550 for (i = 0; i < num_msdus; i++) { 3604 - mac_id = le32_get_bits(reo_desc->info0, 3605 - HAL_REO_DEST_RING_INFO0_SRC_LINK_ID); 3606 - 3607 - pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); 3608 - ar = ab->pdevs[pdev_id].ar; 3609 - 3610 3551 if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, 3611 - &rx_desc_used_list, 3552 + &rx_desc_used_list[device_id], 3612 3553 drop, 3613 - msdu_cookies[i])) 3554 + msdu_cookies[i])) { 3555 + num_buffs_reaped[device_id]++; 3614 3556 tot_n_bufs_reaped++; 3557 + } 3615 3558 } 3616 3559 3617 3560 if (tot_n_bufs_reaped >= quota) { ··· 3627 3570 3628 3571 spin_unlock_bh(&srng->lock); 3629 3572 3630 - rx_ring = &dp->rx_refill_buf_ring; 3573 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { 3574 + if (!num_buffs_reaped[device_id]) 3575 + continue; 3631 3576 3632 - ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, 3633 - tot_n_bufs_reaped); 3577 + partner_ab = ath12k_ag_to_ab(ag, device_id); 3578 + rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3579 + 3580 + ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring, 3581 + &rx_desc_used_list[device_id], 3582 + num_buffs_reaped[device_id]); 3583 + } 3634 3584 3635 3585 return tot_n_bufs_reaped; 3636 3586 } ··· 3854 3790 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, 3855 3791 struct napi_struct *napi, int budget) 3856 3792 { 3857 - LIST_HEAD(rx_desc_used_list); 3793 + struct list_head rx_desc_used_list[ATH12K_MAX_SOCS]; 3794 + struct ath12k_hw_group *ag = ab->ag; 3858 3795 struct ath12k *ar; 3859 3796 struct ath12k_dp *dp = &ab->dp; 3860 3797 
struct dp_rxdma_ring *rx_ring; ··· 3865 3800 struct sk_buff_head msdu_list, scatter_msdu_list; 3866 3801 struct ath12k_skb_rxcb *rxcb; 3867 3802 void *rx_desc; 3868 - u8 mac_id; 3869 - int num_buffs_reaped = 0; 3803 + int num_buffs_reaped[ATH12K_MAX_SOCS] = {}; 3804 + int total_num_buffs_reaped = 0; 3870 3805 struct ath12k_rx_desc_info *desc_info; 3806 + struct ath12k_hw_link *hw_links = ag->hw_links; 3807 + struct ath12k_base *partner_ab; 3808 + u8 hw_link_id, device_id; 3871 3809 int ret, pdev_id; 3872 3810 struct hal_rx_desc *msdu_data; 3873 3811 3874 3812 __skb_queue_head_init(&msdu_list); 3875 3813 __skb_queue_head_init(&scatter_msdu_list); 3876 3814 3815 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) 3816 + INIT_LIST_HEAD(&rx_desc_used_list[device_id]); 3817 + 3877 3818 srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; 3878 - rx_ring = &dp->rx_refill_buf_ring; 3879 3819 spin_lock_bh(&srng->lock); 3880 3820 3881 3821 ath12k_hal_srng_access_begin(ab, srng); ··· 3916 3846 msdu = desc_info->skb; 3917 3847 desc_info->skb = NULL; 3918 3848 3919 - list_add_tail(&desc_info->list, &rx_desc_used_list); 3849 + device_id = desc_info->device_id; 3850 + partner_ab = ath12k_ag_to_ab(ag, device_id); 3851 + if (unlikely(!partner_ab)) { 3852 + dev_kfree_skb_any(msdu); 3853 + 3854 + /* In any case continuation bit is set 3855 + * in the previous record, cleanup scatter_msdu_list 3856 + */ 3857 + ath12k_dp_clean_up_skb_list(&scatter_msdu_list); 3858 + continue; 3859 + } 3860 + 3861 + list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]); 3920 3862 3921 3863 rxcb = ATH12K_SKB_RXCB(msdu); 3922 - dma_unmap_single(ab->dev, rxcb->paddr, 3864 + dma_unmap_single(partner_ab->dev, rxcb->paddr, 3923 3865 msdu->len + skb_tailroom(msdu), 3924 3866 DMA_FROM_DEVICE); 3925 3867 3926 - num_buffs_reaped++; 3868 + num_buffs_reaped[device_id]++; 3869 + total_num_buffs_reaped++; 3927 3870 3928 3871 if (!err_info.continuation) 3929 3872 budget--; ··· 3960 3877 continue; 
3961 3878 } 3962 3879 3963 - mac_id = ath12k_dp_rx_get_msdu_src_link(ab, 3964 - msdu_data); 3965 - if (mac_id >= MAX_RADIOS) { 3880 + hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab, 3881 + msdu_data); 3882 + if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) { 3966 3883 dev_kfree_skb_any(msdu); 3967 3884 3968 3885 /* In any case continuation bit is set ··· 3977 3894 3978 3895 skb_queue_walk(&scatter_msdu_list, msdu) { 3979 3896 rxcb = ATH12K_SKB_RXCB(msdu); 3980 - rxcb->mac_id = mac_id; 3897 + rxcb->hw_link_id = hw_link_id; 3981 3898 } 3982 3899 3983 3900 skb_queue_splice_tail_init(&scatter_msdu_list, ··· 3985 3902 } 3986 3903 3987 3904 rxcb = ATH12K_SKB_RXCB(msdu); 3988 - rxcb->mac_id = mac_id; 3905 + rxcb->hw_link_id = hw_link_id; 3989 3906 __skb_queue_tail(&msdu_list, msdu); 3990 3907 } 3991 3908 ··· 3998 3915 3999 3916 spin_unlock_bh(&srng->lock); 4000 3917 4001 - if (!num_buffs_reaped) 3918 + if (!total_num_buffs_reaped) 4002 3919 goto done; 4003 3920 4004 - ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list, 4005 - num_buffs_reaped); 3921 + for (device_id = 0; device_id < ATH12K_MAX_SOCS; device_id++) { 3922 + if (!num_buffs_reaped[device_id]) 3923 + continue; 3924 + 3925 + partner_ab = ath12k_ag_to_ab(ag, device_id); 3926 + rx_ring = &partner_ab->dp.rx_refill_buf_ring; 3927 + 3928 + ath12k_dp_rx_bufs_replenish(ab, rx_ring, 3929 + &rx_desc_used_list[device_id], 3930 + num_buffs_reaped[device_id]); 3931 + } 4006 3932 4007 3933 rcu_read_lock(); 4008 3934 while ((msdu = __skb_dequeue(&msdu_list))) { 4009 3935 rxcb = ATH12K_SKB_RXCB(msdu); 4010 - mac_id = rxcb->mac_id; 3936 + hw_link_id = rxcb->hw_link_id; 4011 3937 4012 - pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); 4013 - ar = ab->pdevs[pdev_id].ar; 4014 - 4015 - if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) { 3938 + device_id = hw_links[hw_link_id].device_id; 3939 + partner_ab = ath12k_ag_to_ab(ag, device_id); 3940 + if (unlikely(!partner_ab)) { 3941 + ath12k_dbg(ab, 
ATH12K_DBG_DATA, 3942 + "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n", 3943 + hw_link_id, device_id); 4016 3944 dev_kfree_skb_any(msdu); 4017 3945 continue; 4018 3946 } 4019 3947 4020 - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { 3948 + pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params, 3949 + hw_links[hw_link_id].pdev_idx); 3950 + ar = partner_ab->pdevs[pdev_id].ar; 3951 + 3952 + if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) { 3953 + dev_kfree_skb_any(msdu); 3954 + continue; 3955 + } 3956 + 3957 + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 4021 3958 dev_kfree_skb_any(msdu); 4022 3959 continue; 4023 3960 } ··· 4045 3942 } 4046 3943 rcu_read_unlock(); 4047 3944 done: 4048 - return num_buffs_reaped; 3945 + return total_num_buffs_reaped; 4049 3946 } 4050 3947 4051 3948 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
+3
drivers/net/wireless/ath/ath12k/fw.h
··· 23 23 */ 24 24 ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0, 25 25 26 + /* The firmware supports MLO capability */ 27 + ATH12K_FW_FEATURE_MLO, 28 + 26 29 /* keep last */ 27 30 ATH12K_FW_FEATURE_COUNT, 28 31 };
+1 -1
drivers/net/wireless/ath/ath12k/hal.c
··· 181 181 .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE, 182 182 }, 183 183 [HAL_TX_MONITOR_BUF] = { 184 - .start_ring_id = HAL_SRNG_SW2TXMON_BUF0, 184 + .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, 185 185 .max_rings = 1, 186 186 .entry_size = sizeof(struct hal_mon_buf_ring) >> 2, 187 187 .mac_type = ATH12K_HAL_SRNG_PMAC,
+1 -1
drivers/net/wireless/ath/ath12k/hal.h
··· 485 485 HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1, 486 486 HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC, 487 487 HAL_SRNG_RING_ID_RXDMA_DIR_BUF, 488 - HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, 489 488 HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0, 489 + HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0, 490 490 491 491 HAL_SRNG_RING_ID_PMAC1_ID_END, 492 492 };
+3 -1
drivers/net/wireless/ath/ath12k/hal_desc.h
··· 522 522 HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */, 523 523 HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */, 524 524 HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */, 525 - HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */, 525 + HAL_PHYRX_GENERIC_EHT_SIG = 471 /* 0x1d7 */, 526 526 HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */, 527 527 HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */, 528 528 HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */, ··· 579 579 580 580 #define HAL_TLV_64_HDR_TAG GENMASK(9, 1) 581 581 #define HAL_TLV_64_HDR_LEN GENMASK(21, 10) 582 + #define HAL_TLV_64_USR_ID GENMASK(31, 26) 583 + #define HAL_TLV_64_ALIGN 8 582 584 583 585 struct hal_tlv_64_hdr { 584 586 __le64 tl;
+8 -6
drivers/net/wireless/ath/ath12k/hal_rx.h
··· 19 19 bool hw_cc_done; 20 20 }; 21 21 22 - #define HAL_INVALID_PEERID 0xffff 22 + #define HAL_INVALID_PEERID 0x3fff 23 23 #define VHT_SIG_SU_NSS_MASK 0x7 24 24 25 25 #define HAL_RX_MAX_MCS 12 ··· 245 245 __le32 rsvd[2]; 246 246 } __packed; 247 247 248 + #define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID GENMASK(13, 0) 249 + #define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID GENMASK(15, 14) 248 250 #define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16) 249 251 250 252 #define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0) ··· 301 299 __le32 info4; 302 300 __le32 info5; 303 301 __le32 info6; 302 + __le32 rsvd; 304 303 } __packed; 305 304 306 305 #define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0) ··· 398 395 #define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25) 399 396 400 397 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0) 401 - #define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7) 402 398 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8) 403 399 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11) 404 400 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12) 405 - #define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10) 406 401 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13) 407 402 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15) 408 403 ··· 426 425 427 426 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0) 428 427 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11) 429 - #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19) 428 + #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14) 430 429 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15) 431 430 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19) 432 431 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20) ··· 454 453 } __packed; 455 454 456 455 #define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) 457 - #define HAL_RX_MPDU_START_INFO1_PEERID 
GENMASK(31, 16) 456 + #define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(29, 16) 457 + #define HAL_RX_MPDU_START_INFO1_DEVICE_ID GENMASK(31, 30) 458 458 #define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) 459 459 struct hal_rx_mpdu_start { 460 460 __le32 rsvd0[9]; ··· 470 468 struct hal_rx_ppdu_end_duration { 471 469 __le32 rsvd0[9]; 472 470 __le32 info0; 473 - __le32 rsvd1[4]; 471 + __le32 rsvd1[18]; 474 472 } __packed; 475 473 476 474 struct hal_rx_rxpcu_classification_overview {
+617 -175
drivers/net/wireless/ath/ath12k/mac.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 2 /* 3 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 4 + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 5 5 */ 6 6 7 7 #include <net/mac80211.h> 8 + #include <net/cfg80211.h> 8 9 #include <linux/etherdevice.h> 9 10 10 11 #include "mac.h" ··· 709 708 return ar; 710 709 711 710 for_each_ar(ah, ar, i) { 712 - if (channel->center_freq >= ar->freq_low && 713 - channel->center_freq <= ar->freq_high) 711 + if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) && 712 + channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq)) 714 713 return ar; 715 714 } 716 715 return NULL; ··· 749 748 return arvif->ar; 750 749 751 750 return NULL; 751 + } 752 + 753 + void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, 754 + struct ieee80211_chanctx_conf *conf, 755 + void *data) 756 + { 757 + struct ath12k_mac_get_any_chanctx_conf_arg *arg = data; 758 + struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf); 759 + 760 + if (ctx_ar == arg->ar) 761 + arg->chanctx_conf = conf; 752 762 } 753 763 754 764 static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar) ··· 1335 1323 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", 1336 1324 ahvif->vif->addr, arvif->vdev_id); 1337 1325 1338 - if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { 1339 - clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 1326 + if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) { 1327 + clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); 1340 1328 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n", 1341 1329 arvif->vdev_id); 1342 1330 } ··· 3145 3133 struct ath12k_vif *ahvif = arvif->ahvif; 3146 3134 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); 3147 3135 struct ath12k_wmi_vdev_up_params params = {}; 3148 - struct ath12k_wmi_peer_assoc_arg 
peer_arg = {}; 3149 3136 struct ieee80211_link_sta *link_sta; 3150 3137 u8 link_id = bss_conf->link_id; 3151 3138 struct ath12k_link_sta *arsta; ··· 3155 3144 int ret; 3156 3145 3157 3146 lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 3147 + 3148 + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = 3149 + kzalloc(sizeof(*peer_arg), GFP_KERNEL); 3150 + if (!peer_arg) 3151 + return; 3158 3152 3159 3153 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 3160 3154 "mac vdev %i link id %u assoc bssid %pM aid %d\n", ··· 3193 3177 return; 3194 3178 } 3195 3179 3196 - ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false); 3180 + ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false); 3197 3181 3198 3182 rcu_read_unlock(); 3199 3183 3200 - ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 3184 + ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); 3201 3185 if (ret) { 3202 3186 ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", 3203 3187 bss_conf->bssid, arvif->vdev_id, ret); ··· 4016 4000 ieee80211_remain_on_channel_expired(hw); 4017 4001 fallthrough; 4018 4002 case ATH12K_SCAN_STARTING: 4019 - if (!ar->scan.is_roc) { 4020 - struct cfg80211_scan_info info = { 4021 - .aborted = ((ar->scan.state == 4022 - ATH12K_SCAN_ABORTING) || 4023 - (ar->scan.state == 4024 - ATH12K_SCAN_STARTING)), 4025 - }; 4026 - 4027 - ieee80211_scan_completed(hw, &info); 4028 - } 4029 - 4030 - ar->scan.state = ATH12K_SCAN_IDLE; 4031 - ar->scan_channel = NULL; 4032 - ar->scan.roc_freq = 0; 4033 4003 cancel_delayed_work(&ar->scan.timeout); 4034 4004 complete(&ar->scan.completed); 4005 + wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk); 4035 4006 break; 4036 4007 } 4037 4008 } ··· 4059 4056 } 4060 4057 4061 4058 out: 4062 - /* Scan state should be updated upon scan completion but in case 4063 - * firmware fails to deliver the event (for whatever reason) it is 4064 - * desired to clean up scan state anyway. 
Firmware may have just 4065 - * dropped the scan completion event delivery due to transport pipe 4066 - * being overflown with data and/or it can recover on its own before 4067 - * next scan request is submitted. 4059 + /* Scan state should be updated in scan completion worker but in 4060 + * case firmware fails to deliver the event (for whatever reason) 4061 + * it is desired to clean up scan state anyway. Firmware may have 4062 + * just dropped the scan completion event delivery due to transport 4063 + * pipe being overflown with data and/or it can recover on its own 4064 + * before next scan request is submitted. 4068 4065 */ 4069 4066 spin_lock_bh(&ar->data_lock); 4070 - if (ar->scan.state != ATH12K_SCAN_IDLE) 4067 + if (ret) 4071 4068 __ath12k_mac_scan_finish(ar); 4072 4069 spin_unlock_bh(&ar->data_lock); 4073 4070 ··· 4116 4113 wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); 4117 4114 ath12k_scan_abort(ar); 4118 4115 wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); 4116 + } 4117 + 4118 + static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work) 4119 + { 4120 + struct ath12k *ar = container_of(work, struct ath12k, 4121 + scan.vdev_clean_wk); 4122 + struct ath12k_hw *ah = ar->ah; 4123 + struct ath12k_link_vif *arvif; 4124 + 4125 + lockdep_assert_wiphy(wiphy); 4126 + 4127 + arvif = ar->scan.arvif; 4128 + 4129 + /* The scan vdev has already been deleted. This can occur when a 4130 + * new scan request is made on the same vif with a different 4131 + * frequency, causing the scan arvif to move from one radio to 4132 + * another. Or, scan was abrupted and via remove interface, the 4133 + * arvif is already deleted. Alternatively, if the scan vdev is not 4134 + * being used as an actual vdev, then do not delete it. 
4135 + */ 4136 + if (!arvif || arvif->is_started) 4137 + goto work_complete; 4138 + 4139 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)", 4140 + arvif->link_id); 4141 + 4142 + ath12k_mac_remove_link_interface(ah->hw, arvif); 4143 + ath12k_mac_unassign_link_vif(arvif); 4144 + 4145 + work_complete: 4146 + spin_lock_bh(&ar->data_lock); 4147 + ar->scan.arvif = NULL; 4148 + if (!ar->scan.is_roc) { 4149 + struct cfg80211_scan_info info = { 4150 + .aborted = ((ar->scan.state == 4151 + ATH12K_SCAN_ABORTING) || 4152 + (ar->scan.state == 4153 + ATH12K_SCAN_STARTING)), 4154 + }; 4155 + 4156 + ieee80211_scan_completed(ar->ah->hw, &info); 4157 + } 4158 + 4159 + ar->scan.state = ATH12K_SCAN_IDLE; 4160 + ar->scan_channel = NULL; 4161 + ar->scan.roc_freq = 0; 4162 + spin_unlock_bh(&ar->data_lock); 4119 4163 } 4120 4164 4121 4165 static int ath12k_start_scan(struct ath12k *ar, ··· 4258 4208 link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); 4259 4209 arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); 4260 4210 4211 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan", 4212 + arvif->link_id); 4213 + 4261 4214 /* If the vif is already assigned to a specific vdev of an ar, 4262 4215 * check whether its already started, vdev which is started 4263 4216 * are not allowed to switch to a new radio. ··· 4284 4231 create = false; 4285 4232 } 4286 4233 } 4234 + 4287 4235 if (create) { 4288 4236 /* Previous arvif would've been cleared in radio switch block 4289 4237 * above, assign arvif again for create. 
··· 4305 4251 reinit_completion(&ar->scan.completed); 4306 4252 ar->scan.state = ATH12K_SCAN_STARTING; 4307 4253 ar->scan.is_roc = false; 4308 - ar->scan.vdev_id = arvif->vdev_id; 4254 + ar->scan.arvif = arvif; 4309 4255 ret = 0; 4310 4256 break; 4311 4257 case ATH12K_SCAN_STARTING: ··· 4367 4313 spin_unlock_bh(&ar->data_lock); 4368 4314 } 4369 4315 4316 + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started"); 4317 + 4370 4318 /* As per cfg80211/mac80211 scan design, it allows only one 4371 4319 * scan at a time. Hence last_scan link id is used for 4372 4320 * tracking the link id on which the scan is been done on ··· 4402 4346 lockdep_assert_wiphy(hw->wiphy); 4403 4347 4404 4348 arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 4405 - if (!arvif || !arvif->is_created) 4349 + if (!arvif || arvif->is_started) 4406 4350 return; 4407 4351 4408 4352 ar = arvif->ar; ··· 4657 4601 struct ieee80211_sta *sta, 4658 4602 struct ieee80211_key_conf *key) 4659 4603 { 4660 - struct ath12k_key_conf *key_conf = NULL, *tmp; 4604 + struct ath12k_key_conf *key_conf, *tmp; 4605 + 4606 + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { 4607 + if (key_conf->key != key) 4608 + continue; 4609 + 4610 + /* If SET key entry is already present in cache, nothing to do, 4611 + * just return 4612 + */ 4613 + if (cmd == SET_KEY) 4614 + return 0; 4615 + 4616 + /* DEL key for an old SET key which driver hasn't flushed yet. 4617 + */ 4618 + list_del(&key_conf->list); 4619 + kfree(key_conf); 4620 + } 4661 4621 4662 4622 if (cmd == SET_KEY) { 4663 4623 key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL); ··· 4687 4615 list_add_tail(&key_conf->list, 4688 4616 &cache->key_conf.list); 4689 4617 } 4690 - if (list_empty(&cache->key_conf.list)) 4691 - return 0; 4692 - list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { 4693 - if (key_conf->key == key) { 4694 - /* DEL key for an old SET key which driver hasn't flushed yet. 
4695 - */ 4696 - list_del(&key_conf->list); 4697 - kfree(key_conf); 4698 - break; 4699 - } 4700 - } 4618 + 4701 4619 return 0; 4702 4620 } 4703 4621 ··· 4852 4790 { 4853 4791 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 4854 4792 struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); 4855 - struct ath12k_wmi_peer_assoc_arg peer_arg; 4856 4793 struct ieee80211_link_sta *link_sta; 4857 4794 int ret; 4858 4795 struct cfg80211_chan_def def; ··· 4871 4810 band = def.chan->band; 4872 4811 mask = &arvif->bitrate_mask; 4873 4812 4874 - ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc); 4813 + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = 4814 + kzalloc(sizeof(*peer_arg), GFP_KERNEL); 4815 + if (!peer_arg) 4816 + return -ENOMEM; 4875 4817 4876 - if (peer_arg.peer_nss < 1) { 4818 + ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc); 4819 + 4820 + if (peer_arg->peer_nss < 1) { 4877 4821 ath12k_warn(ar->ab, 4878 - "invalid peer NSS %d\n", peer_arg.peer_nss); 4822 + "invalid peer NSS %d\n", peer_arg->peer_nss); 4879 4823 return -EINVAL; 4880 4824 } 4881 - ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 4825 + ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); 4882 4826 if (ret) { 4883 4827 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 4884 4828 arsta->addr, arvif->vdev_id, ret); ··· 4978 4912 u32 changed, bw, nss, smps, bw_prev; 4979 4913 int err, num_vht_rates; 4980 4914 const struct cfg80211_bitrate_mask *mask; 4981 - struct ath12k_wmi_peer_assoc_arg peer_arg; 4982 4915 enum wmi_phy_mode peer_phymode; 4983 4916 struct ath12k_link_sta *arsta; 4984 4917 struct ieee80211_vif *vif; ··· 5013 4948 nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask), 5014 4949 ath12k_mac_max_vht_nss(vht_mcs_mask))); 5015 4950 4951 + struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) = 4952 + kzalloc(sizeof(*peer_arg), GFP_KERNEL); 4953 + if (!peer_arg) 4954 + return; 4955 + 5016 4956 if 
(changed & IEEE80211_RC_BW_CHANGED) { 5017 - ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg); 5018 - peer_phymode = peer_arg.peer_phymode; 4957 + ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg); 4958 + peer_phymode = peer_arg->peer_phymode; 5019 4959 5020 4960 if (bw > bw_prev) { 5021 4961 /* Phymode shows maximum supported channel width, if we ··· 5122 5052 * other rates using peer_assoc command. 5123 5053 */ 5124 5054 ath12k_peer_assoc_prepare(ar, arvif, arsta, 5125 - &peer_arg, true); 5055 + peer_arg, true); 5126 5056 5127 - err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 5057 + err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg); 5128 5058 if (err) 5129 5059 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 5130 5060 arsta->addr, arvif->vdev_id, err); ··· 5675 5605 } 5676 5606 } 5677 5607 5608 + /* In the ML station scenario, activate all partner links once the 5609 + * client is transitioning to the associated state. 5610 + * 5611 + * FIXME: Ideally, this activation should occur when the client 5612 + * transitions to the authorized state. However, there are some 5613 + * issues with handling this in the firmware. Until the firmware 5614 + * can manage it properly, activate the links when the client is 5615 + * about to move to the associated state. 
5616 + */ 5617 + if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION && 5618 + old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) 5619 + ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif)); 5620 + 5678 5621 /* Handle all the other state transitions in generic way */ 5679 5622 valid_links = ahsta->links_map; 5680 5623 for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { ··· 5980 5897 } 5981 5898 5982 5899 return 0; 5900 + } 5901 + 5902 + static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw, 5903 + struct ieee80211_vif *vif, 5904 + u16 active_links) 5905 + { 5906 + /* TODO: Handle recovery case */ 5907 + 5908 + return true; 5983 5909 } 5984 5910 5985 5911 static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif, ··· 7493 7401 ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", 7494 7402 ret); 7495 7403 7496 - clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 7404 + clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); 7497 7405 7498 7406 cancel_delayed_work_sync(&ar->scan.timeout); 7407 + wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk); 7499 7408 cancel_work_sync(&ar->regd_update_work); 7500 7409 cancel_work_sync(&ar->ab->rfkill_work); 7501 7410 ··· 8126 8033 scan_arvif = wiphy_dereference(hw->wiphy, 8127 8034 ahvif->link[ATH12K_DEFAULT_SCAN_LINK]); 8128 8035 if (scan_arvif && scan_arvif->ar == ar) { 8129 - ar->scan.vdev_id = -1; 8036 + ar->scan.arvif = NULL; 8130 8037 ath12k_mac_remove_link_interface(hw, scan_arvif); 8131 8038 ath12k_mac_unassign_link_vif(scan_arvif); 8132 8039 } ··· 8157 8064 8158 8065 ab = ar->ab; 8159 8066 8160 - if (arvif->is_created) 8161 - goto flush; 8162 - 8163 8067 /* Assign arvif again here since previous radio switch block 8164 8068 * would've unassigned and cleared it. 
8165 8069 */ ··· 8166 8076 ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); 8167 8077 goto unlock; 8168 8078 } 8079 + 8080 + if (arvif->is_created) 8081 + goto flush; 8169 8082 8170 8083 if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { 8171 8084 ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", ··· 8327 8234 { 8328 8235 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 8329 8236 struct ath12k_link_vif *arvif; 8237 + struct ath12k *ar; 8330 8238 u8 link_id; 8331 8239 8332 8240 lockdep_assert_wiphy(hw->wiphy); ··· 8340 8246 arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); 8341 8247 if (!arvif || !arvif->is_created) 8342 8248 continue; 8249 + 8250 + ar = arvif->ar; 8251 + 8252 + /* Scan abortion is in progress since before this, cancel_hw_scan() 8253 + * is expected to be executed. Since link is anyways going to be removed 8254 + * now, just cancel the worker and send the scan aborted to user space 8255 + */ 8256 + if (ar->scan.arvif == arvif) { 8257 + wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk); 8258 + 8259 + spin_lock_bh(&ar->data_lock); 8260 + ar->scan.arvif = NULL; 8261 + if (!ar->scan.is_roc) { 8262 + struct cfg80211_scan_info info = { 8263 + .aborted = true, 8264 + }; 8265 + 8266 + ieee80211_scan_completed(ar->ah->hw, &info); 8267 + } 8268 + 8269 + ar->scan.state = ATH12K_SCAN_IDLE; 8270 + ar->scan_channel = NULL; 8271 + ar->scan.roc_freq = 0; 8272 + spin_unlock_bh(&ar->data_lock); 8273 + } 8343 8274 8344 8275 ath12k_mac_remove_link_interface(hw, arvif); 8345 8276 ath12k_mac_unassign_link_vif(arvif); ··· 8700 8581 struct ath12k_base *ab = ar->ab; 8701 8582 struct wmi_vdev_start_req_arg arg = {}; 8702 8583 const struct cfg80211_chan_def *chandef = &ctx->def; 8584 + struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 8703 8585 struct ath12k_vif *ahvif = arvif->ahvif; 8704 8586 struct ieee80211_bss_conf *link_conf; 8587 + unsigned int dfs_cac_time; 8705 8588 int ret; 8706 
8589 8707 - lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 8590 + lockdep_assert_wiphy(hw->wiphy); 8708 8591 8709 8592 link_conf = ath12k_mac_get_link_bss_conf(arvif); 8710 8593 if (!link_conf) { ··· 8731 8610 chandef->chan->band, 8732 8611 ahvif->vif->type); 8733 8612 arg.min_power = 0; 8734 - arg.max_power = chandef->chan->max_power * 2; 8735 - arg.max_reg_power = chandef->chan->max_reg_power * 2; 8736 - arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 8613 + arg.max_power = chandef->chan->max_power; 8614 + arg.max_reg_power = chandef->chan->max_reg_power; 8615 + arg.max_antenna_gain = chandef->chan->max_antenna_gain; 8737 8616 8738 8617 arg.pref_tx_streams = ar->num_tx_chains; 8739 8618 arg.pref_rx_streams = ar->num_rx_chains; ··· 8804 8683 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", 8805 8684 ahvif->vif->addr, arvif->vdev_id); 8806 8685 8807 - /* Enable CAC Flag in the driver by checking the channel DFS cac time, 8808 - * i.e dfs_cac_ms value which will be valid only for radar channels 8809 - * and state as NL80211_DFS_USABLE which indicates CAC needs to be 8810 - * done before channel usage. This flags is used to drop rx packets. 8686 + /* Enable CAC Running Flag in the driver by checking all sub-channel's DFS 8687 + * state as NL80211_DFS_USABLE which indicates CAC needs to be 8688 + * done before channel usage. This flag is used to drop rx packets. 8811 8689 * during CAC. 
8812 8690 */ 8813 8691 /* TODO: Set the flag for other interface types as required */ 8814 - if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && 8815 - chandef->chan->dfs_cac_ms && 8816 - chandef->chan->dfs_state == NL80211_DFS_USABLE) { 8817 - set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 8692 + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled && 8693 + cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) { 8694 + set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags); 8695 + dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef); 8696 + 8818 8697 ath12k_dbg(ab, ATH12K_DBG_MAC, 8819 - "CAC Started in chan_freq %d for vdev %d\n", 8820 - arg.freq, arg.vdev_id); 8698 + "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n", 8699 + dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id); 8821 8700 } 8822 8701 8823 8702 ret = ath12k_mac_set_txbf_conf(arvif); ··· 10073 9952 ath12k_scan_abort(ar); 10074 9953 10075 9954 cancel_delayed_work_sync(&ar->scan.timeout); 9955 + wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk); 10076 9956 10077 9957 return 0; 10078 9958 } ··· 10086 9964 { 10087 9965 struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); 10088 9966 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 10089 - struct ath12k_wmi_scan_req_arg arg; 10090 9967 struct ath12k_link_vif *arvif; 10091 9968 struct ath12k *ar; 10092 9969 u32 scan_time_msec; ··· 10096 9975 lockdep_assert_wiphy(hw->wiphy); 10097 9976 10098 9977 ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq); 10099 - if (!ar) { 10100 - ret = -EINVAL; 10101 - goto exit; 10102 - } 9978 + if (!ar) 9979 + return -EINVAL; 10103 9980 10104 9981 /* check if any of the links of ML VIF is already started on 10105 9982 * radio(ar) correpsondig to given scan frequency and use it, ··· 10116 9997 * always on the same band for the vif 10117 9998 */ 10118 9999 if (arvif->is_created) { 10119 - if (WARN_ON(!arvif->ar)) { 10120 - ret = -EINVAL; 10121 - goto exit; 10122 - } 10000 
+ if (WARN_ON(!arvif->ar)) 10001 + return -EINVAL; 10123 10002 10124 - if (ar != arvif->ar && arvif->is_started) { 10125 - ret = -EBUSY; 10126 - goto exit; 10127 - } 10003 + if (ar != arvif->ar && arvif->is_started) 10004 + return -EBUSY; 10128 10005 10129 10006 if (ar != arvif->ar) { 10130 10007 ath12k_mac_remove_link_interface(hw, arvif); ··· 10137 10022 if (ret) { 10138 10023 ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", 10139 10024 ret); 10140 - goto exit; 10025 + return ret; 10141 10026 } 10142 10027 } 10143 10028 ··· 10150 10035 reinit_completion(&ar->scan.on_channel); 10151 10036 ar->scan.state = ATH12K_SCAN_STARTING; 10152 10037 ar->scan.is_roc = true; 10153 - ar->scan.vdev_id = arvif->vdev_id; 10038 + ar->scan.arvif = arvif; 10154 10039 ar->scan.roc_freq = chan->center_freq; 10155 10040 ar->scan.roc_notify = true; 10156 10041 ret = 0; ··· 10165 10050 spin_unlock_bh(&ar->data_lock); 10166 10051 10167 10052 if (ret) 10168 - goto exit; 10053 + return ret; 10169 10054 10170 10055 scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2; 10171 10056 10172 - memset(&arg, 0, sizeof(arg)); 10173 - ath12k_wmi_start_scan_init(ar, &arg); 10174 - arg.num_chan = 1; 10175 - arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), 10176 - GFP_KERNEL); 10177 - if (!arg.chan_list) { 10178 - ret = -ENOMEM; 10179 - goto exit; 10180 - } 10057 + struct ath12k_wmi_scan_req_arg *arg __free(kfree) = 10058 + kzalloc(sizeof(*arg), GFP_KERNEL); 10059 + if (!arg) 10060 + return -ENOMEM; 10181 10061 10182 - arg.vdev_id = arvif->vdev_id; 10183 - arg.scan_id = ATH12K_SCAN_ID; 10184 - arg.chan_list[0] = chan->center_freq; 10185 - arg.dwell_time_active = scan_time_msec; 10186 - arg.dwell_time_passive = scan_time_msec; 10187 - arg.max_scan_time = scan_time_msec; 10188 - arg.scan_f_passive = 1; 10189 - arg.burst_duration = duration; 10062 + ath12k_wmi_start_scan_init(ar, arg); 10063 + arg->num_chan = 1; 10190 10064 10191 - ret = ath12k_start_scan(ar, &arg); 
10065 + u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list), 10066 + GFP_KERNEL); 10067 + if (!chan_list) 10068 + return -ENOMEM; 10069 + 10070 + arg->chan_list = chan_list; 10071 + arg->vdev_id = arvif->vdev_id; 10072 + arg->scan_id = ATH12K_SCAN_ID; 10073 + arg->chan_list[0] = chan->center_freq; 10074 + arg->dwell_time_active = scan_time_msec; 10075 + arg->dwell_time_passive = scan_time_msec; 10076 + arg->max_scan_time = scan_time_msec; 10077 + arg->scan_f_passive = 1; 10078 + arg->burst_duration = duration; 10079 + 10080 + ret = ath12k_start_scan(ar, arg); 10192 10081 if (ret) { 10193 10082 ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret); 10194 10083 10195 10084 spin_lock_bh(&ar->data_lock); 10196 10085 ar->scan.state = ATH12K_SCAN_IDLE; 10197 10086 spin_unlock_bh(&ar->data_lock); 10198 - goto free_chan_list; 10087 + return ret; 10199 10088 } 10200 10089 10201 10090 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); ··· 10208 10089 ret = ath12k_scan_stop(ar); 10209 10090 if (ret) 10210 10091 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret); 10211 - ret = -ETIMEDOUT; 10212 - goto free_chan_list; 10092 + return -ETIMEDOUT; 10213 10093 } 10214 10094 10215 10095 ieee80211_queue_delayed_work(hw, &ar->scan.timeout, 10216 10096 msecs_to_jiffies(duration)); 10217 10097 10218 - ret = 0; 10219 - 10220 - free_chan_list: 10221 - kfree(arg.chan_list); 10222 - 10223 - exit: 10224 - return ret; 10098 + return 0; 10225 10099 } 10226 10100 10227 10101 static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, ··· 10293 10181 .remain_on_channel = ath12k_mac_op_remain_on_channel, 10294 10182 .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, 10295 10183 .change_sta_links = ath12k_mac_op_change_sta_links, 10184 + .can_activate_links = ath12k_mac_op_can_activate_links, 10296 10185 #ifdef CONFIG_PM 10297 10186 .suspend = ath12k_wow_op_suspend, 10298 10187 .resume = ath12k_wow_op_resume, ··· 10316 10203 
band->channels[i].flags |= IEEE80211_CHAN_DISABLED; 10317 10204 } 10318 10205 10319 - ar->freq_low = freq_low; 10320 - ar->freq_high = freq_high; 10206 + ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low); 10207 + ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high); 10321 10208 } 10322 10209 10323 10210 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) ··· 10449 10336 { 10450 10337 struct ath12k *ar; 10451 10338 int i; 10452 - u16 interface_modes, mode; 10453 - bool is_enable = true; 10339 + u16 interface_modes, mode = 0; 10340 + bool is_enable = false; 10454 10341 10455 - mode = BIT(type); 10342 + if (type == NL80211_IFTYPE_MESH_POINT) { 10343 + if (IS_ENABLED(CONFIG_MAC80211_MESH)) 10344 + mode = BIT(type); 10345 + } else { 10346 + mode = BIT(type); 10347 + } 10348 + 10456 10349 for_each_ar(ah, ar, i) { 10457 10350 interface_modes = ar->ab->hw_params->interface_modes; 10458 - if (!(interface_modes & mode)) { 10459 - is_enable = false; 10351 + if (interface_modes & mode) { 10352 + is_enable = true; 10460 10353 break; 10461 10354 } 10462 10355 } ··· 10470 10351 return is_enable; 10471 10352 } 10472 10353 10473 - static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) 10354 + static int 10355 + ath12k_mac_setup_radio_iface_comb(struct ath12k *ar, 10356 + struct ieee80211_iface_combination *comb) 10474 10357 { 10475 - struct wiphy *wiphy = ah->hw->wiphy; 10476 - struct ieee80211_iface_combination *combinations; 10358 + u16 interface_modes = ar->ab->hw_params->interface_modes; 10477 10359 struct ieee80211_iface_limit *limits; 10478 10360 int n_limits, max_interfaces; 10479 10361 bool ap, mesh, p2p; 10480 10362 10481 - ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); 10482 - p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE); 10363 + ap = interface_modes & BIT(NL80211_IFTYPE_AP); 10364 + p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE); 10483 10365 10484 10366 mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && 10485 - 
ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); 10486 - 10487 - combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); 10488 - if (!combinations) 10489 - return -ENOMEM; 10367 + (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)); 10490 10368 10491 10369 if ((ap || mesh) && !p2p) { 10492 10370 n_limits = 2; ··· 10500 10384 } 10501 10385 10502 10386 limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); 10503 - if (!limits) { 10504 - kfree(combinations); 10387 + if (!limits) 10505 10388 return -ENOMEM; 10506 - } 10507 10389 10508 10390 limits[0].max = 1; 10509 10391 limits[0].types |= BIT(NL80211_IFTYPE_STATION); ··· 10517 10403 10518 10404 if (p2p) { 10519 10405 limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | 10520 - BIT(NL80211_IFTYPE_P2P_GO); 10406 + BIT(NL80211_IFTYPE_P2P_GO); 10521 10407 limits[2].max = 1; 10522 10408 limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); 10523 10409 } 10524 10410 10525 - combinations[0].limits = limits; 10526 - combinations[0].n_limits = n_limits; 10527 - combinations[0].max_interfaces = max_interfaces; 10528 - combinations[0].num_different_channels = 1; 10529 - combinations[0].beacon_int_infra_match = true; 10530 - combinations[0].beacon_int_min_gcd = 100; 10531 - combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 10532 - BIT(NL80211_CHAN_WIDTH_20) | 10533 - BIT(NL80211_CHAN_WIDTH_40) | 10534 - BIT(NL80211_CHAN_WIDTH_80); 10411 + comb[0].limits = limits; 10412 + comb[0].n_limits = n_limits; 10413 + comb[0].max_interfaces = max_interfaces; 10414 + comb[0].num_different_channels = 1; 10415 + comb[0].beacon_int_infra_match = true; 10416 + comb[0].beacon_int_min_gcd = 100; 10417 + comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 10418 + BIT(NL80211_CHAN_WIDTH_20) | 10419 + BIT(NL80211_CHAN_WIDTH_40) | 10420 + BIT(NL80211_CHAN_WIDTH_80); 10535 10421 10422 + return 0; 10423 + } 10424 + 10425 + static int 10426 + ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah, 10427 + 
struct wiphy_radio *radio, 10428 + u8 n_radio, 10429 + struct ieee80211_iface_combination *comb) 10430 + { 10431 + const struct ieee80211_iface_combination *iter_comb; 10432 + struct ieee80211_iface_limit *limits; 10433 + int i, j, n_limits; 10434 + bool ap, mesh, p2p; 10435 + 10436 + if (!n_radio) 10437 + return 0; 10438 + 10439 + ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); 10440 + p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE); 10441 + mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); 10442 + 10443 + if ((ap || mesh) && !p2p) 10444 + n_limits = 2; 10445 + else if (p2p) 10446 + n_limits = 3; 10447 + else 10448 + n_limits = 1; 10449 + 10450 + limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); 10451 + if (!limits) 10452 + return -ENOMEM; 10453 + 10454 + for (i = 0; i < n_radio; i++) { 10455 + iter_comb = radio[i].iface_combinations; 10456 + for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) { 10457 + limits[j].types |= iter_comb->limits[j].types; 10458 + limits[j].max += iter_comb->limits[j].max; 10459 + } 10460 + 10461 + comb->max_interfaces += iter_comb->max_interfaces; 10462 + comb->num_different_channels += iter_comb->num_different_channels; 10463 + comb->radar_detect_widths |= iter_comb->radar_detect_widths; 10464 + } 10465 + 10466 + comb->limits = limits; 10467 + comb->n_limits = n_limits; 10468 + comb->beacon_int_infra_match = true; 10469 + comb->beacon_int_min_gcd = 100; 10470 + 10471 + return 0; 10472 + } 10473 + 10474 + static 10475 + void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb) 10476 + { 10477 + kfree(iface_comb[0].limits); 10478 + kfree(iface_comb); 10479 + } 10480 + 10481 + static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah) 10482 + { 10483 + struct wiphy *wiphy = ah->hw->wiphy; 10484 + const struct wiphy_radio *radio; 10485 + int i; 10486 + 10487 + if (wiphy->n_radio > 0) { 10488 + radio = wiphy->radio; 10489 + for (i 
= 0; i < wiphy->n_radio; i++) 10490 + ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations); 10491 + 10492 + kfree(wiphy->radio); 10493 + } 10494 + 10495 + ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations); 10496 + } 10497 + 10498 + static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) 10499 + { 10500 + struct ieee80211_iface_combination *combinations, *comb; 10501 + struct wiphy *wiphy = ah->hw->wiphy; 10502 + struct wiphy_radio *radio; 10503 + struct ath12k *ar; 10504 + int i, ret; 10505 + 10506 + combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); 10507 + if (!combinations) 10508 + return -ENOMEM; 10509 + 10510 + if (ah->num_radio == 1) { 10511 + ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0], 10512 + combinations); 10513 + if (ret) { 10514 + ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d", 10515 + ret); 10516 + goto err_free_combinations; 10517 + } 10518 + 10519 + goto out; 10520 + } 10521 + 10522 + /* there are multiple radios */ 10523 + 10524 + radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL); 10525 + if (!radio) { 10526 + ret = -ENOMEM; 10527 + goto err_free_combinations; 10528 + } 10529 + 10530 + for_each_ar(ah, ar, i) { 10531 + comb = kzalloc(sizeof(*comb), GFP_KERNEL); 10532 + if (!comb) { 10533 + ret = -ENOMEM; 10534 + goto err_free_radios; 10535 + } 10536 + 10537 + ret = ath12k_mac_setup_radio_iface_comb(ar, comb); 10538 + if (ret) { 10539 + ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d", 10540 + i, ret); 10541 + kfree(comb); 10542 + goto err_free_radios; 10543 + } 10544 + 10545 + radio[i].freq_range = &ar->freq_range; 10546 + radio[i].n_freq_range = 1; 10547 + 10548 + radio[i].iface_combinations = comb; 10549 + radio[i].n_iface_combinations = 1; 10550 + } 10551 + 10552 + ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations); 10553 + if (ret) { 10554 + ath12k_hw_warn(ah, "failed to setup global interface 
combinations: %d", 10555 + ret); 10556 + goto err_free_all_radios; 10557 + } 10558 + 10559 + wiphy->radio = radio; 10560 + wiphy->n_radio = ah->num_radio; 10561 + 10562 + out: 10536 10563 wiphy->iface_combinations = combinations; 10537 10564 wiphy->n_iface_combinations = 1; 10538 10565 10539 10566 return 0; 10567 + 10568 + err_free_all_radios: 10569 + i = ah->num_radio; 10570 + 10571 + err_free_radios: 10572 + while (i--) 10573 + ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations); 10574 + 10575 + kfree(radio); 10576 + 10577 + err_free_combinations: 10578 + kfree(combinations); 10579 + 10580 + return ret; 10540 10581 } 10541 10582 10542 10583 static const u8 ath12k_if_types_ext_capa[] = { ··· 10715 10446 [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, 10716 10447 }; 10717 10448 10718 - static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { 10449 + static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { 10719 10450 { 10720 10451 .extended_capabilities = ath12k_if_types_ext_capa, 10721 10452 .extended_capabilities_mask = ath12k_if_types_ext_capa, ··· 10732 10463 .extended_capabilities_mask = ath12k_if_types_ext_capa_ap, 10733 10464 .extended_capabilities_len = 10734 10465 sizeof(ath12k_if_types_ext_capa_ap), 10466 + .eml_capabilities = 0, 10467 + .mld_capa_and_ops = 0, 10735 10468 }, 10736 10469 }; 10737 10470 ··· 10750 10479 static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) 10751 10480 { 10752 10481 struct ieee80211_hw *hw = ah->hw; 10753 - struct wiphy *wiphy = hw->wiphy; 10754 10482 struct ath12k *ar; 10755 10483 int i; 10756 10484 ··· 10763 10493 for_each_ar(ah, ar, i) 10764 10494 ath12k_mac_cleanup_unregister(ar); 10765 10495 10766 - kfree(wiphy->iface_combinations[0].limits); 10767 - kfree(wiphy->iface_combinations); 10496 + ath12k_mac_cleanup_iface_combinations(ah); 10768 10497 10769 10498 SET_IEEE80211_DEV(hw, NULL); 10770 10499 } ··· 10838 10569 if (ret) 10839 10570 goto err_cleanup_unregister; 10840 10571 10841 - ht_cap &= 
ht_cap_info; 10572 + /* 6 GHz does not support HT Cap, hence do not consider it */ 10573 + if (!ar->supports_6ghz) 10574 + ht_cap &= ht_cap_info; 10575 + 10842 10576 wiphy->max_ap_assoc_sta += ar->max_num_stations; 10843 10577 10844 10578 /* Advertise the max antenna support of all radios, driver can handle ··· 10905 10633 ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); 10906 10634 ieee80211_hw_set(hw, REPORTS_LOW_ACK); 10907 10635 10908 - if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) { 10636 + if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) { 10909 10637 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 10910 10638 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 10911 10639 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); ··· 10921 10649 * handle it when the ht capability different for each band. 10922 10650 */ 10923 10651 if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || 10924 - (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz)) 10652 + (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz)) 10925 10653 wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 10926 10654 10927 10655 wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; ··· 10942 10670 * once WIPHY_FLAG_SUPPORTS_MLO is enabled. 10943 10671 */ 10944 10672 wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; 10673 + 10674 + /* Copy over MLO related capabilities received from 10675 + * WMI_SERVICE_READY_EXT2_EVENT if single_chip_mlo_supp is set. 
10676 + */ 10677 + if (ab->ag->mlo_capable) { 10678 + ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap; 10679 + ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap; 10680 + wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; 10681 + } 10945 10682 10946 10683 hw->queues = ATH12K_HW_MAX_QUEUES; 10947 10684 wiphy->tx_queue_len = ATH12K_QUEUE_LEN; ··· 11005 10724 ret = ath12k_wow_init(ar); 11006 10725 if (ret) { 11007 10726 ath12k_warn(ar->ab, "failed to init wow: %d\n", ret); 11008 - goto err_free_if_combs; 10727 + goto err_cleanup_if_combs; 11009 10728 } 11010 10729 11011 10730 ret = ieee80211_register_hw(hw); 11012 10731 if (ret) { 11013 10732 ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); 11014 - goto err_free_if_combs; 10733 + goto err_cleanup_if_combs; 11015 10734 } 11016 10735 11017 10736 if (is_monitor_disable) ··· 11041 10760 11042 10761 ieee80211_unregister_hw(hw); 11043 10762 11044 - err_free_if_combs: 11045 - kfree(wiphy->iface_combinations[0].limits); 11046 - kfree(wiphy->iface_combinations); 10763 + err_cleanup_if_combs: 10764 + ath12k_mac_cleanup_iface_combinations(ah); 11047 10765 11048 10766 err_complete_cleanup_unregister: 11049 10767 i = ah->num_radio; ··· 11076 10796 ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; 11077 10797 ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); 11078 10798 ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); 10799 + ar->scan.arvif = NULL; 11079 10800 11080 10801 spin_lock_init(&ar->data_lock); 11081 10802 INIT_LIST_HEAD(&ar->arvifs); ··· 11091 10810 init_completion(&ar->scan.started); 11092 10811 init_completion(&ar->scan.completed); 11093 10812 init_completion(&ar->scan.on_channel); 10813 + init_completion(&ar->mlo_setup_done); 11094 10814 11095 10815 INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); 10816 + wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work); 11096 10817 INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); 11097 10818 11098 10819 
wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 11099 10820 skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 10821 + } 10822 + 10823 + static int __ath12k_mac_mlo_setup(struct ath12k *ar) 10824 + { 10825 + u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {}; 10826 + struct ath12k_base *partner_ab, *ab = ar->ab; 10827 + struct ath12k_hw_group *ag = ab->ag; 10828 + struct wmi_mlo_setup_arg mlo = {}; 10829 + struct ath12k_pdev *pdev; 10830 + unsigned long time_left; 10831 + int i, j, ret; 10832 + 10833 + lockdep_assert_held(&ag->mutex); 10834 + 10835 + reinit_completion(&ar->mlo_setup_done); 10836 + 10837 + for (i = 0; i < ag->num_devices; i++) { 10838 + partner_ab = ag->ab[i]; 10839 + 10840 + for (j = 0; j < partner_ab->num_radios; j++) { 10841 + pdev = &partner_ab->pdevs[j]; 10842 + 10843 + /* Avoid the self link */ 10844 + if (ar == pdev->ar) 10845 + continue; 10846 + 10847 + partner_link_id[num_link] = pdev->hw_link_id; 10848 + num_link++; 10849 + 10850 + ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n", 10851 + i, j, pdev->hw_link_id, num_link); 10852 + } 10853 + } 10854 + 10855 + mlo.group_id = cpu_to_le32(ag->id); 10856 + mlo.partner_link_id = partner_link_id; 10857 + mlo.num_partner_links = num_link; 10858 + ar->mlo_setup_status = 0; 10859 + 10860 + ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link); 10861 + 10862 + ret = ath12k_wmi_mlo_setup(ar, &mlo); 10863 + if (ret) { 10864 + ath12k_err(ab, "failed to send setup MLO WMI command for pdev %d: %d\n", 10865 + ar->pdev_idx, ret); 10866 + return ret; 10867 + } 10868 + 10869 + time_left = wait_for_completion_timeout(&ar->mlo_setup_done, 10870 + WMI_MLO_CMD_TIMEOUT_HZ); 10871 + 10872 + if (!time_left || ar->mlo_setup_status) 10873 + return ar->mlo_setup_status ? 
: -ETIMEDOUT; 10874 + 10875 + ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx); 10876 + 10877 + return 0; 10878 + } 10879 + 10880 + static int __ath12k_mac_mlo_teardown(struct ath12k *ar) 10881 + { 10882 + struct ath12k_base *ab = ar->ab; 10883 + int ret; 10884 + 10885 + if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) 10886 + return 0; 10887 + 10888 + ret = ath12k_wmi_mlo_teardown(ar); 10889 + if (ret) { 10890 + ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n", 10891 + ar->pdev_idx, ret); 10892 + return ret; 10893 + } 10894 + 10895 + ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx); 10896 + 10897 + return 0; 10898 + } 10899 + 10900 + int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag) 10901 + { 10902 + struct ath12k_hw *ah; 10903 + struct ath12k *ar; 10904 + int ret; 10905 + int i, j; 10906 + 10907 + for (i = 0; i < ag->num_hw; i++) { 10908 + ah = ag->ah[i]; 10909 + if (!ah) 10910 + continue; 10911 + 10912 + for_each_ar(ah, ar, j) { 10913 + ar = &ah->radio[j]; 10914 + ret = __ath12k_mac_mlo_setup(ar); 10915 + if (ret) { 10916 + ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret); 10917 + goto err_setup; 10918 + } 10919 + } 10920 + } 10921 + 10922 + return 0; 10923 + 10924 + err_setup: 10925 + for (i = i - 1; i >= 0; i--) { 10926 + ah = ag->ah[i]; 10927 + if (!ah) 10928 + continue; 10929 + 10930 + for (j = j - 1; j >= 0; j--) { 10931 + ar = &ah->radio[j]; 10932 + if (!ar) 10933 + continue; 10934 + 10935 + __ath12k_mac_mlo_teardown(ar); 10936 + } 10937 + } 10938 + 10939 + return ret; 10940 + } 10941 + 10942 + void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag) 10943 + { 10944 + struct ath12k_hw *ah; 10945 + struct ath12k *ar; 10946 + int ret, i, j; 10947 + 10948 + for (i = 0; i < ag->num_hw; i++) { 10949 + ah = ag->ah[i]; 10950 + if (!ah) 10951 + continue; 10952 + 10953 + for_each_ar(ah, ar, j) { 10954 + ar = &ah->radio[j]; 10955 + ret = __ath12k_mac_mlo_teardown(ar); 10956 + 
if (ret) { 10957 + ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret); 10958 + break; 10959 + } 10960 + } 10961 + } 11100 10962 } 11101 10963 11102 10964 int ath12k_mac_register(struct ath12k_hw_group *ag) ··· 11249 10825 int i; 11250 10826 int ret; 11251 10827 11252 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 11253 - ah = ath12k_ab_to_ah(ab, i); 10828 + for (i = 0; i < ag->num_hw; i++) { 10829 + ah = ath12k_ag_to_ah(ag, i); 11254 10830 11255 10831 ret = ath12k_mac_hw_register(ah); 11256 10832 if (ret) ··· 11263 10839 11264 10840 err: 11265 10841 for (i = i - 1; i >= 0; i--) { 11266 - ah = ath12k_ab_to_ah(ab, i); 10842 + ah = ath12k_ag_to_ah(ag, i); 11267 10843 if (!ah) 11268 10844 continue; 11269 10845 ··· 11281 10857 11282 10858 clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags); 11283 10859 11284 - for (i = ath12k_get_num_hw(ab) - 1; i >= 0; i--) { 11285 - ah = ath12k_ab_to_ah(ab, i); 10860 + for (i = ag->num_hw - 1; i >= 0; i--) { 10861 + ah = ath12k_ag_to_ah(ag, i); 11286 10862 if (!ah) 11287 10863 continue; 11288 10864 ··· 11332 10908 ar->pdev_idx = pdev_idx; 11333 10909 pdev->ar = ar; 11334 10910 10911 + ag->hw_links[ar->hw_link_id].device_id = ab->device_id; 10912 + ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx; 10913 + 11335 10914 ath12k_mac_setup(ar); 11336 10915 ath12k_dp_pdev_pre_alloc(ar); 11337 10916 } ··· 11362 10935 } 11363 10936 } 11364 10937 11365 - for (i = 0; i < ath12k_get_num_hw(ab); i++) { 11366 - ah = ath12k_ab_to_ah(ab, i); 10938 + for (i = 0; i < ag->num_hw; i++) { 10939 + ah = ath12k_ag_to_ah(ag, i); 11367 10940 if (!ah) 11368 10941 continue; 11369 10942 11370 10943 ath12k_mac_hw_destroy(ah); 11371 - ath12k_ab_set_ah(ab, i, NULL); 10944 + ath12k_ag_set_ah(ag, i, NULL); 11372 10945 } 11373 10946 } 11374 10947 ··· 11389 10962 u8 radio_per_hw; 11390 10963 11391 10964 total_radio = 0; 11392 - for (i = 0; i < ag->num_devices; i++) 11393 - total_radio += ag->ab[i]->num_radios; 10965 + for (i = 0; i < ag->num_devices; i++) { 10966 + 
ab = ag->ab[i]; 10967 + if (!ab) 10968 + continue; 10969 + 10970 + ath12k_mac_set_device_defaults(ab); 10971 + total_radio += ab->num_radios; 10972 + } 10973 + 10974 + if (!total_radio) 10975 + return -EINVAL; 10976 + 10977 + if (WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO)) 10978 + return -ENOSPC; 11394 10979 11395 10980 /* All pdev get combined and register as single wiphy based on 11396 10981 * hardware group which participate in multi-link operation else ··· 11415 10976 11416 10977 num_hw = total_radio / radio_per_hw; 11417 10978 11418 - if (WARN_ON(num_hw >= ATH12K_GROUP_MAX_RADIO)) 11419 - return -ENOSPC; 11420 - 11421 10979 ag->num_hw = 0; 11422 10980 device_id = 0; 11423 10981 mac_id = 0; 11424 10982 for (i = 0; i < num_hw; i++) { 11425 10983 for (j = 0; j < radio_per_hw; j++) { 10984 + if (device_id >= ag->num_devices || !ag->ab[device_id]) { 10985 + ret = -ENOSPC; 10986 + goto err; 10987 + } 10988 + 11426 10989 ab = ag->ab[device_id]; 11427 10990 pdev_map[j].ab = ab; 11428 10991 pdev_map[j].pdev_idx = mac_id; ··· 11436 10995 if (mac_id >= ab->num_radios) { 11437 10996 mac_id = 0; 11438 10997 device_id++; 11439 - ath12k_mac_set_device_defaults(ab); 11440 10998 } 11441 10999 } 11000 + 11001 + ab = pdev_map->ab; 11442 11002 11443 11003 ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw); 11444 11004 if (!ah) { ··· 11459 11017 11460 11018 err: 11461 11019 for (i = i - 1; i >= 0; i--) { 11462 - ah = ath12k_ab_to_ah(ab, i); 11020 + ah = ath12k_ag_to_ah(ag, i); 11463 11021 if (!ah) 11464 11022 continue; 11465 11023 11466 11024 ath12k_mac_hw_destroy(ah); 11467 - ath12k_ab_set_ah(ab, i, NULL); 11025 + ath12k_ag_set_ah(ag, i, NULL); 11468 11026 } 11469 11027 11470 11028 return ret;
+11
drivers/net/wireless/ath/ath12k/mac.h
··· 59 59 ATH12K_BW_320 = 4, 60 60 }; 61 61 62 + struct ath12k_mac_get_any_chanctx_conf_arg { 63 + struct ath12k *ar; 64 + struct ieee80211_chanctx_conf *chanctx_conf; 65 + }; 66 + 62 67 extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default; 63 68 64 69 void ath12k_mac_destroy(struct ath12k_hw_group *ag); ··· 101 96 enum wmi_sta_keepalive_method method, 102 97 u32 interval); 103 98 u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); 99 + int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag); 100 + int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag); 101 + void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag); 104 102 int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif); 103 + void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, 104 + struct ieee80211_chanctx_conf *conf, 105 + void *data); 105 106 106 107 #endif
+2
drivers/net/wireless/ath/ath12k/peer.c
··· 388 388 arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy, 389 389 ahsta->link[link_id]); 390 390 391 + peer->link_id = arsta->link_id; 392 + 391 393 /* Fill ML info into created peer */ 392 394 if (sta->mlo) { 393 395 ml_peer_id = ahsta->ml_peer_id;
+3
drivers/net/wireless/ath/ath12k/peer.h
··· 59 59 60 60 /* To ensure only certain work related to dp is done once */ 61 61 bool primary_link; 62 + 63 + /* for reference to ath12k_link_sta */ 64 + u8 link_id; 62 65 }; 63 66 64 67 struct ath12k_ml_peer {
+267 -57
drivers/net/wireless/ath/ath12k/qmi.c
··· 2016 2016 }, 2017 2017 }; 2018 2018 2019 - static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab, 2020 - struct qmi_wlanfw_host_cap_req_msg_v01 *req) 2019 + static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag) 2020 + { 2021 + struct ath12k_base *ab, *partner_ab; 2022 + int i, j, hw_id_base; 2023 + 2024 + for (i = 0; i < ag->num_devices; i++) { 2025 + hw_id_base = 0; 2026 + ab = ag->ab[i]; 2027 + 2028 + for (j = 0; j < ag->num_devices; j++) { 2029 + partner_ab = ag->ab[j]; 2030 + 2031 + if (partner_ab->wsi_info.index >= ab->wsi_info.index) 2032 + continue; 2033 + 2034 + hw_id_base += partner_ab->qmi.num_radios; 2035 + } 2036 + 2037 + ab->wsi_info.hw_link_id_base = hw_id_base; 2038 + } 2039 + 2040 + ag->hw_link_id_init_done = true; 2041 + } 2042 + 2043 + static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab, 2044 + struct qmi_wlanfw_host_cap_req_msg_v01 *req) 2021 2045 { 2022 2046 struct wlfw_host_mlo_chip_info_s_v01 *info; 2047 + struct ath12k_hw_group *ag = ab->ag; 2048 + struct ath12k_base *partner_ab; 2023 2049 u8 hw_link_id = 0; 2024 - int i; 2050 + int i, j, ret; 2025 2051 2026 - if (!ab->ag->mlo_capable) { 2052 + if (!ag->mlo_capable) { 2027 2053 ath12k_dbg(ab, ATH12K_DBG_QMI, 2028 2054 "MLO is disabled hence skip QMI MLO cap"); 2029 - return; 2055 + return 0; 2030 2056 } 2031 2057 2032 2058 if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { ··· 2061 2035 ath12k_dbg(ab, ATH12K_DBG_QMI, 2062 2036 "skip QMI MLO cap due to invalid num_radio %d\n", 2063 2037 ab->qmi.num_radios); 2064 - return; 2038 + return 0; 2039 + } 2040 + 2041 + if (ab->device_id == ATH12K_INVALID_DEVICE_ID) { 2042 + ath12k_err(ab, "failed to send MLO cap due to invalid device id\n"); 2043 + return -EINVAL; 2065 2044 } 2066 2045 2067 2046 req->mlo_capable_valid = 1; ··· 2074 2043 req->mlo_chip_id_valid = 1; 2075 2044 req->mlo_chip_id = ab->device_id; 2076 2045 req->mlo_group_id_valid = 1; 2077 - req->mlo_group_id = 0; 2046 + req->mlo_group_id = 
ag->id; 2078 2047 req->max_mlo_peer_valid = 1; 2079 2048 /* Max peer number generally won't change for the same device 2080 2049 * but needs to be synced with host driver. 2081 2050 */ 2082 2051 req->max_mlo_peer = ab->hw_params->max_mlo_peer; 2083 2052 req->mlo_num_chips_valid = 1; 2084 - req->mlo_num_chips = 1; 2053 + req->mlo_num_chips = ag->num_devices; 2085 2054 2086 - info = &req->mlo_chip_info[0]; 2087 - info->chip_id = ab->device_id; 2088 - info->num_local_links = ab->qmi.num_radios; 2055 + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d", 2056 + req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips); 2089 2057 2090 - for (i = 0; i < info->num_local_links; i++) { 2091 - info->hw_link_id[i] = hw_link_id; 2092 - info->valid_mlo_link_id[i] = 1; 2058 + mutex_lock(&ag->mutex); 2093 2059 2094 - hw_link_id++; 2060 + if (!ag->hw_link_id_init_done) 2061 + ath12k_host_cap_hw_link_id_init(ag); 2062 + 2063 + for (i = 0; i < ag->num_devices; i++) { 2064 + info = &req->mlo_chip_info[i]; 2065 + partner_ab = ag->ab[i]; 2066 + 2067 + if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) { 2068 + ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n"); 2069 + ret = -EINVAL; 2070 + goto device_cleanup; 2071 + } 2072 + 2073 + info->chip_id = partner_ab->device_id; 2074 + info->num_local_links = partner_ab->qmi.num_radios; 2075 + 2076 + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n", 2077 + info->chip_id, info->num_local_links); 2078 + 2079 + for (j = 0; j < info->num_local_links; j++) { 2080 + info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j; 2081 + info->valid_mlo_link_id[j] = 1; 2082 + 2083 + ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n", 2084 + info->hw_link_id[j]); 2085 + 2086 + hw_link_id++; 2087 + } 2095 2088 } 2096 2089 2090 + if (hw_link_id <= 0) 2091 + ag->mlo_capable = false; 2092 + 2097 2093 req->mlo_chip_info_valid = 1; 2094 + 2095 + 
mutex_unlock(&ag->mutex); 2096 + 2097 + return 0; 2098 + 2099 + device_cleanup: 2100 + for (i = i - 1; i >= 0; i--) { 2101 + info = &req->mlo_chip_info[i]; 2102 + 2103 + memset(info, 0, sizeof(*info)); 2104 + } 2105 + 2106 + req->mlo_num_chips = 0; 2107 + req->mlo_num_chips_valid = 0; 2108 + 2109 + req->max_mlo_peer = 0; 2110 + req->max_mlo_peer_valid = 0; 2111 + req->mlo_group_id = 0; 2112 + req->mlo_group_id_valid = 0; 2113 + req->mlo_chip_id = 0; 2114 + req->mlo_chip_id_valid = 0; 2115 + req->mlo_capable = 0; 2116 + req->mlo_capable_valid = 0; 2117 + 2118 + ag->mlo_capable = false; 2119 + 2120 + mutex_unlock(&ag->mutex); 2121 + 2122 + return ret; 2098 2123 } 2099 2124 2100 2125 /* clang stack usage explodes if this is inlined */ ··· 2200 2113 req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET; 2201 2114 } 2202 2115 2203 - ath12k_host_cap_parse_mlo(ab, &req); 2116 + ret = ath12k_host_cap_parse_mlo(ab, &req); 2117 + if (ret < 0) 2118 + goto out; 2204 2119 2205 2120 ret = qmi_txn_init(&ab->qmi.handle, &txn, 2206 2121 qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); ··· 2440 2351 return ret; 2441 2352 } 2442 2353 2354 + static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab, 2355 + struct target_mem_chunk *chunk, 2356 + int idx) 2357 + { 2358 + struct ath12k_hw_group *ag = ab->ag; 2359 + struct target_mem_chunk *mlo_chunk; 2360 + 2361 + lockdep_assert_held(&ag->mutex); 2362 + 2363 + if (!ag->mlo_mem.init_done || ag->num_started) 2364 + return; 2365 + 2366 + if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) { 2367 + ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx); 2368 + return; 2369 + } 2370 + 2371 + mlo_chunk = &ag->mlo_mem.chunk[idx]; 2372 + if (mlo_chunk->v.addr) { 2373 + dma_free_coherent(ab->dev, 2374 + mlo_chunk->size, 2375 + mlo_chunk->v.addr, 2376 + mlo_chunk->paddr); 2377 + mlo_chunk->v.addr = NULL; 2378 + } 2379 + 2380 + mlo_chunk->paddr = 0; 2381 + mlo_chunk->size = 0; 2382 + chunk->v.addr = NULL; 2383 + chunk->paddr = 0; 2384 + 
chunk->size = 0; 2385 + } 2386 + 2443 2387 static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab) 2444 2388 { 2445 - int i; 2389 + struct ath12k_hw_group *ag = ab->ag; 2390 + int i, mlo_idx; 2446 2391 2447 - for (i = 0; i < ab->qmi.mem_seg_count; i++) { 2392 + for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) { 2448 2393 if (!ab->qmi.target_mem[i].v.addr) 2449 2394 continue; 2450 2395 2451 - dma_free_coherent(ab->dev, 2452 - ab->qmi.target_mem[i].prev_size, 2453 - ab->qmi.target_mem[i].v.addr, 2454 - ab->qmi.target_mem[i].paddr); 2455 - ab->qmi.target_mem[i].v.addr = NULL; 2396 + if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) { 2397 + ath12k_qmi_free_mlo_mem_chunk(ab, 2398 + &ab->qmi.target_mem[i], 2399 + mlo_idx++); 2400 + } else { 2401 + dma_free_coherent(ab->dev, 2402 + ab->qmi.target_mem[i].prev_size, 2403 + ab->qmi.target_mem[i].v.addr, 2404 + ab->qmi.target_mem[i].paddr); 2405 + ab->qmi.target_mem[i].v.addr = NULL; 2406 + } 2456 2407 } 2408 + 2409 + if (!ag->num_started && ag->mlo_mem.init_done) { 2410 + ag->mlo_mem.init_done = false; 2411 + ag->mlo_mem.mlo_mem_size = 0; 2412 + } 2413 + } 2414 + 2415 + static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab, 2416 + struct target_mem_chunk *chunk) 2417 + { 2418 + /* Firmware reloads in recovery/resume. 2419 + * In such cases, no need to allocate memory for FW again. 
2420 + */ 2421 + if (chunk->v.addr) { 2422 + if (chunk->prev_type == chunk->type && 2423 + chunk->prev_size == chunk->size) 2424 + goto this_chunk_done; 2425 + 2426 + /* cannot reuse the existing chunk */ 2427 + dma_free_coherent(ab->dev, chunk->prev_size, 2428 + chunk->v.addr, chunk->paddr); 2429 + chunk->v.addr = NULL; 2430 + } 2431 + 2432 + chunk->v.addr = dma_alloc_coherent(ab->dev, 2433 + chunk->size, 2434 + &chunk->paddr, 2435 + GFP_KERNEL | __GFP_NOWARN); 2436 + if (!chunk->v.addr) { 2437 + if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) { 2438 + ab->qmi.target_mem_delayed = true; 2439 + ath12k_warn(ab, 2440 + "qmi dma allocation failed (%d B type %u), will try later with small size\n", 2441 + chunk->size, 2442 + chunk->type); 2443 + ath12k_qmi_free_target_mem_chunk(ab); 2444 + return -EAGAIN; 2445 + } 2446 + ath12k_warn(ab, "memory allocation failure for %u size: %d\n", 2447 + chunk->type, chunk->size); 2448 + return -ENOMEM; 2449 + } 2450 + chunk->prev_type = chunk->type; 2451 + chunk->prev_size = chunk->size; 2452 + this_chunk_done: 2453 + return 0; 2457 2454 } 2458 2455 2459 2456 static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab) 2460 2457 { 2461 - int i; 2462 - struct target_mem_chunk *chunk; 2458 + struct target_mem_chunk *chunk, *mlo_chunk; 2459 + struct ath12k_hw_group *ag = ab->ag; 2460 + int i, mlo_idx, ret; 2461 + int mlo_size = 0; 2462 + 2463 + mutex_lock(&ag->mutex); 2464 + 2465 + if (!ag->mlo_mem.init_done) { 2466 + memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk)); 2467 + ag->mlo_mem.init_done = true; 2468 + } 2463 2469 2464 2470 ab->qmi.target_mem_delayed = false; 2465 2471 2466 - for (i = 0; i < ab->qmi.mem_seg_count; i++) { 2472 + for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) { 2467 2473 chunk = &ab->qmi.target_mem[i]; 2468 2474 2469 2475 /* Allocate memory for the region and the functionality supported ··· 2570 2386 case M3_DUMP_REGION_TYPE: 2571 2387 case PAGEABLE_MEM_REGION_TYPE: 2572 2388 case 
CALDB_MEM_REGION_TYPE: 2573 - /* Firmware reloads in recovery/resume. 2574 - * In such cases, no need to allocate memory for FW again. 2575 - */ 2576 - if (chunk->v.addr) { 2577 - if (chunk->prev_type == chunk->type && 2578 - chunk->prev_size == chunk->size) 2579 - goto this_chunk_done; 2580 - 2581 - /* cannot reuse the existing chunk */ 2582 - dma_free_coherent(ab->dev, chunk->prev_size, 2583 - chunk->v.addr, chunk->paddr); 2584 - chunk->v.addr = NULL; 2389 + ret = ath12k_qmi_alloc_chunk(ab, chunk); 2390 + if (ret) 2391 + goto err; 2392 + break; 2393 + case MLO_GLOBAL_MEM_REGION_TYPE: 2394 + mlo_size += chunk->size; 2395 + if (ag->mlo_mem.mlo_mem_size && 2396 + mlo_size > ag->mlo_mem.mlo_mem_size) { 2397 + ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d", 2398 + mlo_size, ag->mlo_mem.mlo_mem_size); 2399 + ret = -EINVAL; 2400 + goto err; 2585 2401 } 2586 2402 2587 - chunk->v.addr = dma_alloc_coherent(ab->dev, 2588 - chunk->size, 2589 - &chunk->paddr, 2590 - GFP_KERNEL | __GFP_NOWARN); 2591 - if (!chunk->v.addr) { 2592 - if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) { 2593 - ab->qmi.target_mem_delayed = true; 2594 - ath12k_warn(ab, 2595 - "qmi dma allocation failed (%d B type %u), will try later with small size\n", 2596 - chunk->size, 2597 - chunk->type); 2598 - ath12k_qmi_free_target_mem_chunk(ab); 2599 - return 0; 2403 + mlo_chunk = &ag->mlo_mem.chunk[mlo_idx]; 2404 + if (mlo_chunk->paddr) { 2405 + if (chunk->size != mlo_chunk->size) { 2406 + ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d", 2407 + mlo_idx, chunk->size, mlo_chunk->size); 2408 + ret = -EINVAL; 2409 + goto err; 2600 2410 } 2601 - ath12k_warn(ab, "memory allocation failure for %u size: %d\n", 2602 - chunk->type, chunk->size); 2603 - return -ENOMEM; 2411 + } else { 2412 + mlo_chunk->size = chunk->size; 2413 + mlo_chunk->type = chunk->type; 2414 + ret = ath12k_qmi_alloc_chunk(ab, 
mlo_chunk); 2415 + if (ret) 2416 + goto err; 2417 + memset(mlo_chunk->v.addr, 0, mlo_chunk->size); 2604 2418 } 2605 2419 2606 - chunk->prev_type = chunk->type; 2607 - chunk->prev_size = chunk->size; 2608 - this_chunk_done: 2420 + chunk->paddr = mlo_chunk->paddr; 2421 + chunk->v.addr = mlo_chunk->v.addr; 2422 + mlo_idx++; 2423 + 2609 2424 break; 2610 2425 default: 2611 2426 ath12k_warn(ab, "memory type %u not supported\n", ··· 2614 2431 break; 2615 2432 } 2616 2433 } 2434 + 2435 + if (!ag->mlo_mem.mlo_mem_size) { 2436 + ag->mlo_mem.mlo_mem_size = mlo_size; 2437 + } else if (ag->mlo_mem.mlo_mem_size != mlo_size) { 2438 + ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d", 2439 + ag->mlo_mem.mlo_mem_size, mlo_size); 2440 + ret = -EINVAL; 2441 + goto err; 2442 + } 2443 + 2444 + mutex_unlock(&ag->mutex); 2445 + 2617 2446 return 0; 2447 + 2448 + err: 2449 + ath12k_qmi_free_target_mem_chunk(ab); 2450 + 2451 + mutex_unlock(&ag->mutex); 2452 + 2453 + /* The firmware will attempt to request memory in smaller chunks 2454 + * on the next try. However, the current caller should be notified 2455 + * that this instance of request parsing was successful. 2456 + * Therefore, return 0 only. 2457 + */ 2458 + if (ret == -EAGAIN) 2459 + ret = 0; 2460 + 2461 + return ret; 2618 2462 } 2619 2463 2620 2464 /* clang stack usage explodes if this is inlined */
+1
drivers/net/wireless/ath/ath12k/qmi.h
··· 172 172 BDF_MEM_REGION_TYPE = 0x2, 173 173 M3_DUMP_REGION_TYPE = 0x3, 174 174 CALDB_MEM_REGION_TYPE = 0x4, 175 + MLO_GLOBAL_MEM_REGION_TYPE = 0x8, 175 176 PAGEABLE_MEM_REGION_TYPE = 0x9, 176 177 }; 177 178
+214 -11
drivers/net/wireless/ath/ath12k/wmi.c
··· 4662 4662 caps->eht_cap_info_internal); 4663 4663 } 4664 4664 4665 + pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability); 4666 + pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability); 4667 + 4665 4668 return 0; 4666 4669 } 4667 4670 ··· 5361 5358 info = IEEE80211_SKB_CB(msdu); 5362 5359 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) 5363 5360 info->flags |= IEEE80211_TX_STAT_ACK; 5361 + 5362 + if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status) 5363 + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 5364 5364 5365 5365 ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); 5366 5366 ··· 6215 6209 goto exit; 6216 6210 } 6217 6211 6218 - if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) || 6212 + if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) || 6219 6213 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 6220 6214 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | 6221 6215 WMI_RX_STATUS_ERR_CRC))) { ··· 6344 6338 6345 6339 spin_lock_bh(&ar->data_lock); 6346 6340 if (ar->scan.state == state && 6347 - ar->scan.vdev_id == vdev_id) { 6341 + ar->scan.arvif && 6342 + ar->scan.arvif->vdev_id == vdev_id) { 6348 6343 spin_unlock_bh(&ar->data_lock); 6349 6344 return ar; 6350 6345 } ··· 6880 6873 } 6881 6874 ahvif = arvif->ahvif; 6882 6875 6883 - if (arvif->link_id > IEEE80211_MLD_MAX_NUM_LINKS) { 6876 + if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) { 6884 6877 ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n", 6885 6878 arvif->link_id); 6886 6879 continue; ··· 6938 6931 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) 6939 6932 { 6940 6933 const void **tb; 6934 + struct ath12k_mac_get_any_chanctx_conf_arg arg; 6941 6935 const struct ath12k_wmi_pdev_radar_event *ev; 6942 6936 struct ath12k *ar; 6943 6937 int ret; ··· 6974 6966 goto exit; 6975 6967 } 6976 6968 6969 + arg.ar = ar; 6970 + arg.chanctx_conf = NULL; 6971 + ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), 6972 + 
ath12k_mac_get_any_chanctx_conf_iter, &arg); 6973 + if (!arg.chanctx_conf) { 6974 + ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n"); 6975 + goto exit; 6976 + } 6977 + 6977 6978 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", 6978 6979 ev->pdev_id); 6979 6980 6980 6981 if (ar->dfs_block_radar_events) 6981 6982 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); 6982 6983 else 6983 - ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL); 6984 + ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf); 6984 6985 6985 6986 exit: 6986 6987 rcu_read_unlock(); ··· 7344 7327 kfree(tb); 7345 7328 } 7346 7329 7330 + static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab, 7331 + struct sk_buff *skb) 7332 + { 7333 + const struct wmi_mlo_setup_complete_event *ev; 7334 + struct ath12k *ar = NULL; 7335 + struct ath12k_pdev *pdev; 7336 + const void **tb; 7337 + int ret, i; 7338 + 7339 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7340 + if (IS_ERR(tb)) { 7341 + ret = PTR_ERR(tb); 7342 + ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n", 7343 + ret); 7344 + return; 7345 + } 7346 + 7347 + ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT]; 7348 + if (!ev) { 7349 + ath12k_warn(ab, "failed to fetch mlo setup complete event\n"); 7350 + kfree(tb); 7351 + return; 7352 + } 7353 + 7354 + if (le32_to_cpu(ev->pdev_id) > ab->num_radios) 7355 + goto skip_lookup; 7356 + 7357 + for (i = 0; i < ab->num_radios; i++) { 7358 + pdev = &ab->pdevs[i]; 7359 + if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) { 7360 + ar = pdev->ar; 7361 + break; 7362 + } 7363 + } 7364 + 7365 + skip_lookup: 7366 + if (!ar) { 7367 + ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n", 7368 + ev->pdev_id, ev->status); 7369 + goto out; 7370 + } 7371 + 7372 + ar->mlo_setup_status = le32_to_cpu(ev->status); 7373 + complete(&ar->mlo_setup_done); 7374 + 7375 + out: 7376 + kfree(tb); 7377 + } 
7378 + 7379 + static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab, 7380 + struct sk_buff *skb) 7381 + { 7382 + const struct wmi_mlo_teardown_complete_event *ev; 7383 + const void **tb; 7384 + int ret; 7385 + 7386 + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 7387 + if (IS_ERR(tb)) { 7388 + ret = PTR_ERR(tb); 7389 + ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret); 7390 + return; 7391 + } 7392 + 7393 + ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE]; 7394 + if (!ev) { 7395 + ath12k_warn(ab, "failed to fetch teardown complete event\n"); 7396 + kfree(tb); 7397 + return; 7398 + } 7399 + 7400 + kfree(tb); 7401 + } 7402 + 7347 7403 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 7348 7404 { 7349 7405 struct wmi_cmd_hdr *cmd_hdr; ··· 7521 7431 case WMI_P2P_NOA_EVENTID: 7522 7432 ath12k_wmi_p2p_noa_event(ab, skb); 7523 7433 break; 7524 - /* add Unsupported events here */ 7525 - case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 7526 - case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 7527 - case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 7528 - ath12k_dbg(ab, ATH12K_DBG_WMI, 7529 - "ignoring unsupported event 0x%x\n", id); 7530 - break; 7531 7434 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: 7532 7435 ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); 7533 7436 break; ··· 7535 7452 break; 7536 7453 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 7537 7454 ath12k_wmi_gtk_offload_status_event(ab, skb); 7455 + break; 7456 + case WMI_MLO_SETUP_COMPLETE_EVENTID: 7457 + ath12k_wmi_event_mlo_setup_complete(ab, skb); 7458 + break; 7459 + case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: 7460 + ath12k_wmi_event_teardown_complete(ab, skb); 7461 + break; 7462 + /* add Unsupported events (rare) here */ 7463 + case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 7464 + case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 7465 + case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 7466 + ath12k_dbg(ab, ATH12K_DBG_WMI, 7467 + "ignoring unsupported event 0x%x\n", id); 7468 + break; 7469 + /* add Unsupported 
events (frequent) here */ 7470 + case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: 7471 + case WMI_MGMT_RX_FW_CONSUMED_EVENTID: 7472 + case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: 7473 + /* debug might flood hence silently ignore (no-op) */ 7538 7474 break; 7539 7475 /* TODO: Add remaining events */ 7540 7476 default: ··· 8370 8268 arg->vdev_id, arg->enabled, arg->method, arg->interval); 8371 8269 8372 8270 return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); 8271 + } 8272 + 8273 + int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params) 8274 + { 8275 + struct wmi_mlo_setup_cmd *cmd; 8276 + struct ath12k_wmi_pdev *wmi = ar->wmi; 8277 + u32 *partner_links, num_links; 8278 + int i, ret, buf_len, arg_len; 8279 + struct sk_buff *skb; 8280 + struct wmi_tlv *tlv; 8281 + void *ptr; 8282 + 8283 + num_links = mlo_params->num_partner_links; 8284 + arg_len = num_links * sizeof(u32); 8285 + buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len; 8286 + 8287 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 8288 + if (!skb) 8289 + return -ENOMEM; 8290 + 8291 + cmd = (struct wmi_mlo_setup_cmd *)skb->data; 8292 + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD, 8293 + sizeof(*cmd)); 8294 + cmd->mld_group_id = mlo_params->group_id; 8295 + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 8296 + ptr = skb->data + sizeof(*cmd); 8297 + 8298 + tlv = ptr; 8299 + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 8300 + ptr += TLV_HDR_SIZE; 8301 + 8302 + partner_links = ptr; 8303 + for (i = 0; i < num_links; i++) 8304 + partner_links[i] = mlo_params->partner_link_id[i]; 8305 + 8306 + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID); 8307 + if (ret) { 8308 + ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n", 8309 + ret); 8310 + dev_kfree_skb(skb); 8311 + return ret; 8312 + } 8313 + 8314 + return 0; 8315 + } 8316 + 8317 + int ath12k_wmi_mlo_ready(struct ath12k *ar) 8318 + { 8319 + struct 
wmi_mlo_ready_cmd *cmd; 8320 + struct ath12k_wmi_pdev *wmi = ar->wmi; 8321 + struct sk_buff *skb; 8322 + int ret, len; 8323 + 8324 + len = sizeof(*cmd); 8325 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 8326 + if (!skb) 8327 + return -ENOMEM; 8328 + 8329 + cmd = (struct wmi_mlo_ready_cmd *)skb->data; 8330 + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD, 8331 + sizeof(*cmd)); 8332 + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 8333 + 8334 + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID); 8335 + if (ret) { 8336 + ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n", 8337 + ret); 8338 + dev_kfree_skb(skb); 8339 + return ret; 8340 + } 8341 + 8342 + return 0; 8343 + } 8344 + 8345 + int ath12k_wmi_mlo_teardown(struct ath12k *ar) 8346 + { 8347 + struct wmi_mlo_teardown_cmd *cmd; 8348 + struct ath12k_wmi_pdev *wmi = ar->wmi; 8349 + struct sk_buff *skb; 8350 + int ret, len; 8351 + 8352 + len = sizeof(*cmd); 8353 + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 8354 + if (!skb) 8355 + return -ENOMEM; 8356 + 8357 + cmd = (struct wmi_mlo_teardown_cmd *)skb->data; 8358 + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD, 8359 + sizeof(*cmd)); 8360 + cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 8361 + cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON; 8362 + 8363 + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID); 8364 + if (ret) { 8365 + ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n", 8366 + ret); 8367 + dev_kfree_skb(skb); 8368 + return ret; 8369 + } 8370 + 8371 + return 0; 8373 8372 }
+56
drivers/net/wireless/ath/ath12k/wmi.h
··· 285 285 WMI_GRP_TWT = 0x3e, 286 286 WMI_GRP_MOTION_DET = 0x3f, 287 287 WMI_GRP_SPATIAL_REUSE = 0x40, 288 + WMI_GRP_MLO = 0x48, 288 289 }; 289 290 290 291 #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1) ··· 666 665 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID = 667 666 WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE), 668 667 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID, 668 + WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO), 669 + WMI_MLO_SETUP_CMDID, 670 + WMI_MLO_READY_CMDID, 671 + WMI_MLO_TEARDOWN_CMDID, 669 672 }; 670 673 671 674 enum wmi_tlv_event_id { ··· 711 706 WMI_PDEV_RAP_INFO_EVENTID, 712 707 WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID, 713 708 WMI_SERVICE_READY_EXT2_EVENTID, 709 + WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID = 710 + WMI_SERVICE_READY_EXT2_EVENTID + 4, 714 711 WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV), 715 712 WMI_VDEV_STOPPED_EVENTID, 716 713 WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID, ··· 754 747 WMI_TBTTOFFSET_EXT_UPDATE_EVENTID, 755 748 WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID, 756 749 WMI_HOST_FILS_DISCOVERY_EVENTID, 750 + WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3, 757 751 WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG), 758 752 WMI_TX_ADDBA_COMPLETE_EVENTID, 759 753 WMI_BA_RSP_SSN_EVENTID, ··· 853 845 WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL), 854 846 WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL), 855 847 WMI_SAP_OFL_DEL_STA_EVENTID, 848 + WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID = 849 + WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL), 856 850 WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB), 857 851 WMI_OCB_GET_TSF_TIMER_RESP_EVENTID, 858 852 WMI_DCC_GET_STATS_RESP_EVENTID, ··· 884 874 WMI_TWT_DEL_DIALOG_EVENTID, 885 875 WMI_TWT_PAUSE_DIALOG_EVENTID, 886 876 WMI_TWT_RESUME_DIALOG_EVENTID, 877 + WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO), 878 + WMI_MLO_SETUP_COMPLETE_EVENTID, 879 + WMI_MLO_TEARDOWN_COMPLETE_EVENTID, 887 880 }; 
888 881 889 882 enum wmi_tlv_pdev_param { ··· 2716 2703 __le32 eht_cap_info_internal; 2717 2704 __le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE]; 2718 2705 __le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE]; 2706 + __le32 eml_capability; 2707 + __le32 mld_capability; 2719 2708 } __packed; 2720 2709 2721 2710 /* 2 word representation of MAC addr */ ··· 4945 4930 4946 4931 #define MAX_RADIOS 2 4947 4932 4933 + #define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ) 4948 4934 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) 4949 4935 #define WMI_SEND_TIMEOUT_HZ (3 * HZ) 4950 4936 ··· 5038 5022 } __packed; 5039 5023 5040 5024 struct wmi_twt_disable_event { 5025 + __le32 pdev_id; 5026 + __le32 status; 5027 + } __packed; 5028 + 5029 + struct wmi_mlo_setup_cmd { 5030 + __le32 tlv_header; 5031 + __le32 mld_group_id; 5032 + __le32 pdev_id; 5033 + } __packed; 5034 + 5035 + struct wmi_mlo_setup_arg { 5036 + __le32 group_id; 5037 + u8 num_partner_links; 5038 + u8 *partner_link_id; 5039 + }; 5040 + 5041 + struct wmi_mlo_ready_cmd { 5042 + __le32 tlv_header; 5043 + __le32 pdev_id; 5044 + } __packed; 5045 + 5046 + enum wmi_mlo_tear_down_reason_code_type { 5047 + WMI_MLO_TEARDOWN_SSR_REASON, 5048 + }; 5049 + 5050 + struct wmi_mlo_teardown_cmd { 5051 + __le32 tlv_header; 5052 + __le32 pdev_id; 5053 + __le32 reason_code; 5054 + } __packed; 5055 + 5056 + struct wmi_mlo_setup_complete_event { 5057 + __le32 pdev_id; 5058 + __le32 status; 5059 + } __packed; 5060 + 5061 + struct wmi_mlo_teardown_complete_event { 5041 5062 __le32 pdev_id; 5042 5063 __le32 status; 5043 5064 } __packed; ··· 5804 5751 struct ath12k_link_vif *arvif); 5805 5752 int ath12k_wmi_sta_keepalive(struct ath12k *ar, 5806 5753 const struct wmi_sta_keepalive_arg *arg); 5754 + int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params); 5755 + int ath12k_wmi_mlo_ready(struct ath12k *ar); 5756 + int ath12k_wmi_mlo_teardown(struct ath12k *ar); 5807 5757 5808 5758 #endif
+4 -4
drivers/net/wireless/ath/ath9k/ath9k.h
··· 338 338 339 339 struct ath_beacon_config beacon; 340 340 struct ath9k_hw_cal_data caldata; 341 - struct timespec64 tsf_ts; 341 + ktime_t tsf_ts; 342 342 u64 tsf_val; 343 343 u32 last_beacon; 344 344 ··· 592 592 int ath_tx_init(struct ath_softc *sc, int nbufs); 593 593 int ath_txq_update(struct ath_softc *sc, int qnum, 594 594 struct ath9k_tx_queue_info *q); 595 - u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 596 - int width, int half_gi, bool shortPreamble); 595 + u32 ath_pkt_duration(u8 rix, int pktlen, int width, 596 + int half_gi, bool shortPreamble); 597 597 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop); 598 598 void ath_assign_seq(struct ath_common *common, struct sk_buff *skb); 599 599 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, ··· 1011 1011 struct ath_offchannel offchannel; 1012 1012 struct ath_chanctx *next_chan; 1013 1013 struct completion go_beacon; 1014 - struct timespec64 last_event_time; 1014 + ktime_t last_event_time; 1015 1015 #endif 1016 1016 1017 1017 unsigned long driver_data;
+1 -1
drivers/net/wireless/ath/ath9k/beacon.c
··· 293 293 /* Modify TSF as required and update the HW. */ 294 294 avp->chanctx->tsf_val += tsfadjust; 295 295 if (sc->cur_chan == avp->chanctx) { 296 - offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL); 296 + offset = ath9k_hw_get_tsf_offset(avp->chanctx->tsf_ts, 0); 297 297 ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset); 298 298 } 299 299
+10 -14
drivers/net/wireless/ath/ath9k/calib.c
··· 16 16 17 17 #include "hw.h" 18 18 #include "hw-ops.h" 19 + #include <linux/sort.h> 19 20 #include <linux/export.h> 20 21 21 22 /* Common calibration code */ 22 23 24 + static int rcmp_i16(const void *x, const void *y) 25 + { 26 + /* Sort in reverse order. */ 27 + return *(int16_t *)y - *(int16_t *)x; 28 + } 23 29 24 30 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 25 31 { 26 - int16_t nfval; 27 - int16_t sort[ATH9K_NF_CAL_HIST_MAX]; 28 - int i, j; 32 + int16_t nfcal[ATH9K_NF_CAL_HIST_MAX]; 29 33 30 - for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++) 31 - sort[i] = nfCalBuffer[i]; 34 + memcpy(nfcal, nfCalBuffer, sizeof(nfcal)); 35 + sort(nfcal, ATH9K_NF_CAL_HIST_MAX, sizeof(int16_t), rcmp_i16, NULL); 32 36 33 - for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) { 34 - for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) { 35 - if (sort[j] > sort[j - 1]) 36 - swap(sort[j], sort[j - 1]); 37 - } 38 - } 39 - nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]; 40 - 41 - return nfval; 37 + return nfcal[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]; 42 38 } 43 39 44 40 static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
+12 -17
drivers/net/wireless/ath/ath9k/channel.c
··· 232 232 233 233 static u32 chanctx_event_delta(struct ath_softc *sc) 234 234 { 235 - u64 ms; 236 - struct timespec64 ts, *old; 235 + ktime_t ts = ktime_get_raw(); 236 + s64 ms = ktime_ms_delta(ts, sc->last_event_time); 237 237 238 - ktime_get_raw_ts64(&ts); 239 - old = &sc->last_event_time; 240 - ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000; 241 - ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000; 242 238 sc->last_event_time = ts; 243 - 244 - return (u32)ms; 239 + return ms; 245 240 } 246 241 247 242 void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) ··· 329 334 static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) 330 335 { 331 336 struct ath_chanctx *prev, *cur; 332 - struct timespec64 ts; 333 337 u32 cur_tsf, prev_tsf, beacon_int; 338 + ktime_t ts; 334 339 s32 offset; 335 340 336 341 beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); ··· 341 346 if (!prev->switch_after_beacon) 342 347 return; 343 348 344 - ktime_get_raw_ts64(&ts); 349 + ts = ktime_get_raw(); 345 350 cur_tsf = (u32) cur->tsf_val + 346 - ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts); 351 + ath9k_hw_get_tsf_offset(cur->tsf_ts, ts); 347 352 348 353 prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf; 349 - prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts); 354 + prev_tsf -= ath9k_hw_get_tsf_offset(prev->tsf_ts, ts); 350 355 351 356 /* Adjust the TSF time of the AP chanctx to keep its beacons 352 357 * at half beacon interval offset relative to the STA chanctx. 
··· 686 691 */ 687 692 tsf_time = sc->sched.switch_start_time; 688 693 tsf_time -= (u32) sc->cur_chan->tsf_val + 689 - ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); 694 + ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0); 690 695 tsf_time += ath9k_hw_gettsf32(ah); 691 696 692 697 sc->sched.beacon_adjust = false; ··· 1225 1230 { 1226 1231 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1227 1232 struct ath_chanctx *old_ctx; 1228 - struct timespec64 ts; 1229 1233 bool measure_time = false; 1230 1234 bool send_ps = false; 1231 1235 bool queues_stopped = false; 1236 + ktime_t ts; 1232 1237 1233 1238 spin_lock_bh(&sc->chan_lock); 1234 1239 if (!sc->next_chan) { ··· 1255 1260 spin_unlock_bh(&sc->chan_lock); 1256 1261 1257 1262 if (sc->next_chan == &sc->offchannel.chan) { 1258 - ktime_get_raw_ts64(&ts); 1263 + ts = ktime_get_raw(); 1259 1264 measure_time = true; 1260 1265 } 1261 1266 ··· 1272 1277 spin_lock_bh(&sc->chan_lock); 1273 1278 1274 1279 if (sc->cur_chan != &sc->offchannel.chan) { 1275 - ktime_get_raw_ts64(&sc->cur_chan->tsf_ts); 1280 + sc->cur_chan->tsf_ts = ktime_get_raw(); 1276 1281 sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); 1277 1282 } 1278 1283 } ··· 1298 1303 ath_set_channel(sc); 1299 1304 if (measure_time) 1300 1305 sc->sched.channel_switch_time = 1301 - ath9k_hw_get_tsf_offset(&ts, NULL); 1306 + ath9k_hw_get_tsf_offset(ts, 0); 1302 1307 /* 1303 1308 * A reset will ensure that all queues are woken up, 1304 1309 * so there is no need to awaken them again.
+8 -17
drivers/net/wireless/ath/ath9k/hw.c
··· 1847 1847 return -EINVAL; 1848 1848 } 1849 1849 1850 - u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur) 1850 + u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur) 1851 1851 { 1852 - struct timespec64 ts; 1853 - s64 usec; 1854 - 1855 - if (!cur) { 1856 - ktime_get_raw_ts64(&ts); 1857 - cur = &ts; 1858 - } 1859 - 1860 - usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000; 1861 - usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000; 1862 - 1863 - return (u32) usec; 1852 + if (cur == 0) 1853 + cur = ktime_get_raw(); 1854 + return ktime_us_delta(cur, last); 1864 1855 } 1865 1856 EXPORT_SYMBOL(ath9k_hw_get_tsf_offset); 1866 1857 ··· 1862 1871 u32 saveLedState; 1863 1872 u32 saveDefAntenna; 1864 1873 u32 macStaId1; 1865 - struct timespec64 tsf_ts; 1874 + ktime_t tsf_ts; 1866 1875 u32 tsf_offset; 1867 1876 u64 tsf = 0; 1868 1877 int r; ··· 1908 1917 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; 1909 1918 1910 1919 /* Save TSF before chip reset, a cold reset clears it */ 1911 - ktime_get_raw_ts64(&tsf_ts); 1920 + tsf_ts = ktime_get_raw(); 1912 1921 tsf = ath9k_hw_gettsf64(ah); 1913 1922 1914 1923 saveLedState = REG_READ(ah, AR_CFG_LED) & ··· 1942 1951 } 1943 1952 1944 1953 /* Restore TSF */ 1945 - tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); 1954 + tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0); 1946 1955 ath9k_hw_settsf64(ah, tsf + tsf_offset); 1947 1956 1948 1957 if (AR_SREV_9280_20_OR_LATER(ah)) ··· 1966 1975 * value after the initvals have been applied. 1967 1976 */ 1968 1977 if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { 1969 - tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); 1978 + tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0); 1970 1979 ath9k_hw_settsf64(ah, tsf + tsf_offset); 1971 1980 } 1972 1981
+1 -1
drivers/net/wireless/ath/ath9k/hw.h
··· 1066 1066 u64 ath9k_hw_gettsf64(struct ath_hw *ah); 1067 1067 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 1068 1068 void ath9k_hw_reset_tsf(struct ath_hw *ah); 1069 - u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur); 1069 + u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur); 1070 1070 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set); 1071 1071 void ath9k_hw_init_global_settings(struct ath_hw *ah); 1072 1072 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
+4 -5
drivers/net/wireless/ath/ath9k/main.c
··· 249 249 if (sc->cur_chan->tsf_val) { 250 250 u32 offset; 251 251 252 - offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, 253 - NULL); 252 + offset = ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0); 254 253 ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset); 255 254 } 256 255 ··· 1955 1956 tsf = ath9k_hw_gettsf64(sc->sc_ah); 1956 1957 } else { 1957 1958 tsf = sc->cur_chan->tsf_val + 1958 - ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); 1959 + ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0); 1959 1960 } 1960 1961 tsf += le64_to_cpu(avp->tsf_adjust); 1961 1962 ath9k_ps_restore(sc); ··· 1974 1975 mutex_lock(&sc->mutex); 1975 1976 ath9k_ps_wakeup(sc); 1976 1977 tsf -= le64_to_cpu(avp->tsf_adjust); 1977 - ktime_get_raw_ts64(&avp->chanctx->tsf_ts); 1978 + avp->chanctx->tsf_ts = ktime_get_raw(); 1978 1979 if (sc->cur_chan == avp->chanctx) 1979 1980 ath9k_hw_settsf64(sc->sc_ah, tsf); 1980 1981 avp->chanctx->tsf_val = tsf; ··· 1990 1991 mutex_lock(&sc->mutex); 1991 1992 1992 1993 ath9k_ps_wakeup(sc); 1993 - ktime_get_raw_ts64(&avp->chanctx->tsf_ts); 1994 + avp->chanctx->tsf_ts = ktime_get_raw(); 1994 1995 if (sc->cur_chan == avp->chanctx) 1995 1996 ath9k_hw_reset_tsf(sc->sc_ah); 1996 1997 avp->chanctx->tsf_val = 0;
+2 -2
drivers/net/wireless/ath/ath9k/recv.c
··· 1042 1042 if (!!(rxs->encoding == RX_ENC_HT)) { 1043 1043 /* MCS rates */ 1044 1044 1045 - airtime += ath_pkt_duration(sc, rxs->rate_idx, len, 1046 - is_40, is_sgi, is_sp); 1045 + airtime += ath_pkt_duration(rxs->rate_idx, len, 1046 + is_40, is_sgi, is_sp); 1047 1047 } else { 1048 1048 1049 1049 phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
+24 -28
drivers/net/wireless/ath/ath9k/xmit.c
··· 67 67 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, 68 68 struct ath_tx_status *ts, int nframes, int nbad, 69 69 int txok); 70 - static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 71 - struct ath_buf *bf); 70 + static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf); 72 71 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 73 72 struct ath_txq *txq, 74 73 struct ath_atx_tid *tid, ··· 207 208 ARRAY_SIZE(bf->rates)); 208 209 } 209 210 210 - static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq, 211 - struct sk_buff *skb) 211 + static void ath_txq_skb_done(struct ath_softc *sc, struct sk_buff *skb) 212 212 { 213 213 struct ath_frame_info *fi = get_frame_info(skb); 214 + struct ath_txq *txq; 214 215 int q = fi->txq; 215 216 216 217 if (q < 0) ··· 223 224 } 224 225 225 226 static struct ath_atx_tid * 226 - ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) 227 + ath_get_skb_tid(struct ath_node *an, struct sk_buff *skb) 227 228 { 228 229 u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 229 230 return ATH_AN_2_TID(an, tidno); ··· 293 294 fi = get_frame_info(skb); 294 295 bf = fi->bf; 295 296 if (!bf) { 296 - ath_txq_skb_done(sc, txq, skb); 297 + ath_txq_skb_done(sc, skb); 297 298 ieee80211_free_txskb(sc->hw, skb); 298 299 continue; 299 300 } 300 301 301 302 if (fi->baw_tracked) { 302 - ath_tx_update_baw(sc, tid, bf); 303 + ath_tx_update_baw(tid, bf); 303 304 sendbar = true; 304 305 } 305 306 ··· 314 315 } 315 316 } 316 317 317 - static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 318 - struct ath_buf *bf) 318 + static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf) 319 319 { 320 320 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 321 321 u16 seqno = bf->bf_state.seqno; ··· 336 338 } 337 339 } 338 340 339 - static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 
340 - struct ath_buf *bf) 341 + static void ath_tx_addto_baw(struct ath_atx_tid *tid, struct ath_buf *bf) 341 342 { 342 343 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 343 344 u16 seqno = bf->bf_state.seqno; ··· 449 452 return tbf; 450 453 } 451 454 452 - static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, 453 - struct ath_tx_status *ts, int txok, 454 - int *nframes, int *nbad) 455 + static void ath_tx_count_frames(struct ath_buf *bf, struct ath_tx_status *ts, 456 + int txok, int *nframes, int *nbad) 455 457 { 456 458 u16 seq_st = 0; 457 459 u32 ba[WME_BA_BMP_SIZE >> 5]; ··· 564 568 565 569 __skb_queue_head_init(&bf_pending); 566 570 567 - ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 571 + ath_tx_count_frames(bf, ts, txok, &nframes, &nbad); 568 572 while (bf) { 569 573 u16 seqno = bf->bf_state.seqno; 570 574 ··· 617 621 * complete the acked-ones/xretried ones; update 618 622 * block-ack window 619 623 */ 620 - ath_tx_update_baw(sc, tid, bf); 624 + ath_tx_update_baw(tid, bf); 621 625 622 626 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 623 627 memcpy(tx_info->control.rates, rates, sizeof(rates)); ··· 647 651 * run out of tx buf. 
648 652 */ 649 653 if (!tbf) { 650 - ath_tx_update_baw(sc, tid, bf); 654 + ath_tx_update_baw(tid, bf); 651 655 652 656 ath_tx_complete_buf(sc, bf, txq, 653 657 &bf_head, NULL, ts, ··· 748 752 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2); 749 753 if (sta) { 750 754 struct ath_node *an = (struct ath_node *)sta->drv_priv; 751 - tid = ath_get_skb_tid(sc, an, bf->bf_mpdu); 755 + tid = ath_get_skb_tid(an, bf->bf_mpdu); 752 756 ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno); 753 757 if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) 754 758 tid->clear_ps_filter = true; ··· 958 962 bf->bf_state.stale = false; 959 963 960 964 if (!bf) { 961 - ath_txq_skb_done(sc, txq, skb); 965 + ath_txq_skb_done(sc, skb); 962 966 ieee80211_free_txskb(sc->hw, skb); 963 967 continue; 964 968 } ··· 1008 1012 1009 1013 INIT_LIST_HEAD(&bf_head); 1010 1014 list_add(&bf->list, &bf_head); 1011 - ath_tx_update_baw(sc, tid, bf); 1015 + ath_tx_update_baw(tid, bf); 1012 1016 ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0); 1013 1017 continue; 1014 1018 } 1015 1019 1016 1020 if (bf_isampdu(bf)) 1017 - ath_tx_addto_baw(sc, tid, bf); 1021 + ath_tx_addto_baw(tid, bf); 1018 1022 1019 1023 break; 1020 1024 } ··· 1110 1114 * width - 0 for 20 MHz, 1 for 40 MHz 1111 1115 * half_gi - to use 4us v/s 3.6 us for symbol time 1112 1116 */ 1113 - u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, 1114 - int width, int half_gi, bool shortPreamble) 1117 + u32 ath_pkt_duration(u8 rix, int pktlen, int width, 1118 + int half_gi, bool shortPreamble) 1115 1119 { 1116 1120 u32 nbits, nsymbits, duration, nsymbols; 1117 1121 int streams; ··· 1323 1327 info->rates[i].Rate = rix | 0x80; 1324 1328 info->rates[i].ChSel = ath_txchainmask_reduction(sc, 1325 1329 ah->txchainmask, info->rates[i].Rate); 1326 - info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, 1330 + info->rates[i].PktDuration = ath_pkt_duration(rix, len, 1327 1331 is_40, is_sgi, is_sp); 1328 1332 if (rix < 8 
&& (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1329 1333 info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; ··· 2118 2122 bf->bf_state.bf_type = 0; 2119 2123 if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { 2120 2124 bf->bf_state.bf_type = BUF_AMPDU; 2121 - ath_tx_addto_baw(sc, tid, bf); 2125 + ath_tx_addto_baw(tid, bf); 2122 2126 } 2123 2127 2124 2128 bf->bf_next = NULL; ··· 2364 2368 2365 2369 if (txctl->sta) { 2366 2370 an = (struct ath_node *) sta->drv_priv; 2367 - tid = ath_get_skb_tid(sc, an, skb); 2371 + tid = ath_get_skb_tid(an, skb); 2368 2372 } 2369 2373 2370 2374 ath_txq_lock(sc, txq); ··· 2375 2379 2376 2380 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 2377 2381 if (!bf) { 2378 - ath_txq_skb_done(sc, txq, skb); 2382 + ath_txq_skb_done(sc, skb); 2379 2383 if (txctl->paprd) 2380 2384 dev_kfree_skb_any(skb); 2381 2385 else ··· 2510 2514 } 2511 2515 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 2512 2516 2513 - ath_txq_skb_done(sc, txq, skb); 2517 + ath_txq_skb_done(sc, skb); 2514 2518 tx_info->status.status_driver_data[0] = sta; 2515 2519 __skb_queue_tail(&txq->complete_q, skb); 2516 2520 }