Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge ath-next from ath.git

ath.git patches for 4.6. Major changes:

ath10k

* dt: add bindings for ipq4019 wifi block
* start adding support for qca4019 chip

ath9k

* add device ID for Toshiba WLM-20U2/GN-1080
* allow more than one interface on DFS channels

+2616 -477
+84 -5
Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
··· 1 1 * Qualcomm Atheros ath10k wireless devices 2 2 3 - For ath10k devices the calibration data can be provided through Device 4 - Tree. The node is a child node of the PCI controller. 5 - 6 3 Required properties: 7 - -compatible : Should be "qcom,ath10k" 4 + - compatible: Should be one of the following: 5 + * "qcom,ath10k" 6 + * "qcom,ipq4019-wifi" 7 + 8 + PCI based devices use the compatible string "qcom,ath10k" and take only 9 + calibration data via "qcom,ath10k-calibration-data". The rest of the properties 10 + are not applicable for PCI based devices. 11 + 12 + AHB based devices (i.e. ipq4019) use the compatible string "qcom,ipq4019-wifi" 13 + and also use most of the properties defined in this doc. 8 14 9 15 Optional properties: 16 + - reg: Address and length of the register set for the device. 17 + - resets: Must contain an entry for each entry in reset-names. 18 + See ../reset/reset.txt for details. 19 + - reset-names: Must include the list of following reset names, 20 + "wifi_cpu_init" 21 + "wifi_radio_srif" 22 + "wifi_radio_warm" 23 + "wifi_radio_cold" 24 + "wifi_core_warm" 25 + "wifi_core_cold" 26 + - clocks: List of clock specifiers, must contain an entry for each required 27 + entry in clock-names. 28 + - clock-names: Should contain the clock names "wifi_wcss_cmd", "wifi_wcss_ref", 29 + "wifi_wcss_rtc". 30 + - interrupts: List of interrupt lines. Must contain an entry 31 + for each entry in the interrupt-names property. 32 + - interrupt-names: Must include the entries for MSI interrupt 33 + names ("msi0" to "msi15") and legacy interrupt 34 + name ("legacy"). 35 + - qcom,msi_addr: MSI interrupt address. 36 + - qcom,msi_base: Base value to add before writing MSI data into 37 + MSI address register. 
10 38 - qcom,ath10k-calibration-data : calibration data as an array, the 11 39 length can vary between hw versions 12 40 41 + Example (to supply the calibration data alone): 13 42 14 - Example: 43 + In this example, the node is defined as child node of the PCI controller. 15 44 16 45 pci { 17 46 pcie@0 { ··· 56 27 qcom,ath10k-calibration-data = [ 01 02 03 ... ]; 57 28 }; 58 29 }; 30 + }; 31 + 32 + Example (to supply ipq4019 SoC wifi block details): 33 + 34 + wifi0: wifi@a000000 { 35 + compatible = "qcom,ipq4019-wifi"; 36 + reg = <0xa000000 0x200000>; 37 + resets = <&gcc WIFI0_CPU_INIT_RESET>, 38 + <&gcc WIFI0_RADIO_SRIF_RESET>, 39 + <&gcc WIFI0_RADIO_WARM_RESET>, 40 + <&gcc WIFI0_RADIO_COLD_RESET>, 41 + <&gcc WIFI0_CORE_WARM_RESET>, 42 + <&gcc WIFI0_CORE_COLD_RESET>; 43 + reset-names = "wifi_cpu_init", 44 + "wifi_radio_srif", 45 + "wifi_radio_warm", 46 + "wifi_radio_cold", 47 + "wifi_core_warm", 48 + "wifi_core_cold"; 49 + clocks = <&gcc GCC_WCSS2G_CLK>, 50 + <&gcc GCC_WCSS2G_REF_CLK>, 51 + <&gcc GCC_WCSS2G_RTC_CLK>; 52 + clock-names = "wifi_wcss_cmd", 53 + "wifi_wcss_ref", 54 + "wifi_wcss_rtc"; 55 + interrupts = <0 0x20 0x1>, 56 + <0 0x21 0x1>, 57 + <0 0x22 0x1>, 58 + <0 0x23 0x1>, 59 + <0 0x24 0x1>, 60 + <0 0x25 0x1>, 61 + <0 0x26 0x1>, 62 + <0 0x27 0x1>, 63 + <0 0x28 0x1>, 64 + <0 0x29 0x1>, 65 + <0 0x2a 0x1>, 66 + <0 0x2b 0x1>, 67 + <0 0x2c 0x1>, 68 + <0 0x2d 0x1>, 69 + <0 0x2e 0x1>, 70 + <0 0x2f 0x1>, 71 + <0 0xa8 0x0>; 72 + interrupt-names = "msi0", "msi1", "msi2", "msi3", 73 + "msi4", "msi5", "msi6", "msi7", 74 + "msi8", "msi9", "msi10", "msi11", 75 + "msi12", "msi13", "msi14", "msi15", 76 + "legacy"; 77 + qcom,msi_addr = <0x0b006040>; 78 + qcom,msi_base = <0x40>; 79 + qcom,ath10k-calibration-data = [ 01 02 03 ... ]; 59 80 };
+6
drivers/net/wireless/ath/ath10k/Kconfig
··· 15 15 ---help--- 16 16 This module adds support for PCIE bus 17 17 18 + config ATH10K_AHB 19 + bool "Atheros ath10k AHB support" 20 + depends on ATH10K_PCI && OF && RESET_CONTROLLER 21 + ---help--- 22 + This module adds support for AHB bus 23 + 18 24 config ATH10K_DEBUG 19 25 bool "Atheros ath10k debugging" 20 26 depends on ATH10K
+2
drivers/net/wireless/ath/ath10k/Makefile
··· 25 25 ath10k_pci-y += pci.o \ 26 26 ce.o 27 27 28 + ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o 29 + 28 30 # for tracing framework to find trace.h 29 31 CFLAGS_trace.o := -I$(src)
+933
drivers/net/wireless/ath/ath10k/ahb.c
··· 1 + /* 2 + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. 3 + * Copyright (c) 2015 The Linux Foundation. All rights reserved. 4 + * 5 + * Permission to use, copy, modify, and/or distribute this software for any 6 + * purpose with or without fee is hereby granted, provided that the above 7 + * copyright notice and this permission notice appear in all copies. 8 + * 9 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 + */ 17 + #include <linux/module.h> 18 + #include <linux/of.h> 19 + #include <linux/of_device.h> 20 + #include <linux/clk.h> 21 + #include <linux/reset.h> 22 + #include "core.h" 23 + #include "debug.h" 24 + #include "pci.h" 25 + #include "ahb.h" 26 + 27 + static const struct of_device_id ath10k_ahb_of_match[] = { 28 + /* TODO: enable this entry once everything in place. 
29 + * { .compatible = "qcom,ipq4019-wifi", 30 + * .data = (void *)ATH10K_HW_QCA4019 }, 31 + */ 32 + { } 33 + }; 34 + 35 + MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); 36 + 37 + static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar) 38 + { 39 + return &((struct ath10k_pci *)ar->drv_priv)->ahb[0]; 40 + } 41 + 42 + static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value) 43 + { 44 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 45 + 46 + iowrite32(value, ar_ahb->mem + offset); 47 + } 48 + 49 + static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset) 50 + { 51 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 52 + 53 + return ioread32(ar_ahb->mem + offset); 54 + } 55 + 56 + static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset) 57 + { 58 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 59 + 60 + return ioread32(ar_ahb->gcc_mem + offset); 61 + } 62 + 63 + static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value) 64 + { 65 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 66 + 67 + iowrite32(value, ar_ahb->tcsr_mem + offset); 68 + } 69 + 70 + static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset) 71 + { 72 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 73 + 74 + return ioread32(ar_ahb->tcsr_mem + offset); 75 + } 76 + 77 + static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr) 78 + { 79 + return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 80 + } 81 + 82 + static int ath10k_ahb_get_num_banks(struct ath10k *ar) 83 + { 84 + if (ar->hw_rev == ATH10K_HW_QCA4019) 85 + return 1; 86 + 87 + ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 88 + return 1; 89 + } 90 + 91 + static int ath10k_ahb_clock_init(struct ath10k *ar) 92 + { 93 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 94 + struct device *dev; 95 + int ret; 96 + 97 + dev = &ar_ahb->pdev->dev; 98 + 99 + ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd"); 100 + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) { 101 + 
ath10k_err(ar, "failed to get cmd clk: %ld\n", 102 + PTR_ERR(ar_ahb->cmd_clk)); 103 + ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV; 104 + goto out; 105 + } 106 + 107 + ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref"); 108 + if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) { 109 + ath10k_err(ar, "failed to get ref clk: %ld\n", 110 + PTR_ERR(ar_ahb->ref_clk)); 111 + ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV; 112 + goto err_cmd_clk_put; 113 + } 114 + 115 + ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc"); 116 + if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { 117 + ath10k_err(ar, "failed to get rtc clk: %ld\n", 118 + PTR_ERR(ar_ahb->rtc_clk)); 119 + ret = ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV; 120 + goto err_ref_clk_put; 121 + } 122 + 123 + return 0; 124 + 125 + err_ref_clk_put: 126 + clk_put(ar_ahb->ref_clk); 127 + 128 + err_cmd_clk_put: 129 + clk_put(ar_ahb->cmd_clk); 130 + 131 + out: 132 + return ret; 133 + } 134 + 135 + static void ath10k_ahb_clock_deinit(struct ath10k *ar) 136 + { 137 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 138 + 139 + if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) 140 + clk_put(ar_ahb->cmd_clk); 141 + 142 + if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) 143 + clk_put(ar_ahb->ref_clk); 144 + 145 + if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) 146 + clk_put(ar_ahb->rtc_clk); 147 + 148 + ar_ahb->cmd_clk = NULL; 149 + ar_ahb->ref_clk = NULL; 150 + ar_ahb->rtc_clk = NULL; 151 + } 152 + 153 + static int ath10k_ahb_clock_enable(struct ath10k *ar) 154 + { 155 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 156 + struct device *dev; 157 + int ret; 158 + 159 + dev = &ar_ahb->pdev->dev; 160 + 161 + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) || 162 + IS_ERR_OR_NULL(ar_ahb->ref_clk) || 163 + IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { 164 + ath10k_err(ar, "clock(s) is/are not initialized\n"); 165 + ret = -EIO; 166 + goto out; 167 + } 168 + 169 + ret = clk_prepare_enable(ar_ahb->cmd_clk); 170 + if (ret) { 171 + ath10k_err(ar, "failed to enable cmd clk: %d\n", ret); 
172 + goto out; 173 + } 174 + 175 + ret = clk_prepare_enable(ar_ahb->ref_clk); 176 + if (ret) { 177 + ath10k_err(ar, "failed to enable ref clk: %d\n", ret); 178 + goto err_cmd_clk_disable; 179 + } 180 + 181 + ret = clk_prepare_enable(ar_ahb->rtc_clk); 182 + if (ret) { 183 + ath10k_err(ar, "failed to enable rtc clk: %d\n", ret); 184 + goto err_ref_clk_disable; 185 + } 186 + 187 + return 0; 188 + 189 + err_ref_clk_disable: 190 + clk_disable_unprepare(ar_ahb->ref_clk); 191 + 192 + err_cmd_clk_disable: 193 + clk_disable_unprepare(ar_ahb->cmd_clk); 194 + 195 + out: 196 + return ret; 197 + } 198 + 199 + static void ath10k_ahb_clock_disable(struct ath10k *ar) 200 + { 201 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 202 + 203 + if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) 204 + clk_disable_unprepare(ar_ahb->cmd_clk); 205 + 206 + if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) 207 + clk_disable_unprepare(ar_ahb->ref_clk); 208 + 209 + if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) 210 + clk_disable_unprepare(ar_ahb->rtc_clk); 211 + } 212 + 213 + static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) 214 + { 215 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 216 + struct device *dev; 217 + int ret; 218 + 219 + dev = &ar_ahb->pdev->dev; 220 + 221 + ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold"); 222 + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) { 223 + ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", 224 + PTR_ERR(ar_ahb->core_cold_rst)); 225 + ret = ar_ahb->core_cold_rst ? 226 + PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV; 227 + goto out; 228 + } 229 + 230 + ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold"); 231 + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) { 232 + ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", 233 + PTR_ERR(ar_ahb->radio_cold_rst)); 234 + ret = ar_ahb->radio_cold_rst ? 
235 + PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV; 236 + goto err_core_cold_rst_put; 237 + } 238 + 239 + ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm"); 240 + if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) { 241 + ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", 242 + PTR_ERR(ar_ahb->radio_warm_rst)); 243 + ret = ar_ahb->radio_warm_rst ? 244 + PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV; 245 + goto err_radio_cold_rst_put; 246 + } 247 + 248 + ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif"); 249 + if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) { 250 + ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", 251 + PTR_ERR(ar_ahb->radio_srif_rst)); 252 + ret = ar_ahb->radio_srif_rst ? 253 + PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV; 254 + goto err_radio_warm_rst_put; 255 + } 256 + 257 + ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init"); 258 + if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { 259 + ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", 260 + PTR_ERR(ar_ahb->cpu_init_rst)); 261 + ret = ar_ahb->cpu_init_rst ? 
262 + PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV; 263 + goto err_radio_srif_rst_put; 264 + } 265 + 266 + return 0; 267 + 268 + err_radio_srif_rst_put: 269 + reset_control_put(ar_ahb->radio_srif_rst); 270 + 271 + err_radio_warm_rst_put: 272 + reset_control_put(ar_ahb->radio_warm_rst); 273 + 274 + err_radio_cold_rst_put: 275 + reset_control_put(ar_ahb->radio_cold_rst); 276 + 277 + err_core_cold_rst_put: 278 + reset_control_put(ar_ahb->core_cold_rst); 279 + 280 + out: 281 + return ret; 282 + } 283 + 284 + static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar) 285 + { 286 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 287 + 288 + if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) 289 + reset_control_put(ar_ahb->core_cold_rst); 290 + 291 + if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) 292 + reset_control_put(ar_ahb->radio_cold_rst); 293 + 294 + if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) 295 + reset_control_put(ar_ahb->radio_warm_rst); 296 + 297 + if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) 298 + reset_control_put(ar_ahb->radio_srif_rst); 299 + 300 + if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) 301 + reset_control_put(ar_ahb->cpu_init_rst); 302 + 303 + ar_ahb->core_cold_rst = NULL; 304 + ar_ahb->radio_cold_rst = NULL; 305 + ar_ahb->radio_warm_rst = NULL; 306 + ar_ahb->radio_srif_rst = NULL; 307 + ar_ahb->cpu_init_rst = NULL; 308 + } 309 + 310 + static int ath10k_ahb_release_reset(struct ath10k *ar) 311 + { 312 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 313 + int ret; 314 + 315 + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || 316 + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || 317 + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || 318 + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { 319 + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); 320 + return -EINVAL; 321 + } 322 + 323 + ret = reset_control_deassert(ar_ahb->radio_cold_rst); 324 + if (ret) { 325 + ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret); 326 + return ret; 327 + } 328 + 329 + ret = 
reset_control_deassert(ar_ahb->radio_warm_rst); 330 + if (ret) { 331 + ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret); 332 + return ret; 333 + } 334 + 335 + ret = reset_control_deassert(ar_ahb->radio_srif_rst); 336 + if (ret) { 337 + ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret); 338 + return ret; 339 + } 340 + 341 + ret = reset_control_deassert(ar_ahb->cpu_init_rst); 342 + if (ret) { 343 + ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret); 344 + return ret; 345 + } 346 + 347 + return 0; 348 + } 349 + 350 + static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg, 351 + u32 haltack_reg) 352 + { 353 + unsigned long timeout; 354 + u32 val; 355 + 356 + /* Issue halt axi bus request */ 357 + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); 358 + val |= AHB_AXI_BUS_HALT_REQ; 359 + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); 360 + 361 + /* Wait for axi bus halted ack */ 362 + timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT); 363 + do { 364 + val = ath10k_ahb_tcsr_read32(ar, haltack_reg); 365 + if (val & AHB_AXI_BUS_HALT_ACK) 366 + break; 367 + 368 + mdelay(1); 369 + } while (time_before(jiffies, timeout)); 370 + 371 + if (!(val & AHB_AXI_BUS_HALT_ACK)) { 372 + ath10k_err(ar, "failed to halt axi bus: %d\n", val); 373 + return; 374 + } 375 + 376 + ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n"); 377 + } 378 + 379 + static void ath10k_ahb_halt_chip(struct ath10k *ar) 380 + { 381 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 382 + u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg; 383 + u32 val; 384 + int ret; 385 + 386 + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) || 387 + IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || 388 + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || 389 + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || 390 + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { 391 + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); 392 + return; 393 + } 394 + 395 + core_id = ath10k_ahb_read32(ar, 
ATH10K_AHB_WLAN_CORE_ID_REG); 396 + 397 + switch (core_id) { 398 + case 0: 399 + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG; 400 + haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ; 401 + haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK; 402 + break; 403 + case 1: 404 + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG; 405 + haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ; 406 + haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK; 407 + break; 408 + default: 409 + ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n", 410 + core_id); 411 + return; 412 + } 413 + 414 + ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg); 415 + 416 + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); 417 + val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; 418 + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); 419 + 420 + ret = reset_control_assert(ar_ahb->core_cold_rst); 421 + if (ret) 422 + ath10k_err(ar, "failed to assert core cold rst: %d\n", ret); 423 + msleep(1); 424 + 425 + ret = reset_control_assert(ar_ahb->radio_cold_rst); 426 + if (ret) 427 + ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret); 428 + msleep(1); 429 + 430 + ret = reset_control_assert(ar_ahb->radio_warm_rst); 431 + if (ret) 432 + ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret); 433 + msleep(1); 434 + 435 + ret = reset_control_assert(ar_ahb->radio_srif_rst); 436 + if (ret) 437 + ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret); 438 + msleep(1); 439 + 440 + ret = reset_control_assert(ar_ahb->cpu_init_rst); 441 + if (ret) 442 + ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret); 443 + msleep(10); 444 + 445 + /* Clear halt req and core clock disable req before 446 + * deasserting wifi core reset. 
447 + */ 448 + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); 449 + val &= ~AHB_AXI_BUS_HALT_REQ; 450 + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); 451 + 452 + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); 453 + val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; 454 + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); 455 + 456 + ret = reset_control_deassert(ar_ahb->core_cold_rst); 457 + if (ret) 458 + ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret); 459 + 460 + ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id); 461 + } 462 + 463 + static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) 464 + { 465 + struct ath10k *ar = arg; 466 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 467 + 468 + if (!ath10k_pci_irq_pending(ar)) 469 + return IRQ_NONE; 470 + 471 + ath10k_pci_disable_and_clear_legacy_irq(ar); 472 + tasklet_schedule(&ar_pci->intr_tq); 473 + 474 + return IRQ_HANDLED; 475 + } 476 + 477 + static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) 478 + { 479 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 480 + int ret; 481 + 482 + ret = request_irq(ar_ahb->irq, 483 + ath10k_ahb_interrupt_handler, 484 + IRQF_SHARED, "ath10k_ahb", ar); 485 + if (ret) { 486 + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", 487 + ar_ahb->irq, ret); 488 + return ret; 489 + } 490 + 491 + return 0; 492 + } 493 + 494 + static void ath10k_ahb_release_irq_legacy(struct ath10k *ar) 495 + { 496 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 497 + 498 + free_irq(ar_ahb->irq, ar); 499 + } 500 + 501 + static void ath10k_ahb_irq_disable(struct ath10k *ar) 502 + { 503 + ath10k_ce_disable_interrupts(ar); 504 + ath10k_pci_disable_and_clear_legacy_irq(ar); 505 + } 506 + 507 + static int ath10k_ahb_resource_init(struct ath10k *ar) 508 + { 509 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 510 + struct platform_device *pdev; 511 + struct device *dev; 512 + struct resource *res; 513 + int ret; 514 + 515 + pdev = ar_ahb->pdev; 516 + dev = 
&pdev->dev; 517 + 518 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 519 + if (!res) { 520 + ath10k_err(ar, "failed to get memory resource\n"); 521 + ret = -ENXIO; 522 + goto out; 523 + } 524 + 525 + ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res); 526 + if (IS_ERR(ar_ahb->mem)) { 527 + ath10k_err(ar, "mem ioremap error\n"); 528 + ret = PTR_ERR(ar_ahb->mem); 529 + goto out; 530 + } 531 + 532 + ar_ahb->mem_len = resource_size(res); 533 + 534 + ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE, 535 + ATH10K_GCC_REG_SIZE); 536 + if (!ar_ahb->gcc_mem) { 537 + ath10k_err(ar, "gcc mem ioremap error\n"); 538 + ret = -ENOMEM; 539 + goto err_mem_unmap; 540 + } 541 + 542 + ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE, 543 + ATH10K_TCSR_REG_SIZE); 544 + if (!ar_ahb->tcsr_mem) { 545 + ath10k_err(ar, "tcsr mem ioremap error\n"); 546 + ret = -ENOMEM; 547 + goto err_gcc_mem_unmap; 548 + } 549 + 550 + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 551 + if (ret) { 552 + ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret); 553 + goto err_tcsr_mem_unmap; 554 + } 555 + 556 + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 557 + if (ret) { 558 + ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n", 559 + ret); 560 + goto err_tcsr_mem_unmap; 561 + } 562 + 563 + ret = ath10k_ahb_clock_init(ar); 564 + if (ret) 565 + goto err_tcsr_mem_unmap; 566 + 567 + ret = ath10k_ahb_rst_ctrl_init(ar); 568 + if (ret) 569 + goto err_clock_deinit; 570 + 571 + ar_ahb->irq = platform_get_irq_byname(pdev, "legacy"); 572 + if (ar_ahb->irq < 0) { 573 + ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq); 574 + goto err_clock_deinit; 575 + } 576 + 577 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq); 578 + 579 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n", 580 + ar_ahb->mem, ar_ahb->mem_len, 581 + ar_ahb->gcc_mem, ar_ahb->tcsr_mem); 582 + return 0; 583 + 584 + err_clock_deinit: 585 + 
ath10k_ahb_clock_deinit(ar); 586 + 587 + err_tcsr_mem_unmap: 588 + iounmap(ar_ahb->tcsr_mem); 589 + 590 + err_gcc_mem_unmap: 591 + ar_ahb->tcsr_mem = NULL; 592 + iounmap(ar_ahb->gcc_mem); 593 + 594 + err_mem_unmap: 595 + ar_ahb->gcc_mem = NULL; 596 + devm_iounmap(&pdev->dev, ar_ahb->mem); 597 + 598 + out: 599 + ar_ahb->mem = NULL; 600 + return ret; 601 + } 602 + 603 + static void ath10k_ahb_resource_deinit(struct ath10k *ar) 604 + { 605 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 606 + struct device *dev; 607 + 608 + dev = &ar_ahb->pdev->dev; 609 + 610 + if (ar_ahb->mem) 611 + devm_iounmap(dev, ar_ahb->mem); 612 + 613 + if (ar_ahb->gcc_mem) 614 + iounmap(ar_ahb->gcc_mem); 615 + 616 + if (ar_ahb->tcsr_mem) 617 + iounmap(ar_ahb->tcsr_mem); 618 + 619 + ar_ahb->mem = NULL; 620 + ar_ahb->gcc_mem = NULL; 621 + ar_ahb->tcsr_mem = NULL; 622 + 623 + ath10k_ahb_clock_deinit(ar); 624 + ath10k_ahb_rst_ctrl_deinit(ar); 625 + } 626 + 627 + static int ath10k_ahb_prepare_device(struct ath10k *ar) 628 + { 629 + u32 val; 630 + int ret; 631 + 632 + ret = ath10k_ahb_clock_enable(ar); 633 + if (ret) { 634 + ath10k_err(ar, "failed to enable clocks\n"); 635 + return ret; 636 + } 637 + 638 + /* Clock for the target is supplied from outside of target (ie, 639 + * external clock module controlled by the host). Target needs 640 + * to know what frequency target cpu is configured which is needed 641 + * for target internal use. Read target cpu frequency info from 642 + * gcc register and write into target's scratch register where 643 + * target expects this information. 
644 + */ 645 + val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV); 646 + ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val); 647 + 648 + ret = ath10k_ahb_release_reset(ar); 649 + if (ret) 650 + goto err_clk_disable; 651 + 652 + ath10k_ahb_irq_disable(ar); 653 + 654 + ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); 655 + 656 + ret = ath10k_pci_wait_for_target_init(ar); 657 + if (ret) 658 + goto err_halt_chip; 659 + 660 + return 0; 661 + 662 + err_halt_chip: 663 + ath10k_ahb_halt_chip(ar); 664 + 665 + err_clk_disable: 666 + ath10k_ahb_clock_disable(ar); 667 + 668 + return ret; 669 + } 670 + 671 + static int ath10k_ahb_chip_reset(struct ath10k *ar) 672 + { 673 + int ret; 674 + 675 + ath10k_ahb_halt_chip(ar); 676 + ath10k_ahb_clock_disable(ar); 677 + 678 + ret = ath10k_ahb_prepare_device(ar); 679 + if (ret) 680 + return ret; 681 + 682 + return 0; 683 + } 684 + 685 + static int ath10k_ahb_wake_target_cpu(struct ath10k *ar) 686 + { 687 + u32 addr, val; 688 + 689 + addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; 690 + val = ath10k_ahb_read32(ar, addr); 691 + val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK; 692 + ath10k_ahb_write32(ar, addr, val); 693 + 694 + return 0; 695 + } 696 + 697 + static int ath10k_ahb_hif_start(struct ath10k *ar) 698 + { 699 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n"); 700 + 701 + ath10k_ce_enable_interrupts(ar); 702 + ath10k_pci_enable_legacy_irq(ar); 703 + 704 + ath10k_pci_rx_post(ar); 705 + 706 + return 0; 707 + } 708 + 709 + static void ath10k_ahb_hif_stop(struct ath10k *ar) 710 + { 711 + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); 712 + 713 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n"); 714 + 715 + ath10k_ahb_irq_disable(ar); 716 + synchronize_irq(ar_ahb->irq); 717 + 718 + ath10k_pci_flush(ar); 719 + } 720 + 721 + static int ath10k_ahb_hif_power_up(struct ath10k *ar) 722 + { 723 + int ret; 724 + 725 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n"); 726 + 727 + ret = 
ath10k_ahb_chip_reset(ar); 728 + if (ret) { 729 + ath10k_err(ar, "failed to reset chip: %d\n", ret); 730 + goto out; 731 + } 732 + 733 + ret = ath10k_pci_init_pipes(ar); 734 + if (ret) { 735 + ath10k_err(ar, "failed to initialize CE: %d\n", ret); 736 + goto out; 737 + } 738 + 739 + ret = ath10k_pci_init_config(ar); 740 + if (ret) { 741 + ath10k_err(ar, "failed to setup init config: %d\n", ret); 742 + goto err_ce_deinit; 743 + } 744 + 745 + ret = ath10k_ahb_wake_target_cpu(ar); 746 + if (ret) { 747 + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); 748 + goto err_ce_deinit; 749 + } 750 + 751 + return 0; 752 + 753 + err_ce_deinit: 754 + ath10k_pci_ce_deinit(ar); 755 + out: 756 + return ret; 757 + } 758 + 759 + static const struct ath10k_hif_ops ath10k_ahb_hif_ops = { 760 + .tx_sg = ath10k_pci_hif_tx_sg, 761 + .diag_read = ath10k_pci_hif_diag_read, 762 + .diag_write = ath10k_pci_diag_write_mem, 763 + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 764 + .start = ath10k_ahb_hif_start, 765 + .stop = ath10k_ahb_hif_stop, 766 + .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 767 + .get_default_pipe = ath10k_pci_hif_get_default_pipe, 768 + .send_complete_check = ath10k_pci_hif_send_complete_check, 769 + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 770 + .power_up = ath10k_ahb_hif_power_up, 771 + .power_down = ath10k_pci_hif_power_down, 772 + .read32 = ath10k_ahb_read32, 773 + .write32 = ath10k_ahb_write32, 774 + }; 775 + 776 + static const struct ath10k_bus_ops ath10k_ahb_bus_ops = { 777 + .read32 = ath10k_ahb_read32, 778 + .write32 = ath10k_ahb_write32, 779 + .get_num_banks = ath10k_ahb_get_num_banks, 780 + }; 781 + 782 + static int ath10k_ahb_probe(struct platform_device *pdev) 783 + { 784 + struct ath10k *ar; 785 + struct ath10k_ahb *ar_ahb; 786 + struct ath10k_pci *ar_pci; 787 + const struct of_device_id *of_id; 788 + enum ath10k_hw_rev hw_rev; 789 + size_t size; 790 + int ret; 791 + u32 chip_id; 792 + 793 + of_id = 
of_match_device(ath10k_ahb_of_match, &pdev->dev); 794 + if (!of_id) { 795 + dev_err(&pdev->dev, "failed to find matching device tree id\n"); 796 + return -EINVAL; 797 + } 798 + 799 + hw_rev = (enum ath10k_hw_rev)of_id->data; 800 + 801 + size = sizeof(*ar_pci) + sizeof(*ar_ahb); 802 + ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, 803 + hw_rev, &ath10k_ahb_hif_ops); 804 + if (!ar) { 805 + dev_err(&pdev->dev, "failed to allocate core\n"); 806 + return -ENOMEM; 807 + } 808 + 809 + ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n"); 810 + 811 + ar_pci = ath10k_pci_priv(ar); 812 + ar_ahb = ath10k_ahb_priv(ar); 813 + 814 + ar_ahb->pdev = pdev; 815 + platform_set_drvdata(pdev, ar); 816 + 817 + ret = ath10k_ahb_resource_init(ar); 818 + if (ret) 819 + goto err_core_destroy; 820 + 821 + ar->dev_id = 0; 822 + ar_pci->mem = ar_ahb->mem; 823 + ar_pci->mem_len = ar_ahb->mem_len; 824 + ar_pci->ar = ar; 825 + ar_pci->bus_ops = &ath10k_ahb_bus_ops; 826 + 827 + ret = ath10k_pci_setup_resource(ar); 828 + if (ret) { 829 + ath10k_err(ar, "failed to setup resource: %d\n", ret); 830 + goto err_resource_deinit; 831 + } 832 + 833 + ath10k_pci_init_irq_tasklets(ar); 834 + 835 + ret = ath10k_ahb_request_irq_legacy(ar); 836 + if (ret) 837 + goto err_free_pipes; 838 + 839 + ret = ath10k_ahb_prepare_device(ar); 840 + if (ret) 841 + goto err_free_irq; 842 + 843 + ath10k_pci_ce_deinit(ar); 844 + 845 + chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 846 + if (chip_id == 0xffffffff) { 847 + ath10k_err(ar, "failed to get chip id\n"); 848 + goto err_halt_device; 849 + } 850 + 851 + ret = ath10k_core_register(ar, chip_id); 852 + if (ret) { 853 + ath10k_err(ar, "failed to register driver core: %d\n", ret); 854 + goto err_halt_device; 855 + } 856 + 857 + return 0; 858 + 859 + err_halt_device: 860 + ath10k_ahb_halt_chip(ar); 861 + ath10k_ahb_clock_disable(ar); 862 + 863 + err_free_irq: 864 + ath10k_ahb_release_irq_legacy(ar); 865 + 866 + err_free_pipes: 867 + ath10k_pci_free_pipes(ar); 
868 + 869 + err_resource_deinit: 870 + ath10k_ahb_resource_deinit(ar); 871 + 872 + err_core_destroy: 873 + ath10k_core_destroy(ar); 874 + platform_set_drvdata(pdev, NULL); 875 + 876 + return ret; 877 + } 878 + 879 + static int ath10k_ahb_remove(struct platform_device *pdev) 880 + { 881 + struct ath10k *ar = platform_get_drvdata(pdev); 882 + struct ath10k_ahb *ar_ahb; 883 + 884 + if (!ar) 885 + return -EINVAL; 886 + 887 + ar_ahb = ath10k_ahb_priv(ar); 888 + 889 + if (!ar_ahb) 890 + return -EINVAL; 891 + 892 + ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n"); 893 + 894 + ath10k_core_unregister(ar); 895 + ath10k_ahb_irq_disable(ar); 896 + ath10k_ahb_release_irq_legacy(ar); 897 + ath10k_pci_release_resource(ar); 898 + ath10k_ahb_halt_chip(ar); 899 + ath10k_ahb_clock_disable(ar); 900 + ath10k_ahb_resource_deinit(ar); 901 + ath10k_core_destroy(ar); 902 + 903 + platform_set_drvdata(pdev, NULL); 904 + 905 + return 0; 906 + } 907 + 908 + static struct platform_driver ath10k_ahb_driver = { 909 + .driver = { 910 + .name = "ath10k_ahb", 911 + .of_match_table = ath10k_ahb_of_match, 912 + }, 913 + .probe = ath10k_ahb_probe, 914 + .remove = ath10k_ahb_remove, 915 + }; 916 + 917 + int ath10k_ahb_init(void) 918 + { 919 + int ret; 920 + 921 + printk(KERN_ERR "AHB support is still work in progress\n"); 922 + 923 + ret = platform_driver_register(&ath10k_ahb_driver); 924 + if (ret) 925 + printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", 926 + ret); 927 + return ret; 928 + } 929 + 930 + void ath10k_ahb_exit(void) 931 + { 932 + platform_driver_unregister(&ath10k_ahb_driver); 933 + }
+87
drivers/net/wireless/ath/ath10k/ahb.h
··· 1 + /* 2 + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. 3 + * Copyright (c) 2015 The Linux Foundation. All rights reserved. 4 + * 5 + * Permission to use, copy, modify, and/or distribute this software for any 6 + * purpose with or without fee is hereby granted, provided that the above 7 + * copyright notice and this permission notice appear in all copies. 8 + * 9 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 + */ 17 + 18 + #ifndef _AHB_H_ 19 + #define _AHB_H_ 20 + 21 + #include <linux/platform_device.h> 22 + 23 + struct ath10k_ahb { 24 + struct platform_device *pdev; 25 + void __iomem *mem; 26 + unsigned long mem_len; 27 + void __iomem *gcc_mem; 28 + void __iomem *tcsr_mem; 29 + 30 + int irq; 31 + 32 + struct clk *cmd_clk; 33 + struct clk *ref_clk; 34 + struct clk *rtc_clk; 35 + 36 + struct reset_control *core_cold_rst; 37 + struct reset_control *radio_cold_rst; 38 + struct reset_control *radio_warm_rst; 39 + struct reset_control *radio_srif_rst; 40 + struct reset_control *cpu_init_rst; 41 + }; 42 + 43 + #ifdef CONFIG_ATH10K_AHB 44 + 45 + #define ATH10K_GCC_REG_BASE 0x1800000 46 + #define ATH10K_GCC_REG_SIZE 0x60000 47 + 48 + #define ATH10K_TCSR_REG_BASE 0x1900000 49 + #define ATH10K_TCSR_REG_SIZE 0x80000 50 + 51 + #define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020 52 + #define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014 53 + 54 + #define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 55 + 56 + #define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 57 + #define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 
0x49004 58 + #define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25) 59 + 60 + #define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000 61 + #define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010 62 + #define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004 63 + #define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014 64 + 65 + #define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */ 66 + #define AHB_AXI_BUS_HALT_REQ 1 67 + #define AHB_AXI_BUS_HALT_ACK 1 68 + 69 + #define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1 70 + 71 + int ath10k_ahb_init(void); 72 + void ath10k_ahb_exit(void); 73 + 74 + #else /* CONFIG_ATH10K_AHB */ 75 + 76 + static inline int ath10k_ahb_init(void) 77 + { 78 + return 0; 79 + } 80 + 81 + static inline void ath10k_ahb_exit(void) 82 + { 83 + } 84 + 85 + #endif /* CONFIG_ATH10K_AHB */ 86 + 87 + #endif /* _AHB_H_ */
+44 -4
drivers/net/wireless/ath/ath10k/core.c
··· 156 156 .channel_counters_freq_hz = 150000, 157 157 .max_probe_resp_desc_thres = 24, 158 158 .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 159 + .num_msdu_desc = 1424, 160 + .qcache_active_peers = 50, 161 + .tx_chain_mask = 0xf, 162 + .rx_chain_mask = 0xf, 163 + .max_spatial_stream = 4, 159 164 .fw = { 160 165 .dir = QCA99X0_HW_2_0_FW_DIR, 161 166 .fw = QCA99X0_HW_2_0_FW_FILE, ··· 206 201 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, 207 202 }, 208 203 }, 204 + { 205 + .id = QCA4019_HW_1_0_DEV_VERSION, 206 + .dev_id = 0, 207 + .name = "qca4019 hw1.0", 208 + .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, 209 + .uart_pin = 7, 210 + .otp_exe_param = 0x0010000, 211 + .continuous_frag_desc = true, 212 + .channel_counters_freq_hz = 125000, 213 + .max_probe_resp_desc_thres = 24, 214 + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, 215 + .num_msdu_desc = 2500, 216 + .qcache_active_peers = 35, 217 + .tx_chain_mask = 0x3, 218 + .rx_chain_mask = 0x3, 219 + .max_spatial_stream = 2, 220 + .fw = { 221 + .dir = QCA4019_HW_1_0_FW_DIR, 222 + .fw = QCA4019_HW_1_0_FW_FILE, 223 + .otp = QCA4019_HW_1_0_OTP_FILE, 224 + .board = QCA4019_HW_1_0_BOARD_DATA_FILE, 225 + .board_size = QCA4019_BOARD_DATA_SZ, 226 + .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, 227 + }, 228 + }, 209 229 }; 210 230 211 231 static const char *const ath10k_core_fw_feature_str[] = { ··· 247 217 [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", 248 218 [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", 249 219 [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", 220 + [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", 250 221 }; 251 222 252 223 static unsigned int ath10k_core_get_fw_feature_str(char *buf, ··· 1509 1478 case ATH10K_FW_WMI_OP_VERSION_10_1: 1510 1479 case ATH10K_FW_WMI_OP_VERSION_10_2: 1511 1480 case ATH10K_FW_WMI_OP_VERSION_10_2_4: 1512 - ar->max_num_peers = TARGET_10X_NUM_PEERS; 1513 - ar->max_num_stations = TARGET_10X_NUM_STATIONS; 1481 + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 
1482 + ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; 1483 + ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; 1484 + } else { 1485 + ar->max_num_peers = TARGET_10X_NUM_PEERS; 1486 + ar->max_num_stations = TARGET_10X_NUM_STATIONS; 1487 + } 1514 1488 ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; 1515 1489 ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; 1516 1490 ar->fw_stats_req_mask = WMI_STAT_PEER; ··· 1538 1502 ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; 1539 1503 ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; 1540 1504 ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; 1541 - ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; 1505 + ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc; 1542 1506 ar->fw_stats_req_mask = WMI_STAT_PEER; 1543 - ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM; 1507 + ar->max_spatial_stream = ar->hw_params.max_spatial_stream; 1544 1508 break; 1545 1509 case ATH10K_FW_WMI_OP_VERSION_UNSET: 1546 1510 case ATH10K_FW_WMI_OP_VERSION_MAX: ··· 2014 1978 case ATH10K_HW_QCA99X0: 2015 1979 ar->regs = &qca99x0_regs; 2016 1980 ar->hw_values = &qca99x0_values; 1981 + break; 1982 + case ATH10K_HW_QCA4019: 1983 + ar->regs = &qca4019_regs; 1984 + ar->hw_values = &qca4019_values; 2017 1985 break; 2018 1986 default: 2019 1987 ath10k_err(ar, "unsupported core hardware revision %d\n",
+20
drivers/net/wireless/ath/ath10k/core.h
··· 69 69 70 70 enum ath10k_bus { 71 71 ATH10K_BUS_PCI, 72 + ATH10K_BUS_AHB, 72 73 }; 73 74 74 75 static inline const char *ath10k_bus_str(enum ath10k_bus bus) ··· 77 76 switch (bus) { 78 77 case ATH10K_BUS_PCI: 79 78 return "pci"; 79 + case ATH10K_BUS_AHB: 80 + return "ahb"; 80 81 } 81 82 82 83 return "unknown"; ··· 162 159 u32 peer_rssi; 163 160 u32 peer_tx_rate; 164 161 u32 peer_rx_rate; /* 10x only */ 162 + u32 rx_duration; 165 163 }; 166 164 167 165 struct ath10k_fw_stats_vdev { ··· 319 315 #ifdef CONFIG_MAC80211_DEBUGFS 320 316 /* protected by conf_mutex */ 321 317 bool aggr_mode; 318 + u64 rx_duration; 322 319 #endif 323 320 }; 324 321 ··· 515 510 /* Firmware supports management frame protection */ 516 511 ATH10K_FW_FEATURE_MFP_SUPPORT = 12, 517 512 513 + /* Firmware supports pull-push model where host shares it's software 514 + * queue state with firmware and firmware generates fetch requests 515 + * telling host which queues to dequeue tx from. 516 + * 517 + * Primary function of this is improved MU-MIMO performance with 518 + * multiple clients. 519 + */ 520 + ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, 521 + 518 522 /* keep last */ 519 523 ATH10K_FW_FEATURE_COUNT, 520 524 }; ··· 679 665 680 666 /* The padding bytes's location is different on various chips */ 681 667 enum ath10k_hw_4addr_pad hw_4addr_pad; 668 + 669 + u32 num_msdu_desc; 670 + u32 qcache_active_peers; 671 + u32 tx_chain_mask; 672 + u32 rx_chain_mask; 673 + u32 max_spatial_stream; 682 674 683 675 struct ath10k_hw_params_fw { 684 676 const char *dir;
+28 -18
drivers/net/wireless/ath/ath10k/debug.c
··· 276 276 .llseek = default_llseek, 277 277 }; 278 278 279 - static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) 279 + static void ath10k_fw_stats_pdevs_free(struct list_head *head) 280 280 { 281 281 struct ath10k_fw_stats_pdev *i, *tmp; 282 282 ··· 286 286 } 287 287 } 288 288 289 - static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head) 289 + static void ath10k_fw_stats_vdevs_free(struct list_head *head) 290 290 { 291 291 struct ath10k_fw_stats_vdev *i, *tmp; 292 292 ··· 296 296 } 297 297 } 298 298 299 - static void ath10k_debug_fw_stats_peers_free(struct list_head *head) 299 + static void ath10k_fw_stats_peers_free(struct list_head *head) 300 300 { 301 301 struct ath10k_fw_stats_peer *i, *tmp; 302 302 ··· 310 310 { 311 311 spin_lock_bh(&ar->data_lock); 312 312 ar->debug.fw_stats_done = false; 313 - ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); 314 - ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); 315 - ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); 313 + ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); 314 + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); 315 + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); 316 316 spin_unlock_bh(&ar->data_lock); 317 317 } 318 318 319 319 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) 320 320 { 321 321 struct ath10k_fw_stats stats = {}; 322 - bool is_start, is_started, is_end; 322 + bool is_start, is_started, is_end, peer_stats_svc; 323 323 size_t num_peers; 324 324 size_t num_vdevs; 325 325 int ret; ··· 347 347 * delivered which is treated as end-of-data and is itself discarded 348 348 */ 349 349 350 + peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map); 351 + if (peer_stats_svc) 352 + ath10k_sta_update_rx_duration(ar, &stats.peers); 353 + 350 354 if (ar->debug.fw_stats_done) { 351 - ath10k_warn(ar, "received unsolicited stats update event\n"); 355 + if (!peer_stats_svc) 356 + ath10k_warn(ar, 
"received unsolicited stats update event\n"); 357 + 352 358 goto free; 353 359 } 354 360 ··· 378 372 /* Although this is unlikely impose a sane limit to 379 373 * prevent firmware from DoS-ing the host. 380 374 */ 375 + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); 381 376 ath10k_warn(ar, "dropping fw peer stats\n"); 382 377 goto free; 383 378 } 384 379 385 380 if (num_vdevs >= BITS_PER_LONG) { 381 + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); 386 382 ath10k_warn(ar, "dropping fw vdev stats\n"); 387 383 goto free; 388 384 } ··· 399 391 /* In some cases lists have been spliced and cleared. Free up 400 392 * resources if that is not the case. 401 393 */ 402 - ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); 403 - ath10k_debug_fw_stats_vdevs_free(&stats.vdevs); 404 - ath10k_debug_fw_stats_peers_free(&stats.peers); 394 + ath10k_fw_stats_pdevs_free(&stats.pdevs); 395 + ath10k_fw_stats_vdevs_free(&stats.vdevs); 396 + ath10k_fw_stats_peers_free(&stats.peers); 405 397 406 398 spin_unlock_bh(&ar->data_lock); 407 399 } ··· 2114 2106 struct ath10k *ar = file->private_data; 2115 2107 char buf[32]; 2116 2108 size_t buf_size; 2109 + int ret = 0; 2117 2110 bool val; 2118 2111 2119 2112 buf_size = min(count, (sizeof(buf) - 1)); ··· 2128 2119 2129 2120 mutex_lock(&ar->conf_mutex); 2130 2121 2122 + if (ar->state != ATH10K_STATE_ON && 2123 + ar->state != ATH10K_STATE_RESTARTED) { 2124 + ret = -ENETDOWN; 2125 + goto exit; 2126 + } 2127 + 2131 2128 if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) 2132 2129 goto exit; 2133 2130 ··· 2142 2127 else 2143 2128 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 2144 2129 2145 - if (ar->state != ATH10K_STATE_ON) 2146 - goto exit; 2147 - 2148 2130 ath10k_info(ar, "restarting firmware due to btcoex change"); 2149 2131 2150 2132 queue_work(ar->workqueue, &ar->restart_work); 2133 + ret = count; 2151 2134 2152 2135 exit: 2153 2136 mutex_unlock(&ar->conf_mutex); 2154 2137 2155 - return count; 2138 + return ret; 2156 2139 } 
2157 2140 2158 2141 static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, ··· 2188 2175 return -ENOMEM; 2189 2176 2190 2177 mutex_lock(&ar->conf_mutex); 2191 - 2192 - if (len > buf_len) 2193 - len = buf_len; 2194 2178 2195 2179 len += scnprintf(buf + len, buf_len - len, 2196 2180 "firmware-N.bin\t\t%08x\n",
+7
drivers/net/wireless/ath/ath10k/debug.h
··· 37 37 ATH10K_DBG_TESTMODE = 0x00001000, 38 38 ATH10K_DBG_WMI_PRINT = 0x00002000, 39 39 ATH10K_DBG_PCI_PS = 0x00004000, 40 + ATH10K_DBG_AHB = 0x00008000, 40 41 ATH10K_DBG_ANY = 0xffffffff, 41 42 }; 42 43 ··· 154 153 #ifdef CONFIG_MAC80211_DEBUGFS 155 154 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 156 155 struct ieee80211_sta *sta, struct dentry *dir); 156 + void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *peer); 157 + #else 158 + static inline void ath10k_sta_update_rx_duration(struct ath10k *ar, 159 + struct list_head *peer) 160 + { 161 + } 157 162 #endif /* CONFIG_MAC80211_DEBUGFS */ 158 163 159 164 #ifdef CONFIG_ATH10K_DEBUG
+41
drivers/net/wireless/ath/ath10k/debugfs_sta.c
··· 18 18 #include "wmi-ops.h" 19 19 #include "debug.h" 20 20 21 + void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head) 22 + { struct ieee80211_sta *sta; 23 + struct ath10k_fw_stats_peer *peer; 24 + struct ath10k_sta *arsta; 25 + 26 + rcu_read_lock(); 27 + list_for_each_entry(peer, head, list) { 28 + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, 29 + NULL); 30 + if (!sta) 31 + continue; 32 + arsta = (struct ath10k_sta *)sta->drv_priv; 33 + arsta->rx_duration += (u64)peer->rx_duration; 34 + } 35 + rcu_read_unlock(); 36 + } 37 + 21 38 static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, 22 39 char __user *user_buf, 23 40 size_t count, loff_t *ppos) ··· 249 232 .llseek = default_llseek, 250 233 }; 251 234 235 + static ssize_t ath10k_dbg_sta_read_rx_duration(struct file *file, 236 + char __user *user_buf, 237 + size_t count, loff_t *ppos) 238 + { 239 + struct ieee80211_sta *sta = file->private_data; 240 + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 241 + char buf[100]; 242 + int len = 0; 243 + 244 + len = scnprintf(buf, sizeof(buf), 245 + "%llu usecs\n", arsta->rx_duration); 246 + 247 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 248 + } 249 + 250 + static const struct file_operations fops_rx_duration = { 251 + .read = ath10k_dbg_sta_read_rx_duration, 252 + .open = simple_open, 253 + .owner = THIS_MODULE, 254 + .llseek = default_llseek, 255 + }; 256 + 252 257 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 253 258 struct ieee80211_sta *sta, struct dentry *dir) 254 259 { ··· 279 240 debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); 280 241 debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); 281 242 debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); 243 + debugfs_create_file("rx_duration", S_IRUGO, dir, sta, 244 + &fops_rx_duration); 282 245 }
+4 -4
drivers/net/wireless/ath/ath10k/htt.c
··· 131 131 [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, 132 132 [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = 133 133 HTT_T2H_MSG_TYPE_TX_FETCH_IND, 134 - [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] = 135 - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, 134 + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] = 135 + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, 136 136 [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = 137 137 HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, 138 - [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] = 139 - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, 138 + [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] = 139 + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, 140 140 }; 141 141 142 142 int ath10k_htt_connect(struct ath10k_htt *htt)
+162 -7
drivers/net/wireless/ath/ath10k/htt.h
··· 52 52 /* This command is used for sending management frames in HTT < 3.0. 53 53 * HTT >= 3.0 uses TX_FRM for everything. */ 54 54 HTT_H2T_MSG_TYPE_MGMT_TX = 7, 55 + HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11, 55 56 56 57 HTT_H2T_NUM_MSGS /* keep this last */ 57 58 }; ··· 414 413 HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, 415 414 HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, 416 415 HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, 417 - HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17, 416 + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17, 418 417 HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, 419 418 /* 0x19 to 0x2f are reserved */ 420 - HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30, 419 + HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30, 421 420 /* keep this last */ 422 421 HTT_10_4_T2H_NUM_MSGS 423 422 }; ··· 450 449 HTT_T2H_MSG_TYPE_TEST, 451 450 HTT_T2H_MSG_TYPE_EN_STATS, 452 451 HTT_T2H_MSG_TYPE_TX_FETCH_IND, 453 - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, 454 - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, 452 + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, 453 + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, 455 454 /* keep this last */ 456 455 HTT_T2H_NUM_MSGS 457 456 }; ··· 1307 1306 * so we use a conservatively safe value for now */ 1308 1307 #define HTT_FRAG_DESC_BANK_MAX 4 1309 1308 1310 - #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 1311 - #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 1312 - #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2) 1309 + #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 1310 + #define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 1311 + #define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) 1312 + #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) 1313 + #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) 1314 + #define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 1315 + 1316 + enum htt_q_depth_type { 1317 + HTT_Q_DEPTH_TYPE_BYTES = 0, 1318 + HTT_Q_DEPTH_TYPE_MSDUS = 1, 1319 + }; 1320 + 1321 + #define HTT_TX_Q_STATE_NUM_PEERS 
(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ 1322 + TARGET_10_4_NUM_VDEVS) 1323 + #define HTT_TX_Q_STATE_NUM_TIDS 8 1324 + #define HTT_TX_Q_STATE_ENTRY_SIZE 1 1325 + #define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 1326 + 1327 + /** 1328 + * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config 1329 + * 1330 + * Defines host q state format and behavior. See htt_q_state. 1331 + * 1332 + * @record_size: Defines the size of each host q entry in bytes. In practice 1333 + * however firmware (at least 10.4.3-00191) ignores this host 1334 + * configuration value and uses hardcoded value of 1. 1335 + * @record_multiplier: This is valid only when q depth type is MSDUs. It 1336 + * defines the exponent for the power of 2 multiplication. 1337 + */ 1338 + struct htt_q_state_conf { 1339 + __le32 paddr; 1340 + __le16 num_peers; 1341 + __le16 num_tids; 1342 + u8 record_size; 1343 + u8 record_multiplier; 1344 + u8 pad[2]; 1345 + } __packed; 1313 1346 1314 1347 struct htt_frag_desc_bank_cfg { 1315 1348 u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ ··· 1351 1316 u8 desc_size; 1352 1317 __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; 1353 1318 struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; 1319 + struct htt_q_state_conf q_state; 1320 + } __packed; 1321 + 1322 + #define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 1323 + #define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f 1324 + #define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 1325 + #define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 1326 + #define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 1327 + 1328 + /** 1329 + * htt_q_state - shared between host and firmware via DMA 1330 + * 1331 + * This structure is used for the host to expose its software queue state to 1332 + * firmware so that its rate control can schedule fetch requests for optimized 1333 + * performance. This is most notably used for MU-MIMO aggregation when multiple 1334 + * MU clients are connected. 1335 + * 1336 + * @count: Each element defines the host queue depth. 
When q depth type was 1337 + * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: 1338 + * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and 1339 + * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as 1340 + * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** 1341 + * record_multiplier (see htt_q_state_conf). 1342 + * @map: Used by firmware to quickly check which host queues are not empty. It 1343 + * is a bitmap simply saying. 1344 + * @seq: Used by firmware to quickly check if the host queues were updated 1345 + * since it last checked. 1346 + * 1347 + * FIXME: Is the q_state map[] size calculation really correct? 1348 + */ 1349 + struct htt_q_state { 1350 + u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; 1351 + u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; 1352 + __le32 seq; 1353 + } __packed; 1354 + 1355 + #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff 1356 + #define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 1357 + #define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 1358 + #define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 1359 + 1360 + struct htt_tx_fetch_record { 1361 + __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ 1362 + __le16 num_msdus; 1363 + __le32 num_bytes; 1364 + } __packed; 1365 + 1366 + struct htt_tx_fetch_ind { 1367 + u8 pad0; 1368 + __le16 fetch_seq_num; 1369 + __le32 token; 1370 + __le16 num_resp_ids; 1371 + __le16 num_records; 1372 + struct htt_tx_fetch_record records[0]; 1373 + __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ 1374 + } __packed; 1375 + 1376 + static inline void * 1377 + ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) 1378 + { 1379 + return (void *)&ind->records[le16_to_cpu(ind->num_records)]; 1380 + } 1381 + 1382 + struct htt_tx_fetch_resp { 1383 + u8 pad0; 1384 + __le16 resp_id; 1385 + __le16 fetch_seq_num; 1386 + __le16 num_records; 1387 + __le32 token; 1388 + struct htt_tx_fetch_record records[0]; 
1389 + } __packed; 1390 + 1391 + struct htt_tx_fetch_confirm { 1392 + u8 pad0; 1393 + __le16 num_resp_ids; 1394 + __le32 resp_ids[0]; 1395 + } __packed; 1396 + 1397 + enum htt_tx_mode_switch_mode { 1398 + HTT_TX_MODE_SWITCH_PUSH = 0, 1399 + HTT_TX_MODE_SWITCH_PUSH_PULL = 1, 1400 + }; 1401 + 1402 + #define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) 1403 + #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe 1404 + #define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 1405 + 1406 + #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 1407 + #define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 1408 + #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc 1409 + #define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 1410 + 1411 + #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff 1412 + #define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 1413 + #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 1414 + #define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 1415 + 1416 + struct htt_tx_mode_switch_record { 1417 + __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ 1418 + __le16 num_max_msdus; 1419 + } __packed; 1420 + 1421 + struct htt_tx_mode_switch_ind { 1422 + u8 pad0; 1423 + __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ 1424 + __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ 1425 + u8 pad1[2]; 1426 + struct htt_tx_mode_switch_record records[0]; 1354 1427 } __packed; 1355 1428 1356 1429 union htt_rx_pn_t { ··· 1483 1340 struct htt_oob_sync_req oob_sync_req; 1484 1341 struct htt_aggr_conf aggr_conf; 1485 1342 struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; 1343 + struct htt_tx_fetch_resp tx_fetch_resp; 1486 1344 }; 1487 1345 } __packed; 1488 1346 ··· 1508 1364 struct htt_rx_pn_ind rx_pn_ind; 1509 1365 struct htt_rx_offload_ind rx_offload_ind; 1510 1366 struct htt_rx_in_ord_ind rx_in_ord_ind; 1367 + struct htt_tx_fetch_ind tx_fetch_ind; 1368 + struct htt_tx_fetch_confirm tx_fetch_confirm; 1369 + struct htt_tx_mode_switch_ind tx_mode_switch_ind; 
1511 1370 }; 1512 1371 } __packed; 1513 1372 ··· 1665 1518 dma_addr_t paddr; 1666 1519 struct ath10k_htt_txbuf *vaddr; 1667 1520 } txbuf; 1521 + 1522 + struct { 1523 + struct htt_q_state *vaddr; 1524 + dma_addr_t paddr; 1525 + u16 num_peers; 1526 + u16 num_tids; 1527 + enum htt_q_depth_type type; 1528 + } tx_q_state; 1668 1529 }; 1669 1530 1670 1531 #define RX_HTT_HDR_STATUS_LEN 64
+35 -19
drivers/net/wireless/ath/ath10k/htt_rx.c
··· 2011 2011 break; 2012 2012 } 2013 2013 case HTT_T2H_MSG_TYPE_RX_IND: 2014 - spin_lock_bh(&htt->rx_ring.lock); 2015 - __skb_queue_tail(&htt->rx_compl_q, skb); 2016 - spin_unlock_bh(&htt->rx_ring.lock); 2014 + skb_queue_tail(&htt->rx_compl_q, skb); 2017 2015 tasklet_schedule(&htt->txrx_compl_task); 2018 2016 return; 2019 2017 case HTT_T2H_MSG_TYPE_PEER_MAP: { ··· 2109 2111 break; 2110 2112 } 2111 2113 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 2112 - spin_lock_bh(&htt->rx_ring.lock); 2113 - __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 2114 - spin_unlock_bh(&htt->rx_ring.lock); 2114 + skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 2115 2115 tasklet_schedule(&htt->txrx_compl_task); 2116 2116 return; 2117 2117 } ··· 2119 2123 break; 2120 2124 case HTT_T2H_MSG_TYPE_AGGR_CONF: 2121 2125 break; 2122 - case HTT_T2H_MSG_TYPE_EN_STATS: 2123 2126 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: 2124 - case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: 2125 - case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND: 2127 + case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 2128 + case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 2129 + /* TODO: Implement pull-push logic */ 2130 + break; 2131 + case HTT_T2H_MSG_TYPE_EN_STATS: 2126 2132 default: 2127 2133 ath10k_warn(ar, "htt event (%d) not handled\n", 2128 2134 resp->hdr.msg_type); ··· 2141 2143 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 2142 2144 struct sk_buff *skb) 2143 2145 { 2144 - struct ath10k_pktlog_10_4_hdr *hdr = 2145 - (struct ath10k_pktlog_10_4_hdr *)skb->data; 2146 - 2147 - trace_ath10k_htt_pktlog(ar, hdr->payload, 2148 - sizeof(*hdr) + __le16_to_cpu(hdr->size)); 2146 + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 2149 2147 dev_kfree_skb_any(skb); 2150 2148 } 2151 2149 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); ··· 2150 2156 { 2151 2157 struct ath10k_htt *htt = (struct ath10k_htt *)ptr; 2152 2158 struct ath10k *ar = htt->ar; 2159 + struct sk_buff_head tx_q; 2160 + struct sk_buff_head rx_q; 2161 + struct sk_buff_head rx_ind_q; 2153 
2162 struct htt_resp *resp; 2154 2163 struct sk_buff *skb; 2164 + unsigned long flags; 2155 2165 2156 - while ((skb = skb_dequeue(&htt->tx_compl_q))) { 2166 + __skb_queue_head_init(&tx_q); 2167 + __skb_queue_head_init(&rx_q); 2168 + __skb_queue_head_init(&rx_ind_q); 2169 + 2170 + spin_lock_irqsave(&htt->tx_compl_q.lock, flags); 2171 + skb_queue_splice_init(&htt->tx_compl_q, &tx_q); 2172 + spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags); 2173 + 2174 + spin_lock_irqsave(&htt->rx_compl_q.lock, flags); 2175 + skb_queue_splice_init(&htt->rx_compl_q, &rx_q); 2176 + spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags); 2177 + 2178 + spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags); 2179 + skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); 2180 + spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); 2181 + 2182 + while ((skb = __skb_dequeue(&tx_q))) { 2157 2183 ath10k_htt_rx_frm_tx_compl(htt->ar, skb); 2158 2184 dev_kfree_skb_any(skb); 2159 2185 } 2160 2186 2161 - spin_lock_bh(&htt->rx_ring.lock); 2162 - while ((skb = __skb_dequeue(&htt->rx_compl_q))) { 2187 + while ((skb = __skb_dequeue(&rx_q))) { 2163 2188 resp = (struct htt_resp *)skb->data; 2189 + spin_lock_bh(&htt->rx_ring.lock); 2164 2190 ath10k_htt_rx_handler(htt, &resp->rx_ind); 2191 + spin_unlock_bh(&htt->rx_ring.lock); 2165 2192 dev_kfree_skb_any(skb); 2166 2193 } 2167 2194 2168 - while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { 2195 + while ((skb = __skb_dequeue(&rx_ind_q))) { 2196 + spin_lock_bh(&htt->rx_ring.lock); 2169 2197 ath10k_htt_rx_in_ord_ind(ar, skb); 2198 + spin_unlock_bh(&htt->rx_ring.lock); 2170 2199 dev_kfree_skb_any(skb); 2171 2200 } 2172 - spin_unlock_bh(&htt->rx_ring.lock); 2173 2201 }
+121 -25
drivers/net/wireless/ath/ath10k/htt_tx.c
··· 97 97 idr_remove(&htt->pending_tx, msdu_id); 98 98 } 99 99 100 + static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) 101 + { 102 + size_t size; 103 + 104 + if (!htt->frag_desc.vaddr) 105 + return; 106 + 107 + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); 108 + 109 + dma_free_coherent(htt->ar->dev, 110 + size, 111 + htt->frag_desc.vaddr, 112 + htt->frag_desc.paddr); 113 + } 114 + 115 + static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) 116 + { 117 + struct ath10k *ar = htt->ar; 118 + size_t size; 119 + 120 + if (!ar->hw_params.continuous_frag_desc) 121 + return 0; 122 + 123 + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); 124 + htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, 125 + &htt->frag_desc.paddr, 126 + GFP_KERNEL); 127 + if (!htt->frag_desc.vaddr) { 128 + ath10k_err(ar, "failed to alloc fragment desc memory\n"); 129 + return -ENOMEM; 130 + } 131 + 132 + return 0; 133 + } 134 + 135 + static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) 136 + { 137 + struct ath10k *ar = htt->ar; 138 + size_t size; 139 + 140 + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) 141 + return; 142 + 143 + size = sizeof(*htt->tx_q_state.vaddr); 144 + 145 + dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE); 146 + kfree(htt->tx_q_state.vaddr); 147 + } 148 + 149 + static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) 150 + { 151 + struct ath10k *ar = htt->ar; 152 + size_t size; 153 + int ret; 154 + 155 + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) 156 + return 0; 157 + 158 + htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; 159 + htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS; 160 + htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES; 161 + 162 + size = sizeof(*htt->tx_q_state.vaddr); 163 + htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); 164 + if (!htt->tx_q_state.vaddr) 165 + return -ENOMEM; 166 + 167 + 
htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, 168 + size, DMA_TO_DEVICE); 169 + ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr); 170 + if (ret) { 171 + ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret); 172 + kfree(htt->tx_q_state.vaddr); 173 + return -EIO; 174 + } 175 + 176 + return 0; 177 + } 178 + 100 179 int ath10k_htt_tx_alloc(struct ath10k_htt *htt) 101 180 { 102 181 struct ath10k *ar = htt->ar; ··· 197 118 goto free_idr_pending_tx; 198 119 } 199 120 200 - if (!ar->hw_params.continuous_frag_desc) 201 - goto skip_frag_desc_alloc; 202 - 203 - size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); 204 - htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, 205 - &htt->frag_desc.paddr, 206 - GFP_KERNEL); 207 - if (!htt->frag_desc.vaddr) { 208 - ath10k_warn(ar, "failed to alloc fragment desc memory\n"); 209 - ret = -ENOMEM; 121 + ret = ath10k_htt_tx_alloc_cont_frag_desc(htt); 122 + if (ret) { 123 + ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); 210 124 goto free_txbuf; 211 125 } 212 126 213 - skip_frag_desc_alloc: 127 + ret = ath10k_htt_tx_alloc_txq(htt); 128 + if (ret) { 129 + ath10k_err(ar, "failed to alloc txq: %d\n", ret); 130 + goto free_frag_desc; 131 + } 132 + 214 133 return 0; 134 + 135 + free_frag_desc: 136 + ath10k_htt_tx_free_cont_frag_desc(htt); 215 137 216 138 free_txbuf: 217 139 size = htt->max_num_pending_tx * 218 140 sizeof(struct ath10k_htt_txbuf); 219 141 dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, 220 142 htt->txbuf.paddr); 143 + 221 144 free_idr_pending_tx: 222 145 idr_destroy(&htt->pending_tx); 146 + 223 147 return ret; 224 148 } 225 149 ··· 256 174 htt->txbuf.paddr); 257 175 } 258 176 259 - if (htt->frag_desc.vaddr) { 260 - size = htt->max_num_pending_tx * 261 - sizeof(struct htt_msdu_ext_desc); 262 - dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, 263 - htt->frag_desc.paddr); 264 - } 177 + ath10k_htt_tx_free_txq(htt); 178 + 
ath10k_htt_tx_free_cont_frag_desc(htt); 265 179 } 266 180 267 181 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) ··· 346 268 struct ath10k *ar = htt->ar; 347 269 struct sk_buff *skb; 348 270 struct htt_cmd *cmd; 271 + struct htt_frag_desc_bank_cfg *cfg; 349 272 int ret, size; 273 + u8 info; 350 274 351 275 if (!ar->hw_params.continuous_frag_desc) 352 276 return 0; ··· 366 286 skb_put(skb, size); 367 287 cmd = (struct htt_cmd *)skb->data; 368 288 cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; 369 - cmd->frag_desc_bank_cfg.info = 0; 370 - cmd->frag_desc_bank_cfg.num_banks = 1; 371 - cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); 372 - cmd->frag_desc_bank_cfg.bank_base_addrs[0] = 373 - __cpu_to_le32(htt->frag_desc.paddr); 374 - cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; 375 - cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = 376 - __cpu_to_le16(htt->max_num_pending_tx - 1); 289 + 290 + info = 0; 291 + info |= SM(htt->tx_q_state.type, 292 + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); 293 + 294 + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) 295 + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; 296 + 297 + cfg = &cmd->frag_desc_bank_cfg; 298 + cfg->info = info; 299 + cfg->num_banks = 1; 300 + cfg->desc_size = sizeof(struct htt_msdu_ext_desc); 301 + cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); 302 + cfg->bank_id[0].bank_min_id = 0; 303 + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 304 + 1); 305 + 306 + cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); 307 + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); 308 + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); 309 + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; 310 + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; 311 + 312 + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); 377 313 378 314 ret = 
ath10k_htc_send(&htt->ar->htc, htt->eid, skb); 379 315 if (ret) {
+39
drivers/net/wireless/ath/ath10k/hw.c
··· 109 109 .pcie_intr_clr_address = 0x00000010, 110 110 }; 111 111 112 + const struct ath10k_hw_regs qca4019_regs = { 113 + .rtc_soc_base_address = 0x00080000, 114 + .soc_core_base_address = 0x00082000, 115 + .ce_wrapper_base_address = 0x0004d000, 116 + .ce0_base_address = 0x0004a000, 117 + .ce1_base_address = 0x0004a400, 118 + .ce2_base_address = 0x0004a800, 119 + .ce3_base_address = 0x0004ac00, 120 + .ce4_base_address = 0x0004b000, 121 + .ce5_base_address = 0x0004b400, 122 + .ce6_base_address = 0x0004b800, 123 + .ce7_base_address = 0x0004bc00, 124 + /* qca4019 supports up to 12 copy engines. Since base address 125 + * of ce8 to ce11 are not directly referred in the code, 126 + * no need to have them in separate members in this table. 127 + * Copy Engine Address 128 + * CE8 0x0004c000 129 + * CE9 0x0004c400 130 + * CE10 0x0004c800 131 + * CE11 0x0004cc00 132 + */ 133 + .soc_reset_control_si0_rst_mask = 0x00000001, 134 + .soc_reset_control_ce_rst_mask = 0x00000100, 135 + .soc_chip_id_address = 0x000000ec, 136 + .fw_indicator_address = 0x0004f00c, 137 + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, 138 + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, 139 + .pcie_intr_fw_mask = 0x00100000, 140 + .pcie_intr_ce_mask_all = 0x000fff00, 141 + .pcie_intr_clr_address = 0x00000010, 142 + }; 143 + 112 144 const struct ath10k_hw_values qca988x_values = { 113 145 .rtc_state_val_on = 3, 114 146 .ce_count = 8, ··· 166 134 .num_target_ce_config_wlan = 10, 167 135 .ce_desc_meta_data_mask = 0xFFF0, 168 136 .ce_desc_meta_data_lsb = 4, 137 + }; 138 + 139 + const struct ath10k_hw_values qca4019_values = { 140 + .ce_count = 12, 141 + .num_target_ce_config_wlan = 10, 142 + .ce_desc_meta_data_mask = 0xFFF0, 143 + .ce_desc_meta_data_lsb = 4, 169 144 }; 170 145 171 146 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+18 -6
drivers/net/wireless/ath/ath10k/hw.h
··· 106 106 #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" 107 107 #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 108 108 109 + /* QCA4019 1.0 definitions */ 110 + #define QCA4019_HW_1_0_DEV_VERSION 0x01000000 111 + #define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" 112 + #define QCA4019_HW_1_0_FW_FILE "firmware.bin" 113 + #define QCA4019_HW_1_0_OTP_FILE "otp.bin" 114 + #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" 115 + #define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 116 + 109 117 #define ATH10K_FW_API2_FILE "firmware-2.bin" 110 118 #define ATH10K_FW_API3_FILE "firmware-3.bin" 111 119 ··· 208 200 ATH10K_HW_QCA6174, 209 201 ATH10K_HW_QCA99X0, 210 202 ATH10K_HW_QCA9377, 203 + ATH10K_HW_QCA4019, 211 204 }; 212 205 213 206 struct ath10k_hw_regs { ··· 241 232 extern const struct ath10k_hw_regs qca988x_regs; 242 233 extern const struct ath10k_hw_regs qca6174_regs; 243 234 extern const struct ath10k_hw_regs qca99x0_regs; 235 + extern const struct ath10k_hw_regs qca4019_regs; 244 236 245 237 struct ath10k_hw_values { 246 238 u32 rtc_state_val_on; ··· 255 245 extern const struct ath10k_hw_values qca988x_values; 256 246 extern const struct ath10k_hw_values qca6174_values; 257 247 extern const struct ath10k_hw_values qca99x0_values; 248 + extern const struct ath10k_hw_values qca4019_values; 258 249 259 250 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, 260 251 u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); ··· 264 253 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) 265 254 #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) 266 255 #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) 256 + #define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) 267 257 268 258 /* Known pecularities: 269 259 * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap ··· 375 363 #define TARGET_10X_MAC_AGGR_DELIM 0 376 364 #define TARGET_10X_AST_SKID_LIMIT 128 377 365 #define TARGET_10X_NUM_STATIONS 128 
366 + #define TARGET_10X_TX_STATS_NUM_STATIONS 118 378 367 #define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ 368 + (TARGET_10X_NUM_VDEVS)) 369 + #define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \ 379 370 (TARGET_10X_NUM_VDEVS)) 380 371 #define TARGET_10X_NUM_OFFLOAD_PEERS 0 381 372 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 ··· 386 371 #define TARGET_10X_NUM_TIDS_MAX 256 387 372 #define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ 388 373 (TARGET_10X_NUM_PEERS) * 2) 374 + #define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ 375 + (TARGET_10X_TX_STATS_NUM_PEERS) * 2) 389 376 #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) 390 377 #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) 391 378 #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 ··· 431 414 #define TARGET_10_4_ACTIVE_PEERS 0 432 415 433 416 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 434 - #define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 435 417 #define TARGET_10_4_NUM_OFFLOAD_PEERS 0 436 418 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 437 419 #define TARGET_10_4_NUM_PEER_KEYS 2 438 420 #define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) 439 421 #define TARGET_10_4_AST_SKID_LIMIT 32 440 - #define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \ 441 - BIT(2) | BIT(3)) 442 - #define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \ 443 - BIT(2) | BIT(3)) 444 422 445 423 /* 100 ms for video, best-effort, and background */ 446 424 #define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 ··· 461 449 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 462 450 #define TARGET_10_4_VOW_CONFIG 0 463 451 #define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 464 - #define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) 465 452 #define TARGET_10_4_11AC_TX_MAX_FRAGS 2 466 453 #define TARGET_10_4_MAX_PEER_EXT_STATS 16 467 454 #define TARGET_10_4_SMART_ANT_CAP 0 ··· 612 601 #define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address 613 602 #define 
FW_IND_EVENT_PENDING 1 614 603 #define FW_IND_INITIALIZED 2 604 + #define FW_IND_HOST_READY 0x80000000 615 605 616 606 /* HOST_REG interrupt from firmware */ 617 607 #define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
+36 -21
drivers/net/wireless/ath/ath10k/mac.c
··· 1358 1358 const u8 *p2p_ie; 1359 1359 int ret; 1360 1360 1361 - if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1362 - return 0; 1363 - 1364 - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 1361 + if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1365 1362 return 0; 1366 1363 1367 1364 mgmt = (void *)bcn->data; ··· 3256 3259 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 3257 3260 3258 3261 /* This is case only for P2P_GO */ 3259 - if (arvif->vdev_type != WMI_VDEV_TYPE_AP || 3260 - arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 3262 + if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3261 3263 return; 3262 3264 3263 3265 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { ··· 3984 3988 static int ath10k_start(struct ieee80211_hw *hw) 3985 3989 { 3986 3990 struct ath10k *ar = hw->priv; 3987 - u32 burst_enable; 3991 + u32 param; 3988 3992 int ret = 0; 3989 3993 3990 3994 /* ··· 4027 4031 goto err_power_down; 4028 4032 } 4029 4033 4030 - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); 4034 + param = ar->wmi.pdev_param->pmf_qos; 4035 + ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4031 4036 if (ret) { 4032 4037 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 4033 4038 goto err_core_stop; 4034 4039 } 4035 4040 4036 - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); 4041 + param = ar->wmi.pdev_param->dynamic_bw; 4042 + ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4037 4043 if (ret) { 4038 4044 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 4039 4045 goto err_core_stop; ··· 4051 4053 } 4052 4054 4053 4055 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 4054 - burst_enable = ar->wmi.pdev_param->burst_enable; 4055 - ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0); 4056 + param = ar->wmi.pdev_param->burst_enable; 4057 + ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4056 4058 if (ret) { 4057 4059 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 4058 4060 goto 
err_core_stop; ··· 4070 4072 * this problem. 4071 4073 */ 4072 4074 4073 - ret = ath10k_wmi_pdev_set_param(ar, 4074 - ar->wmi.pdev_param->arp_ac_override, 0); 4075 + param = ar->wmi.pdev_param->arp_ac_override; 4076 + ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4075 4077 if (ret) { 4076 4078 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4077 4079 ret); ··· 4090 4092 } 4091 4093 } 4092 4094 4093 - ret = ath10k_wmi_pdev_set_param(ar, 4094 - ar->wmi.pdev_param->ani_enable, 1); 4095 + param = ar->wmi.pdev_param->ani_enable; 4096 + ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4095 4097 if (ret) { 4096 4098 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4097 4099 ret); ··· 4099 4101 } 4100 4102 4101 4103 ar->ani_enabled = true; 4104 + 4105 + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 4106 + param = ar->wmi.pdev_param->peer_stats_update_period; 4107 + ret = ath10k_wmi_pdev_set_param(ar, param, 4108 + PEER_DEFAULT_STATS_UPDATE_PERIOD); 4109 + if (ret) { 4110 + ath10k_warn(ar, 4111 + "failed to set peer stats period : %d\n", 4112 + ret); 4113 + goto err_core_stop; 4114 + } 4115 + } 4102 4116 4103 4117 ar->num_started_vdevs = 0; 4104 4118 ath10k_regd_update(ar); ··· 4359 4349 bit, ar->free_vdev_map); 4360 4350 4361 4351 arvif->vdev_id = bit; 4362 - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 4352 + arvif->vdev_subtype = 4353 + ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 4363 4354 4364 4355 switch (vif->type) { 4365 4356 case NL80211_IFTYPE_P2P_DEVICE: 4366 4357 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4367 - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; 4358 + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4359 + (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 4368 4360 break; 4369 4361 case NL80211_IFTYPE_UNSPECIFIED: 4370 4362 case NL80211_IFTYPE_STATION: 4371 4363 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4372 4364 if (vif->p2p) 4373 - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; 4365 + arvif->vdev_subtype = 
ath10k_wmi_get_vdev_subtype 4366 + (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 4374 4367 break; 4375 4368 case NL80211_IFTYPE_ADHOC: 4376 4369 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4377 4370 break; 4378 4371 case NL80211_IFTYPE_MESH_POINT: 4379 - if (test_bit(WMI_SERVICE_MESH, ar->wmi.svc_map)) { 4380 - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH; 4372 + if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 4373 + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4374 + (ar, WMI_VDEV_SUBTYPE_MESH_11S); 4381 4375 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 4382 4376 ret = -EINVAL; 4383 4377 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); ··· 4393 4379 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4394 4380 4395 4381 if (vif->p2p) 4396 - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; 4382 + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 4383 + (ar, WMI_VDEV_SUBTYPE_P2P_GO); 4397 4384 break; 4398 4385 case NL80211_IFTYPE_MONITOR: 4399 4386 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+115 -60
drivers/net/wireless/ath/ath10k/pci.c
··· 94 94 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 95 95 static int ath10k_pci_cold_reset(struct ath10k *ar); 96 96 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 97 - static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 98 97 static int ath10k_pci_init_irq(struct ath10k *ar); 99 98 static int ath10k_pci_deinit_irq(struct ath10k *ar); 100 99 static int ath10k_pci_request_irq(struct ath10k *ar); ··· 619 620 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 620 621 } 621 622 622 - void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 623 + static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) 623 624 { 624 625 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 625 626 int ret; ··· 641 642 ath10k_pci_sleep(ar); 642 643 } 643 644 644 - u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 645 + static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) 645 646 { 646 647 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 647 648 u32 val; ··· 666 667 return val; 667 668 } 668 669 670 + inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 671 + { 672 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 673 + 674 + ar_pci->bus_ops->write32(ar, offset, value); 675 + } 676 + 677 + inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 678 + { 679 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 680 + 681 + return ar_pci->bus_ops->read32(ar, offset); 682 + } 683 + 669 684 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 670 685 { 671 686 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); ··· 700 687 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 701 688 } 702 689 703 - static bool ath10k_pci_irq_pending(struct ath10k *ar) 690 + bool ath10k_pci_irq_pending(struct ath10k *ar) 704 691 { 705 692 u32 cause; 706 693 ··· 713 700 return false; 714 701 } 715 702 716 - static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 703 + void 
ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 717 704 { 718 705 /* IMPORTANT: INTR_CLR register has to be set after 719 706 * INTR_ENABLE is set to 0, otherwise interrupt can not be ··· 729 716 PCIE_INTR_ENABLE_ADDRESS); 730 717 } 731 718 732 - static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 719 + void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 733 720 { 734 721 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 735 722 PCIE_INTR_ENABLE_ADDRESS, ··· 822 809 } 823 810 } 824 811 825 - static void ath10k_pci_rx_post(struct ath10k *ar) 812 + void ath10k_pci_rx_post(struct ath10k *ar) 826 813 { 827 814 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 828 815 int i; ··· 831 818 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 832 819 } 833 820 834 - static void ath10k_pci_rx_replenish_retry(unsigned long ptr) 821 + void ath10k_pci_rx_replenish_retry(unsigned long ptr) 835 822 { 836 823 struct ath10k *ar = (void *)ptr; 837 824 ··· 851 838 0x7ff) << 21; 852 839 break; 853 840 case ATH10K_HW_QCA99X0: 841 + case ATH10K_HW_QCA4019: 854 842 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 855 843 break; 856 844 } ··· 1021 1007 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 1022 1008 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 1023 1009 1024 - static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1025 - const void *data, int nbytes) 1010 + int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1011 + const void *data, int nbytes) 1026 1012 { 1027 1013 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1028 1014 int ret = 0; ··· 1277 1263 ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1278 1264 } 1279 1265 1280 - static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1281 - struct ath10k_hif_sg_item *items, int n_items) 1266 + int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1267 + struct ath10k_hif_sg_item *items, int n_items) 1282 1268 { 1283 1269 struct ath10k_pci *ar_pci = 
ath10k_pci_priv(ar); 1284 1270 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; ··· 1346 1332 return err; 1347 1333 } 1348 1334 1349 - static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1350 - size_t buf_len) 1335 + int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1336 + size_t buf_len) 1351 1337 { 1352 1338 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1353 1339 } 1354 1340 1355 - static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1341 + u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1356 1342 { 1357 1343 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1358 1344 ··· 1420 1406 queue_work(ar->workqueue, &ar->restart_work); 1421 1407 } 1422 1408 1423 - static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1424 - int force) 1409 + void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1410 + int force) 1425 1411 { 1426 1412 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1427 1413 ··· 1446 1432 ath10k_ce_per_engine_service(ar, pipe); 1447 1433 } 1448 1434 1449 - static void ath10k_pci_kill_tasklet(struct ath10k *ar) 1435 + void ath10k_pci_kill_tasklet(struct ath10k *ar) 1450 1436 { 1451 1437 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1452 1438 int i; ··· 1460 1446 del_timer_sync(&ar_pci->rx_post_retry); 1461 1447 } 1462 1448 1463 - static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1464 - u8 *ul_pipe, u8 *dl_pipe) 1449 + int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1450 + u8 *ul_pipe, u8 *dl_pipe) 1465 1451 { 1466 1452 const struct service_to_pipe *entry; 1467 1453 bool ul_set = false, dl_set = false; ··· 1505 1491 return 0; 1506 1492 } 1507 1493 1508 - static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1509 - u8 *ul_pipe, u8 *dl_pipe) 1494 + void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1495 + u8 *ul_pipe, 
u8 *dl_pipe) 1510 1496 { 1511 1497 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1512 1498 ··· 1530 1516 CORE_CTRL_ADDRESS, val); 1531 1517 break; 1532 1518 case ATH10K_HW_QCA99X0: 1519 + case ATH10K_HW_QCA4019: 1533 1520 /* TODO: Find appropriate register configuration for QCA99X0 1534 1521 * to mask irq/MSI. 1535 1522 */ ··· 1553 1538 CORE_CTRL_ADDRESS, val); 1554 1539 break; 1555 1540 case ATH10K_HW_QCA99X0: 1541 + case ATH10K_HW_QCA4019: 1556 1542 /* TODO: Find appropriate register configuration for QCA99X0 1557 1543 * to unmask irq/MSI. 1558 1544 */ ··· 1684 1668 } 1685 1669 } 1686 1670 1687 - static void ath10k_pci_ce_deinit(struct ath10k *ar) 1671 + void ath10k_pci_ce_deinit(struct ath10k *ar) 1688 1672 { 1689 1673 int i; 1690 1674 ··· 1692 1676 ath10k_ce_deinit_pipe(ar, i); 1693 1677 } 1694 1678 1695 - static void ath10k_pci_flush(struct ath10k *ar) 1679 + void ath10k_pci_flush(struct ath10k *ar) 1696 1680 { 1697 1681 ath10k_pci_kill_tasklet(ar); 1698 1682 ath10k_pci_buffer_cleanup(ar); ··· 1727 1711 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 1728 1712 } 1729 1713 1730 - static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 1731 - void *req, u32 req_len, 1732 - void *resp, u32 *resp_len) 1714 + int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 1715 + void *req, u32 req_len, 1716 + void *resp, u32 *resp_len) 1733 1717 { 1734 1718 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1735 1719 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; ··· 1772 1756 DMA_FROM_DEVICE); 1773 1757 ret = dma_mapping_error(ar->dev, resp_paddr); 1774 1758 if (ret) { 1775 - ret = EIO; 1759 + ret = -EIO; 1776 1760 goto err_req; 1777 1761 } 1778 1762 ··· 1923 1907 return 1; 1924 1908 } 1925 1909 1926 - static int ath10k_pci_init_config(struct ath10k *ar) 1910 + static int ath10k_bus_get_num_banks(struct ath10k *ar) 1911 + { 1912 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1913 + 1914 + return 
ar_pci->bus_ops->get_num_banks(ar); 1915 + } 1916 + 1917 + int ath10k_pci_init_config(struct ath10k *ar) 1927 1918 { 1928 1919 u32 interconnect_targ_addr; 1929 1920 u32 pcie_state_targ_addr = 0; ··· 2041 2018 /* first bank is switched to IRAM */ 2042 2019 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & 2043 2020 HI_EARLY_ALLOC_MAGIC_MASK); 2044 - ealloc_value |= ((ath10k_pci_get_num_banks(ar) << 2021 + ealloc_value |= ((ath10k_bus_get_num_banks(ar) << 2045 2022 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & 2046 2023 HI_EARLY_ALLOC_IRAM_BANKS_MASK); 2047 2024 ··· 2094 2071 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); 2095 2072 } 2096 2073 2097 - static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2074 + int ath10k_pci_alloc_pipes(struct ath10k *ar) 2098 2075 { 2099 2076 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2100 2077 struct ath10k_pci_pipe *pipe; ··· 2125 2102 return 0; 2126 2103 } 2127 2104 2128 - static void ath10k_pci_free_pipes(struct ath10k *ar) 2105 + void ath10k_pci_free_pipes(struct ath10k *ar) 2129 2106 { 2130 2107 int i; 2131 2108 ··· 2133 2110 ath10k_ce_free_pipe(ar, i); 2134 2111 } 2135 2112 2136 - static int ath10k_pci_init_pipes(struct ath10k *ar) 2113 + int ath10k_pci_init_pipes(struct ath10k *ar) 2137 2114 { 2138 2115 int i, ret; 2139 2116 ··· 2476 2453 return ret; 2477 2454 } 2478 2455 2479 - static void ath10k_pci_hif_power_down(struct ath10k *ar) 2456 + void ath10k_pci_hif_power_down(struct ath10k *ar) 2480 2457 { 2481 2458 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); 2482 2459 ··· 2745 2722 free_irq(ar_pci->pdev->irq + i, ar); 2746 2723 } 2747 2724 2748 - static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) 2725 + void ath10k_pci_init_irq_tasklets(struct ath10k *ar) 2749 2726 { 2750 2727 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2751 2728 int i; ··· 2831 2808 return 0; 2832 2809 } 2833 2810 2834 - static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2811 + int 
ath10k_pci_wait_for_target_init(struct ath10k *ar) 2835 2812 { 2836 2813 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2837 2814 unsigned long timeout; ··· 3012 2989 return false; 3013 2990 } 3014 2991 2992 + int ath10k_pci_setup_resource(struct ath10k *ar) 2993 + { 2994 + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2995 + int ret; 2996 + 2997 + spin_lock_init(&ar_pci->ce_lock); 2998 + spin_lock_init(&ar_pci->ps_lock); 2999 + 3000 + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 3001 + (unsigned long)ar); 3002 + 3003 + if (QCA_REV_6174(ar)) 3004 + ath10k_pci_override_ce_config(ar); 3005 + 3006 + ret = ath10k_pci_alloc_pipes(ar); 3007 + if (ret) { 3008 + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3009 + ret); 3010 + return ret; 3011 + } 3012 + 3013 + return 0; 3014 + } 3015 + 3016 + void ath10k_pci_release_resource(struct ath10k *ar) 3017 + { 3018 + ath10k_pci_kill_tasklet(ar); 3019 + ath10k_pci_ce_deinit(ar); 3020 + ath10k_pci_free_pipes(ar); 3021 + } 3022 + 3023 + static const struct ath10k_bus_ops ath10k_pci_bus_ops = { 3024 + .read32 = ath10k_bus_pci_read32, 3025 + .write32 = ath10k_bus_pci_write32, 3026 + .get_num_banks = ath10k_pci_get_num_banks, 3027 + }; 3028 + 3015 3029 static int ath10k_pci_probe(struct pci_dev *pdev, 3016 3030 const struct pci_device_id *pci_dev) 3017 3031 { ··· 3099 3039 ar_pci->ar = ar; 3100 3040 ar->dev_id = pci_dev->device; 3101 3041 ar_pci->pci_ps = pci_ps; 3042 + ar_pci->bus_ops = &ath10k_pci_bus_ops; 3102 3043 3103 3044 ar->id.vendor = pdev->vendor; 3104 3045 ar->id.device = pdev->device; 3105 3046 ar->id.subsystem_vendor = pdev->subsystem_vendor; 3106 3047 ar->id.subsystem_device = pdev->subsystem_device; 3107 3048 3108 - spin_lock_init(&ar_pci->ce_lock); 3109 - spin_lock_init(&ar_pci->ps_lock); 3110 - 3111 - setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 3112 - (unsigned long)ar); 3113 3049 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, 3114 3050 (unsigned 
long)ar); 3051 + 3052 + ret = ath10k_pci_setup_resource(ar); 3053 + if (ret) { 3054 + ath10k_err(ar, "failed to setup resource: %d\n", ret); 3055 + goto err_core_destroy; 3056 + } 3115 3057 3116 3058 ret = ath10k_pci_claim(ar); 3117 3059 if (ret) { 3118 3060 ath10k_err(ar, "failed to claim device: %d\n", ret); 3119 - goto err_core_destroy; 3120 - } 3121 - 3122 - if (QCA_REV_6174(ar)) 3123 - ath10k_pci_override_ce_config(ar); 3124 - 3125 - ret = ath10k_pci_alloc_pipes(ar); 3126 - if (ret) { 3127 - ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3128 - ret); 3129 - goto err_sleep; 3061 + goto err_free_pipes; 3130 3062 } 3131 3063 3132 3064 ret = ath10k_pci_force_wake(ar); 3133 3065 if (ret) { 3134 3066 ath10k_warn(ar, "failed to wake up device : %d\n", ret); 3135 - goto err_free_pipes; 3067 + goto err_sleep; 3136 3068 } 3137 3069 3138 3070 ath10k_pci_ce_deinit(ar); ··· 3133 3081 ret = ath10k_pci_init_irq(ar); 3134 3082 if (ret) { 3135 3083 ath10k_err(ar, "failed to init irqs: %d\n", ret); 3136 - goto err_free_pipes; 3084 + goto err_sleep; 3137 3085 } 3138 3086 3139 3087 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", ··· 3179 3127 err_deinit_irq: 3180 3128 ath10k_pci_deinit_irq(ar); 3181 3129 3182 - err_free_pipes: 3183 - ath10k_pci_free_pipes(ar); 3184 - 3185 3130 err_sleep: 3186 3131 ath10k_pci_sleep_sync(ar); 3187 3132 ath10k_pci_release(ar); 3133 + 3134 + err_free_pipes: 3135 + ath10k_pci_free_pipes(ar); 3188 3136 3189 3137 err_core_destroy: 3190 3138 ath10k_core_destroy(ar); ··· 3209 3157 3210 3158 ath10k_core_unregister(ar); 3211 3159 ath10k_pci_free_irq(ar); 3212 - ath10k_pci_kill_tasklet(ar); 3213 3160 ath10k_pci_deinit_irq(ar); 3214 - ath10k_pci_ce_deinit(ar); 3215 - ath10k_pci_free_pipes(ar); 3161 + ath10k_pci_release_resource(ar); 3216 3162 ath10k_pci_sleep_sync(ar); 3217 3163 ath10k_pci_release(ar); 3218 3164 ath10k_core_destroy(ar); ··· 3234 3184 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3235 3185 
ret); 3236 3186 3187 + ret = ath10k_ahb_init(); 3188 + if (ret) 3189 + printk(KERN_ERR "ahb init failed: %d\n", ret); 3190 + 3237 3191 return ret; 3238 3192 } 3239 3193 module_init(ath10k_pci_init); ··· 3245 3191 static void __exit ath10k_pci_exit(void) 3246 3192 { 3247 3193 pci_unregister_driver(&ath10k_pci_driver); 3194 + ath10k_ahb_exit(); 3248 3195 } 3249 3196 3250 3197 module_exit(ath10k_pci_exit);
+49
drivers/net/wireless/ath/ath10k/pci.h
··· 22 22 23 23 #include "hw.h" 24 24 #include "ce.h" 25 + #include "ahb.h" 25 26 26 27 /* 27 28 * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite ··· 158 157 u32 rev_id; 159 158 }; 160 159 160 + struct ath10k_bus_ops { 161 + u32 (*read32)(struct ath10k *ar, u32 offset); 162 + void (*write32)(struct ath10k *ar, u32 offset, u32 value); 163 + int (*get_num_banks)(struct ath10k *ar); 164 + }; 165 + 161 166 struct ath10k_pci { 162 167 struct pci_dev *pdev; 163 168 struct device *dev; ··· 232 225 * on MMIO read/write. 233 226 */ 234 227 bool pci_ps; 228 + 229 + const struct ath10k_bus_ops *bus_ops; 230 + 231 + /* Keep this entry in the last, memory for struct ath10k_ahb is 232 + * allocated (ahb support enabled case) in the continuation of 233 + * this struct. 234 + */ 235 + struct ath10k_ahb ahb[0]; 235 236 }; 236 237 237 238 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) ··· 267 252 u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); 268 253 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); 269 254 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); 255 + 256 + int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 257 + struct ath10k_hif_sg_item *items, int n_items); 258 + int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 259 + size_t buf_len); 260 + int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 261 + const void *data, int nbytes); 262 + int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len, 263 + void *resp, u32 *resp_len); 264 + int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 265 + u8 *ul_pipe, u8 *dl_pipe); 266 + void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, 267 + u8 *dl_pipe); 268 + void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 269 + int force); 270 + u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe); 271 + void ath10k_pci_hif_power_down(struct ath10k 
*ar); 272 + int ath10k_pci_alloc_pipes(struct ath10k *ar); 273 + void ath10k_pci_free_pipes(struct ath10k *ar); 274 + void ath10k_pci_free_pipes(struct ath10k *ar); 275 + void ath10k_pci_rx_replenish_retry(unsigned long ptr); 276 + void ath10k_pci_ce_deinit(struct ath10k *ar); 277 + void ath10k_pci_init_irq_tasklets(struct ath10k *ar); 278 + void ath10k_pci_kill_tasklet(struct ath10k *ar); 279 + int ath10k_pci_init_pipes(struct ath10k *ar); 280 + int ath10k_pci_init_config(struct ath10k *ar); 281 + void ath10k_pci_rx_post(struct ath10k *ar); 282 + void ath10k_pci_flush(struct ath10k *ar); 283 + void ath10k_pci_enable_legacy_irq(struct ath10k *ar); 284 + bool ath10k_pci_irq_pending(struct ath10k *ar); 285 + void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar); 286 + int ath10k_pci_wait_for_target_init(struct ath10k *ar); 287 + int ath10k_pci_setup_resource(struct ath10k *ar); 288 + void ath10k_pci_release_resource(struct ath10k *ar); 270 289 271 290 /* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too 272 291 * frequently. To avoid this put SoC to sleep after a very conservative grace
+3
drivers/net/wireless/ath/ath10k/targaddrs.h
··· 456 456 #define QCA99X0_BOARD_DATA_SZ 12288 457 457 #define QCA99X0_BOARD_EXT_DATA_SZ 0 458 458 459 + #define QCA4019_BOARD_DATA_SZ 12064 460 + #define QCA4019_BOARD_EXT_DATA_SZ 0 461 + 459 462 #endif /* __TARGADDRS_H__ */
+12 -3
drivers/net/wireless/ath/ath10k/trace.h
··· 250 250 TP_STRUCT__entry( 251 251 __string(device, dev_name(ar->dev)) 252 252 __string(driver, dev_driver_string(ar->dev)) 253 + __field(u8, hw_type); 253 254 __field(size_t, buf_len) 254 255 __dynamic_array(u8, buf, buf_len) 255 256 ), ··· 258 257 TP_fast_assign( 259 258 __assign_str(device, dev_name(ar->dev)); 260 259 __assign_str(driver, dev_driver_string(ar->dev)); 260 + __entry->hw_type = ar->hw_rev; 261 261 __entry->buf_len = buf_len; 262 262 memcpy(__get_dynamic_array(buf), buf, buf_len); 263 263 ), 264 264 265 265 TP_printk( 266 - "%s %s len %zu", 266 + "%s %s %d len %zu", 267 267 __get_str(driver), 268 268 __get_str(device), 269 + __entry->hw_type, 269 270 __entry->buf_len 270 271 ) 271 272 ); ··· 280 277 TP_STRUCT__entry( 281 278 __string(device, dev_name(ar->dev)) 282 279 __string(driver, dev_driver_string(ar->dev)) 280 + __field(u8, hw_type); 283 281 __field(u16, buf_len) 284 282 __dynamic_array(u8, pktlog, buf_len) 285 283 ), ··· 288 284 TP_fast_assign( 289 285 __assign_str(device, dev_name(ar->dev)); 290 286 __assign_str(driver, dev_driver_string(ar->dev)); 287 + __entry->hw_type = ar->hw_rev; 291 288 __entry->buf_len = buf_len; 292 289 memcpy(__get_dynamic_array(pktlog), buf, buf_len); 293 290 ), 294 291 295 292 TP_printk( 296 - "%s %s size %hu", 293 + "%s %s %d size %hu", 297 294 __get_str(driver), 298 295 __get_str(device), 296 + __entry->hw_type, 299 297 __entry->buf_len 300 298 ) 301 299 ); ··· 446 440 TP_STRUCT__entry( 447 441 __string(device, dev_name(ar->dev)) 448 442 __string(driver, dev_driver_string(ar->dev)) 443 + __field(u8, hw_type); 449 444 __field(u16, len) 450 445 __dynamic_array(u8, rxdesc, len) 451 446 ), ··· 454 447 TP_fast_assign( 455 448 __assign_str(device, dev_name(ar->dev)); 456 449 __assign_str(driver, dev_driver_string(ar->dev)); 450 + __entry->hw_type = ar->hw_rev; 457 451 __entry->len = len; 458 452 memcpy(__get_dynamic_array(rxdesc), data, len); 459 453 ), 460 454 461 455 TP_printk( 462 - "%s %s rxdesc len %d", 456 + 
"%s %s %d rxdesc len %d", 463 457 __get_str(driver), 464 458 __get_str(device), 459 + __entry->hw_type, 465 460 __entry->len 466 461 ) 467 462 );
+11
drivers/net/wireless/ath/ath10k/wmi-ops.h
··· 186 186 u8 enable, 187 187 u32 detect_level, 188 188 u32 detect_margin); 189 + int (*get_vdev_subtype)(struct ath10k *ar, 190 + enum wmi_vdev_subtype subtype); 189 191 }; 190 192 191 193 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); ··· 1327 1325 1328 1326 return ath10k_wmi_cmd_send(ar, skb, 1329 1327 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); 1328 + } 1329 + 1330 + static inline int 1331 + ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) 1332 + { 1333 + if (!ar->wmi.ops->get_vdev_subtype) 1334 + return -EOPNOTSUPP; 1335 + 1336 + return ar->wmi.ops->get_vdev_subtype(ar, subtype); 1330 1337 } 1331 1338 1332 1339 #endif
+1
drivers/net/wireless/ath/ath10k/wmi-tlv.c
··· 3483 3483 .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, 3484 3484 .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, 3485 3485 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, 3486 + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 3486 3487 }; 3487 3488 3488 3489 static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
+150 -12
drivers/net/wireless/ath/ath10k/wmi.c
··· 2862 2862 /* fw doesn't implement vdev stats */ 2863 2863 2864 2864 for (i = 0; i < num_peer_stats; i++) { 2865 - const struct wmi_10_2_4_peer_stats *src; 2865 + const struct wmi_10_2_4_ext_peer_stats *src; 2866 2866 struct ath10k_fw_stats_peer *dst; 2867 + int stats_len; 2868 + bool ext_peer_stats_support; 2869 + 2870 + ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS, 2871 + ar->wmi.svc_map); 2872 + if (ext_peer_stats_support) 2873 + stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); 2874 + else 2875 + stats_len = sizeof(struct wmi_10_2_4_peer_stats); 2867 2876 2868 2877 src = (void *)skb->data; 2869 - if (!skb_pull(skb, sizeof(*src))) 2878 + if (!skb_pull(skb, stats_len)) 2870 2879 return -EPROTO; 2871 2880 2872 2881 dst = kzalloc(sizeof(*dst), GFP_ATOMIC); ··· 2885 2876 ath10k_wmi_pull_peer_stats(&src->common.old, dst); 2886 2877 2887 2878 dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); 2879 + 2880 + if (ext_peer_stats_support) 2881 + dst->rx_duration = __le32_to_cpu(src->rx_duration); 2888 2882 /* FIXME: expose 10.2 specific values */ 2889 2883 2890 2884 list_add_tail(&dst->list, &stats->peers); ··· 3196 3184 struct sk_buff *bcn, 3197 3185 const struct wmi_p2p_noa_info *noa) 3198 3186 { 3199 - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 3187 + if (!arvif->vif->p2p) 3200 3188 return; 3201 3189 3202 3190 ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); ··· 3250 3238 ev->bcn_info[i].tim_info.tim_num_ps_pending; 3251 3239 3252 3240 arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info; 3241 + i++; 3242 + } 3243 + 3244 + return 0; 3245 + } 3246 + 3247 + static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar, 3248 + struct sk_buff *skb, 3249 + struct wmi_swba_ev_arg *arg) 3250 + { 3251 + struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data; 3252 + u32 map; 3253 + size_t i; 3254 + 3255 + if (skb->len < sizeof(*ev)) 3256 + return -EPROTO; 3257 + 3258 + skb_pull(skb, sizeof(*ev)); 3259 + arg->vdev_map = 
ev->vdev_map; 3260 + 3261 + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { 3262 + if (!(map & BIT(0))) 3263 + continue; 3264 + 3265 + /* If this happens there were some changes in firmware and 3266 + * ath10k should update the max size of tim_info array. 3267 + */ 3268 + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) 3269 + break; 3270 + 3271 + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > 3272 + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { 3273 + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); 3274 + return -EPROTO; 3275 + } 3276 + 3277 + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; 3278 + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; 3279 + arg->tim_info[i].tim_bitmap = 3280 + ev->bcn_info[i].tim_info.tim_bitmap; 3281 + arg->tim_info[i].tim_changed = 3282 + ev->bcn_info[i].tim_info.tim_changed; 3283 + arg->tim_info[i].tim_num_ps_pending = 3284 + ev->bcn_info[i].tim_info.tim_num_ps_pending; 3253 3285 i++; 3254 3286 } 3255 3287 ··· 4618 4562 4619 4563 if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { 4620 4564 ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + 4621 - TARGET_10_4_NUM_VDEVS; 4622 - ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + 4623 - TARGET_10_4_NUM_VDEVS; 4565 + ar->max_num_vdevs; 4566 + ar->num_active_peers = ar->hw_params.qcache_active_peers + 4567 + ar->max_num_vdevs; 4624 4568 ar->num_tids = ar->num_active_peers * 2; 4625 4569 ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; 4626 4570 } ··· 5516 5460 u32 len, val, features; 5517 5461 5518 5462 config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); 5519 - config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 5520 5463 config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); 5521 - config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 5464 + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { 5465 + config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); 5466 + 
config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); 5467 + } else { 5468 + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); 5469 + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); 5470 + } 5471 + 5522 5472 config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); 5523 5473 config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); 5524 5474 config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); ··· 5579 5517 test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) 5580 5518 features |= WMI_10_2_COEX_GPIO; 5581 5519 5520 + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) 5521 + features |= WMI_10_2_PEER_STATS; 5522 + 5582 5523 cmd->resource_config.feature_mask = __cpu_to_le32(features); 5583 5524 5584 5525 memcpy(&cmd->resource_config.common, &config, sizeof(config)); ··· 5608 5543 __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); 5609 5544 config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); 5610 5545 config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); 5611 - config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK); 5612 - config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK); 5546 + config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask); 5547 + config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask); 5613 5548 5614 5549 config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); 5615 5550 config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); ··· 5640 5575 config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); 5641 5576 config.gtk_offload_max_vdev = 5642 5577 __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); 5643 - config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC); 5578 + config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); 5644 5579 config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); 5645 5580 config.max_peer_ext_stats = 5646 5581 
__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); ··· 7191 7126 "Peer TX rate", peer->peer_tx_rate); 7192 7127 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7193 7128 "Peer RX rate", peer->peer_rx_rate); 7129 + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7130 + "Peer RX duration", peer->rx_duration); 7131 + 7194 7132 len += scnprintf(buf + len, buf_len - len, "\n"); 7195 7133 *length = len; 7196 7134 } ··· 7419 7351 buf[len] = 0; 7420 7352 } 7421 7353 7354 + int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, 7355 + enum wmi_vdev_subtype subtype) 7356 + { 7357 + switch (subtype) { 7358 + case WMI_VDEV_SUBTYPE_NONE: 7359 + return WMI_VDEV_SUBTYPE_LEGACY_NONE; 7360 + case WMI_VDEV_SUBTYPE_P2P_DEVICE: 7361 + return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV; 7362 + case WMI_VDEV_SUBTYPE_P2P_CLIENT: 7363 + return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI; 7364 + case WMI_VDEV_SUBTYPE_P2P_GO: 7365 + return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO; 7366 + case WMI_VDEV_SUBTYPE_PROXY_STA: 7367 + return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; 7368 + case WMI_VDEV_SUBTYPE_MESH_11S: 7369 + case WMI_VDEV_SUBTYPE_MESH_NON_11S: 7370 + return -ENOTSUPP; 7371 + } 7372 + return -ENOTSUPP; 7373 + } 7374 + 7375 + static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, 7376 + enum wmi_vdev_subtype subtype) 7377 + { 7378 + switch (subtype) { 7379 + case WMI_VDEV_SUBTYPE_NONE: 7380 + return WMI_VDEV_SUBTYPE_10_2_4_NONE; 7381 + case WMI_VDEV_SUBTYPE_P2P_DEVICE: 7382 + return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV; 7383 + case WMI_VDEV_SUBTYPE_P2P_CLIENT: 7384 + return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI; 7385 + case WMI_VDEV_SUBTYPE_P2P_GO: 7386 + return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO; 7387 + case WMI_VDEV_SUBTYPE_PROXY_STA: 7388 + return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA; 7389 + case WMI_VDEV_SUBTYPE_MESH_11S: 7390 + return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; 7391 + case WMI_VDEV_SUBTYPE_MESH_NON_11S: 7392 + return -ENOTSUPP; 7393 + } 7394 + return -ENOTSUPP; 7395 + } 7396 + 7397 + static int 
ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, 7398 + enum wmi_vdev_subtype subtype) 7399 + { 7400 + switch (subtype) { 7401 + case WMI_VDEV_SUBTYPE_NONE: 7402 + return WMI_VDEV_SUBTYPE_10_4_NONE; 7403 + case WMI_VDEV_SUBTYPE_P2P_DEVICE: 7404 + return WMI_VDEV_SUBTYPE_10_4_P2P_DEV; 7405 + case WMI_VDEV_SUBTYPE_P2P_CLIENT: 7406 + return WMI_VDEV_SUBTYPE_10_4_P2P_CLI; 7407 + case WMI_VDEV_SUBTYPE_P2P_GO: 7408 + return WMI_VDEV_SUBTYPE_10_4_P2P_GO; 7409 + case WMI_VDEV_SUBTYPE_PROXY_STA: 7410 + return WMI_VDEV_SUBTYPE_10_4_PROXY_STA; 7411 + case WMI_VDEV_SUBTYPE_MESH_11S: 7412 + return WMI_VDEV_SUBTYPE_10_4_MESH_11S; 7413 + case WMI_VDEV_SUBTYPE_MESH_NON_11S: 7414 + return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; 7415 + } 7416 + return -ENOTSUPP; 7417 + } 7418 + 7422 7419 static const struct wmi_ops wmi_ops = { 7423 7420 .rx = ath10k_wmi_op_rx, 7424 7421 .map_svc = wmi_main_svc_map, ··· 7543 7410 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 7544 7411 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 7545 7412 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, 7413 + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 7546 7414 /* .gen_bcn_tmpl not implemented */ 7547 7415 /* .gen_prb_tmpl not implemented */ 7548 7416 /* .gen_p2p_go_bcn_ie not implemented */ ··· 7611 7477 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 7612 7478 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 7613 7479 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 7480 + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 7614 7481 /* .gen_bcn_tmpl not implemented */ 7615 7482 /* .gen_prb_tmpl not implemented */ 7616 7483 /* .gen_p2p_go_bcn_ie not implemented */ ··· 7680 7545 .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, 7681 7546 .gen_delba_send = ath10k_wmi_op_gen_delba_send, 7682 7547 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 7548 + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, 7683 7549 /* .gen_pdev_enable_adaptive_cca not implemented */ 7684 7550 }; 
7685 7551 ··· 7702 7566 .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, 7703 7567 .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, 7704 7568 .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, 7705 - .pull_swba = ath10k_wmi_op_pull_swba_ev, 7569 + .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev, 7706 7570 .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, 7707 7571 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 7708 7572 .pull_rdy = ath10k_wmi_op_pull_rdy_ev, ··· 7747 7611 .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, 7748 7612 .gen_pdev_enable_adaptive_cca = 7749 7613 ath10k_wmi_op_gen_pdev_enable_adaptive_cca, 7614 + .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, 7750 7615 /* .gen_bcn_tmpl not implemented */ 7751 7616 /* .gen_prb_tmpl not implemented */ 7752 7617 /* .gen_p2p_go_bcn_ie not implemented */ ··· 7814 7677 /* shared with 10.2 */ 7815 7678 .gen_request_stats = ath10k_wmi_op_gen_request_stats, 7816 7679 .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, 7680 + .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype, 7817 7681 }; 7818 7682 7819 7683 int ath10k_wmi_attach(struct ath10k *ar)
+78 -14
drivers/net/wireless/ath/ath10k/wmi.h
··· 176 176 WMI_SERVICE_AUX_CHAN_LOAD_INTF, 177 177 WMI_SERVICE_BSS_CHANNEL_INFO_64, 178 178 WMI_SERVICE_EXT_RES_CFG_SUPPORT, 179 - WMI_SERVICE_MESH, 179 + WMI_SERVICE_MESH_11S, 180 + WMI_SERVICE_MESH_NON_11S, 181 + WMI_SERVICE_PEER_STATS, 182 + WMI_SERVICE_RESTRT_CHNL_SUPPORT, 180 183 181 184 /* keep last */ 182 185 WMI_SERVICE_MAX, ··· 216 213 WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, 217 214 WMI_10X_SERVICE_MESH, 218 215 WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, 216 + WMI_10X_SERVICE_PEER_STATS, 219 217 }; 220 218 221 219 enum wmi_main_service { ··· 298 294 WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, 299 295 WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, 300 296 WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, 301 - WMI_10_4_SERVICE_MESH, 297 + WMI_10_4_SERVICE_MESH_NON_11S, 298 + WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, 299 + WMI_10_4_SERVICE_PEER_STATS, 300 + WMI_10_4_SERVICE_MESH_11S, 302 301 }; 303 302 304 303 static inline char *wmi_service_name(int service_id) ··· 392 385 SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); 393 386 SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); 394 387 SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); 395 - SVCSTR(WMI_SERVICE_MESH); 388 + SVCSTR(WMI_SERVICE_MESH_11S); 389 + SVCSTR(WMI_SERVICE_MESH_NON_11S); 390 + SVCSTR(WMI_SERVICE_PEER_STATS); 391 + SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); 396 392 default: 397 393 return NULL; 398 394 } ··· 470 460 SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, 471 461 WMI_SERVICE_BSS_CHANNEL_INFO_64, len); 472 462 SVCMAP(WMI_10X_SERVICE_MESH, 473 - WMI_SERVICE_MESH, len); 463 + WMI_SERVICE_MESH_11S, len); 474 464 SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, 475 465 WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); 466 + SVCMAP(WMI_10X_SERVICE_PEER_STATS, 467 + WMI_SERVICE_PEER_STATS, len); 476 468 } 477 469 478 470 static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, ··· 635 623 WMI_SERVICE_BSS_CHANNEL_INFO_64, len); 636 624 SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, 637 625 WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); 638 - 
SVCMAP(WMI_10_4_SERVICE_MESH, 639 - WMI_SERVICE_MESH, len); 626 + SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S, 627 + WMI_SERVICE_MESH_NON_11S, len); 628 + SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, 629 + WMI_SERVICE_RESTRT_CHNL_SUPPORT, len); 630 + SVCMAP(WMI_10_4_SERVICE_PEER_STATS, 631 + WMI_SERVICE_PEER_STATS, len); 632 + SVCMAP(WMI_10_4_SERVICE_MESH_11S, 633 + WMI_SERVICE_MESH_11S, len); 640 634 } 641 635 642 636 #undef SVCMAP ··· 1818 1800 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) 1819 1801 1820 1802 #define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ 1821 - #define WMI_10_4_MAX_SPATIAL_STREAM 4 1822 1803 1823 1804 /* HT Capabilities*/ 1824 1805 #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ ··· 2434 2417 WMI_10_2_RX_BATCH_MODE = BIT(0), 2435 2418 WMI_10_2_ATF_CONFIG = BIT(1), 2436 2419 WMI_10_2_COEX_GPIO = BIT(3), 2420 + WMI_10_2_PEER_STATS = BIT(7), 2437 2421 }; 2438 2422 2439 2423 struct wmi_resource_config_10_2 { ··· 4245 4227 4246 4228 struct wmi_10_2_4_peer_stats { 4247 4229 struct wmi_10_2_peer_stats common; 4248 - __le32 unknown_value; /* FIXME: what is this word? 
*/ 4230 + __le32 peer_rssi_changed; 4231 + } __packed; 4232 + 4233 + struct wmi_10_2_4_ext_peer_stats { 4234 + struct wmi_10_2_peer_stats common; 4235 + __le32 peer_rssi_changed; 4236 + __le32 rx_duration; 4249 4237 } __packed; 4250 4238 4251 4239 struct wmi_10_4_peer_stats { ··· 4294 4270 }; 4295 4271 4296 4272 enum wmi_vdev_subtype { 4297 - WMI_VDEV_SUBTYPE_NONE = 0, 4298 - WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, 4299 - WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, 4300 - WMI_VDEV_SUBTYPE_P2P_GO = 3, 4301 - WMI_VDEV_SUBTYPE_PROXY_STA = 4, 4302 - WMI_VDEV_SUBTYPE_MESH = 5, 4273 + WMI_VDEV_SUBTYPE_NONE, 4274 + WMI_VDEV_SUBTYPE_P2P_DEVICE, 4275 + WMI_VDEV_SUBTYPE_P2P_CLIENT, 4276 + WMI_VDEV_SUBTYPE_P2P_GO, 4277 + WMI_VDEV_SUBTYPE_PROXY_STA, 4278 + WMI_VDEV_SUBTYPE_MESH_11S, 4279 + WMI_VDEV_SUBTYPE_MESH_NON_11S, 4280 + }; 4281 + 4282 + enum wmi_vdev_subtype_legacy { 4283 + WMI_VDEV_SUBTYPE_LEGACY_NONE = 0, 4284 + WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1, 4285 + WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2, 4286 + WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3, 4287 + WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4, 4288 + }; 4289 + 4290 + enum wmi_vdev_subtype_10_2_4 { 4291 + WMI_VDEV_SUBTYPE_10_2_4_NONE = 0, 4292 + WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1, 4293 + WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2, 4294 + WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3, 4295 + WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4, 4296 + WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5, 4297 + }; 4298 + 4299 + enum wmi_vdev_subtype_10_4 { 4300 + WMI_VDEV_SUBTYPE_10_4_NONE = 0, 4301 + WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1, 4302 + WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2, 4303 + WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3, 4304 + WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4, 4305 + WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5, 4306 + WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6, 4303 4307 }; 4304 4308 4305 4309 /* values for vdev_subtype */ ··· 5494 5442 struct wmi_bcn_info bcn_info[0]; 5495 5443 } __packed; 5496 5444 5445 + struct wmi_10_2_4_bcn_info { 5446 + struct wmi_tim_info tim_info; 5447 + /* The 10.2.4 FW doesn't have p2p 
NOA info */ 5448 + } __packed; 5449 + 5450 + struct wmi_10_2_4_host_swba_event { 5451 + __le32 vdev_map; 5452 + struct wmi_10_2_4_bcn_info bcn_info[0]; 5453 + } __packed; 5454 + 5497 5455 /* 16 words = 512 client + 1 word = for guard */ 5498 5456 #define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 5499 5457 ··· 6498 6436 void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, 6499 6437 struct ath10k_fw_stats *fw_stats, 6500 6438 char *buf); 6439 + int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, 6440 + enum wmi_vdev_subtype subtype); 6501 6441 6502 6442 #endif /* _WMI_H_ */
+9 -27
drivers/net/wireless/ath/ath9k/ani.c
··· 126 126 127 127 static void ath9k_ani_restart(struct ath_hw *ah) 128 128 { 129 - struct ar5416AniState *aniState; 129 + struct ar5416AniState *aniState = &ah->ani; 130 130 131 - if (!ah->curchan) 132 - return; 133 - 134 - aniState = &ah->ani; 135 131 aniState->listenTime = 0; 136 132 137 133 ENABLE_REGWRITE_BUFFER(ah); ··· 217 221 218 222 static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) 219 223 { 220 - struct ar5416AniState *aniState; 221 - 222 - if (!ah->curchan) 223 - return; 224 - 225 - aniState = &ah->ani; 224 + struct ar5416AniState *aniState = &ah->ani; 226 225 227 226 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) 228 227 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); ··· 272 281 273 282 static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) 274 283 { 275 - struct ar5416AniState *aniState; 276 - 277 - if (!ah->curchan) 278 - return; 279 - 280 - aniState = &ah->ani; 284 + struct ar5416AniState *aniState = &ah->ani; 281 285 282 286 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) 283 287 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, ··· 285 299 */ 286 300 static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) 287 301 { 288 - struct ar5416AniState *aniState; 289 - 290 - aniState = &ah->ani; 302 + struct ar5416AniState *aniState = &ah->ani; 291 303 292 304 /* lower OFDM noise immunity */ 293 305 if (aniState->ofdmNoiseImmunityLevel > 0 && ··· 313 329 struct ath_common *common = ath9k_hw_common(ah); 314 330 int ofdm_nil, cck_nil; 315 331 316 - if (!ah->curchan) 332 + if (!chan) 317 333 return; 318 334 319 335 BUG_ON(aniState == NULL); ··· 400 416 401 417 void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) 402 418 { 403 - struct ar5416AniState *aniState; 419 + struct ar5416AniState *aniState = &ah->ani; 404 420 struct ath_common *common = ath9k_hw_common(ah); 405 421 u32 ofdmPhyErrRate, cckPhyErrRate; 406 422 407 - if (!ah->curchan) 408 - return; 
409 - 410 - aniState = &ah->ani; 411 423 if (!ath9k_hw_ani_read_counters(ah)) 412 424 return; 413 425 ··· 430 450 } else if (cckPhyErrRate > ah->config.cck_trig_high) { 431 451 ath9k_hw_ani_cck_err_trigger(ah); 432 452 aniState->ofdmsTurn = true; 433 - } 453 + } else 454 + return; 455 + 434 456 ath9k_ani_restart(ah); 435 457 } 436 458 }
+40 -39
drivers/net/wireless/ath/ath9k/ar9003_aic.c
··· 53 53 return true; 54 54 } 55 55 56 - static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram, 56 + static int16_t ar9003_aic_find_valid(bool *cal_sram_valid, 57 57 bool dir, u8 index) 58 58 { 59 59 int16_t i; 60 60 61 61 if (dir) { 62 62 for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 63 - if (cal_sram[i].valid) 63 + if (cal_sram_valid[i]) 64 64 break; 65 65 } 66 66 } else { 67 67 for (i = index - 1; i >= 0; i--) { 68 - if (cal_sram[i].valid) 68 + if (cal_sram_valid[i]) 69 69 break; 70 70 } 71 71 } ··· 264 264 static bool ar9003_aic_cal_post_process(struct ath_hw *ah) 265 265 { 266 266 struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 267 - struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL]; 267 + bool cal_sram_valid[ATH_AIC_MAX_BT_CHANNEL]; 268 268 struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL]; 269 269 u32 dir_path_gain_idx, quad_path_gain_idx, value; 270 270 u32 fixed_com_att_db; ··· 272 272 int16_t i; 273 273 bool ret = true; 274 274 275 - memset(&cal_sram, 0, sizeof(cal_sram)); 275 + memset(&cal_sram_valid, 0, sizeof(cal_sram_valid)); 276 276 memset(&aic_sram, 0, sizeof(aic_sram)); 277 277 278 278 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 279 + struct ath_aic_sram_info sram; 279 280 value = aic->aic_sram[i]; 280 281 281 - cal_sram[i].valid = 282 + cal_sram_valid[i] = sram.valid = 282 283 MS(value, AR_PHY_AIC_SRAM_VALID); 283 - cal_sram[i].rot_quad_att_db = 284 + sram.rot_quad_att_db = 284 285 MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB); 285 - cal_sram[i].vga_quad_sign = 286 + sram.vga_quad_sign = 286 287 MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN); 287 - cal_sram[i].rot_dir_att_db = 288 + sram.rot_dir_att_db = 288 289 MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB); 289 - cal_sram[i].vga_dir_sign = 290 + sram.vga_dir_sign = 290 291 MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN); 291 - cal_sram[i].com_att_6db = 292 + sram.com_att_6db = 292 293 MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB); 293 294 294 - if (cal_sram[i].valid) { 295 - 
dir_path_gain_idx = cal_sram[i].rot_dir_att_db + 296 - com_att_db_table[cal_sram[i].com_att_6db]; 297 - quad_path_gain_idx = cal_sram[i].rot_quad_att_db + 298 - com_att_db_table[cal_sram[i].com_att_6db]; 295 + if (sram.valid) { 296 + dir_path_gain_idx = sram.rot_dir_att_db + 297 + com_att_db_table[sram.com_att_6db]; 298 + quad_path_gain_idx = sram.rot_quad_att_db + 299 + com_att_db_table[sram.com_att_6db]; 299 300 300 - dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1; 301 - quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1; 301 + dir_path_sign = (sram.vga_dir_sign) ? 1 : -1; 302 + quad_path_sign = (sram.vga_quad_sign) ? 1 : -1; 302 303 303 304 aic_sram[i].dir_path_gain_lin = dir_path_sign * 304 305 aic_lin_table[dir_path_gain_idx]; ··· 311 310 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 312 311 int16_t start_idx, end_idx; 313 312 314 - if (cal_sram[i].valid) 313 + if (cal_sram_valid[i]) 315 314 continue; 316 315 317 - start_idx = ar9003_aic_find_valid(cal_sram, 0, i); 318 - end_idx = ar9003_aic_find_valid(cal_sram, 1, i); 316 + start_idx = ar9003_aic_find_valid(cal_sram_valid, 0, i); 317 + end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, i); 319 318 320 319 if (start_idx < 0) { 321 320 /* extrapolation */ 322 321 start_idx = end_idx; 323 - end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx); 322 + end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, start_idx); 324 323 325 324 if (end_idx < 0) { 326 325 ret = false; ··· 343 342 344 343 if (end_idx < 0) { 345 344 /* extrapolation */ 346 - end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx); 345 + end_idx = ar9003_aic_find_valid(cal_sram_valid, 0, start_idx); 347 346 348 347 if (end_idx < 0) { 349 348 ret = false; ··· 379 378 } 380 379 381 380 /* From dir/quad_path_gain_lin to sram. 
*/ 382 - i = ar9003_aic_find_valid(cal_sram, 1, 0); 381 + i = ar9003_aic_find_valid(cal_sram_valid, 1, 0); 383 382 if (i < 0) { 384 383 i = 0; 385 384 ret = false; 386 385 } 387 - fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db]; 386 + fixed_com_att_db = com_att_db_table[MS(aic->aic_sram[i], 387 + AR_PHY_AIC_SRAM_COM_ATT_6DB)]; 388 388 389 389 for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 390 390 int16_t rot_dir_path_att_db, rot_quad_path_att_db; 391 + struct ath_aic_sram_info sram; 391 392 392 - aic_sram[i].sram.vga_dir_sign = 393 + sram.vga_dir_sign = 393 394 (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0; 394 - aic_sram[i].sram.vga_quad_sign= 395 + sram.vga_quad_sign = 395 396 (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0; 396 397 397 398 rot_dir_path_att_db = ··· 403 400 ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) - 404 401 fixed_com_att_db; 405 402 406 - aic_sram[i].sram.com_att_6db = 403 + sram.com_att_6db = 407 404 ar9003_aic_find_index(1, fixed_com_att_db); 408 405 409 - aic_sram[i].sram.valid = 1; 406 + sram.valid = 1; 410 407 411 - aic_sram[i].sram.rot_dir_att_db = 408 + sram.rot_dir_att_db = 412 409 min(max(rot_dir_path_att_db, 413 410 (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB), 414 411 ATH_AIC_MAX_ROT_DIR_ATT_DB); 415 - aic_sram[i].sram.rot_quad_att_db = 412 + sram.rot_quad_att_db = 416 413 min(max(rot_quad_path_att_db, 417 414 (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB), 418 415 ATH_AIC_MAX_ROT_QUAD_ATT_DB); 419 - } 420 416 421 - for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 422 - aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign, 417 + aic->aic_sram[i] = (SM(sram.vga_dir_sign, 423 418 AR_PHY_AIC_SRAM_VGA_DIR_SIGN) | 424 - SM(aic_sram[i].sram.vga_quad_sign, 419 + SM(sram.vga_quad_sign, 425 420 AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) | 426 - SM(aic_sram[i].sram.com_att_6db, 421 + SM(sram.com_att_6db, 427 422 AR_PHY_AIC_SRAM_COM_ATT_6DB) | 428 - SM(aic_sram[i].sram.valid, 423 + SM(sram.valid, 429 424 AR_PHY_AIC_SRAM_VALID) | 430 - 
SM(aic_sram[i].sram.rot_dir_att_db, 425 + SM(sram.rot_dir_att_db, 431 426 AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) | 432 - SM(aic_sram[i].sram.rot_quad_att_db, 427 + SM(sram.rot_quad_att_db, 433 428 AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB)); 434 429 } 435 430
-1
drivers/net/wireless/ath/ath9k/ar9003_aic.h
··· 50 50 struct ath_aic_out_info { 51 51 int16_t dir_path_gain_lin; 52 52 int16_t quad_path_gain_lin; 53 - struct ath_aic_sram_info sram; 54 53 }; 55 54 56 55 u8 ar9003_aic_calibration(struct ath_hw *ah);
+3 -3
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
··· 5485 5485 AR9300_PAPRD_SCALE_1); 5486 5486 else { 5487 5487 if (chan->channel >= 5700) 5488 - return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), 5489 - AR9300_PAPRD_SCALE_1); 5488 + return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), 5489 + AR9300_PAPRD_SCALE_1); 5490 5490 else if (chan->channel >= 5400) 5491 5491 return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), 5492 - AR9300_PAPRD_SCALE_2); 5492 + AR9300_PAPRD_SCALE_2); 5493 5493 else 5494 5494 return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), 5495 5495 AR9300_PAPRD_SCALE_1);
+3
drivers/net/wireless/ath/ath9k/ar9003_hw.c
··· 698 698 else if (AR_SREV_9340(ah)) 699 699 INIT_INI_ARRAY(&ah->iniModesTxGain, 700 700 ar9340Modes_low_ob_db_tx_gain_table_1p0); 701 + else if (AR_SREV_9531_11(ah)) 702 + INIT_INI_ARRAY(&ah->iniModesTxGain, 703 + qca953x_1p1_modes_no_xpa_low_power_tx_gain_table); 701 704 else if (AR_SREV_9485_11_OR_LATER(ah)) 702 705 INIT_INI_ARRAY(&ah->iniModesTxGain, 703 706 ar9485Modes_low_ob_db_tx_gain_1_1);
+9 -3
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 976 976 /* 977 977 * JAPAN regulatory. 978 978 */ 979 - if (chan->channel == 2484) 979 + if (chan->channel == 2484) { 980 980 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); 981 + 982 + if (AR_SREV_9531(ah)) 983 + REG_RMW_FIELD(ah, AR_PHY_FCAL_2_0, 984 + AR_PHY_FLC_PWR_THRESH, 0); 985 + } 981 986 982 987 ah->modes_index = modesIndex; 983 988 ar9003_hw_override_ini(ah); ··· 2076 2071 * to be disabled. 2077 2072 * 2078 2073 * 0x04000409: Packet stuck on receive. 2079 - * Full chip reset is required for all chips except AR9340. 2074 + * Full chip reset is required for all chips except 2075 + * AR9340, AR9531 and AR9561. 2080 2076 */ 2081 2077 2082 2078 /* ··· 2106 2100 case 0x04000b09: 2107 2101 return true; 2108 2102 case 0x04000409: 2109 - if (AR_SREV_9340(ah) || AR_SREV_9531(ah)) 2103 + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) 2110 2104 return false; 2111 2105 else 2112 2106 return true;
+3
drivers/net/wireless/ath/ath9k/ar9003_phy.h
··· 487 487 #define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150) 488 488 #define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158) 489 489 490 + #define AR_PHY_FLC_PWR_THRESH 7 491 + #define AR_PHY_FLC_PWR_THRESH_S 0 492 + 490 493 #define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3 491 494 #define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0 492 495
+65
drivers/net/wireless/ath/ath9k/ar953x_initvals.h
··· 757 757 {0x00016448, 0x6c927a70}, 758 758 }; 759 759 760 + static const u32 qca953x_1p1_modes_no_xpa_low_power_tx_gain_table[][2] = { 761 + /* Addr allmodes */ 762 + {0x0000a2dc, 0xfff55592}, 763 + {0x0000a2e0, 0xfff99924}, 764 + {0x0000a2e4, 0xfffe1e00}, 765 + {0x0000a2e8, 0xffffe000}, 766 + {0x0000a410, 0x000050d6}, 767 + {0x0000a500, 0x00000069}, 768 + {0x0000a504, 0x0400006b}, 769 + {0x0000a508, 0x0800006d}, 770 + {0x0000a50c, 0x0c000269}, 771 + {0x0000a510, 0x1000026b}, 772 + {0x0000a514, 0x1400026d}, 773 + {0x0000a518, 0x18000669}, 774 + {0x0000a51c, 0x1c00066b}, 775 + {0x0000a520, 0x1d000a68}, 776 + {0x0000a524, 0x21000a6a}, 777 + {0x0000a528, 0x25000a6c}, 778 + {0x0000a52c, 0x29000a6e}, 779 + {0x0000a530, 0x2d0012a9}, 780 + {0x0000a534, 0x310012ab}, 781 + {0x0000a538, 0x350012ad}, 782 + {0x0000a53c, 0x39001b0a}, 783 + {0x0000a540, 0x3d001b0c}, 784 + {0x0000a544, 0x41001b0e}, 785 + {0x0000a548, 0x43001bae}, 786 + {0x0000a54c, 0x45001914}, 787 + {0x0000a550, 0x47001916}, 788 + {0x0000a554, 0x49001b96}, 789 + {0x0000a558, 0x49001b96}, 790 + {0x0000a55c, 0x49001b96}, 791 + {0x0000a560, 0x49001b96}, 792 + {0x0000a564, 0x49001b96}, 793 + {0x0000a568, 0x49001b96}, 794 + {0x0000a56c, 0x49001b96}, 795 + {0x0000a570, 0x49001b96}, 796 + {0x0000a574, 0x49001b96}, 797 + {0x0000a578, 0x49001b96}, 798 + {0x0000a57c, 0x49001b96}, 799 + {0x0000a600, 0x00000000}, 800 + {0x0000a604, 0x00000000}, 801 + {0x0000a608, 0x00000000}, 802 + {0x0000a60c, 0x00000000}, 803 + {0x0000a610, 0x00000000}, 804 + {0x0000a614, 0x00000000}, 805 + {0x0000a618, 0x00804201}, 806 + {0x0000a61c, 0x01408201}, 807 + {0x0000a620, 0x01408502}, 808 + {0x0000a624, 0x01408502}, 809 + {0x0000a628, 0x01408502}, 810 + {0x0000a62c, 0x01408502}, 811 + {0x0000a630, 0x01408502}, 812 + {0x0000a634, 0x01408502}, 813 + {0x0000a638, 0x01408502}, 814 + {0x0000a63c, 0x01408502}, 815 + {0x0000b2dc, 0xfff55592}, 816 + {0x0000b2e0, 0xfff99924}, 817 + {0x0000b2e4, 0xfffe1e00}, 818 + {0x0000b2e8, 0xffffe000}, 819 + 
{0x00016044, 0x044922db}, 820 + {0x00016048, 0x6c927a70}, 821 + {0x00016444, 0x044922db}, 822 + {0x00016448, 0x6c927a70}, 823 + }; 824 + 760 825 static const u32 qca953x_2p0_baseband_core[][2] = { 761 826 /* Addr allmodes */ 762 827 {0x00009800, 0xafe68e30},
+32 -6
drivers/net/wireless/ath/ath9k/calib.c
··· 241 241 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 242 242 struct ath_common *common = ath9k_hw_common(ah); 243 243 s16 default_nf = ath9k_hw_get_default_nf(ah, chan); 244 + u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL); 244 245 245 246 if (ah->caldata) 246 247 h = ah->caldata->nfCalHist; ··· 265 264 } 266 265 267 266 /* 267 + * stop NF cal if ongoing to ensure NF load completes immediately 268 + * (or after end rx/tx frame if ongoing) 269 + */ 270 + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { 271 + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 272 + REG_RMW_BUFFER_FLUSH(ah); 273 + ENABLE_REG_RMW_BUFFER(ah); 274 + } 275 + 276 + /* 268 277 * Load software filtered NF value into baseband internal minCCApwr 269 278 * variable. 270 279 */ ··· 287 276 288 277 /* 289 278 * Wait for load to complete, should be fast, a few 10s of us. 290 - * The max delay was changed from an original 250us to 10000us 291 - * since 250us often results in NF load timeout and causes deaf 292 - * condition during stress testing 12/12/2009 279 + * The max delay was changed from an original 250us to 22.2 msec. 280 + * This would increase timeout to the longest possible frame 281 + * (11n max length 22.1 msec) 293 282 */ 294 - for (j = 0; j < 10000; j++) { 283 + for (j = 0; j < 22200; j++) { 295 284 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & 296 - AR_PHY_AGC_CONTROL_NF) == 0) 285 + AR_PHY_AGC_CONTROL_NF) == 0) 297 286 break; 298 287 udelay(10); 288 + } 289 + 290 + /* 291 + * Restart NF so it can continue. 
292 + */ 293 + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { 294 + ENABLE_REG_RMW_BUFFER(ah); 295 + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF) 296 + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 297 + AR_PHY_AGC_CONTROL_ENABLE_NF); 298 + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF) 299 + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 300 + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 301 + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 302 + REG_RMW_BUFFER_FLUSH(ah); 299 303 } 300 304 301 305 /* ··· 322 296 * here, the baseband nf cal will just be capped by our present 323 297 * noisefloor until the next calibration timer. 324 298 */ 325 - if (j == 10000) { 299 + if (j == 22200) { 326 300 ath_dbg(common, ANY, 327 301 "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", 328 302 REG_READ(ah, AR_PHY_AGC_CONTROL));
+2 -2
drivers/net/wireless/ath/ath9k/channel.c
··· 226 226 } 227 227 } 228 228 229 - static const u32 chanctx_event_delta(struct ath_softc *sc) 229 + static u32 chanctx_event_delta(struct ath_softc *sc) 230 230 { 231 231 u64 ms; 232 232 struct timespec ts, *old; ··· 1454 1454 if (!sc->p2p_ps_timer) 1455 1455 return; 1456 1456 1457 - if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) 1457 + if (vif->type != NL80211_IFTYPE_STATION) 1458 1458 return; 1459 1459 1460 1460 sc->p2p_ps_vif = avp;
+2
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 55 55 .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */ 56 56 { USB_DEVICE(0x04da, 0x3904), 57 57 .driver_info = AR9280_USB }, 58 + { USB_DEVICE(0x0930, 0x0a08), 59 + .driver_info = AR9280_USB }, /* Toshiba WLM-20U2 and GN-1080 */ 58 60 59 61 { USB_DEVICE(0x0cf3, 0x20ff), 60 62 .driver_info = STORAGE_DEVICE },
+10
drivers/net/wireless/ath/ath9k/hw.c
··· 1368 1368 if (ath9k_hw_mci_is_enabled(ah)) 1369 1369 ar9003_mci_check_gpm_offset(ah); 1370 1370 1371 + /* DMA HALT added to resolve ar9300 and ar9580 bus error during 1372 + * RTC_RC reg read 1373 + */ 1374 + if (AR_SREV_9300(ah) || AR_SREV_9580(ah)) { 1375 + REG_SET_BIT(ah, AR_CFG, AR_CFG_HALT_REQ); 1376 + ath9k_hw_wait(ah, AR_CFG, AR_CFG_HALT_ACK, AR_CFG_HALT_ACK, 1377 + 20 * AH_WAIT_TIMEOUT); 1378 + REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ); 1379 + } 1380 + 1371 1381 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1372 1382 1373 1383 REGWRITE_BUFFER_FLUSH(ah);
+5 -20
drivers/net/wireless/ath/ath9k/init.c
··· 751 751 752 752 #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ 753 753 754 - static const struct ieee80211_iface_limit if_dfs_limits[] = { 755 - { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | 756 - #ifdef CONFIG_MAC80211_MESH 757 - BIT(NL80211_IFTYPE_MESH_POINT) | 758 - #endif 759 - BIT(NL80211_IFTYPE_ADHOC) }, 760 - }; 761 - 762 754 static const struct ieee80211_iface_combination if_comb[] = { 763 755 { 764 756 .limits = if_limits, ··· 758 766 .max_interfaces = 2048, 759 767 .num_different_channels = 1, 760 768 .beacon_int_infra_match = true, 769 + #ifdef CONFIG_ATH9K_DFS_CERTIFIED 770 + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 771 + BIT(NL80211_CHAN_WIDTH_20) | 772 + BIT(NL80211_CHAN_WIDTH_40), 773 + #endif 761 774 }, 762 775 { 763 776 .limits = wds_limits, ··· 771 774 .num_different_channels = 1, 772 775 .beacon_int_infra_match = true, 773 776 }, 774 - #ifdef CONFIG_ATH9K_DFS_CERTIFIED 775 - { 776 - .limits = if_dfs_limits, 777 - .n_limits = ARRAY_SIZE(if_dfs_limits), 778 - .max_interfaces = 1, 779 - .num_different_channels = 1, 780 - .beacon_int_infra_match = true, 781 - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 782 - BIT(NL80211_CHAN_WIDTH_20) | 783 - BIT(NL80211_CHAN_WIDTH_40), 784 - } 785 - #endif 786 777 }; 787 778 788 779 #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+4 -1
drivers/net/wireless/ath/ath9k/main.c
··· 978 978 if (ctx->nvifs_assigned != 1) 979 979 continue; 980 980 981 - if (!avp->vif->p2p || !iter_data->has_hw_macaddr) 981 + if (!iter_data->has_hw_macaddr) 982 982 continue; 983 983 984 984 ether_addr_copy(common->curbssid, avp->bssid); ··· 1254 1254 1255 1255 ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); 1256 1256 sc->cur_chan->nvifs++; 1257 + 1258 + if (vif->type == NL80211_IFTYPE_STATION && ath9k_is_chanctx_enabled()) 1259 + vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE; 1257 1260 1258 1261 if (ath9k_uses_beacons(vif->type)) 1259 1262 ath9k_beacon_assign_slot(sc, vif);
+3 -1
drivers/net/wireless/ath/ath9k/reg.h
··· 34 34 #define AR_CFG_SWRG 0x00000010 35 35 #define AR_CFG_AP_ADHOC_INDICATION 0x00000020 36 36 #define AR_CFG_PHOK 0x00000100 37 - #define AR_CFG_CLK_GATE_DIS 0x00000400 38 37 #define AR_CFG_EEBS 0x00000200 38 + #define AR_CFG_CLK_GATE_DIS 0x00000400 39 + #define AR_CFG_HALT_REQ 0x00000800 40 + #define AR_CFG_HALT_ACK 0x00001000 39 41 #define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 40 42 #define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 41 43
+118 -44
drivers/net/wireless/ath/wil6210/cfg80211.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 535 535 536 536 wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); 537 537 538 - rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); 538 + if (!(test_bit(wil_status_fwconnecting, wil->status) || 539 + test_bit(wil_status_fwconnected, wil->status))) { 540 + wil_err(wil, "%s: Disconnect was called while disconnected\n", 541 + __func__); 542 + return 0; 543 + } 544 + 545 + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, 546 + WMI_DISCONNECT_EVENTID, NULL, 0, 547 + WIL6210_DISCONNECT_TO_MS); 548 + if (rc) 549 + wil_err(wil, "%s: disconnect error %d\n", __func__, rc); 539 550 540 551 return rc; 541 552 } ··· 707 696 return rc; 708 697 } 709 698 699 + /** 700 + * find a specific IE in a list of IEs 701 + * return a pointer to the beginning of IE in the list 702 + * or NULL if not found 703 + */ 704 + static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie, 705 + u16 ie_len) 706 + { 707 + struct ieee80211_vendor_ie *vie; 708 + u32 oui; 709 + 710 + /* IE tag at offset 0, length at offset 1 */ 711 + if (ie_len < 2 || 2 + ie[1] > ie_len) 712 + return NULL; 713 + 714 + if (ie[0] != WLAN_EID_VENDOR_SPECIFIC) 715 + return cfg80211_find_ie(ie[0], ies, ies_len); 716 + 717 + /* make sure there is room for 3 bytes OUI + 1 byte OUI type */ 718 + if (ie[1] < 4) 719 + return NULL; 720 + vie = (struct ieee80211_vendor_ie *)ie; 721 + oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2]; 722 + return cfg80211_find_vendor_ie(oui, vie->oui_type, ies, 723 + ies_len); 724 + } 725 + 726 + /** 727 + * merge the IEs in two lists into a single list. 728 + * do not include IEs from the second list which exist in the first list. 
729 + * add only vendor specific IEs from second list to keep 730 + * the merged list sorted (since vendor-specific IE has the 731 + * highest tag number) 732 + * caller must free the allocated memory for merged IEs 733 + */ 734 + static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, 735 + const u8 *ies2, u16 ies2_len, 736 + u8 **merged_ies, u16 *merged_len) 737 + { 738 + u8 *buf, *dpos; 739 + const u8 *spos; 740 + 741 + if (ies1_len == 0 && ies2_len == 0) { 742 + *merged_ies = NULL; 743 + *merged_len = 0; 744 + return 0; 745 + } 746 + 747 + buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); 748 + if (!buf) 749 + return -ENOMEM; 750 + memcpy(buf, ies1, ies1_len); 751 + dpos = buf + ies1_len; 752 + spos = ies2; 753 + while (spos + 1 < ies2 + ies2_len) { 754 + /* IE tag at offset 0, length at offset 1 */ 755 + u16 ielen = 2 + spos[1]; 756 + 757 + if (spos + ielen > ies2 + ies2_len) 758 + break; 759 + if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && 760 + !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { 761 + memcpy(dpos, spos, ielen); 762 + dpos += ielen; 763 + } 764 + spos += ielen; 765 + } 766 + 767 + *merged_ies = buf; 768 + *merged_len = dpos - buf; 769 + return 0; 770 + } 771 + 710 772 static void wil_print_bcon_data(struct cfg80211_beacon_data *b) 711 773 { 712 774 print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, ··· 796 712 b->assocresp_ies, b->assocresp_ies_len); 797 713 } 798 714 799 - static int wil_fix_bcon(struct wil6210_priv *wil, 800 - struct cfg80211_beacon_data *bcon) 801 - { 802 - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; 803 - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); 804 - 805 - if (bcon->probe_resp_len <= hlen) 806 - return 0; 807 - 808 - /* always use IE's from full probe frame, they has more info 809 - * notable RSN 810 - */ 811 - bcon->proberesp_ies = f->u.probe_resp.variable; 812 - bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; 813 - if (!bcon->assocresp_ies) { 814 
- bcon->assocresp_ies = bcon->proberesp_ies; 815 - bcon->assocresp_ies_len = bcon->proberesp_ies_len; 816 - } 817 - 818 - return 1; 819 - } 820 - 821 715 /* internal functions for device reset and starting AP */ 822 716 static int _wil_cfg80211_set_ies(struct wiphy *wiphy, 823 717 struct cfg80211_beacon_data *bcon) 824 718 { 825 719 int rc; 826 720 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 721 + u16 len = 0, proberesp_len = 0; 722 + u8 *ies = NULL, *proberesp = NULL; 827 723 828 - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, 829 - bcon->proberesp_ies); 724 + if (bcon->probe_resp) { 725 + struct ieee80211_mgmt *f = 726 + (struct ieee80211_mgmt *)bcon->probe_resp; 727 + size_t hlen = offsetof(struct ieee80211_mgmt, 728 + u.probe_resp.variable); 729 + proberesp = f->u.probe_resp.variable; 730 + proberesp_len = bcon->probe_resp_len - hlen; 731 + } 732 + rc = _wil_cfg80211_merge_extra_ies(proberesp, 733 + proberesp_len, 734 + bcon->proberesp_ies, 735 + bcon->proberesp_ies_len, 736 + &ies, &len); 737 + 830 738 if (rc) 831 - return rc; 739 + goto out; 832 740 833 - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, 834 - bcon->assocresp_ies); 741 + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies); 742 + if (rc) 743 + goto out; 744 + 745 + if (bcon->assocresp_ies) 746 + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, 747 + bcon->assocresp_ies_len, bcon->assocresp_ies); 748 + else 749 + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies); 835 750 #if 0 /* to use beacon IE's, remove this #if 0 */ 836 751 if (rc) 837 - return rc; 752 + goto out; 838 753 839 754 rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); 840 755 #endif 841 - 756 + out: 757 + kfree(ies); 842 758 return rc; 843 759 } 844 760 ··· 907 823 wil_dbg_misc(wil, "%s()\n", __func__); 908 824 wil_print_bcon_data(bcon); 909 825 910 - if (wil_fix_bcon(wil, bcon)) { 911 - wil_dbg_misc(wil, "Fixed bcon\n"); 912 - wil_print_bcon_data(bcon); 913 - } 914 - 
915 - if (bcon->proberesp_ies && 916 - cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, 917 - bcon->proberesp_ies_len)) 826 + if (bcon->tail && 827 + cfg80211_find_ie(WLAN_EID_RSN, bcon->tail, 828 + bcon->tail_len)) 918 829 privacy = 1; 919 830 920 831 /* in case privacy has changed, need to restart the AP */ ··· 978 899 info->ssid, info->ssid_len); 979 900 wil_print_bcon_data(bcon); 980 901 wil_print_crypto(wil, crypto); 981 - 982 - if (wil_fix_bcon(wil, bcon)) { 983 - wil_dbg_misc(wil, "Fixed bcon\n"); 984 - wil_print_bcon_data(bcon); 985 - } 986 902 987 903 rc = _wil_cfg80211_start_ap(wiphy, ndev, 988 904 info->ssid, info->ssid_len, info->privacy,
+3 -3
drivers/net/wireless/ath/wil6210/debugfs.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 68 68 seq_puts(s, "???\n"); 69 69 } 70 70 71 - if (vring->va && (vring->size < 1025)) { 71 + if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { 72 72 uint i; 73 73 74 74 for (i = 0; i < vring->size; i++) { 75 75 volatile struct vring_tx_desc *d = &vring->va[i].tx; 76 76 77 - if ((i % 64) == 0 && (i != 0)) 77 + if ((i % 128) == 0 && (i != 0)) 78 78 seq_puts(s, "\n"); 79 79 seq_printf(s, "%c", (d->dma.status & BIT(0)) ? 80 80 _s : (vring->ctx[i].skb ? _h : 'h'));
+48 -64
drivers/net/wireless/ath/wil6210/main.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 22 22 #include "txrx.h" 23 23 #include "wmi.h" 24 24 #include "boot_loader.h" 25 - 26 - #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 27 - #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 28 25 29 26 bool debug_fw; /* = false; */ 30 27 module_param(debug_fw, bool, S_IRUGO); ··· 152 155 153 156 if (sta->status != wil_sta_unused) { 154 157 if (!from_event) 155 - wmi_disconnect_sta(wil, sta->addr, reason_code); 158 + wmi_disconnect_sta(wil, sta->addr, reason_code, true); 156 159 157 160 switch (wdev->iftype) { 158 161 case NL80211_IFTYPE_AP: ··· 192 195 struct wireless_dev *wdev = wil->wdev; 193 196 194 197 might_sleep(); 195 - wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, 196 - reason_code, from_event ? "+" : "-"); 198 + wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, 199 + reason_code, from_event ? "+" : "-"); 197 200 198 201 /* Cases are: 199 202 * - disconnect single STA, still connected ··· 255 258 static void wil_connect_timer_fn(ulong x) 256 259 { 257 260 struct wil6210_priv *wil = (void *)x; 261 + bool q; 258 262 259 - wil_dbg_misc(wil, "Connect timeout\n"); 263 + wil_err(wil, "Connect timeout detected, disconnect station\n"); 260 264 261 265 /* reschedule to thread context - disconnect won't 262 - * run from atomic context 266 + * run from atomic context. 267 + * queue on wmi_wq to prevent race with connect event. 
263 268 */ 264 - schedule_work(&wil->disconnect_worker); 269 + q = queue_work(wil->wmi_wq, &wil->disconnect_worker); 270 + wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); 265 271 } 266 272 267 273 static void wil_scan_timer_fn(ulong x) ··· 369 369 return -EINVAL; 370 370 } 371 371 372 + int wil_tx_init(struct wil6210_priv *wil, int cid) 373 + { 374 + int rc = -EINVAL, ringid; 375 + 376 + if (cid < 0) { 377 + wil_err(wil, "No connection pending\n"); 378 + goto out; 379 + } 380 + ringid = wil_find_free_vring(wil); 381 + if (ringid < 0) { 382 + wil_err(wil, "No free vring found\n"); 383 + goto out; 384 + } 385 + 386 + wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", 387 + cid, ringid); 388 + 389 + rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); 390 + if (rc) 391 + wil_err(wil, "wil_vring_init_tx for CID %d vring %d failed\n", 392 + cid, ringid); 393 + 394 + out: 395 + return rc; 396 + } 397 + 372 398 int wil_bcast_init(struct wil6210_priv *wil) 373 399 { 374 400 int ri = wil->bcast_vring, rc; ··· 425 399 wil_vring_fini_tx(wil, ri); 426 400 } 427 401 428 - static void wil_connect_worker(struct work_struct *work) 429 - { 430 - int rc, cid, ringid; 431 - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 432 - connect_worker); 433 - struct net_device *ndev = wil_to_ndev(wil); 434 - 435 - mutex_lock(&wil->mutex); 436 - 437 - cid = wil->pending_connect_cid; 438 - if (cid < 0) { 439 - wil_err(wil, "No connection pending\n"); 440 - goto out; 441 - } 442 - ringid = wil_find_free_vring(wil); 443 - if (ringid < 0) { 444 - wil_err(wil, "No free vring found\n"); 445 - goto out; 446 - } 447 - 448 - wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", 449 - cid, ringid); 450 - 451 - rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); 452 - wil->pending_connect_cid = -1; 453 - if (rc == 0) { 454 - wil->sta[cid].status = wil_sta_connected; 455 - netif_tx_wake_all_queues(ndev); 456 - } else { 457 - 
wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true); 458 - } 459 - out: 460 - mutex_unlock(&wil->mutex); 461 - } 462 - 463 402 int wil_priv_init(struct wil6210_priv *wil) 464 403 { 465 404 uint i; ··· 435 444 for (i = 0; i < WIL6210_MAX_CID; i++) 436 445 spin_lock_init(&wil->sta[i].tid_rx_lock); 437 446 447 + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) 448 + spin_lock_init(&wil->vring_tx_data[i].lock); 449 + 438 450 mutex_init(&wil->mutex); 439 451 mutex_init(&wil->wmi_mutex); 440 452 mutex_init(&wil->back_rx_mutex); ··· 447 453 init_completion(&wil->wmi_ready); 448 454 init_completion(&wil->wmi_call); 449 455 450 - wil->pending_connect_cid = -1; 451 456 wil->bcast_vring = -1; 452 457 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 453 458 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); 454 459 455 - INIT_WORK(&wil->connect_worker, wil_connect_worker); 456 460 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 457 461 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 458 462 INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); ··· 836 844 } 837 845 838 846 /* init after reset */ 839 - wil->pending_connect_cid = -1; 840 847 wil->ap_isolate = 0; 841 848 reinit_completion(&wil->wmi_ready); 842 849 reinit_completion(&wil->wmi_call); ··· 939 948 940 949 int __wil_down(struct wil6210_priv *wil) 941 950 { 942 - int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS / 943 - WAIT_FOR_DISCONNECT_INTERVAL_MS; 951 + int rc; 944 952 945 953 WARN_ON(!mutex_is_locked(&wil->mutex)); 946 954 ··· 963 973 } 964 974 965 975 if (test_bit(wil_status_fwconnected, wil->status) || 966 - test_bit(wil_status_fwconnecting, wil->status)) 967 - wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); 976 + test_bit(wil_status_fwconnecting, wil->status)) { 968 977 969 - /* make sure wil is idle (not connected) */ 970 - mutex_unlock(&wil->mutex); 971 - while (iter--) { 972 - int idle = !test_bit(wil_status_fwconnected, wil->status) && 973 - 
!test_bit(wil_status_fwconnecting, wil->status); 974 - if (idle) 975 - break; 976 - msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS); 978 + mutex_unlock(&wil->mutex); 979 + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, 980 + WMI_DISCONNECT_EVENTID, NULL, 0, 981 + WIL6210_DISCONNECT_TO_MS); 982 + mutex_lock(&wil->mutex); 983 + if (rc) 984 + wil_err(wil, "timeout waiting for disconnect\n"); 977 985 } 978 - mutex_lock(&wil->mutex); 979 - 980 - if (iter < 0) 981 - wil_err(wil, "timeout waiting for idle FW/HW\n"); 982 986 983 987 wil_reset(wil, false); 984 988
+3 -2
drivers/net/wireless/ath/wil6210/netdev.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 108 108 /* always process ALL Tx complete, regardless budget - it is fast */ 109 109 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 110 110 struct vring *vring = &wil->vring_tx[i]; 111 + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 111 112 112 - if (!vring->va) 113 + if (!vring->va || !txdata->enabled) 113 114 continue; 114 115 115 116 tx_done += wil_tx_complete(wil, i);
+36 -10
drivers/net/wireless/ath/wil6210/txrx.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 717 717 wil_vring_free(wil, vring, 0); 718 718 } 719 719 720 + static inline void wil_tx_data_init(struct vring_tx_data *txdata) 721 + { 722 + spin_lock_bh(&txdata->lock); 723 + txdata->dot1x_open = 0; 724 + txdata->enabled = 0; 725 + txdata->idle = 0; 726 + txdata->last_idle = 0; 727 + txdata->begin = 0; 728 + txdata->agg_wsize = 0; 729 + txdata->agg_timeout = 0; 730 + txdata->agg_amsdu = 0; 731 + txdata->addba_in_progress = false; 732 + spin_unlock_bh(&txdata->lock); 733 + } 734 + 720 735 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, 721 736 int cid, int tid) 722 737 { ··· 773 758 goto out; 774 759 } 775 760 776 - memset(txdata, 0, sizeof(*txdata)); 777 - spin_lock_init(&txdata->lock); 761 + wil_tx_data_init(txdata); 778 762 vring->size = size; 779 763 rc = wil_vring_alloc(wil, vring); 780 764 if (rc) ··· 805 791 806 792 return 0; 807 793 out_free: 794 + spin_lock_bh(&txdata->lock); 808 795 txdata->dot1x_open = false; 809 796 txdata->enabled = 0; 797 + spin_unlock_bh(&txdata->lock); 810 798 wil_vring_free(wil, vring, 1); 799 + wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; 800 + wil->vring2cid_tid[id][1] = 0; 801 + 811 802 out: 812 803 813 804 return rc; ··· 850 831 goto out; 851 832 } 852 833 853 - memset(txdata, 0, sizeof(*txdata)); 854 - spin_lock_init(&txdata->lock); 834 + wil_tx_data_init(txdata); 855 835 vring->size = size; 856 836 rc = wil_vring_alloc(wil, vring); 857 837 if (rc) ··· 880 862 881 863 return 0; 882 864 out_free: 865 + spin_lock_bh(&txdata->lock); 883 866 txdata->enabled = 0; 884 867 txdata->dot1x_open = false; 868 + spin_unlock_bh(&txdata->lock); 885 869 wil_vring_free(wil, vring, 1); 886 870 out: 887 871 ··· 911 891 
napi_synchronize(&wil->napi_tx); 912 892 913 893 wil_vring_free(wil, vring, 1); 914 - memset(txdata, 0, sizeof(*txdata)); 915 894 } 916 895 917 896 static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, ··· 930 911 continue; 931 912 if (wil->vring2cid_tid[i][0] == cid) { 932 913 struct vring *v = &wil->vring_tx[i]; 914 + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; 933 915 934 916 wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", 935 917 __func__, eth->h_dest, i); 936 - if (v->va) { 918 + if (v->va && txdata->enabled) { 937 919 return v; 938 920 } else { 939 921 wil_dbg_txrx(wil, "vring[%d] not valid\n", i); ··· 955 935 struct vring *v; 956 936 int i; 957 937 u8 cid; 938 + struct vring_tx_data *txdata; 958 939 959 940 /* In the STA mode, it is expected to have only 1 VRING 960 941 * for the AP we connected to. ··· 963 942 */ 964 943 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 965 944 v = &wil->vring_tx[i]; 966 - if (!v->va) 945 + txdata = &wil->vring_tx_data[i]; 946 + if (!v->va || !txdata->enabled) 967 947 continue; 968 948 969 949 cid = wil->vring2cid_tid[i][0]; ··· 1000 978 struct sk_buff *skb) 1001 979 { 1002 980 struct vring *v; 981 + struct vring_tx_data *txdata; 1003 982 int i = wil->bcast_vring; 1004 983 1005 984 if (i < 0) 1006 985 return NULL; 1007 986 v = &wil->vring_tx[i]; 1008 - if (!v->va) 987 + txdata = &wil->vring_tx_data[i]; 988 + if (!v->va || !txdata->enabled) 1009 989 return NULL; 1010 990 if (!wil->vring_tx_data[i].dot1x_open && 1011 991 (skb->protocol != cpu_to_be16(ETH_P_PAE))) ··· 1034 1010 u8 cid; 1035 1011 struct ethhdr *eth = (void *)skb->data; 1036 1012 char *src = eth->h_source; 1013 + struct vring_tx_data *txdata; 1037 1014 1038 1015 /* find 1-st vring eligible for data */ 1039 1016 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { 1040 1017 v = &wil->vring_tx[i]; 1041 - if (!v->va) 1018 + txdata = &wil->vring_tx_data[i]; 1019 + if (!v->va || !txdata->enabled) 1042 1020 continue; 1043 1021 1044 1022 cid = wil->vring2cid_tid[i][0];
+6 -5
drivers/net/wireless/ath/wil6210/wil6210.h
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 51 51 52 52 #define WIL_TX_Q_LEN_DEFAULT (4000) 53 53 #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) 54 - #define WIL_TX_RING_SIZE_ORDER_DEFAULT (10) 54 + #define WIL_TX_RING_SIZE_ORDER_DEFAULT (12) 55 55 #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) 56 56 #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ 57 57 /* limit ring size in range [32..32k] */ ··· 92 92 #define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */ 93 93 #define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000) 94 94 #define WIL6210_SCAN_TO msecs_to_jiffies(10000) 95 + #define WIL6210_DISCONNECT_TO_MS (2000) 95 96 #define WIL6210_RX_HIGH_TRSH_INIT (0) 96 97 #define WIL6210_RX_HIGH_TRSH_DEFAULT \ 97 98 (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3)) ··· 582 581 struct workqueue_struct *wmi_wq; /* for deferred calls */ 583 582 struct work_struct wmi_event_worker; 584 583 struct workqueue_struct *wq_service; 585 - struct work_struct connect_worker; 586 584 struct work_struct disconnect_worker; 587 585 struct work_struct fw_error_worker; /* for FW error recovery */ 588 586 struct timer_list connect_timer; 589 587 struct timer_list scan_timer; /* detect scan timeout */ 590 - int pending_connect_cid; 591 588 struct list_head pending_wmi_ev; 592 589 /* 593 590 * protect pending_wmi_ev ··· 755 756 int wmi_p2p_cfg(struct wil6210_priv *wil, int channel); 756 757 int wmi_rxon(struct wil6210_priv *wil, bool on); 757 758 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); 758 - int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason); 759 + int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, 760 + bool full_disconnect); 759 761 int 
wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); 760 762 int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); 761 763 int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); ··· 807 807 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, 808 808 int cid, int tid); 809 809 void wil_vring_fini_tx(struct wil6210_priv *wil, int id); 810 + int wil_tx_init(struct wil6210_priv *wil, int cid); 810 811 int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size); 811 812 int wil_bcast_init(struct wil6210_priv *wil); 812 813 void wil_bcast_fini(struct wil6210_priv *wil);
+126 -48
drivers/net/wireless/ath/wil6210/wmi.c
··· 1 1 /* 2 - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 2 + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 426 426 const size_t assoc_req_ie_offset = sizeof(u16) * 2; 427 427 /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ 428 428 const size_t assoc_resp_ie_offset = sizeof(u16) * 3; 429 + int rc; 429 430 430 431 if (len < sizeof(*evt)) { 431 432 wil_err(wil, "Connect event too short : %d bytes\n", len); ··· 446 445 } 447 446 448 447 ch = evt->channel + 1; 449 - wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", 450 - evt->bssid, ch, evt->cid); 448 + wil_info(wil, "Connect %pM channel [%d] cid %d\n", 449 + evt->bssid, ch, evt->cid); 451 450 wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, 452 451 evt->assoc_info, len - sizeof(*evt), true); 453 452 ··· 469 468 assoc_resp_ielen = 0; 470 469 } 471 470 471 + mutex_lock(&wil->mutex); 472 + if (test_bit(wil_status_resetting, wil->status) || 473 + !test_bit(wil_status_fwready, wil->status)) { 474 + wil_err(wil, "status_resetting, cancel connect event, CID %d\n", 475 + evt->cid); 476 + mutex_unlock(&wil->mutex); 477 + /* no need for cleanup, wil_reset will do that */ 478 + return; 479 + } 480 + 472 481 if ((wdev->iftype == NL80211_IFTYPE_STATION) || 473 482 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 474 483 if (!test_bit(wil_status_fwconnecting, wil->status)) { 475 484 wil_err(wil, "Not in connecting state\n"); 485 + mutex_unlock(&wil->mutex); 476 486 return; 477 487 } 478 488 del_timer_sync(&wil->connect_timer); 479 - cfg80211_connect_result(ndev, evt->bssid, 480 - assoc_req_ie, assoc_req_ielen, 481 - assoc_resp_ie, assoc_resp_ielen, 482 - WLAN_STATUS_SUCCESS, GFP_KERNEL); 489 + } 483 490 491 + /* FIXME FW can transmit only ucast frames to peer */ 492 + /* FIXME real ring_id instead of hard coded 0 */ 493 + 
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); 494 + wil->sta[evt->cid].status = wil_sta_conn_pending; 495 + 496 + rc = wil_tx_init(wil, evt->cid); 497 + if (rc) { 498 + wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n", 499 + __func__, evt->cid, rc); 500 + wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, 501 + WLAN_REASON_UNSPECIFIED, false); 502 + } else { 503 + wil_info(wil, "%s: successful connection to CID %d\n", 504 + __func__, evt->cid); 505 + } 506 + 507 + if ((wdev->iftype == NL80211_IFTYPE_STATION) || 508 + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 509 + if (rc) { 510 + netif_tx_stop_all_queues(ndev); 511 + netif_carrier_off(ndev); 512 + wil_err(wil, 513 + "%s: cfg80211_connect_result with failure\n", 514 + __func__); 515 + cfg80211_connect_result(ndev, evt->bssid, NULL, 0, 516 + NULL, 0, 517 + WLAN_STATUS_UNSPECIFIED_FAILURE, 518 + GFP_KERNEL); 519 + goto out; 520 + } else { 521 + cfg80211_connect_result(ndev, evt->bssid, 522 + assoc_req_ie, assoc_req_ielen, 523 + assoc_resp_ie, assoc_resp_ielen, 524 + WLAN_STATUS_SUCCESS, 525 + GFP_KERNEL); 526 + } 484 527 } else if ((wdev->iftype == NL80211_IFTYPE_AP) || 485 528 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { 529 + if (rc) 530 + goto out; 531 + 486 532 memset(&sinfo, 0, sizeof(sinfo)); 487 533 488 534 sinfo.generation = wil->sinfo_gen++; ··· 540 492 } 541 493 542 494 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); 495 + } else { 496 + wil_err(wil, "%s: unhandled iftype %d for CID %d\n", 497 + __func__, wdev->iftype, evt->cid); 498 + goto out; 543 499 } 544 - clear_bit(wil_status_fwconnecting, wil->status); 500 + 501 + wil->sta[evt->cid].status = wil_sta_connected; 545 502 set_bit(wil_status_fwconnected, wil->status); 503 + netif_tx_wake_all_queues(ndev); 546 504 547 - /* FIXME FW can transmit only ucast frames to peer */ 548 - /* FIXME real ring_id instead of hard coded 0 */ 549 - ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); 550 - wil->sta[evt->cid].status = 
wil_sta_conn_pending; 551 - 552 - wil->pending_connect_cid = evt->cid; 553 - queue_work(wil->wq_service, &wil->connect_worker); 505 + out: 506 + if (rc) 507 + wil->sta[evt->cid].status = wil_sta_unused; 508 + clear_bit(wil_status_fwconnecting, wil->status); 509 + mutex_unlock(&wil->mutex); 554 510 } 555 511 556 512 static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, ··· 563 511 struct wmi_disconnect_event *evt = d; 564 512 u16 reason_code = le16_to_cpu(evt->protocol_reason_status); 565 513 566 - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", 567 - evt->bssid, reason_code, evt->disconnect_reason); 514 + wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n", 515 + evt->bssid, reason_code, evt->disconnect_reason); 568 516 569 517 wil->sinfo_gen++; 570 518 ··· 779 727 void __iomem *src; 780 728 ulong flags; 781 729 unsigned n; 730 + unsigned int num_immed_reply = 0; 782 731 783 732 if (!test_bit(wil_status_mbox_ready, wil->status)) { 784 733 wil_err(wil, "Reset in progress. 
Cannot handle WMI event\n"); ··· 789 736 for (n = 0;; n++) { 790 737 u16 len; 791 738 bool q; 739 + bool immed_reply = false; 792 740 793 741 r->head = wil_r(wil, RGF_MBOX + 794 742 offsetof(struct wil6210_mbox_ctl, rx.head)); ··· 838 784 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; 839 785 u16 id = le16_to_cpu(wmi->id); 840 786 u32 tstamp = le32_to_cpu(wmi->timestamp); 787 + spin_lock_irqsave(&wil->wmi_ev_lock, flags); 788 + if (wil->reply_id && wil->reply_id == id) { 789 + if (wil->reply_buf) { 790 + memcpy(wil->reply_buf, wmi, 791 + min(len, wil->reply_size)); 792 + immed_reply = true; 793 + } 794 + } 795 + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 841 796 842 797 wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", 843 798 id, wmi->mid, tstamp); ··· 862 799 wil_w(wil, RGF_MBOX + 863 800 offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); 864 801 865 - /* add to the pending list */ 866 - spin_lock_irqsave(&wil->wmi_ev_lock, flags); 867 - list_add_tail(&evt->list, &wil->pending_wmi_ev); 868 - spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 869 - q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); 870 - wil_dbg_wmi(wil, "queue_work -> %d\n", q); 802 + if (immed_reply) { 803 + wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", 804 + __func__, wil->reply_id); 805 + kfree(evt); 806 + num_immed_reply++; 807 + complete(&wil->wmi_call); 808 + } else { 809 + /* add to the pending list */ 810 + spin_lock_irqsave(&wil->wmi_ev_lock, flags); 811 + list_add_tail(&evt->list, &wil->pending_wmi_ev); 812 + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 813 + q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); 814 + wil_dbg_wmi(wil, "queue_work -> %d\n", q); 815 + } 871 816 } 872 817 /* normally, 1 event per IRQ should be processed */ 873 - wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n); 818 + wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__, 819 + n - num_immed_reply, num_immed_reply); 874 820 } 875 821 876 822 int 
wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, ··· 890 818 891 819 mutex_lock(&wil->wmi_mutex); 892 820 821 + spin_lock(&wil->wmi_ev_lock); 822 + wil->reply_id = reply_id; 823 + wil->reply_buf = reply; 824 + wil->reply_size = reply_size; 825 + spin_unlock(&wil->wmi_ev_lock); 826 + 893 827 rc = __wmi_send(wil, cmdid, buf, len); 894 828 if (rc) 895 829 goto out; 896 830 897 - wil->reply_id = reply_id; 898 - wil->reply_buf = reply; 899 - wil->reply_size = reply_size; 900 831 remain = wait_for_completion_timeout(&wil->wmi_call, 901 832 msecs_to_jiffies(to_msec)); 902 833 if (0 == remain) { ··· 912 837 cmdid, reply_id, 913 838 to_msec - jiffies_to_msecs(remain)); 914 839 } 840 + 841 + out: 842 + spin_lock(&wil->wmi_ev_lock); 915 843 wil->reply_id = 0; 916 844 wil->reply_buf = NULL; 917 845 wil->reply_size = 0; 918 - out: 846 + spin_unlock(&wil->wmi_ev_lock); 847 + 919 848 mutex_unlock(&wil->wmi_mutex); 920 849 921 850 return rc; ··· 1263 1184 return 0; 1264 1185 } 1265 1186 1266 - int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) 1187 + int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, 1188 + bool full_disconnect) 1267 1189 { 1268 1190 int rc; 1269 1191 u16 reason_code; ··· 1288 1208 return rc; 1289 1209 } 1290 1210 1291 - /* call event handler manually after processing wmi_call, 1292 - * to avoid deadlock - disconnect event handler acquires wil->mutex 1293 - * while it is already held here 1294 - */ 1295 - reason_code = le16_to_cpu(reply.evt.protocol_reason_status); 1211 + if (full_disconnect) { 1212 + /* call event handler manually after processing wmi_call, 1213 + * to avoid deadlock - disconnect event handler acquires 1214 + * wil->mutex while it is already held here 1215 + */ 1216 + reason_code = le16_to_cpu(reply.evt.protocol_reason_status); 1296 1217 1297 - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", 1298 - reply.evt.bssid, reason_code, 1299 - reply.evt.disconnect_reason); 
1218 + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", 1219 + reply.evt.bssid, reason_code, 1220 + reply.evt.disconnect_reason); 1300 1221 1301 - wil->sinfo_gen++; 1302 - wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); 1303 - 1222 + wil->sinfo_gen++; 1223 + wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); 1224 + } 1304 1225 return 0; 1305 1226 } 1306 1227 ··· 1429 1348 id, wil->reply_id); 1430 1349 /* check if someone waits for this event */ 1431 1350 if (wil->reply_id && wil->reply_id == id) { 1432 - if (wil->reply_buf) { 1433 - memcpy(wil->reply_buf, wmi, 1434 - min(len, wil->reply_size)); 1435 - } else { 1436 - wmi_evt_call_handler(wil, id, evt_data, 1437 - len - sizeof(*wmi)); 1438 - } 1439 - wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); 1351 + WARN_ON(wil->reply_buf); 1352 + wmi_evt_call_handler(wil, id, evt_data, 1353 + len - sizeof(*wmi)); 1354 + wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", 1355 + __func__, id); 1440 1356 complete(&wil->wmi_call); 1441 1357 return; 1442 1358 }